diff --git a/whisper/whisper_base_finetuned/README.md b/whisper/whisper_base_finetuned/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4dc951a672dbff8701d7e29f5a9a7a3fee9b604c
--- /dev/null
+++ b/whisper/whisper_base_finetuned/README.md
@@ -0,0 +1,89 @@
+---
+tags:
+- generated_from_trainer
+datasets:
+- audiofolder
+metrics:
+- wer
+model-index:
+- name: whisper_base_finetuned
+ results:
+ - task:
+ name: Automatic Speech Recognition
+ type: automatic-speech-recognition
+ dataset:
+ name: audiofolder
+ type: audiofolder
+ config: default
+ split: validation
+ args: default
+ metrics:
+ - name: Wer
+ type: wer
+ value: 0.3192600084831479
+---
+
+[Visualize in Weights & Biases](https://wandb.ai/querying/huggingface/runs/1wtpwccg)
+# whisper_base_finetuned
+
+This model is a fine-tuned version of Whisper base (the accompanying config matches the openai/whisper-base architecture) on the audiofolder dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.3375
+- Wer: 0.3193
+
+## Model description
+
+A Whisper base-sized encoder-decoder model for automatic speech recognition: 6 encoder and 6 decoder layers, a model dimension of 512, 8 attention heads, and the 51,865-token multilingual Whisper vocabulary (see the accompanying `config.json`). SpecAugment is enabled during fine-tuning (`apply_spec_augment: true`, with time- and feature-mask probabilities of 0.05).
+
+## Intended uses & limitations
+
+The model transcribes 16 kHz audio processed in 30-second chunks (see `preprocessor_config.json`). Its generation config forces the transcribe task, so it is not set up for translation out of the box. The language and domain of the training corpus are not documented here, so accuracy on other distributions is untested.
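+
+A minimal inference sketch (the checkpoint directory and audio file below are placeholders, not artifacts shipped in this repository):
+
+```python
+# Hedged sketch: "whisper_base_finetuned" is assumed to be the local
+# checkpoint directory and "sample.wav" a placeholder 16 kHz recording.
+from transformers import pipeline
+
+asr = pipeline(
+    "automatic-speech-recognition",
+    model="whisper_base_finetuned",
+    chunk_length_s=30,  # matches chunk_length in preprocessor_config.json
+)
+print(asr("sample.wav")["text"])
+```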
+
+## Training and evaluation data
+
+The data was loaded from a local directory with the Hugging Face `audiofolder` loader; the underlying corpus is not documented in this card. From the trainer logs, each epoch covers 973 optimizer steps at an effective batch size of 64 (roughly 62k training examples), and evaluation runs on the validation split (about 5.9k examples, inferred from the eval runtime and throughput).
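+
+For reference, a sketch of how an `audiofolder` dataset is typically loaded (the directory layout below is an assumption; the actual path used for this run is not recorded):
+
+```python
+from datasets import Audio, load_dataset
+
+# Conventional audiofolder layout: data/ containing audio files plus a
+# metadata.csv with file_name/transcription columns (assumed, not recorded).
+ds = load_dataset("audiofolder", data_dir="data/")
+ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # 16 kHz, per the preprocessor config
+```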
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 1e-05
+- train_batch_size: 16
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 4
+- total_train_batch_size: 64
+- total_eval_batch_size: 32
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 500
+- num_epochs: 10
+- mixed_precision_training: Native AMP
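+
+For orientation, a hedged sketch of how these values map onto `Seq2SeqTrainingArguments` (the output directory is an assumption; the four-GPU data parallelism comes from the launcher, e.g. `torchrun --nproc_per_node 4`, not from an argument):
+
+```python
+from transformers import Seq2SeqTrainingArguments
+
+training_args = Seq2SeqTrainingArguments(
+    output_dir="./whisper_base_finetuned",  # assumed
+    learning_rate=1e-5,
+    per_device_train_batch_size=16,  # x4 GPUs -> total train batch size 64
+    per_device_eval_batch_size=8,    # x4 GPUs -> total eval batch size 32
+    seed=42,
+    lr_scheduler_type="linear",
+    warmup_steps=500,
+    num_train_epochs=10,
+    fp16=True,  # "Native AMP" mixed precision
+)
+```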
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Wer |
+|:-------------:|:-----:|:----:|:---------------:|:------:|
+| 0.4111 | 1.0 | 973 | 0.4590 | 0.4551 |
+| 0.4068 | 2.0 | 1946 | 0.3847 | 0.4812 |
+| 0.3617 | 3.0 | 2919 | 0.3585 | 0.4326 |
+| 0.3144 | 4.0 | 3892 | 0.3436 | 0.3594 |
+| 0.272 | 5.0 | 4865 | 0.3425 | 0.3639 |
+| 0.2246 | 6.0 | 5838 | 0.3371 | 0.3341 |
+| 0.1541 | 7.0 | 6811 | 0.3404 | 0.3377 |
+| 0.1387 | 8.0 | 7784 | 0.3370 | 0.3196 |
+| 0.1554 | 9.0 | 8757 | 0.3387 | 0.3113 |
+| 0.1692 | 10.0 | 9730 | 0.3375 | 0.3193 |
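+
+The Wer column is the word error rate on the validation split; a sketch of how it is typically computed with the `evaluate` library (the strings below are illustrative):
+
+```python
+import evaluate
+
+wer_metric = evaluate.load("wer")
+wer = wer_metric.compute(
+    predictions=["the cat sat on a mat"],
+    references=["the cat sat on the mat"],
+)
+print(wer)  # 1 substitution over 6 reference words ~= 0.1667
+```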
+
+
+### Framework versions
+
+- Transformers 4.42.0.dev0
+- PyTorch 2.2.1
+- Datasets 2.19.0
+- Tokenizers 0.19.1
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/config.json b/whisper/whisper_base_finetuned/checkpoint-1946/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..91728b7bc6c3a43bb11e0d161949a286ca009408
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/config.json
@@ -0,0 +1,52 @@
+{
+ "_name_or_path": "whisper_base_finetuned",
+ "activation_dropout": 0.0,
+ "activation_function": "gelu",
+ "apply_spec_augment": true,
+ "architectures": [
+ "WhisperForConditionalGeneration"
+ ],
+ "attention_dropout": 0.0,
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "classifier_proj_size": 256,
+ "d_model": 512,
+ "decoder_attention_heads": 8,
+ "decoder_ffn_dim": 2048,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 50258,
+ "dropout": 0.0,
+ "encoder_attention_heads": 8,
+ "encoder_ffn_dim": 2048,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.05,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "max_length": 448,
+ "max_source_positions": 1500,
+ "max_target_positions": 448,
+ "median_filter_width": 7,
+ "model_type": "whisper",
+ "num_hidden_layers": 6,
+ "num_mel_bins": 80,
+ "pad_token_id": 50257,
+ "scale_embedding": false,
+ "suppress_tokens": [],
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.0.dev0",
+ "use_cache": true,
+ "use_weighted_layer_sum": false,
+ "vocab_size": 51865
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/generation_config.json b/whisper/whisper_base_finetuned/checkpoint-1946/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ce877d310342bb057324d0dfcf6f83dc6055c1a
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/generation_config.json
@@ -0,0 +1,256 @@
+{
+ "alignment_heads": [
+ [
+ 3,
+ 1
+ ],
+ [
+ 4,
+ 2
+ ],
+ [
+ 4,
+ 3
+ ],
+ [
+ 4,
+ 7
+ ],
+ [
+ 5,
+ 1
+ ],
+ [
+ 5,
+ 2
+ ],
+ [
+ 5,
+ 4
+ ],
+ [
+ 5,
+ 6
+ ]
+ ],
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "decoder_start_token_id": 50258,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": [
+ [
+ 1,
+ null
+ ],
+ [
+ 2,
+ 50359
+ ]
+ ],
+ "is_multilingual": true,
+ "lang_to_id": {
+ "<|af|>": 50327,
+ "<|am|>": 50334,
+ "<|ar|>": 50272,
+ "<|as|>": 50350,
+ "<|az|>": 50304,
+ "<|ba|>": 50355,
+ "<|be|>": 50330,
+ "<|bg|>": 50292,
+ "<|bn|>": 50302,
+ "<|bo|>": 50347,
+ "<|br|>": 50309,
+ "<|bs|>": 50315,
+ "<|ca|>": 50270,
+ "<|cs|>": 50283,
+ "<|cy|>": 50297,
+ "<|da|>": 50285,
+ "<|de|>": 50261,
+ "<|el|>": 50281,
+ "<|en|>": 50259,
+ "<|es|>": 50262,
+ "<|et|>": 50307,
+ "<|eu|>": 50310,
+ "<|fa|>": 50300,
+ "<|fi|>": 50277,
+ "<|fo|>": 50338,
+ "<|fr|>": 50265,
+ "<|gl|>": 50319,
+ "<|gu|>": 50333,
+ "<|haw|>": 50352,
+ "<|ha|>": 50354,
+ "<|he|>": 50279,
+ "<|hi|>": 50276,
+ "<|hr|>": 50291,
+ "<|ht|>": 50339,
+ "<|hu|>": 50286,
+ "<|hy|>": 50312,
+ "<|id|>": 50275,
+ "<|is|>": 50311,
+ "<|it|>": 50274,
+ "<|ja|>": 50266,
+ "<|jw|>": 50356,
+ "<|ka|>": 50329,
+ "<|kk|>": 50316,
+ "<|km|>": 50323,
+ "<|kn|>": 50306,
+ "<|ko|>": 50264,
+ "<|la|>": 50294,
+ "<|lb|>": 50345,
+ "<|ln|>": 50353,
+ "<|lo|>": 50336,
+ "<|lt|>": 50293,
+ "<|lv|>": 50301,
+ "<|mg|>": 50349,
+ "<|mi|>": 50295,
+ "<|mk|>": 50308,
+ "<|ml|>": 50296,
+ "<|mn|>": 50314,
+ "<|mr|>": 50320,
+ "<|ms|>": 50282,
+ "<|mt|>": 50343,
+ "<|my|>": 50346,
+ "<|ne|>": 50313,
+ "<|nl|>": 50271,
+ "<|nn|>": 50342,
+ "<|no|>": 50288,
+ "<|oc|>": 50328,
+ "<|pa|>": 50321,
+ "<|pl|>": 50269,
+ "<|ps|>": 50340,
+ "<|pt|>": 50267,
+ "<|ro|>": 50284,
+ "<|ru|>": 50263,
+ "<|sa|>": 50344,
+ "<|sd|>": 50332,
+ "<|si|>": 50322,
+ "<|sk|>": 50298,
+ "<|sl|>": 50305,
+ "<|sn|>": 50324,
+ "<|so|>": 50326,
+ "<|sq|>": 50317,
+ "<|sr|>": 50303,
+ "<|su|>": 50357,
+ "<|sv|>": 50273,
+ "<|sw|>": 50318,
+ "<|ta|>": 50287,
+ "<|te|>": 50299,
+ "<|tg|>": 50331,
+ "<|th|>": 50289,
+ "<|tk|>": 50341,
+ "<|tl|>": 50348,
+ "<|tr|>": 50268,
+ "<|tt|>": 50351,
+ "<|uk|>": 50280,
+ "<|ur|>": 50290,
+ "<|uz|>": 50337,
+ "<|vi|>": 50278,
+ "<|yi|>": 50335,
+ "<|yo|>": 50325,
+ "<|zh|>": 50260
+ },
+ "max_initial_timestamp_index": 50,
+ "max_length": 448,
+ "no_timestamps_token_id": 50363,
+ "pad_token_id": 50257,
+ "prev_sot_token_id": 50361,
+ "return_timestamps": false,
+ "suppress_tokens": [
+ 1,
+ 2,
+ 7,
+ 8,
+ 9,
+ 10,
+ 14,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 31,
+ 58,
+ 59,
+ 60,
+ 61,
+ 62,
+ 63,
+ 90,
+ 91,
+ 92,
+ 93,
+ 359,
+ 503,
+ 522,
+ 542,
+ 873,
+ 893,
+ 902,
+ 918,
+ 922,
+ 931,
+ 1350,
+ 1853,
+ 1982,
+ 2460,
+ 2627,
+ 3246,
+ 3253,
+ 3268,
+ 3536,
+ 3846,
+ 3961,
+ 4183,
+ 4667,
+ 6585,
+ 6647,
+ 7273,
+ 9061,
+ 9383,
+ 10428,
+ 10929,
+ 11938,
+ 12033,
+ 12331,
+ 12562,
+ 13793,
+ 14157,
+ 14635,
+ 15265,
+ 15618,
+ 16553,
+ 16604,
+ 18362,
+ 18956,
+ 20075,
+ 21675,
+ 22520,
+ 26130,
+ 26161,
+ 26435,
+ 28279,
+ 29464,
+ 31650,
+ 32302,
+ 32470,
+ 36865,
+ 42863,
+ 47425,
+ 49870,
+ 50254,
+ 50258,
+ 50358,
+ 50359,
+ 50360,
+ 50361,
+ 50362
+ ],
+ "task_to_id": {
+ "transcribe": 50359,
+ "translate": 50358
+ },
+ "transformers_version": "4.42.0.dev0"
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/model.safetensors b/whisper/whisper_base_finetuned/checkpoint-1946/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d3ffdfb85687050faeb83213beaa74fff65de794
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd026348fe37281549fd7c6a808f086ca0fd97d58b2cacc1f727f1b30983206
+size 290403936
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/optimizer.pt b/whisper/whisper_base_finetuned/checkpoint-1946/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7e40da14e9976a0d6a0f63ba7bcad08369a4e853
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:553f15ba654f04fd871be21212d6df91de0ae8724da43efc4f15ac36ba17d344
+size 574811514
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/preprocessor_config.json b/whisper/whisper_base_finetuned/checkpoint-1946/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..91876762a536a746d268353c5cba57286e76b058
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/preprocessor_config.json
@@ -0,0 +1,14 @@
+{
+ "chunk_length": 30,
+ "feature_extractor_type": "WhisperFeatureExtractor",
+ "feature_size": 80,
+ "hop_length": 160,
+ "n_fft": 400,
+ "n_samples": 480000,
+ "nb_max_frames": 3000,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "WhisperProcessor",
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_0.pth b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e85eea7a525d710ba9857dc2024abefc4fd5f3ce
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a110c9d357e287a75aaf1239a21b93307ef274903bc341868b29e95bf582f6a
+size 14960
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_1.pth b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7f065db1d42192691d649a6f3b410e4182bf0929
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dc7a73450f09e1dfbf3d92b7766dd920803a16d72378745ade594e61b71ef99
+size 14960
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_2.pth b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..d572e386b045dba8ffedeefabdd36097b5947544
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0850a53101d32aee75186072819d788c275b8018ee81aca2b2e264027b3f706e
+size 14960
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_3.pth b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6e9a0fa9bb800b68146b141b223bf27cfe150008
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0f219c1ad064e6a471d6dd3b4c8e03edef564bd702d0376fe0265ca34089229
+size 14960
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/scheduler.pt b/whisper/whisper_base_finetuned/checkpoint-1946/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ef56ee237ccd93ea2d7530bbf9795010a21e3961
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9047ca18e71ad6d17bbd68121c15bff7b1e23d09f7900127f0bf720216251fbf
+size 1064
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/trainer_state.json b/whisper/whisper_base_finetuned/checkpoint-1946/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..001aaa593dfbbb51b2fd57b294ba01b0f5046ab5
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/trainer_state.json
@@ -0,0 +1,1409 @@
+{
+ "best_metric": 0.48120656465137523,
+ "best_model_checkpoint": "./whisper_base_finetuned/checkpoint-1946",
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 1946,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.010277492291880781,
+ "grad_norm": 78.41651916503906,
+ "learning_rate": 1.0000000000000001e-07,
+ "loss": 3.3505,
+ "step": 10
+ },
+ {
+ "epoch": 0.020554984583761562,
+ "grad_norm": 74.72834777832031,
+ "learning_rate": 3.0000000000000004e-07,
+ "loss": 3.2167,
+ "step": 20
+ },
+ {
+ "epoch": 0.030832476875642344,
+ "grad_norm": 48.560585021972656,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 2.9513,
+ "step": 30
+ },
+ {
+ "epoch": 0.041109969167523124,
+ "grad_norm": 30.085025787353516,
+ "learning_rate": 7.000000000000001e-07,
+ "loss": 2.6321,
+ "step": 40
+ },
+ {
+ "epoch": 0.051387461459403906,
+ "grad_norm": 21.896045684814453,
+ "learning_rate": 9.000000000000001e-07,
+ "loss": 2.376,
+ "step": 50
+ },
+ {
+ "epoch": 0.06166495375128469,
+ "grad_norm": 16.388851165771484,
+ "learning_rate": 1.1e-06,
+ "loss": 2.1473,
+ "step": 60
+ },
+ {
+ "epoch": 0.07194244604316546,
+ "grad_norm": 15.245757102966309,
+ "learning_rate": 1.3e-06,
+ "loss": 1.9096,
+ "step": 70
+ },
+ {
+ "epoch": 0.08221993833504625,
+ "grad_norm": 12.178542137145996,
+ "learning_rate": 1.5e-06,
+ "loss": 1.7493,
+ "step": 80
+ },
+ {
+ "epoch": 0.09249743062692703,
+ "grad_norm": 9.62636947631836,
+ "learning_rate": 1.7000000000000002e-06,
+ "loss": 1.5233,
+ "step": 90
+ },
+ {
+ "epoch": 0.10277492291880781,
+ "grad_norm": 9.04529094696045,
+ "learning_rate": 1.9000000000000002e-06,
+ "loss": 1.3753,
+ "step": 100
+ },
+ {
+ "epoch": 0.1130524152106886,
+ "grad_norm": 8.049132347106934,
+ "learning_rate": 2.1000000000000002e-06,
+ "loss": 1.2293,
+ "step": 110
+ },
+ {
+ "epoch": 0.12332990750256938,
+ "grad_norm": 7.2756147384643555,
+ "learning_rate": 2.3000000000000004e-06,
+ "loss": 1.1131,
+ "step": 120
+ },
+ {
+ "epoch": 0.13360739979445016,
+ "grad_norm": 7.049572944641113,
+ "learning_rate": 2.5e-06,
+ "loss": 1.1889,
+ "step": 130
+ },
+ {
+ "epoch": 0.14388489208633093,
+ "grad_norm": 6.429234981536865,
+ "learning_rate": 2.7000000000000004e-06,
+ "loss": 1.0655,
+ "step": 140
+ },
+ {
+ "epoch": 0.15416238437821173,
+ "grad_norm": 6.281942844390869,
+ "learning_rate": 2.9e-06,
+ "loss": 0.9696,
+ "step": 150
+ },
+ {
+ "epoch": 0.1644398766700925,
+ "grad_norm": 6.72721004486084,
+ "learning_rate": 3.1000000000000004e-06,
+ "loss": 1.0683,
+ "step": 160
+ },
+ {
+ "epoch": 0.1747173689619733,
+ "grad_norm": 5.773904800415039,
+ "learning_rate": 3.3000000000000006e-06,
+ "loss": 1.0132,
+ "step": 170
+ },
+ {
+ "epoch": 0.18499486125385406,
+ "grad_norm": 6.2021870613098145,
+ "learning_rate": 3.5e-06,
+ "loss": 0.9325,
+ "step": 180
+ },
+ {
+ "epoch": 0.19527235354573483,
+ "grad_norm": 6.268314838409424,
+ "learning_rate": 3.7e-06,
+ "loss": 0.8658,
+ "step": 190
+ },
+ {
+ "epoch": 0.20554984583761562,
+ "grad_norm": 6.014781951904297,
+ "learning_rate": 3.900000000000001e-06,
+ "loss": 0.9349,
+ "step": 200
+ },
+ {
+ "epoch": 0.2158273381294964,
+ "grad_norm": 7.0086188316345215,
+ "learning_rate": 4.1e-06,
+ "loss": 0.9347,
+ "step": 210
+ },
+ {
+ "epoch": 0.2261048304213772,
+ "grad_norm": 6.318301200866699,
+ "learning_rate": 4.3e-06,
+ "loss": 0.8023,
+ "step": 220
+ },
+ {
+ "epoch": 0.23638232271325796,
+ "grad_norm": 5.627261638641357,
+ "learning_rate": 4.5e-06,
+ "loss": 0.871,
+ "step": 230
+ },
+ {
+ "epoch": 0.24665981500513876,
+ "grad_norm": 5.793784141540527,
+ "learning_rate": 4.7e-06,
+ "loss": 0.8654,
+ "step": 240
+ },
+ {
+ "epoch": 0.2569373072970195,
+ "grad_norm": 5.449838638305664,
+ "learning_rate": 4.9000000000000005e-06,
+ "loss": 0.7499,
+ "step": 250
+ },
+ {
+ "epoch": 0.2672147995889003,
+ "grad_norm": 4.993557453155518,
+ "learning_rate": 5.1e-06,
+ "loss": 0.7281,
+ "step": 260
+ },
+ {
+ "epoch": 0.2774922918807811,
+ "grad_norm": 6.478201866149902,
+ "learning_rate": 5.300000000000001e-06,
+ "loss": 0.7543,
+ "step": 270
+ },
+ {
+ "epoch": 0.28776978417266186,
+ "grad_norm": 5.355103492736816,
+ "learning_rate": 5.500000000000001e-06,
+ "loss": 0.7545,
+ "step": 280
+ },
+ {
+ "epoch": 0.29804727646454265,
+ "grad_norm": 4.926327228546143,
+ "learning_rate": 5.7e-06,
+ "loss": 0.7397,
+ "step": 290
+ },
+ {
+ "epoch": 0.30832476875642345,
+ "grad_norm": 5.97158145904541,
+ "learning_rate": 5.9e-06,
+ "loss": 0.7262,
+ "step": 300
+ },
+ {
+ "epoch": 0.3186022610483042,
+ "grad_norm": 5.3029584884643555,
+ "learning_rate": 6.1e-06,
+ "loss": 0.7085,
+ "step": 310
+ },
+ {
+ "epoch": 0.328879753340185,
+ "grad_norm": 4.928351879119873,
+ "learning_rate": 6.300000000000001e-06,
+ "loss": 0.6726,
+ "step": 320
+ },
+ {
+ "epoch": 0.3391572456320658,
+ "grad_norm": 5.714938640594482,
+ "learning_rate": 6.5000000000000004e-06,
+ "loss": 0.6985,
+ "step": 330
+ },
+ {
+ "epoch": 0.3494347379239466,
+ "grad_norm": 4.394782543182373,
+ "learning_rate": 6.700000000000001e-06,
+ "loss": 0.6555,
+ "step": 340
+ },
+ {
+ "epoch": 0.3597122302158273,
+ "grad_norm": 4.944245338439941,
+ "learning_rate": 6.9e-06,
+ "loss": 0.6629,
+ "step": 350
+ },
+ {
+ "epoch": 0.3699897225077081,
+ "grad_norm": 5.048007965087891,
+ "learning_rate": 7.100000000000001e-06,
+ "loss": 0.65,
+ "step": 360
+ },
+ {
+ "epoch": 0.3802672147995889,
+ "grad_norm": 5.267819404602051,
+ "learning_rate": 7.3e-06,
+ "loss": 0.5775,
+ "step": 370
+ },
+ {
+ "epoch": 0.39054470709146966,
+ "grad_norm": 5.552557945251465,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.683,
+ "step": 380
+ },
+ {
+ "epoch": 0.40082219938335045,
+ "grad_norm": 4.37172269821167,
+ "learning_rate": 7.7e-06,
+ "loss": 0.5931,
+ "step": 390
+ },
+ {
+ "epoch": 0.41109969167523125,
+ "grad_norm": 5.033542156219482,
+ "learning_rate": 7.9e-06,
+ "loss": 0.6662,
+ "step": 400
+ },
+ {
+ "epoch": 0.42137718396711205,
+ "grad_norm": 4.7163190841674805,
+ "learning_rate": 8.1e-06,
+ "loss": 0.7605,
+ "step": 410
+ },
+ {
+ "epoch": 0.4316546762589928,
+ "grad_norm": 5.401676177978516,
+ "learning_rate": 8.3e-06,
+ "loss": 0.5939,
+ "step": 420
+ },
+ {
+ "epoch": 0.4419321685508736,
+ "grad_norm": 5.293227672576904,
+ "learning_rate": 8.5e-06,
+ "loss": 0.6061,
+ "step": 430
+ },
+ {
+ "epoch": 0.4522096608427544,
+ "grad_norm": 5.0345940589904785,
+ "learning_rate": 8.700000000000001e-06,
+ "loss": 0.5386,
+ "step": 440
+ },
+ {
+ "epoch": 0.4624871531346352,
+ "grad_norm": 4.590668678283691,
+ "learning_rate": 8.900000000000001e-06,
+ "loss": 0.5455,
+ "step": 450
+ },
+ {
+ "epoch": 0.4727646454265159,
+ "grad_norm": 5.621304512023926,
+ "learning_rate": 9.100000000000001e-06,
+ "loss": 0.514,
+ "step": 460
+ },
+ {
+ "epoch": 0.4830421377183967,
+ "grad_norm": 5.061606407165527,
+ "learning_rate": 9.3e-06,
+ "loss": 0.5945,
+ "step": 470
+ },
+ {
+ "epoch": 0.4933196300102775,
+ "grad_norm": 4.499748229980469,
+ "learning_rate": 9.5e-06,
+ "loss": 0.5241,
+ "step": 480
+ },
+ {
+ "epoch": 0.5035971223021583,
+ "grad_norm": 4.244873523712158,
+ "learning_rate": 9.7e-06,
+ "loss": 0.5179,
+ "step": 490
+ },
+ {
+ "epoch": 0.513874614594039,
+ "grad_norm": 5.057434558868408,
+ "learning_rate": 9.9e-06,
+ "loss": 0.5744,
+ "step": 500
+ },
+ {
+ "epoch": 0.5241521068859198,
+ "grad_norm": 5.502700328826904,
+ "learning_rate": 9.994582881906827e-06,
+ "loss": 0.5752,
+ "step": 510
+ },
+ {
+ "epoch": 0.5344295991778006,
+ "grad_norm": 4.976474285125732,
+ "learning_rate": 9.983748645720479e-06,
+ "loss": 0.6109,
+ "step": 520
+ },
+ {
+ "epoch": 0.5447070914696814,
+ "grad_norm": 4.148839473724365,
+ "learning_rate": 9.972914409534129e-06,
+ "loss": 0.5643,
+ "step": 530
+ },
+ {
+ "epoch": 0.5549845837615622,
+ "grad_norm": 3.930744171142578,
+ "learning_rate": 9.96208017334778e-06,
+ "loss": 0.4671,
+ "step": 540
+ },
+ {
+ "epoch": 0.5652620760534429,
+ "grad_norm": 5.741941928863525,
+ "learning_rate": 9.95124593716143e-06,
+ "loss": 0.5608,
+ "step": 550
+ },
+ {
+ "epoch": 0.5755395683453237,
+ "grad_norm": 4.145395755767822,
+ "learning_rate": 9.940411700975083e-06,
+ "loss": 0.4698,
+ "step": 560
+ },
+ {
+ "epoch": 0.5858170606372045,
+ "grad_norm": 4.2532830238342285,
+ "learning_rate": 9.929577464788733e-06,
+ "loss": 0.4084,
+ "step": 570
+ },
+ {
+ "epoch": 0.5960945529290853,
+ "grad_norm": 4.680564880371094,
+ "learning_rate": 9.918743228602385e-06,
+ "loss": 0.6091,
+ "step": 580
+ },
+ {
+ "epoch": 0.6063720452209661,
+ "grad_norm": 3.8156168460845947,
+ "learning_rate": 9.907908992416035e-06,
+ "loss": 0.5567,
+ "step": 590
+ },
+ {
+ "epoch": 0.6166495375128469,
+ "grad_norm": 4.722325801849365,
+ "learning_rate": 9.897074756229687e-06,
+ "loss": 0.5543,
+ "step": 600
+ },
+ {
+ "epoch": 0.6269270298047277,
+ "grad_norm": 5.177743911743164,
+ "learning_rate": 9.886240520043338e-06,
+ "loss": 0.477,
+ "step": 610
+ },
+ {
+ "epoch": 0.6372045220966084,
+ "grad_norm": 4.9859209060668945,
+ "learning_rate": 9.875406283856989e-06,
+ "loss": 0.4592,
+ "step": 620
+ },
+ {
+ "epoch": 0.6474820143884892,
+ "grad_norm": 4.872037887573242,
+ "learning_rate": 9.86457204767064e-06,
+ "loss": 0.5632,
+ "step": 630
+ },
+ {
+ "epoch": 0.65775950668037,
+ "grad_norm": 4.967211723327637,
+ "learning_rate": 9.85373781148429e-06,
+ "loss": 0.5553,
+ "step": 640
+ },
+ {
+ "epoch": 0.6680369989722508,
+ "grad_norm": 4.748555660247803,
+ "learning_rate": 9.842903575297942e-06,
+ "loss": 0.5228,
+ "step": 650
+ },
+ {
+ "epoch": 0.6783144912641316,
+ "grad_norm": 4.945960521697998,
+ "learning_rate": 9.832069339111592e-06,
+ "loss": 0.5018,
+ "step": 660
+ },
+ {
+ "epoch": 0.6885919835560124,
+ "grad_norm": 3.7931437492370605,
+ "learning_rate": 9.821235102925244e-06,
+ "loss": 0.5347,
+ "step": 670
+ },
+ {
+ "epoch": 0.6988694758478932,
+ "grad_norm": 4.149494171142578,
+ "learning_rate": 9.810400866738896e-06,
+ "loss": 0.413,
+ "step": 680
+ },
+ {
+ "epoch": 0.7091469681397738,
+ "grad_norm": 4.979891300201416,
+ "learning_rate": 9.799566630552548e-06,
+ "loss": 0.4836,
+ "step": 690
+ },
+ {
+ "epoch": 0.7194244604316546,
+ "grad_norm": 5.043586730957031,
+ "learning_rate": 9.788732394366198e-06,
+ "loss": 0.5286,
+ "step": 700
+ },
+ {
+ "epoch": 0.7297019527235354,
+ "grad_norm": 4.017364978790283,
+ "learning_rate": 9.77789815817985e-06,
+ "loss": 0.3785,
+ "step": 710
+ },
+ {
+ "epoch": 0.7399794450154162,
+ "grad_norm": 4.4453959465026855,
+ "learning_rate": 9.7670639219935e-06,
+ "loss": 0.52,
+ "step": 720
+ },
+ {
+ "epoch": 0.750256937307297,
+ "grad_norm": 4.624840259552002,
+ "learning_rate": 9.756229685807152e-06,
+ "loss": 0.5339,
+ "step": 730
+ },
+ {
+ "epoch": 0.7605344295991778,
+ "grad_norm": 4.6119771003723145,
+ "learning_rate": 9.745395449620802e-06,
+ "loss": 0.4857,
+ "step": 740
+ },
+ {
+ "epoch": 0.7708119218910586,
+ "grad_norm": 4.147925853729248,
+ "learning_rate": 9.734561213434454e-06,
+ "loss": 0.4363,
+ "step": 750
+ },
+ {
+ "epoch": 0.7810894141829393,
+ "grad_norm": 5.529519557952881,
+ "learning_rate": 9.723726977248104e-06,
+ "loss": 0.5206,
+ "step": 760
+ },
+ {
+ "epoch": 0.7913669064748201,
+ "grad_norm": 3.9015376567840576,
+ "learning_rate": 9.712892741061756e-06,
+ "loss": 0.4836,
+ "step": 770
+ },
+ {
+ "epoch": 0.8016443987667009,
+ "grad_norm": 4.5102057456970215,
+ "learning_rate": 9.702058504875406e-06,
+ "loss": 0.4437,
+ "step": 780
+ },
+ {
+ "epoch": 0.8119218910585817,
+ "grad_norm": 5.272336006164551,
+ "learning_rate": 9.691224268689058e-06,
+ "loss": 0.4402,
+ "step": 790
+ },
+ {
+ "epoch": 0.8221993833504625,
+ "grad_norm": 4.404648303985596,
+ "learning_rate": 9.68039003250271e-06,
+ "loss": 0.4443,
+ "step": 800
+ },
+ {
+ "epoch": 0.8324768756423433,
+ "grad_norm": 4.636880397796631,
+ "learning_rate": 9.66955579631636e-06,
+ "loss": 0.4943,
+ "step": 810
+ },
+ {
+ "epoch": 0.8427543679342241,
+ "grad_norm": 4.826484203338623,
+ "learning_rate": 9.658721560130012e-06,
+ "loss": 0.5385,
+ "step": 820
+ },
+ {
+ "epoch": 0.8530318602261048,
+ "grad_norm": 4.46310567855835,
+ "learning_rate": 9.647887323943664e-06,
+ "loss": 0.413,
+ "step": 830
+ },
+ {
+ "epoch": 0.8633093525179856,
+ "grad_norm": 4.603589057922363,
+ "learning_rate": 9.637053087757314e-06,
+ "loss": 0.4801,
+ "step": 840
+ },
+ {
+ "epoch": 0.8735868448098664,
+ "grad_norm": 3.7884819507598877,
+ "learning_rate": 9.626218851570966e-06,
+ "loss": 0.4294,
+ "step": 850
+ },
+ {
+ "epoch": 0.8838643371017472,
+ "grad_norm": 3.0480997562408447,
+ "learning_rate": 9.615384615384616e-06,
+ "loss": 0.4737,
+ "step": 860
+ },
+ {
+ "epoch": 0.894141829393628,
+ "grad_norm": 4.840622901916504,
+ "learning_rate": 9.604550379198268e-06,
+ "loss": 0.4806,
+ "step": 870
+ },
+ {
+ "epoch": 0.9044193216855088,
+ "grad_norm": 4.64235782623291,
+ "learning_rate": 9.59371614301192e-06,
+ "loss": 0.5359,
+ "step": 880
+ },
+ {
+ "epoch": 0.9146968139773896,
+ "grad_norm": 4.615347385406494,
+ "learning_rate": 9.58288190682557e-06,
+ "loss": 0.4526,
+ "step": 890
+ },
+ {
+ "epoch": 0.9249743062692704,
+ "grad_norm": 4.345542907714844,
+ "learning_rate": 9.572047670639221e-06,
+ "loss": 0.5112,
+ "step": 900
+ },
+ {
+ "epoch": 0.935251798561151,
+ "grad_norm": 3.5318965911865234,
+ "learning_rate": 9.561213434452872e-06,
+ "loss": 0.455,
+ "step": 910
+ },
+ {
+ "epoch": 0.9455292908530318,
+ "grad_norm": 4.852155685424805,
+ "learning_rate": 9.550379198266523e-06,
+ "loss": 0.5012,
+ "step": 920
+ },
+ {
+ "epoch": 0.9558067831449126,
+ "grad_norm": 4.666072368621826,
+ "learning_rate": 9.539544962080174e-06,
+ "loss": 0.4781,
+ "step": 930
+ },
+ {
+ "epoch": 0.9660842754367934,
+ "grad_norm": 4.7242865562438965,
+ "learning_rate": 9.528710725893825e-06,
+ "loss": 0.5102,
+ "step": 940
+ },
+ {
+ "epoch": 0.9763617677286742,
+ "grad_norm": 3.9831533432006836,
+ "learning_rate": 9.517876489707475e-06,
+ "loss": 0.416,
+ "step": 950
+ },
+ {
+ "epoch": 0.986639260020555,
+ "grad_norm": 4.294024467468262,
+ "learning_rate": 9.507042253521127e-06,
+ "loss": 0.4254,
+ "step": 960
+ },
+ {
+ "epoch": 0.9969167523124358,
+ "grad_norm": 4.132877826690674,
+ "learning_rate": 9.496208017334777e-06,
+ "loss": 0.4111,
+ "step": 970
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.4590415954589844,
+ "eval_runtime": 1119.5991,
+ "eval_samples_per_second": 5.241,
+ "eval_steps_per_second": 0.164,
+ "eval_wer": 0.45510457111161867,
+ "step": 973
+ },
+ {
+ "epoch": 1.0071942446043165,
+ "grad_norm": 3.725668430328369,
+ "learning_rate": 9.485373781148431e-06,
+ "loss": 0.3556,
+ "step": 980
+ },
+ {
+ "epoch": 1.0174717368961974,
+ "grad_norm": 4.162373065948486,
+ "learning_rate": 9.474539544962081e-06,
+ "loss": 0.3531,
+ "step": 990
+ },
+ {
+ "epoch": 1.027749229188078,
+ "grad_norm": 3.697767734527588,
+ "learning_rate": 9.463705308775733e-06,
+ "loss": 0.3627,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0380267214799588,
+ "grad_norm": 4.862727642059326,
+ "learning_rate": 9.452871072589383e-06,
+ "loss": 0.4393,
+ "step": 1010
+ },
+ {
+ "epoch": 1.0483042137718397,
+ "grad_norm": 4.021687030792236,
+ "learning_rate": 9.442036836403035e-06,
+ "loss": 0.4544,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0585817060637204,
+ "grad_norm": 3.82734751701355,
+ "learning_rate": 9.431202600216685e-06,
+ "loss": 0.3937,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0688591983556013,
+ "grad_norm": 3.5762205123901367,
+ "learning_rate": 9.420368364030337e-06,
+ "loss": 0.4722,
+ "step": 1040
+ },
+ {
+ "epoch": 1.079136690647482,
+ "grad_norm": 4.2156147956848145,
+ "learning_rate": 9.409534127843987e-06,
+ "loss": 0.3398,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0894141829393629,
+ "grad_norm": 4.575065612792969,
+ "learning_rate": 9.398699891657639e-06,
+ "loss": 0.3457,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0996916752312436,
+ "grad_norm": 3.8678557872772217,
+ "learning_rate": 9.387865655471289e-06,
+ "loss": 0.3178,
+ "step": 1070
+ },
+ {
+ "epoch": 1.1099691675231242,
+ "grad_norm": 4.01522970199585,
+ "learning_rate": 9.377031419284941e-06,
+ "loss": 0.4911,
+ "step": 1080
+ },
+ {
+ "epoch": 1.1202466598150052,
+ "grad_norm": 4.648536205291748,
+ "learning_rate": 9.366197183098593e-06,
+ "loss": 0.4531,
+ "step": 1090
+ },
+ {
+ "epoch": 1.1305241521068858,
+ "grad_norm": 4.106440544128418,
+ "learning_rate": 9.355362946912243e-06,
+ "loss": 0.3942,
+ "step": 1100
+ },
+ {
+ "epoch": 1.1408016443987667,
+ "grad_norm": 4.35145378112793,
+ "learning_rate": 9.344528710725895e-06,
+ "loss": 0.4071,
+ "step": 1110
+ },
+ {
+ "epoch": 1.1510791366906474,
+ "grad_norm": 4.132904529571533,
+ "learning_rate": 9.333694474539545e-06,
+ "loss": 0.4148,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1613566289825283,
+ "grad_norm": 3.6607720851898193,
+ "learning_rate": 9.322860238353197e-06,
+ "loss": 0.298,
+ "step": 1130
+ },
+ {
+ "epoch": 1.171634121274409,
+ "grad_norm": 3.625558376312256,
+ "learning_rate": 9.312026002166849e-06,
+ "loss": 0.4097,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1819116135662897,
+ "grad_norm": 4.5726494789123535,
+ "learning_rate": 9.301191765980499e-06,
+ "loss": 0.3953,
+ "step": 1150
+ },
+ {
+ "epoch": 1.1921891058581706,
+ "grad_norm": 4.445627212524414,
+ "learning_rate": 9.29035752979415e-06,
+ "loss": 0.3835,
+ "step": 1160
+ },
+ {
+ "epoch": 1.2024665981500513,
+ "grad_norm": 4.57354211807251,
+ "learning_rate": 9.279523293607802e-06,
+ "loss": 0.3832,
+ "step": 1170
+ },
+ {
+ "epoch": 1.2127440904419322,
+ "grad_norm": 4.104644775390625,
+ "learning_rate": 9.268689057421453e-06,
+ "loss": 0.416,
+ "step": 1180
+ },
+ {
+ "epoch": 1.223021582733813,
+ "grad_norm": 4.074865818023682,
+ "learning_rate": 9.257854821235104e-06,
+ "loss": 0.3608,
+ "step": 1190
+ },
+ {
+ "epoch": 1.2332990750256938,
+ "grad_norm": 3.4953205585479736,
+ "learning_rate": 9.247020585048755e-06,
+ "loss": 0.3351,
+ "step": 1200
+ },
+ {
+ "epoch": 1.2435765673175745,
+ "grad_norm": 4.539699077606201,
+ "learning_rate": 9.236186348862406e-06,
+ "loss": 0.4058,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2538540596094552,
+ "grad_norm": 4.369785308837891,
+ "learning_rate": 9.225352112676057e-06,
+ "loss": 0.3412,
+ "step": 1220
+ },
+ {
+ "epoch": 1.264131551901336,
+ "grad_norm": 3.9678955078125,
+ "learning_rate": 9.214517876489708e-06,
+ "loss": 0.3832,
+ "step": 1230
+ },
+ {
+ "epoch": 1.274409044193217,
+ "grad_norm": 4.361431121826172,
+ "learning_rate": 9.203683640303359e-06,
+ "loss": 0.4099,
+ "step": 1240
+ },
+ {
+ "epoch": 1.2846865364850977,
+ "grad_norm": 4.076035022735596,
+ "learning_rate": 9.19284940411701e-06,
+ "loss": 0.4206,
+ "step": 1250
+ },
+ {
+ "epoch": 1.2949640287769784,
+ "grad_norm": 4.084390640258789,
+ "learning_rate": 9.18201516793066e-06,
+ "loss": 0.4271,
+ "step": 1260
+ },
+ {
+ "epoch": 1.3052415210688593,
+ "grad_norm": 3.262382745742798,
+ "learning_rate": 9.171180931744312e-06,
+ "loss": 0.3687,
+ "step": 1270
+ },
+ {
+ "epoch": 1.31551901336074,
+ "grad_norm": 4.841338634490967,
+ "learning_rate": 9.160346695557964e-06,
+ "loss": 0.3659,
+ "step": 1280
+ },
+ {
+ "epoch": 1.3257965056526206,
+ "grad_norm": 4.267407417297363,
+ "learning_rate": 9.149512459371616e-06,
+ "loss": 0.3784,
+ "step": 1290
+ },
+ {
+ "epoch": 1.3360739979445015,
+ "grad_norm": 3.525167465209961,
+ "learning_rate": 9.138678223185266e-06,
+ "loss": 0.3593,
+ "step": 1300
+ },
+ {
+ "epoch": 1.3463514902363825,
+ "grad_norm": 4.050453186035156,
+ "learning_rate": 9.127843986998918e-06,
+ "loss": 0.3737,
+ "step": 1310
+ },
+ {
+ "epoch": 1.3566289825282631,
+ "grad_norm": 3.8488476276397705,
+ "learning_rate": 9.117009750812568e-06,
+ "loss": 0.417,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3669064748201438,
+ "grad_norm": 4.5935540199279785,
+ "learning_rate": 9.10617551462622e-06,
+ "loss": 0.4056,
+ "step": 1330
+ },
+ {
+ "epoch": 1.3771839671120247,
+ "grad_norm": 4.8325581550598145,
+ "learning_rate": 9.09534127843987e-06,
+ "loss": 0.4208,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3874614594039054,
+ "grad_norm": 4.0957818031311035,
+ "learning_rate": 9.084507042253522e-06,
+ "loss": 0.408,
+ "step": 1350
+ },
+ {
+ "epoch": 1.397738951695786,
+ "grad_norm": 5.2503767013549805,
+ "learning_rate": 9.073672806067174e-06,
+ "loss": 0.4164,
+ "step": 1360
+ },
+ {
+ "epoch": 1.408016443987667,
+ "grad_norm": 3.5155766010284424,
+ "learning_rate": 9.062838569880824e-06,
+ "loss": 0.3548,
+ "step": 1370
+ },
+ {
+ "epoch": 1.418293936279548,
+ "grad_norm": 3.994086503982544,
+ "learning_rate": 9.052004333694476e-06,
+ "loss": 0.4025,
+ "step": 1380
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 5.403133392333984,
+ "learning_rate": 9.041170097508126e-06,
+ "loss": 0.4171,
+ "step": 1390
+ },
+ {
+ "epoch": 1.4388489208633093,
+ "grad_norm": 3.069812774658203,
+ "learning_rate": 9.030335861321778e-06,
+ "loss": 0.3431,
+ "step": 1400
+ },
+ {
+ "epoch": 1.4491264131551902,
+ "grad_norm": 3.9196174144744873,
+ "learning_rate": 9.019501625135428e-06,
+ "loss": 0.4507,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4594039054470709,
+ "grad_norm": 3.6087749004364014,
+ "learning_rate": 9.00866738894908e-06,
+ "loss": 0.336,
+ "step": 1420
+ },
+ {
+ "epoch": 1.4696813977389516,
+ "grad_norm": 4.544300079345703,
+ "learning_rate": 8.99783315276273e-06,
+ "loss": 0.4188,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4799588900308325,
+ "grad_norm": 3.7331314086914062,
+ "learning_rate": 8.986998916576382e-06,
+ "loss": 0.4092,
+ "step": 1440
+ },
+ {
+ "epoch": 1.4902363823227134,
+ "grad_norm": 4.780219078063965,
+ "learning_rate": 8.976164680390034e-06,
+ "loss": 0.3587,
+ "step": 1450
+ },
+ {
+ "epoch": 1.500513874614594,
+ "grad_norm": 4.3289690017700195,
+ "learning_rate": 8.965330444203685e-06,
+ "loss": 0.3504,
+ "step": 1460
+ },
+ {
+ "epoch": 1.5107913669064748,
+ "grad_norm": 4.810067653656006,
+ "learning_rate": 8.954496208017336e-06,
+ "loss": 0.3261,
+ "step": 1470
+ },
+ {
+ "epoch": 1.5210688591983557,
+ "grad_norm": 4.626963138580322,
+ "learning_rate": 8.943661971830987e-06,
+ "loss": 0.41,
+ "step": 1480
+ },
+ {
+ "epoch": 1.5313463514902363,
+ "grad_norm": 4.636073589324951,
+ "learning_rate": 8.932827735644638e-06,
+ "loss": 0.3462,
+ "step": 1490
+ },
+ {
+ "epoch": 1.541623843782117,
+ "grad_norm": 4.311732769012451,
+ "learning_rate": 8.92199349945829e-06,
+ "loss": 0.3888,
+ "step": 1500
+ },
+ {
+ "epoch": 1.551901336073998,
+ "grad_norm": 4.3718461990356445,
+ "learning_rate": 8.91115926327194e-06,
+ "loss": 0.3813,
+ "step": 1510
+ },
+ {
+ "epoch": 1.5621788283658788,
+ "grad_norm": 3.1939361095428467,
+ "learning_rate": 8.900325027085591e-06,
+ "loss": 0.4208,
+ "step": 1520
+ },
+ {
+ "epoch": 1.5724563206577595,
+ "grad_norm": 4.095200538635254,
+ "learning_rate": 8.889490790899242e-06,
+ "loss": 0.3845,
+ "step": 1530
+ },
+ {
+ "epoch": 1.5827338129496402,
+ "grad_norm": 3.5258431434631348,
+ "learning_rate": 8.878656554712893e-06,
+ "loss": 0.3284,
+ "step": 1540
+ },
+ {
+ "epoch": 1.5930113052415211,
+ "grad_norm": 3.1825735569000244,
+ "learning_rate": 8.867822318526545e-06,
+ "loss": 0.3385,
+ "step": 1550
+ },
+ {
+ "epoch": 1.6032887975334018,
+ "grad_norm": 3.5939745903015137,
+ "learning_rate": 8.856988082340195e-06,
+ "loss": 0.4101,
+ "step": 1560
+ },
+ {
+ "epoch": 1.6135662898252825,
+ "grad_norm": 4.911982536315918,
+ "learning_rate": 8.846153846153847e-06,
+ "loss": 0.4328,
+ "step": 1570
+ },
+ {
+ "epoch": 1.6238437821171634,
+ "grad_norm": 3.6301517486572266,
+ "learning_rate": 8.835319609967497e-06,
+ "loss": 0.3399,
+ "step": 1580
+ },
+ {
+ "epoch": 1.6341212744090443,
+ "grad_norm": 3.6853671073913574,
+ "learning_rate": 8.82448537378115e-06,
+ "loss": 0.3271,
+ "step": 1590
+ },
+ {
+ "epoch": 1.644398766700925,
+ "grad_norm": 3.029378652572632,
+ "learning_rate": 8.8136511375948e-06,
+ "loss": 0.3638,
+ "step": 1600
+ },
+ {
+ "epoch": 1.6546762589928057,
+ "grad_norm": 4.740921497344971,
+ "learning_rate": 8.802816901408451e-06,
+ "loss": 0.2964,
+ "step": 1610
+ },
+ {
+ "epoch": 1.6649537512846866,
+ "grad_norm": 4.348399639129639,
+ "learning_rate": 8.791982665222103e-06,
+ "loss": 0.364,
+ "step": 1620
+ },
+ {
+ "epoch": 1.6752312435765673,
+ "grad_norm": 4.521662712097168,
+ "learning_rate": 8.781148429035755e-06,
+ "loss": 0.3946,
+ "step": 1630
+ },
+ {
+ "epoch": 1.685508735868448,
+ "grad_norm": 4.327390670776367,
+ "learning_rate": 8.770314192849405e-06,
+ "loss": 0.3325,
+ "step": 1640
+ },
+ {
+ "epoch": 1.6957862281603289,
+ "grad_norm": 4.260695934295654,
+ "learning_rate": 8.759479956663057e-06,
+ "loss": 0.4112,
+ "step": 1650
+ },
+ {
+ "epoch": 1.7060637204522098,
+ "grad_norm": 3.723114490509033,
+ "learning_rate": 8.748645720476707e-06,
+ "loss": 0.3777,
+ "step": 1660
+ },
+ {
+ "epoch": 1.7163412127440905,
+ "grad_norm": 3.6276798248291016,
+ "learning_rate": 8.737811484290359e-06,
+ "loss": 0.4307,
+ "step": 1670
+ },
+ {
+ "epoch": 1.7266187050359711,
+ "grad_norm": 4.2474446296691895,
+ "learning_rate": 8.726977248104009e-06,
+ "loss": 0.3451,
+ "step": 1680
+ },
+ {
+ "epoch": 1.736896197327852,
+ "grad_norm": 4.7757368087768555,
+ "learning_rate": 8.71614301191766e-06,
+ "loss": 0.3528,
+ "step": 1690
+ },
+ {
+ "epoch": 1.7471736896197327,
+ "grad_norm": 3.768132209777832,
+ "learning_rate": 8.705308775731311e-06,
+ "loss": 0.4086,
+ "step": 1700
+ },
+ {
+ "epoch": 1.7574511819116134,
+ "grad_norm": 4.793600559234619,
+ "learning_rate": 8.694474539544963e-06,
+ "loss": 0.414,
+ "step": 1710
+ },
+ {
+ "epoch": 1.7677286742034943,
+ "grad_norm": 4.651284217834473,
+ "learning_rate": 8.683640303358613e-06,
+ "loss": 0.3424,
+ "step": 1720
+ },
+ {
+ "epoch": 1.7780061664953752,
+ "grad_norm": 3.656557083129883,
+ "learning_rate": 8.672806067172265e-06,
+ "loss": 0.2813,
+ "step": 1730
+ },
+ {
+ "epoch": 1.788283658787256,
+ "grad_norm": 3.836421012878418,
+ "learning_rate": 8.661971830985915e-06,
+ "loss": 0.3834,
+ "step": 1740
+ },
+ {
+ "epoch": 1.7985611510791366,
+ "grad_norm": 4.500270843505859,
+ "learning_rate": 8.651137594799567e-06,
+ "loss": 0.337,
+ "step": 1750
+ },
+ {
+ "epoch": 1.8088386433710175,
+ "grad_norm": 3.3500618934631348,
+ "learning_rate": 8.640303358613219e-06,
+ "loss": 0.393,
+ "step": 1760
+ },
+ {
+ "epoch": 1.8191161356628982,
+ "grad_norm": 3.52258563041687,
+ "learning_rate": 8.62946912242687e-06,
+ "loss": 0.397,
+ "step": 1770
+ },
+ {
+ "epoch": 1.829393627954779,
+ "grad_norm": 4.57402229309082,
+ "learning_rate": 8.61863488624052e-06,
+ "loss": 0.4048,
+ "step": 1780
+ },
+ {
+ "epoch": 1.8396711202466598,
+ "grad_norm": 3.952526092529297,
+ "learning_rate": 8.607800650054172e-06,
+ "loss": 0.316,
+ "step": 1790
+ },
+ {
+ "epoch": 1.8499486125385407,
+ "grad_norm": 4.35211706161499,
+ "learning_rate": 8.596966413867823e-06,
+ "loss": 0.4373,
+ "step": 1800
+ },
+ {
+ "epoch": 1.8602261048304214,
+ "grad_norm": 3.061844825744629,
+ "learning_rate": 8.586132177681474e-06,
+ "loss": 0.2665,
+ "step": 1810
+ },
+ {
+ "epoch": 1.870503597122302,
+ "grad_norm": 3.6002986431121826,
+ "learning_rate": 8.575297941495125e-06,
+ "loss": 0.3807,
+ "step": 1820
+ },
+ {
+ "epoch": 1.880781089414183,
+ "grad_norm": 4.012722492218018,
+ "learning_rate": 8.564463705308776e-06,
+ "loss": 0.376,
+ "step": 1830
+ },
+ {
+ "epoch": 1.8910585817060637,
+ "grad_norm": 3.516463041305542,
+ "learning_rate": 8.553629469122428e-06,
+ "loss": 0.3497,
+ "step": 1840
+ },
+ {
+ "epoch": 1.9013360739979444,
+ "grad_norm": 4.711485385894775,
+ "learning_rate": 8.542795232936078e-06,
+ "loss": 0.2968,
+ "step": 1850
+ },
+ {
+ "epoch": 1.9116135662898253,
+ "grad_norm": 3.4084625244140625,
+ "learning_rate": 8.53196099674973e-06,
+ "loss": 0.3475,
+ "step": 1860
+ },
+ {
+ "epoch": 1.9218910585817062,
+ "grad_norm": 3.9419453144073486,
+ "learning_rate": 8.52112676056338e-06,
+ "loss": 0.342,
+ "step": 1870
+ },
+ {
+ "epoch": 1.9321685508735869,
+ "grad_norm": 3.4985804557800293,
+ "learning_rate": 8.510292524377032e-06,
+ "loss": 0.3805,
+ "step": 1880
+ },
+ {
+ "epoch": 1.9424460431654675,
+ "grad_norm": 4.257175445556641,
+ "learning_rate": 8.499458288190682e-06,
+ "loss": 0.3727,
+ "step": 1890
+ },
+ {
+ "epoch": 1.9527235354573484,
+ "grad_norm": 3.3339366912841797,
+ "learning_rate": 8.488624052004334e-06,
+ "loss": 0.3684,
+ "step": 1900
+ },
+ {
+ "epoch": 1.9630010277492291,
+ "grad_norm": 4.362495422363281,
+ "learning_rate": 8.477789815817984e-06,
+ "loss": 0.3216,
+ "step": 1910
+ },
+ {
+ "epoch": 1.9732785200411098,
+ "grad_norm": 4.4011549949646,
+ "learning_rate": 8.466955579631638e-06,
+ "loss": 0.3437,
+ "step": 1920
+ },
+ {
+ "epoch": 1.9835560123329907,
+ "grad_norm": 3.513015031814575,
+ "learning_rate": 8.456121343445288e-06,
+ "loss": 0.3621,
+ "step": 1930
+ },
+ {
+ "epoch": 1.9938335046248716,
+ "grad_norm": 3.66763973236084,
+ "learning_rate": 8.44528710725894e-06,
+ "loss": 0.4068,
+ "step": 1940
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.38473397493362427,
+ "eval_runtime": 2272.3326,
+ "eval_samples_per_second": 2.582,
+ "eval_steps_per_second": 0.081,
+ "eval_wer": 0.48120656465137523,
+ "step": 1946
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 9730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 8.077923711992201e+18,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-1946/training_args.bin b/whisper/whisper_base_finetuned/checkpoint-1946/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cec0038665d32391824dfe472a35578679380744
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-1946/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e2cc97af116b33d30c72667d46ddd426569f5f483ad8392e19d95860dfcc43
+size 5240
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/config.json b/whisper/whisper_base_finetuned/checkpoint-9730/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..91728b7bc6c3a43bb11e0d161949a286ca009408
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/config.json
@@ -0,0 +1,52 @@
+{
+ "_name_or_path": "whisper_base_finetuned",
+ "activation_dropout": 0.0,
+ "activation_function": "gelu",
+ "apply_spec_augment": true,
+ "architectures": [
+ "WhisperForConditionalGeneration"
+ ],
+ "attention_dropout": 0.0,
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "classifier_proj_size": 256,
+ "d_model": 512,
+ "decoder_attention_heads": 8,
+ "decoder_ffn_dim": 2048,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 50258,
+ "dropout": 0.0,
+ "encoder_attention_heads": 8,
+ "encoder_ffn_dim": 2048,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.05,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "max_length": 448,
+ "max_source_positions": 1500,
+ "max_target_positions": 448,
+ "median_filter_width": 7,
+ "model_type": "whisper",
+ "num_hidden_layers": 6,
+ "num_mel_bins": 80,
+ "pad_token_id": 50257,
+ "scale_embedding": false,
+ "suppress_tokens": [],
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.0.dev0",
+ "use_cache": true,
+ "use_weighted_layer_sum": false,
+ "vocab_size": 51865
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/generation_config.json b/whisper/whisper_base_finetuned/checkpoint-9730/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ce877d310342bb057324d0dfcf6f83dc6055c1a
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/generation_config.json
@@ -0,0 +1,256 @@
+{
+ "alignment_heads": [
+ [
+ 3,
+ 1
+ ],
+ [
+ 4,
+ 2
+ ],
+ [
+ 4,
+ 3
+ ],
+ [
+ 4,
+ 7
+ ],
+ [
+ 5,
+ 1
+ ],
+ [
+ 5,
+ 2
+ ],
+ [
+ 5,
+ 4
+ ],
+ [
+ 5,
+ 6
+ ]
+ ],
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "decoder_start_token_id": 50258,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": [
+ [
+ 1,
+ null
+ ],
+ [
+ 2,
+ 50359
+ ]
+ ],
+ "is_multilingual": true,
+ "lang_to_id": {
+ "<|af|>": 50327,
+ "<|am|>": 50334,
+ "<|ar|>": 50272,
+ "<|as|>": 50350,
+ "<|az|>": 50304,
+ "<|ba|>": 50355,
+ "<|be|>": 50330,
+ "<|bg|>": 50292,
+ "<|bn|>": 50302,
+ "<|bo|>": 50347,
+ "<|br|>": 50309,
+ "<|bs|>": 50315,
+ "<|ca|>": 50270,
+ "<|cs|>": 50283,
+ "<|cy|>": 50297,
+ "<|da|>": 50285,
+ "<|de|>": 50261,
+ "<|el|>": 50281,
+ "<|en|>": 50259,
+ "<|es|>": 50262,
+ "<|et|>": 50307,
+ "<|eu|>": 50310,
+ "<|fa|>": 50300,
+ "<|fi|>": 50277,
+ "<|fo|>": 50338,
+ "<|fr|>": 50265,
+ "<|gl|>": 50319,
+ "<|gu|>": 50333,
+ "<|haw|>": 50352,
+ "<|ha|>": 50354,
+ "<|he|>": 50279,
+ "<|hi|>": 50276,
+ "<|hr|>": 50291,
+ "<|ht|>": 50339,
+ "<|hu|>": 50286,
+ "<|hy|>": 50312,
+ "<|id|>": 50275,
+ "<|is|>": 50311,
+ "<|it|>": 50274,
+ "<|ja|>": 50266,
+ "<|jw|>": 50356,
+ "<|ka|>": 50329,
+ "<|kk|>": 50316,
+ "<|km|>": 50323,
+ "<|kn|>": 50306,
+ "<|ko|>": 50264,
+ "<|la|>": 50294,
+ "<|lb|>": 50345,
+ "<|ln|>": 50353,
+ "<|lo|>": 50336,
+ "<|lt|>": 50293,
+ "<|lv|>": 50301,
+ "<|mg|>": 50349,
+ "<|mi|>": 50295,
+ "<|mk|>": 50308,
+ "<|ml|>": 50296,
+ "<|mn|>": 50314,
+ "<|mr|>": 50320,
+ "<|ms|>": 50282,
+ "<|mt|>": 50343,
+ "<|my|>": 50346,
+ "<|ne|>": 50313,
+ "<|nl|>": 50271,
+ "<|nn|>": 50342,
+ "<|no|>": 50288,
+ "<|oc|>": 50328,
+ "<|pa|>": 50321,
+ "<|pl|>": 50269,
+ "<|ps|>": 50340,
+ "<|pt|>": 50267,
+ "<|ro|>": 50284,
+ "<|ru|>": 50263,
+ "<|sa|>": 50344,
+ "<|sd|>": 50332,
+ "<|si|>": 50322,
+ "<|sk|>": 50298,
+ "<|sl|>": 50305,
+ "<|sn|>": 50324,
+ "<|so|>": 50326,
+ "<|sq|>": 50317,
+ "<|sr|>": 50303,
+ "<|su|>": 50357,
+ "<|sv|>": 50273,
+ "<|sw|>": 50318,
+ "<|ta|>": 50287,
+ "<|te|>": 50299,
+ "<|tg|>": 50331,
+ "<|th|>": 50289,
+ "<|tk|>": 50341,
+ "<|tl|>": 50348,
+ "<|tr|>": 50268,
+ "<|tt|>": 50351,
+ "<|uk|>": 50280,
+ "<|ur|>": 50290,
+ "<|uz|>": 50337,
+ "<|vi|>": 50278,
+ "<|yi|>": 50335,
+ "<|yo|>": 50325,
+ "<|zh|>": 50260
+ },
+ "max_initial_timestamp_index": 50,
+ "max_length": 448,
+ "no_timestamps_token_id": 50363,
+ "pad_token_id": 50257,
+ "prev_sot_token_id": 50361,
+ "return_timestamps": false,
+ "suppress_tokens": [
+ 1,
+ 2,
+ 7,
+ 8,
+ 9,
+ 10,
+ 14,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 31,
+ 58,
+ 59,
+ 60,
+ 61,
+ 62,
+ 63,
+ 90,
+ 91,
+ 92,
+ 93,
+ 359,
+ 503,
+ 522,
+ 542,
+ 873,
+ 893,
+ 902,
+ 918,
+ 922,
+ 931,
+ 1350,
+ 1853,
+ 1982,
+ 2460,
+ 2627,
+ 3246,
+ 3253,
+ 3268,
+ 3536,
+ 3846,
+ 3961,
+ 4183,
+ 4667,
+ 6585,
+ 6647,
+ 7273,
+ 9061,
+ 9383,
+ 10428,
+ 10929,
+ 11938,
+ 12033,
+ 12331,
+ 12562,
+ 13793,
+ 14157,
+ 14635,
+ 15265,
+ 15618,
+ 16553,
+ 16604,
+ 18362,
+ 18956,
+ 20075,
+ 21675,
+ 22520,
+ 26130,
+ 26161,
+ 26435,
+ 28279,
+ 29464,
+ 31650,
+ 32302,
+ 32470,
+ 36865,
+ 42863,
+ 47425,
+ 49870,
+ 50254,
+ 50258,
+ 50358,
+ 50359,
+ 50360,
+ 50361,
+ 50362
+ ],
+ "task_to_id": {
+ "transcribe": 50359,
+ "translate": 50358
+ },
+ "transformers_version": "4.42.0.dev0"
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/model.safetensors b/whisper/whisper_base_finetuned/checkpoint-9730/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..950c821051deed0c0adfe241789e4a25a23001a1
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03e9cfe5ee86f9e21d408022d52d567bd7efc032907a8471aa8f9a7fac4898cf
+size 290403936
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/optimizer.pt b/whisper/whisper_base_finetuned/checkpoint-9730/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d2585e54cfeabe6badb9c5f4ea2b19eff5dcb213
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7cda38c6ad5c6721aad9c9aec0e44b58367283c048ea387c021c671ae92f88b
+size 574811514
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/preprocessor_config.json b/whisper/whisper_base_finetuned/checkpoint-9730/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..91876762a536a746d268353c5cba57286e76b058
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/preprocessor_config.json
@@ -0,0 +1,14 @@
+{
+ "chunk_length": 30,
+ "feature_extractor_type": "WhisperFeatureExtractor",
+ "feature_size": 80,
+ "hop_length": 160,
+ "n_fft": 400,
+ "n_samples": 480000,
+ "nb_max_frames": 3000,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "WhisperProcessor",
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_0.pth b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7f727848c2bfedf310a49780c1de3e284e69e2b8
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b7fbfa4e910ce544e97f6dbeb7b208f4a49b65f2effd74d57de1af0d71c00d4
+size 15024
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_1.pth b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6027ff8500f65a8d37ad317adb8aeca8e304ea3f
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0142621a5bb1159d191b072ebd4a5652c1a11c8ae996f962a0fb69a7de1e8bd
+size 15024
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_2.pth b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..43a2d90fcc0c54516c5e05d24e91782a8d0f1914
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e628ef532a484d3fd015c141ac910a0e1bf2534dc77ccd7bf11f0599d3a390d5
+size 15024
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_3.pth b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..83c50ee23648caf8ba55ea2dda4623a892e65e47
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85c8d0adbc97f743992cc26dc8369f3b254ab17f01ecb27067c8bc9a87bec039
+size 15024
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/scheduler.pt b/whisper/whisper_base_finetuned/checkpoint-9730/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ca2c327186c42d9a462a1db5e0dd14700b98b336
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb1923bd551b2f286111cc5d97750c031553a4df582d098f6ee5d7f8fef85f9e
+size 1064
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/trainer_state.json b/whisper/whisper_base_finetuned/checkpoint-9730/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..50908bb1c1ad94a729e394266e832d7422bf6380
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/trainer_state.json
@@ -0,0 +1,6934 @@
+{
+ "best_metric": 0.48120656465137523,
+ "best_model_checkpoint": "./whisper_base_finetuned/checkpoint-1946",
+ "epoch": 10.0,
+ "eval_steps": 500,
+ "global_step": 9730,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.010277492291880781,
+ "grad_norm": 78.41651916503906,
+ "learning_rate": 1.0000000000000001e-07,
+ "loss": 3.3505,
+ "step": 10
+ },
+ {
+ "epoch": 0.020554984583761562,
+ "grad_norm": 74.72834777832031,
+ "learning_rate": 3.0000000000000004e-07,
+ "loss": 3.2167,
+ "step": 20
+ },
+ {
+ "epoch": 0.030832476875642344,
+ "grad_norm": 48.560585021972656,
+ "learning_rate": 5.000000000000001e-07,
+ "loss": 2.9513,
+ "step": 30
+ },
+ {
+ "epoch": 0.041109969167523124,
+ "grad_norm": 30.085025787353516,
+ "learning_rate": 7.000000000000001e-07,
+ "loss": 2.6321,
+ "step": 40
+ },
+ {
+ "epoch": 0.051387461459403906,
+ "grad_norm": 21.896045684814453,
+ "learning_rate": 9.000000000000001e-07,
+ "loss": 2.376,
+ "step": 50
+ },
+ {
+ "epoch": 0.06166495375128469,
+ "grad_norm": 16.388851165771484,
+ "learning_rate": 1.1e-06,
+ "loss": 2.1473,
+ "step": 60
+ },
+ {
+ "epoch": 0.07194244604316546,
+ "grad_norm": 15.245757102966309,
+ "learning_rate": 1.3e-06,
+ "loss": 1.9096,
+ "step": 70
+ },
+ {
+ "epoch": 0.08221993833504625,
+ "grad_norm": 12.178542137145996,
+ "learning_rate": 1.5e-06,
+ "loss": 1.7493,
+ "step": 80
+ },
+ {
+ "epoch": 0.09249743062692703,
+ "grad_norm": 9.62636947631836,
+ "learning_rate": 1.7000000000000002e-06,
+ "loss": 1.5233,
+ "step": 90
+ },
+ {
+ "epoch": 0.10277492291880781,
+ "grad_norm": 9.04529094696045,
+ "learning_rate": 1.9000000000000002e-06,
+ "loss": 1.3753,
+ "step": 100
+ },
+ {
+ "epoch": 0.1130524152106886,
+ "grad_norm": 8.049132347106934,
+ "learning_rate": 2.1000000000000002e-06,
+ "loss": 1.2293,
+ "step": 110
+ },
+ {
+ "epoch": 0.12332990750256938,
+ "grad_norm": 7.2756147384643555,
+ "learning_rate": 2.3000000000000004e-06,
+ "loss": 1.1131,
+ "step": 120
+ },
+ {
+ "epoch": 0.13360739979445016,
+ "grad_norm": 7.049572944641113,
+ "learning_rate": 2.5e-06,
+ "loss": 1.1889,
+ "step": 130
+ },
+ {
+ "epoch": 0.14388489208633093,
+ "grad_norm": 6.429234981536865,
+ "learning_rate": 2.7000000000000004e-06,
+ "loss": 1.0655,
+ "step": 140
+ },
+ {
+ "epoch": 0.15416238437821173,
+ "grad_norm": 6.281942844390869,
+ "learning_rate": 2.9e-06,
+ "loss": 0.9696,
+ "step": 150
+ },
+ {
+ "epoch": 0.1644398766700925,
+ "grad_norm": 6.72721004486084,
+ "learning_rate": 3.1000000000000004e-06,
+ "loss": 1.0683,
+ "step": 160
+ },
+ {
+ "epoch": 0.1747173689619733,
+ "grad_norm": 5.773904800415039,
+ "learning_rate": 3.3000000000000006e-06,
+ "loss": 1.0132,
+ "step": 170
+ },
+ {
+ "epoch": 0.18499486125385406,
+ "grad_norm": 6.2021870613098145,
+ "learning_rate": 3.5e-06,
+ "loss": 0.9325,
+ "step": 180
+ },
+ {
+ "epoch": 0.19527235354573483,
+ "grad_norm": 6.268314838409424,
+ "learning_rate": 3.7e-06,
+ "loss": 0.8658,
+ "step": 190
+ },
+ {
+ "epoch": 0.20554984583761562,
+ "grad_norm": 6.014781951904297,
+ "learning_rate": 3.900000000000001e-06,
+ "loss": 0.9349,
+ "step": 200
+ },
+ {
+ "epoch": 0.2158273381294964,
+ "grad_norm": 7.0086188316345215,
+ "learning_rate": 4.1e-06,
+ "loss": 0.9347,
+ "step": 210
+ },
+ {
+ "epoch": 0.2261048304213772,
+ "grad_norm": 6.318301200866699,
+ "learning_rate": 4.3e-06,
+ "loss": 0.8023,
+ "step": 220
+ },
+ {
+ "epoch": 0.23638232271325796,
+ "grad_norm": 5.627261638641357,
+ "learning_rate": 4.5e-06,
+ "loss": 0.871,
+ "step": 230
+ },
+ {
+ "epoch": 0.24665981500513876,
+ "grad_norm": 5.793784141540527,
+ "learning_rate": 4.7e-06,
+ "loss": 0.8654,
+ "step": 240
+ },
+ {
+ "epoch": 0.2569373072970195,
+ "grad_norm": 5.449838638305664,
+ "learning_rate": 4.9000000000000005e-06,
+ "loss": 0.7499,
+ "step": 250
+ },
+ {
+ "epoch": 0.2672147995889003,
+ "grad_norm": 4.993557453155518,
+ "learning_rate": 5.1e-06,
+ "loss": 0.7281,
+ "step": 260
+ },
+ {
+ "epoch": 0.2774922918807811,
+ "grad_norm": 6.478201866149902,
+ "learning_rate": 5.300000000000001e-06,
+ "loss": 0.7543,
+ "step": 270
+ },
+ {
+ "epoch": 0.28776978417266186,
+ "grad_norm": 5.355103492736816,
+ "learning_rate": 5.500000000000001e-06,
+ "loss": 0.7545,
+ "step": 280
+ },
+ {
+ "epoch": 0.29804727646454265,
+ "grad_norm": 4.926327228546143,
+ "learning_rate": 5.7e-06,
+ "loss": 0.7397,
+ "step": 290
+ },
+ {
+ "epoch": 0.30832476875642345,
+ "grad_norm": 5.97158145904541,
+ "learning_rate": 5.9e-06,
+ "loss": 0.7262,
+ "step": 300
+ },
+ {
+ "epoch": 0.3186022610483042,
+ "grad_norm": 5.3029584884643555,
+ "learning_rate": 6.1e-06,
+ "loss": 0.7085,
+ "step": 310
+ },
+ {
+ "epoch": 0.328879753340185,
+ "grad_norm": 4.928351879119873,
+ "learning_rate": 6.300000000000001e-06,
+ "loss": 0.6726,
+ "step": 320
+ },
+ {
+ "epoch": 0.3391572456320658,
+ "grad_norm": 5.714938640594482,
+ "learning_rate": 6.5000000000000004e-06,
+ "loss": 0.6985,
+ "step": 330
+ },
+ {
+ "epoch": 0.3494347379239466,
+ "grad_norm": 4.394782543182373,
+ "learning_rate": 6.700000000000001e-06,
+ "loss": 0.6555,
+ "step": 340
+ },
+ {
+ "epoch": 0.3597122302158273,
+ "grad_norm": 4.944245338439941,
+ "learning_rate": 6.9e-06,
+ "loss": 0.6629,
+ "step": 350
+ },
+ {
+ "epoch": 0.3699897225077081,
+ "grad_norm": 5.048007965087891,
+ "learning_rate": 7.100000000000001e-06,
+ "loss": 0.65,
+ "step": 360
+ },
+ {
+ "epoch": 0.3802672147995889,
+ "grad_norm": 5.267819404602051,
+ "learning_rate": 7.3e-06,
+ "loss": 0.5775,
+ "step": 370
+ },
+ {
+ "epoch": 0.39054470709146966,
+ "grad_norm": 5.552557945251465,
+ "learning_rate": 7.500000000000001e-06,
+ "loss": 0.683,
+ "step": 380
+ },
+ {
+ "epoch": 0.40082219938335045,
+ "grad_norm": 4.37172269821167,
+ "learning_rate": 7.7e-06,
+ "loss": 0.5931,
+ "step": 390
+ },
+ {
+ "epoch": 0.41109969167523125,
+ "grad_norm": 5.033542156219482,
+ "learning_rate": 7.9e-06,
+ "loss": 0.6662,
+ "step": 400
+ },
+ {
+ "epoch": 0.42137718396711205,
+ "grad_norm": 4.7163190841674805,
+ "learning_rate": 8.1e-06,
+ "loss": 0.7605,
+ "step": 410
+ },
+ {
+ "epoch": 0.4316546762589928,
+ "grad_norm": 5.401676177978516,
+ "learning_rate": 8.3e-06,
+ "loss": 0.5939,
+ "step": 420
+ },
+ {
+ "epoch": 0.4419321685508736,
+ "grad_norm": 5.293227672576904,
+ "learning_rate": 8.5e-06,
+ "loss": 0.6061,
+ "step": 430
+ },
+ {
+ "epoch": 0.4522096608427544,
+ "grad_norm": 5.0345940589904785,
+ "learning_rate": 8.700000000000001e-06,
+ "loss": 0.5386,
+ "step": 440
+ },
+ {
+ "epoch": 0.4624871531346352,
+ "grad_norm": 4.590668678283691,
+ "learning_rate": 8.900000000000001e-06,
+ "loss": 0.5455,
+ "step": 450
+ },
+ {
+ "epoch": 0.4727646454265159,
+ "grad_norm": 5.621304512023926,
+ "learning_rate": 9.100000000000001e-06,
+ "loss": 0.514,
+ "step": 460
+ },
+ {
+ "epoch": 0.4830421377183967,
+ "grad_norm": 5.061606407165527,
+ "learning_rate": 9.3e-06,
+ "loss": 0.5945,
+ "step": 470
+ },
+ {
+ "epoch": 0.4933196300102775,
+ "grad_norm": 4.499748229980469,
+ "learning_rate": 9.5e-06,
+ "loss": 0.5241,
+ "step": 480
+ },
+ {
+ "epoch": 0.5035971223021583,
+ "grad_norm": 4.244873523712158,
+ "learning_rate": 9.7e-06,
+ "loss": 0.5179,
+ "step": 490
+ },
+ {
+ "epoch": 0.513874614594039,
+ "grad_norm": 5.057434558868408,
+ "learning_rate": 9.9e-06,
+ "loss": 0.5744,
+ "step": 500
+ },
+ {
+ "epoch": 0.5241521068859198,
+ "grad_norm": 5.502700328826904,
+ "learning_rate": 9.994582881906827e-06,
+ "loss": 0.5752,
+ "step": 510
+ },
+ {
+ "epoch": 0.5344295991778006,
+ "grad_norm": 4.976474285125732,
+ "learning_rate": 9.983748645720479e-06,
+ "loss": 0.6109,
+ "step": 520
+ },
+ {
+ "epoch": 0.5447070914696814,
+ "grad_norm": 4.148839473724365,
+ "learning_rate": 9.972914409534129e-06,
+ "loss": 0.5643,
+ "step": 530
+ },
+ {
+ "epoch": 0.5549845837615622,
+ "grad_norm": 3.930744171142578,
+ "learning_rate": 9.96208017334778e-06,
+ "loss": 0.4671,
+ "step": 540
+ },
+ {
+ "epoch": 0.5652620760534429,
+ "grad_norm": 5.741941928863525,
+ "learning_rate": 9.95124593716143e-06,
+ "loss": 0.5608,
+ "step": 550
+ },
+ {
+ "epoch": 0.5755395683453237,
+ "grad_norm": 4.145395755767822,
+ "learning_rate": 9.940411700975083e-06,
+ "loss": 0.4698,
+ "step": 560
+ },
+ {
+ "epoch": 0.5858170606372045,
+ "grad_norm": 4.2532830238342285,
+ "learning_rate": 9.929577464788733e-06,
+ "loss": 0.4084,
+ "step": 570
+ },
+ {
+ "epoch": 0.5960945529290853,
+ "grad_norm": 4.680564880371094,
+ "learning_rate": 9.918743228602385e-06,
+ "loss": 0.6091,
+ "step": 580
+ },
+ {
+ "epoch": 0.6063720452209661,
+ "grad_norm": 3.8156168460845947,
+ "learning_rate": 9.907908992416035e-06,
+ "loss": 0.5567,
+ "step": 590
+ },
+ {
+ "epoch": 0.6166495375128469,
+ "grad_norm": 4.722325801849365,
+ "learning_rate": 9.897074756229687e-06,
+ "loss": 0.5543,
+ "step": 600
+ },
+ {
+ "epoch": 0.6269270298047277,
+ "grad_norm": 5.177743911743164,
+ "learning_rate": 9.886240520043338e-06,
+ "loss": 0.477,
+ "step": 610
+ },
+ {
+ "epoch": 0.6372045220966084,
+ "grad_norm": 4.9859209060668945,
+ "learning_rate": 9.875406283856989e-06,
+ "loss": 0.4592,
+ "step": 620
+ },
+ {
+ "epoch": 0.6474820143884892,
+ "grad_norm": 4.872037887573242,
+ "learning_rate": 9.86457204767064e-06,
+ "loss": 0.5632,
+ "step": 630
+ },
+ {
+ "epoch": 0.65775950668037,
+ "grad_norm": 4.967211723327637,
+ "learning_rate": 9.85373781148429e-06,
+ "loss": 0.5553,
+ "step": 640
+ },
+ {
+ "epoch": 0.6680369989722508,
+ "grad_norm": 4.748555660247803,
+ "learning_rate": 9.842903575297942e-06,
+ "loss": 0.5228,
+ "step": 650
+ },
+ {
+ "epoch": 0.6783144912641316,
+ "grad_norm": 4.945960521697998,
+ "learning_rate": 9.832069339111592e-06,
+ "loss": 0.5018,
+ "step": 660
+ },
+ {
+ "epoch": 0.6885919835560124,
+ "grad_norm": 3.7931437492370605,
+ "learning_rate": 9.821235102925244e-06,
+ "loss": 0.5347,
+ "step": 670
+ },
+ {
+ "epoch": 0.6988694758478932,
+ "grad_norm": 4.149494171142578,
+ "learning_rate": 9.810400866738896e-06,
+ "loss": 0.413,
+ "step": 680
+ },
+ {
+ "epoch": 0.7091469681397738,
+ "grad_norm": 4.979891300201416,
+ "learning_rate": 9.799566630552548e-06,
+ "loss": 0.4836,
+ "step": 690
+ },
+ {
+ "epoch": 0.7194244604316546,
+ "grad_norm": 5.043586730957031,
+ "learning_rate": 9.788732394366198e-06,
+ "loss": 0.5286,
+ "step": 700
+ },
+ {
+ "epoch": 0.7297019527235354,
+ "grad_norm": 4.017364978790283,
+ "learning_rate": 9.77789815817985e-06,
+ "loss": 0.3785,
+ "step": 710
+ },
+ {
+ "epoch": 0.7399794450154162,
+ "grad_norm": 4.4453959465026855,
+ "learning_rate": 9.7670639219935e-06,
+ "loss": 0.52,
+ "step": 720
+ },
+ {
+ "epoch": 0.750256937307297,
+ "grad_norm": 4.624840259552002,
+ "learning_rate": 9.756229685807152e-06,
+ "loss": 0.5339,
+ "step": 730
+ },
+ {
+ "epoch": 0.7605344295991778,
+ "grad_norm": 4.6119771003723145,
+ "learning_rate": 9.745395449620802e-06,
+ "loss": 0.4857,
+ "step": 740
+ },
+ {
+ "epoch": 0.7708119218910586,
+ "grad_norm": 4.147925853729248,
+ "learning_rate": 9.734561213434454e-06,
+ "loss": 0.4363,
+ "step": 750
+ },
+ {
+ "epoch": 0.7810894141829393,
+ "grad_norm": 5.529519557952881,
+ "learning_rate": 9.723726977248104e-06,
+ "loss": 0.5206,
+ "step": 760
+ },
+ {
+ "epoch": 0.7913669064748201,
+ "grad_norm": 3.9015376567840576,
+ "learning_rate": 9.712892741061756e-06,
+ "loss": 0.4836,
+ "step": 770
+ },
+ {
+ "epoch": 0.8016443987667009,
+ "grad_norm": 4.5102057456970215,
+ "learning_rate": 9.702058504875406e-06,
+ "loss": 0.4437,
+ "step": 780
+ },
+ {
+ "epoch": 0.8119218910585817,
+ "grad_norm": 5.272336006164551,
+ "learning_rate": 9.691224268689058e-06,
+ "loss": 0.4402,
+ "step": 790
+ },
+ {
+ "epoch": 0.8221993833504625,
+ "grad_norm": 4.404648303985596,
+ "learning_rate": 9.68039003250271e-06,
+ "loss": 0.4443,
+ "step": 800
+ },
+ {
+ "epoch": 0.8324768756423433,
+ "grad_norm": 4.636880397796631,
+ "learning_rate": 9.66955579631636e-06,
+ "loss": 0.4943,
+ "step": 810
+ },
+ {
+ "epoch": 0.8427543679342241,
+ "grad_norm": 4.826484203338623,
+ "learning_rate": 9.658721560130012e-06,
+ "loss": 0.5385,
+ "step": 820
+ },
+ {
+ "epoch": 0.8530318602261048,
+ "grad_norm": 4.46310567855835,
+ "learning_rate": 9.647887323943664e-06,
+ "loss": 0.413,
+ "step": 830
+ },
+ {
+ "epoch": 0.8633093525179856,
+ "grad_norm": 4.603589057922363,
+ "learning_rate": 9.637053087757314e-06,
+ "loss": 0.4801,
+ "step": 840
+ },
+ {
+ "epoch": 0.8735868448098664,
+ "grad_norm": 3.7884819507598877,
+ "learning_rate": 9.626218851570966e-06,
+ "loss": 0.4294,
+ "step": 850
+ },
+ {
+ "epoch": 0.8838643371017472,
+ "grad_norm": 3.0480997562408447,
+ "learning_rate": 9.615384615384616e-06,
+ "loss": 0.4737,
+ "step": 860
+ },
+ {
+ "epoch": 0.894141829393628,
+ "grad_norm": 4.840622901916504,
+ "learning_rate": 9.604550379198268e-06,
+ "loss": 0.4806,
+ "step": 870
+ },
+ {
+ "epoch": 0.9044193216855088,
+ "grad_norm": 4.64235782623291,
+ "learning_rate": 9.59371614301192e-06,
+ "loss": 0.5359,
+ "step": 880
+ },
+ {
+ "epoch": 0.9146968139773896,
+ "grad_norm": 4.615347385406494,
+ "learning_rate": 9.58288190682557e-06,
+ "loss": 0.4526,
+ "step": 890
+ },
+ {
+ "epoch": 0.9249743062692704,
+ "grad_norm": 4.345542907714844,
+ "learning_rate": 9.572047670639221e-06,
+ "loss": 0.5112,
+ "step": 900
+ },
+ {
+ "epoch": 0.935251798561151,
+ "grad_norm": 3.5318965911865234,
+ "learning_rate": 9.561213434452872e-06,
+ "loss": 0.455,
+ "step": 910
+ },
+ {
+ "epoch": 0.9455292908530318,
+ "grad_norm": 4.852155685424805,
+ "learning_rate": 9.550379198266523e-06,
+ "loss": 0.5012,
+ "step": 920
+ },
+ {
+ "epoch": 0.9558067831449126,
+ "grad_norm": 4.666072368621826,
+ "learning_rate": 9.539544962080174e-06,
+ "loss": 0.4781,
+ "step": 930
+ },
+ {
+ "epoch": 0.9660842754367934,
+ "grad_norm": 4.7242865562438965,
+ "learning_rate": 9.528710725893825e-06,
+ "loss": 0.5102,
+ "step": 940
+ },
+ {
+ "epoch": 0.9763617677286742,
+ "grad_norm": 3.9831533432006836,
+ "learning_rate": 9.517876489707475e-06,
+ "loss": 0.416,
+ "step": 950
+ },
+ {
+ "epoch": 0.986639260020555,
+ "grad_norm": 4.294024467468262,
+ "learning_rate": 9.507042253521127e-06,
+ "loss": 0.4254,
+ "step": 960
+ },
+ {
+ "epoch": 0.9969167523124358,
+ "grad_norm": 4.132877826690674,
+ "learning_rate": 9.496208017334777e-06,
+ "loss": 0.4111,
+ "step": 970
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.4590415954589844,
+ "eval_runtime": 1119.5991,
+ "eval_samples_per_second": 5.241,
+ "eval_steps_per_second": 0.164,
+ "eval_wer": 0.45510457111161867,
+ "step": 973
+ },
+ {
+ "epoch": 1.0071942446043165,
+ "grad_norm": 3.725668430328369,
+ "learning_rate": 9.485373781148431e-06,
+ "loss": 0.3556,
+ "step": 980
+ },
+ {
+ "epoch": 1.0174717368961974,
+ "grad_norm": 4.162373065948486,
+ "learning_rate": 9.474539544962081e-06,
+ "loss": 0.3531,
+ "step": 990
+ },
+ {
+ "epoch": 1.027749229188078,
+ "grad_norm": 3.697767734527588,
+ "learning_rate": 9.463705308775733e-06,
+ "loss": 0.3627,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0380267214799588,
+ "grad_norm": 4.862727642059326,
+ "learning_rate": 9.452871072589383e-06,
+ "loss": 0.4393,
+ "step": 1010
+ },
+ {
+ "epoch": 1.0483042137718397,
+ "grad_norm": 4.021687030792236,
+ "learning_rate": 9.442036836403035e-06,
+ "loss": 0.4544,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0585817060637204,
+ "grad_norm": 3.82734751701355,
+ "learning_rate": 9.431202600216685e-06,
+ "loss": 0.3937,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0688591983556013,
+ "grad_norm": 3.5762205123901367,
+ "learning_rate": 9.420368364030337e-06,
+ "loss": 0.4722,
+ "step": 1040
+ },
+ {
+ "epoch": 1.079136690647482,
+ "grad_norm": 4.2156147956848145,
+ "learning_rate": 9.409534127843987e-06,
+ "loss": 0.3398,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0894141829393629,
+ "grad_norm": 4.575065612792969,
+ "learning_rate": 9.398699891657639e-06,
+ "loss": 0.3457,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0996916752312436,
+ "grad_norm": 3.8678557872772217,
+ "learning_rate": 9.387865655471289e-06,
+ "loss": 0.3178,
+ "step": 1070
+ },
+ {
+ "epoch": 1.1099691675231242,
+ "grad_norm": 4.01522970199585,
+ "learning_rate": 9.377031419284941e-06,
+ "loss": 0.4911,
+ "step": 1080
+ },
+ {
+ "epoch": 1.1202466598150052,
+ "grad_norm": 4.648536205291748,
+ "learning_rate": 9.366197183098593e-06,
+ "loss": 0.4531,
+ "step": 1090
+ },
+ {
+ "epoch": 1.1305241521068858,
+ "grad_norm": 4.106440544128418,
+ "learning_rate": 9.355362946912243e-06,
+ "loss": 0.3942,
+ "step": 1100
+ },
+ {
+ "epoch": 1.1408016443987667,
+ "grad_norm": 4.35145378112793,
+ "learning_rate": 9.344528710725895e-06,
+ "loss": 0.4071,
+ "step": 1110
+ },
+ {
+ "epoch": 1.1510791366906474,
+ "grad_norm": 4.132904529571533,
+ "learning_rate": 9.333694474539545e-06,
+ "loss": 0.4148,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1613566289825283,
+ "grad_norm": 3.6607720851898193,
+ "learning_rate": 9.322860238353197e-06,
+ "loss": 0.298,
+ "step": 1130
+ },
+ {
+ "epoch": 1.171634121274409,
+ "grad_norm": 3.625558376312256,
+ "learning_rate": 9.312026002166849e-06,
+ "loss": 0.4097,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1819116135662897,
+ "grad_norm": 4.5726494789123535,
+ "learning_rate": 9.301191765980499e-06,
+ "loss": 0.3953,
+ "step": 1150
+ },
+ {
+ "epoch": 1.1921891058581706,
+ "grad_norm": 4.445627212524414,
+ "learning_rate": 9.29035752979415e-06,
+ "loss": 0.3835,
+ "step": 1160
+ },
+ {
+ "epoch": 1.2024665981500513,
+ "grad_norm": 4.57354211807251,
+ "learning_rate": 9.279523293607802e-06,
+ "loss": 0.3832,
+ "step": 1170
+ },
+ {
+ "epoch": 1.2127440904419322,
+ "grad_norm": 4.104644775390625,
+ "learning_rate": 9.268689057421453e-06,
+ "loss": 0.416,
+ "step": 1180
+ },
+ {
+ "epoch": 1.223021582733813,
+ "grad_norm": 4.074865818023682,
+ "learning_rate": 9.257854821235104e-06,
+ "loss": 0.3608,
+ "step": 1190
+ },
+ {
+ "epoch": 1.2332990750256938,
+ "grad_norm": 3.4953205585479736,
+ "learning_rate": 9.247020585048755e-06,
+ "loss": 0.3351,
+ "step": 1200
+ },
+ {
+ "epoch": 1.2435765673175745,
+ "grad_norm": 4.539699077606201,
+ "learning_rate": 9.236186348862406e-06,
+ "loss": 0.4058,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2538540596094552,
+ "grad_norm": 4.369785308837891,
+ "learning_rate": 9.225352112676057e-06,
+ "loss": 0.3412,
+ "step": 1220
+ },
+ {
+ "epoch": 1.264131551901336,
+ "grad_norm": 3.9678955078125,
+ "learning_rate": 9.214517876489708e-06,
+ "loss": 0.3832,
+ "step": 1230
+ },
+ {
+ "epoch": 1.274409044193217,
+ "grad_norm": 4.361431121826172,
+ "learning_rate": 9.203683640303359e-06,
+ "loss": 0.4099,
+ "step": 1240
+ },
+ {
+ "epoch": 1.2846865364850977,
+ "grad_norm": 4.076035022735596,
+ "learning_rate": 9.19284940411701e-06,
+ "loss": 0.4206,
+ "step": 1250
+ },
+ {
+ "epoch": 1.2949640287769784,
+ "grad_norm": 4.084390640258789,
+ "learning_rate": 9.18201516793066e-06,
+ "loss": 0.4271,
+ "step": 1260
+ },
+ {
+ "epoch": 1.3052415210688593,
+ "grad_norm": 3.262382745742798,
+ "learning_rate": 9.171180931744312e-06,
+ "loss": 0.3687,
+ "step": 1270
+ },
+ {
+ "epoch": 1.31551901336074,
+ "grad_norm": 4.841338634490967,
+ "learning_rate": 9.160346695557964e-06,
+ "loss": 0.3659,
+ "step": 1280
+ },
+ {
+ "epoch": 1.3257965056526206,
+ "grad_norm": 4.267407417297363,
+ "learning_rate": 9.149512459371616e-06,
+ "loss": 0.3784,
+ "step": 1290
+ },
+ {
+ "epoch": 1.3360739979445015,
+ "grad_norm": 3.525167465209961,
+ "learning_rate": 9.138678223185266e-06,
+ "loss": 0.3593,
+ "step": 1300
+ },
+ {
+ "epoch": 1.3463514902363825,
+ "grad_norm": 4.050453186035156,
+ "learning_rate": 9.127843986998918e-06,
+ "loss": 0.3737,
+ "step": 1310
+ },
+ {
+ "epoch": 1.3566289825282631,
+ "grad_norm": 3.8488476276397705,
+ "learning_rate": 9.117009750812568e-06,
+ "loss": 0.417,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3669064748201438,
+ "grad_norm": 4.5935540199279785,
+ "learning_rate": 9.10617551462622e-06,
+ "loss": 0.4056,
+ "step": 1330
+ },
+ {
+ "epoch": 1.3771839671120247,
+ "grad_norm": 4.8325581550598145,
+ "learning_rate": 9.09534127843987e-06,
+ "loss": 0.4208,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3874614594039054,
+ "grad_norm": 4.0957818031311035,
+ "learning_rate": 9.084507042253522e-06,
+ "loss": 0.408,
+ "step": 1350
+ },
+ {
+ "epoch": 1.397738951695786,
+ "grad_norm": 5.2503767013549805,
+ "learning_rate": 9.073672806067174e-06,
+ "loss": 0.4164,
+ "step": 1360
+ },
+ {
+ "epoch": 1.408016443987667,
+ "grad_norm": 3.5155766010284424,
+ "learning_rate": 9.062838569880824e-06,
+ "loss": 0.3548,
+ "step": 1370
+ },
+ {
+ "epoch": 1.418293936279548,
+ "grad_norm": 3.994086503982544,
+ "learning_rate": 9.052004333694476e-06,
+ "loss": 0.4025,
+ "step": 1380
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 5.403133392333984,
+ "learning_rate": 9.041170097508126e-06,
+ "loss": 0.4171,
+ "step": 1390
+ },
+ {
+ "epoch": 1.4388489208633093,
+ "grad_norm": 3.069812774658203,
+ "learning_rate": 9.030335861321778e-06,
+ "loss": 0.3431,
+ "step": 1400
+ },
+ {
+ "epoch": 1.4491264131551902,
+ "grad_norm": 3.9196174144744873,
+ "learning_rate": 9.019501625135428e-06,
+ "loss": 0.4507,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4594039054470709,
+ "grad_norm": 3.6087749004364014,
+ "learning_rate": 9.00866738894908e-06,
+ "loss": 0.336,
+ "step": 1420
+ },
+ {
+ "epoch": 1.4696813977389516,
+ "grad_norm": 4.544300079345703,
+ "learning_rate": 8.99783315276273e-06,
+ "loss": 0.4188,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4799588900308325,
+ "grad_norm": 3.7331314086914062,
+ "learning_rate": 8.986998916576382e-06,
+ "loss": 0.4092,
+ "step": 1440
+ },
+ {
+ "epoch": 1.4902363823227134,
+ "grad_norm": 4.780219078063965,
+ "learning_rate": 8.976164680390034e-06,
+ "loss": 0.3587,
+ "step": 1450
+ },
+ {
+ "epoch": 1.500513874614594,
+ "grad_norm": 4.3289690017700195,
+ "learning_rate": 8.965330444203685e-06,
+ "loss": 0.3504,
+ "step": 1460
+ },
+ {
+ "epoch": 1.5107913669064748,
+ "grad_norm": 4.810067653656006,
+ "learning_rate": 8.954496208017336e-06,
+ "loss": 0.3261,
+ "step": 1470
+ },
+ {
+ "epoch": 1.5210688591983557,
+ "grad_norm": 4.626963138580322,
+ "learning_rate": 8.943661971830987e-06,
+ "loss": 0.41,
+ "step": 1480
+ },
+ {
+ "epoch": 1.5313463514902363,
+ "grad_norm": 4.636073589324951,
+ "learning_rate": 8.932827735644638e-06,
+ "loss": 0.3462,
+ "step": 1490
+ },
+ {
+ "epoch": 1.541623843782117,
+ "grad_norm": 4.311732769012451,
+ "learning_rate": 8.92199349945829e-06,
+ "loss": 0.3888,
+ "step": 1500
+ },
+ {
+ "epoch": 1.551901336073998,
+ "grad_norm": 4.3718461990356445,
+ "learning_rate": 8.91115926327194e-06,
+ "loss": 0.3813,
+ "step": 1510
+ },
+ {
+ "epoch": 1.5621788283658788,
+ "grad_norm": 3.1939361095428467,
+ "learning_rate": 8.900325027085591e-06,
+ "loss": 0.4208,
+ "step": 1520
+ },
+ {
+ "epoch": 1.5724563206577595,
+ "grad_norm": 4.095200538635254,
+ "learning_rate": 8.889490790899242e-06,
+ "loss": 0.3845,
+ "step": 1530
+ },
+ {
+ "epoch": 1.5827338129496402,
+ "grad_norm": 3.5258431434631348,
+ "learning_rate": 8.878656554712893e-06,
+ "loss": 0.3284,
+ "step": 1540
+ },
+ {
+ "epoch": 1.5930113052415211,
+ "grad_norm": 3.1825735569000244,
+ "learning_rate": 8.867822318526545e-06,
+ "loss": 0.3385,
+ "step": 1550
+ },
+ {
+ "epoch": 1.6032887975334018,
+ "grad_norm": 3.5939745903015137,
+ "learning_rate": 8.856988082340195e-06,
+ "loss": 0.4101,
+ "step": 1560
+ },
+ {
+ "epoch": 1.6135662898252825,
+ "grad_norm": 4.911982536315918,
+ "learning_rate": 8.846153846153847e-06,
+ "loss": 0.4328,
+ "step": 1570
+ },
+ {
+ "epoch": 1.6238437821171634,
+ "grad_norm": 3.6301517486572266,
+ "learning_rate": 8.835319609967497e-06,
+ "loss": 0.3399,
+ "step": 1580
+ },
+ {
+ "epoch": 1.6341212744090443,
+ "grad_norm": 3.6853671073913574,
+ "learning_rate": 8.82448537378115e-06,
+ "loss": 0.3271,
+ "step": 1590
+ },
+ {
+ "epoch": 1.644398766700925,
+ "grad_norm": 3.029378652572632,
+ "learning_rate": 8.8136511375948e-06,
+ "loss": 0.3638,
+ "step": 1600
+ },
+ {
+ "epoch": 1.6546762589928057,
+ "grad_norm": 4.740921497344971,
+ "learning_rate": 8.802816901408451e-06,
+ "loss": 0.2964,
+ "step": 1610
+ },
+ {
+ "epoch": 1.6649537512846866,
+ "grad_norm": 4.348399639129639,
+ "learning_rate": 8.791982665222103e-06,
+ "loss": 0.364,
+ "step": 1620
+ },
+ {
+ "epoch": 1.6752312435765673,
+ "grad_norm": 4.521662712097168,
+ "learning_rate": 8.781148429035755e-06,
+ "loss": 0.3946,
+ "step": 1630
+ },
+ {
+ "epoch": 1.685508735868448,
+ "grad_norm": 4.327390670776367,
+ "learning_rate": 8.770314192849405e-06,
+ "loss": 0.3325,
+ "step": 1640
+ },
+ {
+ "epoch": 1.6957862281603289,
+ "grad_norm": 4.260695934295654,
+ "learning_rate": 8.759479956663057e-06,
+ "loss": 0.4112,
+ "step": 1650
+ },
+ {
+ "epoch": 1.7060637204522098,
+ "grad_norm": 3.723114490509033,
+ "learning_rate": 8.748645720476707e-06,
+ "loss": 0.3777,
+ "step": 1660
+ },
+ {
+ "epoch": 1.7163412127440905,
+ "grad_norm": 3.6276798248291016,
+ "learning_rate": 8.737811484290359e-06,
+ "loss": 0.4307,
+ "step": 1670
+ },
+ {
+ "epoch": 1.7266187050359711,
+ "grad_norm": 4.2474446296691895,
+ "learning_rate": 8.726977248104009e-06,
+ "loss": 0.3451,
+ "step": 1680
+ },
+ {
+ "epoch": 1.736896197327852,
+ "grad_norm": 4.7757368087768555,
+ "learning_rate": 8.71614301191766e-06,
+ "loss": 0.3528,
+ "step": 1690
+ },
+ {
+ "epoch": 1.7471736896197327,
+ "grad_norm": 3.768132209777832,
+ "learning_rate": 8.705308775731311e-06,
+ "loss": 0.4086,
+ "step": 1700
+ },
+ {
+ "epoch": 1.7574511819116134,
+ "grad_norm": 4.793600559234619,
+ "learning_rate": 8.694474539544963e-06,
+ "loss": 0.414,
+ "step": 1710
+ },
+ {
+ "epoch": 1.7677286742034943,
+ "grad_norm": 4.651284217834473,
+ "learning_rate": 8.683640303358613e-06,
+ "loss": 0.3424,
+ "step": 1720
+ },
+ {
+ "epoch": 1.7780061664953752,
+ "grad_norm": 3.656557083129883,
+ "learning_rate": 8.672806067172265e-06,
+ "loss": 0.2813,
+ "step": 1730
+ },
+ {
+ "epoch": 1.788283658787256,
+ "grad_norm": 3.836421012878418,
+ "learning_rate": 8.661971830985915e-06,
+ "loss": 0.3834,
+ "step": 1740
+ },
+ {
+ "epoch": 1.7985611510791366,
+ "grad_norm": 4.500270843505859,
+ "learning_rate": 8.651137594799567e-06,
+ "loss": 0.337,
+ "step": 1750
+ },
+ {
+ "epoch": 1.8088386433710175,
+ "grad_norm": 3.3500618934631348,
+ "learning_rate": 8.640303358613219e-06,
+ "loss": 0.393,
+ "step": 1760
+ },
+ {
+ "epoch": 1.8191161356628982,
+ "grad_norm": 3.52258563041687,
+ "learning_rate": 8.62946912242687e-06,
+ "loss": 0.397,
+ "step": 1770
+ },
+ {
+ "epoch": 1.829393627954779,
+ "grad_norm": 4.57402229309082,
+ "learning_rate": 8.61863488624052e-06,
+ "loss": 0.4048,
+ "step": 1780
+ },
+ {
+ "epoch": 1.8396711202466598,
+ "grad_norm": 3.952526092529297,
+ "learning_rate": 8.607800650054172e-06,
+ "loss": 0.316,
+ "step": 1790
+ },
+ {
+ "epoch": 1.8499486125385407,
+ "grad_norm": 4.35211706161499,
+ "learning_rate": 8.596966413867823e-06,
+ "loss": 0.4373,
+ "step": 1800
+ },
+ {
+ "epoch": 1.8602261048304214,
+ "grad_norm": 3.061844825744629,
+ "learning_rate": 8.586132177681474e-06,
+ "loss": 0.2665,
+ "step": 1810
+ },
+ {
+ "epoch": 1.870503597122302,
+ "grad_norm": 3.6002986431121826,
+ "learning_rate": 8.575297941495125e-06,
+ "loss": 0.3807,
+ "step": 1820
+ },
+ {
+ "epoch": 1.880781089414183,
+ "grad_norm": 4.012722492218018,
+ "learning_rate": 8.564463705308776e-06,
+ "loss": 0.376,
+ "step": 1830
+ },
+ {
+ "epoch": 1.8910585817060637,
+ "grad_norm": 3.516463041305542,
+ "learning_rate": 8.553629469122428e-06,
+ "loss": 0.3497,
+ "step": 1840
+ },
+ {
+ "epoch": 1.9013360739979444,
+ "grad_norm": 4.711485385894775,
+ "learning_rate": 8.542795232936078e-06,
+ "loss": 0.2968,
+ "step": 1850
+ },
+ {
+ "epoch": 1.9116135662898253,
+ "grad_norm": 3.4084625244140625,
+ "learning_rate": 8.53196099674973e-06,
+ "loss": 0.3475,
+ "step": 1860
+ },
+ {
+ "epoch": 1.9218910585817062,
+ "grad_norm": 3.9419453144073486,
+ "learning_rate": 8.52112676056338e-06,
+ "loss": 0.342,
+ "step": 1870
+ },
+ {
+ "epoch": 1.9321685508735869,
+ "grad_norm": 3.4985804557800293,
+ "learning_rate": 8.510292524377032e-06,
+ "loss": 0.3805,
+ "step": 1880
+ },
+ {
+ "epoch": 1.9424460431654675,
+ "grad_norm": 4.257175445556641,
+ "learning_rate": 8.499458288190682e-06,
+ "loss": 0.3727,
+ "step": 1890
+ },
+ {
+ "epoch": 1.9527235354573484,
+ "grad_norm": 3.3339366912841797,
+ "learning_rate": 8.488624052004334e-06,
+ "loss": 0.3684,
+ "step": 1900
+ },
+ {
+ "epoch": 1.9630010277492291,
+ "grad_norm": 4.362495422363281,
+ "learning_rate": 8.477789815817984e-06,
+ "loss": 0.3216,
+ "step": 1910
+ },
+ {
+ "epoch": 1.9732785200411098,
+ "grad_norm": 4.4011549949646,
+ "learning_rate": 8.466955579631638e-06,
+ "loss": 0.3437,
+ "step": 1920
+ },
+ {
+ "epoch": 1.9835560123329907,
+ "grad_norm": 3.513015031814575,
+ "learning_rate": 8.456121343445288e-06,
+ "loss": 0.3621,
+ "step": 1930
+ },
+ {
+ "epoch": 1.9938335046248716,
+ "grad_norm": 3.66763973236084,
+ "learning_rate": 8.44528710725894e-06,
+ "loss": 0.4068,
+ "step": 1940
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.38473397493362427,
+ "eval_runtime": 2272.3326,
+ "eval_samples_per_second": 2.582,
+ "eval_steps_per_second": 0.081,
+ "eval_wer": 0.48120656465137523,
+ "step": 1946
+ },
+ {
+ "epoch": 2.004110996916752,
+ "grad_norm": 3.577178716659546,
+ "learning_rate": 8.43445287107259e-06,
+ "loss": 0.3205,
+ "step": 1950
+ },
+ {
+ "epoch": 2.014388489208633,
+ "grad_norm": 3.6927402019500732,
+ "learning_rate": 8.423618634886242e-06,
+ "loss": 0.3097,
+ "step": 1960
+ },
+ {
+ "epoch": 2.024665981500514,
+ "grad_norm": 3.644838333129883,
+ "learning_rate": 8.412784398699892e-06,
+ "loss": 0.3386,
+ "step": 1970
+ },
+ {
+ "epoch": 2.034943473792395,
+ "grad_norm": 3.595942497253418,
+ "learning_rate": 8.401950162513544e-06,
+ "loss": 0.2982,
+ "step": 1980
+ },
+ {
+ "epoch": 2.0452209660842753,
+ "grad_norm": 4.100467205047607,
+ "learning_rate": 8.391115926327194e-06,
+ "loss": 0.2846,
+ "step": 1990
+ },
+ {
+ "epoch": 2.055498458376156,
+ "grad_norm": 3.059053421020508,
+ "learning_rate": 8.380281690140846e-06,
+ "loss": 0.2793,
+ "step": 2000
+ },
+ {
+ "epoch": 2.065775950668037,
+ "grad_norm": 3.1486964225769043,
+ "learning_rate": 8.369447453954496e-06,
+ "loss": 0.3014,
+ "step": 2010
+ },
+ {
+ "epoch": 2.0760534429599176,
+ "grad_norm": 3.5525431632995605,
+ "learning_rate": 8.358613217768148e-06,
+ "loss": 0.3615,
+ "step": 2020
+ },
+ {
+ "epoch": 2.0863309352517985,
+ "grad_norm": 3.877607583999634,
+ "learning_rate": 8.3477789815818e-06,
+ "loss": 0.3924,
+ "step": 2030
+ },
+ {
+ "epoch": 2.0966084275436794,
+ "grad_norm": 3.1528847217559814,
+ "learning_rate": 8.33694474539545e-06,
+ "loss": 0.3216,
+ "step": 2040
+ },
+ {
+ "epoch": 2.1068859198355603,
+ "grad_norm": 2.3761839866638184,
+ "learning_rate": 8.326110509209102e-06,
+ "loss": 0.2952,
+ "step": 2050
+ },
+ {
+ "epoch": 2.1171634121274407,
+ "grad_norm": 2.5675883293151855,
+ "learning_rate": 8.315276273022752e-06,
+ "loss": 0.3921,
+ "step": 2060
+ },
+ {
+ "epoch": 2.1274409044193217,
+ "grad_norm": 3.158750057220459,
+ "learning_rate": 8.304442036836404e-06,
+ "loss": 0.2995,
+ "step": 2070
+ },
+ {
+ "epoch": 2.1377183967112026,
+ "grad_norm": 4.566508769989014,
+ "learning_rate": 8.293607800650055e-06,
+ "loss": 0.3218,
+ "step": 2080
+ },
+ {
+ "epoch": 2.1479958890030835,
+ "grad_norm": 3.650635242462158,
+ "learning_rate": 8.282773564463706e-06,
+ "loss": 0.3943,
+ "step": 2090
+ },
+ {
+ "epoch": 2.158273381294964,
+ "grad_norm": 3.3389713764190674,
+ "learning_rate": 8.271939328277357e-06,
+ "loss": 0.2847,
+ "step": 2100
+ },
+ {
+ "epoch": 2.168550873586845,
+ "grad_norm": 4.3921685218811035,
+ "learning_rate": 8.26110509209101e-06,
+ "loss": 0.3476,
+ "step": 2110
+ },
+ {
+ "epoch": 2.1788283658787257,
+ "grad_norm": 4.2259087562561035,
+ "learning_rate": 8.25027085590466e-06,
+ "loss": 0.305,
+ "step": 2120
+ },
+ {
+ "epoch": 2.189105858170606,
+ "grad_norm": 3.849501609802246,
+ "learning_rate": 8.239436619718311e-06,
+ "loss": 0.2916,
+ "step": 2130
+ },
+ {
+ "epoch": 2.199383350462487,
+ "grad_norm": 4.750916957855225,
+ "learning_rate": 8.228602383531961e-06,
+ "loss": 0.3295,
+ "step": 2140
+ },
+ {
+ "epoch": 2.209660842754368,
+ "grad_norm": 2.9767019748687744,
+ "learning_rate": 8.217768147345613e-06,
+ "loss": 0.3359,
+ "step": 2150
+ },
+ {
+ "epoch": 2.2199383350462485,
+ "grad_norm": 3.6668782234191895,
+ "learning_rate": 8.206933911159263e-06,
+ "loss": 0.2647,
+ "step": 2160
+ },
+ {
+ "epoch": 2.2302158273381294,
+ "grad_norm": 4.5765790939331055,
+ "learning_rate": 8.196099674972915e-06,
+ "loss": 0.332,
+ "step": 2170
+ },
+ {
+ "epoch": 2.2404933196300103,
+ "grad_norm": 3.7282257080078125,
+ "learning_rate": 8.185265438786565e-06,
+ "loss": 0.2857,
+ "step": 2180
+ },
+ {
+ "epoch": 2.250770811921891,
+ "grad_norm": 4.175727367401123,
+ "learning_rate": 8.174431202600217e-06,
+ "loss": 0.3395,
+ "step": 2190
+ },
+ {
+ "epoch": 2.2610483042137717,
+ "grad_norm": 4.007727146148682,
+ "learning_rate": 8.163596966413867e-06,
+ "loss": 0.3229,
+ "step": 2200
+ },
+ {
+ "epoch": 2.2713257965056526,
+ "grad_norm": 3.212737560272217,
+ "learning_rate": 8.15276273022752e-06,
+ "loss": 0.2824,
+ "step": 2210
+ },
+ {
+ "epoch": 2.2816032887975335,
+ "grad_norm": 2.9416749477386475,
+ "learning_rate": 8.141928494041171e-06,
+ "loss": 0.2931,
+ "step": 2220
+ },
+ {
+ "epoch": 2.2918807810894144,
+ "grad_norm": 3.415862560272217,
+ "learning_rate": 8.131094257854823e-06,
+ "loss": 0.2788,
+ "step": 2230
+ },
+ {
+ "epoch": 2.302158273381295,
+ "grad_norm": 3.2938742637634277,
+ "learning_rate": 8.120260021668473e-06,
+ "loss": 0.3279,
+ "step": 2240
+ },
+ {
+ "epoch": 2.3124357656731758,
+ "grad_norm": 3.2525196075439453,
+ "learning_rate": 8.109425785482125e-06,
+ "loss": 0.2773,
+ "step": 2250
+ },
+ {
+ "epoch": 2.3227132579650567,
+ "grad_norm": 3.1913652420043945,
+ "learning_rate": 8.098591549295775e-06,
+ "loss": 0.2507,
+ "step": 2260
+ },
+ {
+ "epoch": 2.332990750256937,
+ "grad_norm": 4.119409561157227,
+ "learning_rate": 8.087757313109427e-06,
+ "loss": 0.321,
+ "step": 2270
+ },
+ {
+ "epoch": 2.343268242548818,
+ "grad_norm": 2.8287854194641113,
+ "learning_rate": 8.076923076923077e-06,
+ "loss": 0.3036,
+ "step": 2280
+ },
+ {
+ "epoch": 2.353545734840699,
+ "grad_norm": 3.1350390911102295,
+ "learning_rate": 8.066088840736729e-06,
+ "loss": 0.2861,
+ "step": 2290
+ },
+ {
+ "epoch": 2.3638232271325794,
+ "grad_norm": 4.139387607574463,
+ "learning_rate": 8.05525460455038e-06,
+ "loss": 0.3996,
+ "step": 2300
+ },
+ {
+ "epoch": 2.3741007194244603,
+ "grad_norm": 4.706663131713867,
+ "learning_rate": 8.04442036836403e-06,
+ "loss": 0.3329,
+ "step": 2310
+ },
+ {
+ "epoch": 2.3843782117163412,
+ "grad_norm": 3.5738885402679443,
+ "learning_rate": 8.033586132177683e-06,
+ "loss": 0.2948,
+ "step": 2320
+ },
+ {
+ "epoch": 2.394655704008222,
+ "grad_norm": 4.643505573272705,
+ "learning_rate": 8.022751895991333e-06,
+ "loss": 0.3465,
+ "step": 2330
+ },
+ {
+ "epoch": 2.4049331963001026,
+ "grad_norm": 4.715351104736328,
+ "learning_rate": 8.011917659804985e-06,
+ "loss": 0.4091,
+ "step": 2340
+ },
+ {
+ "epoch": 2.4152106885919835,
+ "grad_norm": 4.204322814941406,
+ "learning_rate": 8.001083423618635e-06,
+ "loss": 0.2641,
+ "step": 2350
+ },
+ {
+ "epoch": 2.4254881808838644,
+ "grad_norm": 4.201789379119873,
+ "learning_rate": 7.990249187432287e-06,
+ "loss": 0.2787,
+ "step": 2360
+ },
+ {
+ "epoch": 2.4357656731757453,
+ "grad_norm": 2.986111879348755,
+ "learning_rate": 7.979414951245937e-06,
+ "loss": 0.2681,
+ "step": 2370
+ },
+ {
+ "epoch": 2.446043165467626,
+ "grad_norm": 3.5498292446136475,
+ "learning_rate": 7.968580715059589e-06,
+ "loss": 0.3293,
+ "step": 2380
+ },
+ {
+ "epoch": 2.4563206577595067,
+ "grad_norm": 3.319282293319702,
+ "learning_rate": 7.95774647887324e-06,
+ "loss": 0.3647,
+ "step": 2390
+ },
+ {
+ "epoch": 2.4665981500513876,
+ "grad_norm": 3.654517889022827,
+ "learning_rate": 7.946912242686892e-06,
+ "loss": 0.3494,
+ "step": 2400
+ },
+ {
+ "epoch": 2.476875642343268,
+ "grad_norm": 3.8484079837799072,
+ "learning_rate": 7.936078006500542e-06,
+ "loss": 0.2913,
+ "step": 2410
+ },
+ {
+ "epoch": 2.487153134635149,
+ "grad_norm": 3.6659047603607178,
+ "learning_rate": 7.925243770314194e-06,
+ "loss": 0.2696,
+ "step": 2420
+ },
+ {
+ "epoch": 2.49743062692703,
+ "grad_norm": 3.6242451667785645,
+ "learning_rate": 7.914409534127844e-06,
+ "loss": 0.2779,
+ "step": 2430
+ },
+ {
+ "epoch": 2.5077081192189103,
+ "grad_norm": 4.905155658721924,
+ "learning_rate": 7.903575297941496e-06,
+ "loss": 0.3085,
+ "step": 2440
+ },
+ {
+ "epoch": 2.5179856115107913,
+ "grad_norm": 3.8230979442596436,
+ "learning_rate": 7.892741061755146e-06,
+ "loss": 0.3179,
+ "step": 2450
+ },
+ {
+ "epoch": 2.528263103802672,
+ "grad_norm": 3.2261550426483154,
+ "learning_rate": 7.881906825568798e-06,
+ "loss": 0.2982,
+ "step": 2460
+ },
+ {
+ "epoch": 2.538540596094553,
+ "grad_norm": 3.4974489212036133,
+ "learning_rate": 7.871072589382448e-06,
+ "loss": 0.2456,
+ "step": 2470
+ },
+ {
+ "epoch": 2.548818088386434,
+ "grad_norm": 2.6326630115509033,
+ "learning_rate": 7.8602383531961e-06,
+ "loss": 0.2948,
+ "step": 2480
+ },
+ {
+ "epoch": 2.5590955806783144,
+ "grad_norm": 3.988820791244507,
+ "learning_rate": 7.849404117009752e-06,
+ "loss": 0.3942,
+ "step": 2490
+ },
+ {
+ "epoch": 2.5693730729701953,
+ "grad_norm": 4.203096389770508,
+ "learning_rate": 7.838569880823402e-06,
+ "loss": 0.3251,
+ "step": 2500
+ },
+ {
+ "epoch": 2.5796505652620763,
+ "grad_norm": 4.1997199058532715,
+ "learning_rate": 7.827735644637054e-06,
+ "loss": 0.2908,
+ "step": 2510
+ },
+ {
+ "epoch": 2.5899280575539567,
+ "grad_norm": 3.816044330596924,
+ "learning_rate": 7.816901408450704e-06,
+ "loss": 0.2962,
+ "step": 2520
+ },
+ {
+ "epoch": 2.6002055498458376,
+ "grad_norm": 3.998377561569214,
+ "learning_rate": 7.806067172264356e-06,
+ "loss": 0.4137,
+ "step": 2530
+ },
+ {
+ "epoch": 2.6104830421377185,
+ "grad_norm": 3.7878313064575195,
+ "learning_rate": 7.795232936078008e-06,
+ "loss": 0.2679,
+ "step": 2540
+ },
+ {
+ "epoch": 2.620760534429599,
+ "grad_norm": 4.914570331573486,
+ "learning_rate": 7.784398699891658e-06,
+ "loss": 0.2976,
+ "step": 2550
+ },
+ {
+ "epoch": 2.63103802672148,
+ "grad_norm": 4.354416370391846,
+ "learning_rate": 7.77356446370531e-06,
+ "loss": 0.3577,
+ "step": 2560
+ },
+ {
+ "epoch": 2.641315519013361,
+ "grad_norm": 3.3747782707214355,
+ "learning_rate": 7.762730227518962e-06,
+ "loss": 0.2868,
+ "step": 2570
+ },
+ {
+ "epoch": 2.6515930113052413,
+ "grad_norm": 3.854323148727417,
+ "learning_rate": 7.751895991332612e-06,
+ "loss": 0.3262,
+ "step": 2580
+ },
+ {
+ "epoch": 2.661870503597122,
+ "grad_norm": 3.3421154022216797,
+ "learning_rate": 7.741061755146264e-06,
+ "loss": 0.2576,
+ "step": 2590
+ },
+ {
+ "epoch": 2.672147995889003,
+ "grad_norm": 3.1543657779693604,
+ "learning_rate": 7.730227518959914e-06,
+ "loss": 0.2698,
+ "step": 2600
+ },
+ {
+ "epoch": 2.682425488180884,
+ "grad_norm": 3.4310245513916016,
+ "learning_rate": 7.719393282773566e-06,
+ "loss": 0.338,
+ "step": 2610
+ },
+ {
+ "epoch": 2.692702980472765,
+ "grad_norm": 4.017239093780518,
+ "learning_rate": 7.708559046587216e-06,
+ "loss": 0.236,
+ "step": 2620
+ },
+ {
+ "epoch": 2.7029804727646454,
+ "grad_norm": 4.115433692932129,
+ "learning_rate": 7.697724810400868e-06,
+ "loss": 0.317,
+ "step": 2630
+ },
+ {
+ "epoch": 2.7132579650565263,
+ "grad_norm": 3.788522243499756,
+ "learning_rate": 7.686890574214518e-06,
+ "loss": 0.357,
+ "step": 2640
+ },
+ {
+ "epoch": 2.723535457348407,
+ "grad_norm": 4.260583400726318,
+ "learning_rate": 7.67605633802817e-06,
+ "loss": 0.2541,
+ "step": 2650
+ },
+ {
+ "epoch": 2.7338129496402876,
+ "grad_norm": 4.43798828125,
+ "learning_rate": 7.66522210184182e-06,
+ "loss": 0.3086,
+ "step": 2660
+ },
+ {
+ "epoch": 2.7440904419321686,
+ "grad_norm": 3.0351390838623047,
+ "learning_rate": 7.654387865655472e-06,
+ "loss": 0.2927,
+ "step": 2670
+ },
+ {
+ "epoch": 2.7543679342240495,
+ "grad_norm": 3.693898916244507,
+ "learning_rate": 7.643553629469122e-06,
+ "loss": 0.3282,
+ "step": 2680
+ },
+ {
+ "epoch": 2.76464542651593,
+ "grad_norm": 3.6460795402526855,
+ "learning_rate": 7.632719393282774e-06,
+ "loss": 0.2946,
+ "step": 2690
+ },
+ {
+ "epoch": 2.774922918807811,
+ "grad_norm": 3.9163734912872314,
+ "learning_rate": 7.6218851570964255e-06,
+ "loss": 0.2634,
+ "step": 2700
+ },
+ {
+ "epoch": 2.7852004110996917,
+ "grad_norm": 2.770110607147217,
+ "learning_rate": 7.611050920910077e-06,
+ "loss": 0.2571,
+ "step": 2710
+ },
+ {
+ "epoch": 2.795477903391572,
+ "grad_norm": 3.0240771770477295,
+ "learning_rate": 7.600216684723728e-06,
+ "loss": 0.2387,
+ "step": 2720
+ },
+ {
+ "epoch": 2.805755395683453,
+ "grad_norm": 4.414377689361572,
+ "learning_rate": 7.589382448537379e-06,
+ "loss": 0.2457,
+ "step": 2730
+ },
+ {
+ "epoch": 2.816032887975334,
+ "grad_norm": 3.280635118484497,
+ "learning_rate": 7.57854821235103e-06,
+ "loss": 0.3292,
+ "step": 2740
+ },
+ {
+ "epoch": 2.826310380267215,
+ "grad_norm": 3.114398717880249,
+ "learning_rate": 7.567713976164681e-06,
+ "loss": 0.3323,
+ "step": 2750
+ },
+ {
+ "epoch": 2.836587872559096,
+ "grad_norm": 3.8051183223724365,
+ "learning_rate": 7.556879739978332e-06,
+ "loss": 0.3752,
+ "step": 2760
+ },
+ {
+ "epoch": 2.8468653648509763,
+ "grad_norm": 4.505266189575195,
+ "learning_rate": 7.546045503791983e-06,
+ "loss": 0.3081,
+ "step": 2770
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 3.9669296741485596,
+ "learning_rate": 7.535211267605634e-06,
+ "loss": 0.2627,
+ "step": 2780
+ },
+ {
+ "epoch": 2.867420349434738,
+ "grad_norm": 3.6369543075561523,
+ "learning_rate": 7.524377031419285e-06,
+ "loss": 0.311,
+ "step": 2790
+ },
+ {
+ "epoch": 2.8776978417266186,
+ "grad_norm": 4.124249458312988,
+ "learning_rate": 7.513542795232936e-06,
+ "loss": 0.3401,
+ "step": 2800
+ },
+ {
+ "epoch": 2.8879753340184995,
+ "grad_norm": 3.646584987640381,
+ "learning_rate": 7.502708559046587e-06,
+ "loss": 0.3055,
+ "step": 2810
+ },
+ {
+ "epoch": 2.8982528263103804,
+ "grad_norm": 3.3459272384643555,
+ "learning_rate": 7.491874322860238e-06,
+ "loss": 0.2818,
+ "step": 2820
+ },
+ {
+ "epoch": 2.908530318602261,
+ "grad_norm": 5.0626630783081055,
+ "learning_rate": 7.48104008667389e-06,
+ "loss": 0.3489,
+ "step": 2830
+ },
+ {
+ "epoch": 2.9188078108941418,
+ "grad_norm": 2.9129016399383545,
+ "learning_rate": 7.470205850487541e-06,
+ "loss": 0.2889,
+ "step": 2840
+ },
+ {
+ "epoch": 2.9290853031860227,
+ "grad_norm": 4.322904109954834,
+ "learning_rate": 7.459371614301192e-06,
+ "loss": 0.3367,
+ "step": 2850
+ },
+ {
+ "epoch": 2.939362795477903,
+ "grad_norm": 3.556774139404297,
+ "learning_rate": 7.448537378114844e-06,
+ "loss": 0.3137,
+ "step": 2860
+ },
+ {
+ "epoch": 2.949640287769784,
+ "grad_norm": 2.786842107772827,
+ "learning_rate": 7.437703141928495e-06,
+ "loss": 0.3469,
+ "step": 2870
+ },
+ {
+ "epoch": 2.959917780061665,
+ "grad_norm": 3.740945339202881,
+ "learning_rate": 7.426868905742146e-06,
+ "loss": 0.2975,
+ "step": 2880
+ },
+ {
+ "epoch": 2.970195272353546,
+ "grad_norm": 3.770343780517578,
+ "learning_rate": 7.416034669555797e-06,
+ "loss": 0.3193,
+ "step": 2890
+ },
+ {
+ "epoch": 2.9804727646454268,
+ "grad_norm": 2.6630139350891113,
+ "learning_rate": 7.405200433369448e-06,
+ "loss": 0.2735,
+ "step": 2900
+ },
+ {
+ "epoch": 2.9907502569373072,
+ "grad_norm": 3.7929859161376953,
+ "learning_rate": 7.3943661971831e-06,
+ "loss": 0.3617,
+ "step": 2910
+ },
+ {
+ "epoch": 3.0,
+ "eval_loss": 0.35847166180610657,
+ "eval_runtime": 2239.9414,
+ "eval_samples_per_second": 2.62,
+ "eval_steps_per_second": 0.082,
+ "eval_wer": 0.43257528793761624,
+ "step": 2919
+ },
+ {
+ "epoch": 3.001027749229188,
+ "grad_norm": 4.5707197189331055,
+ "learning_rate": 7.383531960996751e-06,
+ "loss": 0.265,
+ "step": 2920
+ },
+ {
+ "epoch": 3.011305241521069,
+ "grad_norm": 3.3364627361297607,
+ "learning_rate": 7.372697724810402e-06,
+ "loss": 0.3148,
+ "step": 2930
+ },
+ {
+ "epoch": 3.0215827338129495,
+ "grad_norm": 3.253480911254883,
+ "learning_rate": 7.361863488624053e-06,
+ "loss": 0.2638,
+ "step": 2940
+ },
+ {
+ "epoch": 3.0318602261048304,
+ "grad_norm": 3.9313085079193115,
+ "learning_rate": 7.351029252437704e-06,
+ "loss": 0.2463,
+ "step": 2950
+ },
+ {
+ "epoch": 3.0421377183967113,
+ "grad_norm": 3.0780415534973145,
+ "learning_rate": 7.340195016251355e-06,
+ "loss": 0.2363,
+ "step": 2960
+ },
+ {
+ "epoch": 3.052415210688592,
+ "grad_norm": 3.63417649269104,
+ "learning_rate": 7.329360780065006e-06,
+ "loss": 0.2946,
+ "step": 2970
+ },
+ {
+ "epoch": 3.0626927029804727,
+ "grad_norm": 2.868053436279297,
+ "learning_rate": 7.318526543878657e-06,
+ "loss": 0.2659,
+ "step": 2980
+ },
+ {
+ "epoch": 3.0729701952723536,
+ "grad_norm": 3.3707587718963623,
+ "learning_rate": 7.307692307692308e-06,
+ "loss": 0.2843,
+ "step": 2990
+ },
+ {
+ "epoch": 3.0832476875642345,
+ "grad_norm": 4.61034631729126,
+ "learning_rate": 7.296858071505959e-06,
+ "loss": 0.3422,
+ "step": 3000
+ },
+ {
+ "epoch": 3.093525179856115,
+ "grad_norm": 3.640913963317871,
+ "learning_rate": 7.286023835319611e-06,
+ "loss": 0.2587,
+ "step": 3010
+ },
+ {
+ "epoch": 3.103802672147996,
+ "grad_norm": 3.330796957015991,
+ "learning_rate": 7.275189599133262e-06,
+ "loss": 0.2165,
+ "step": 3020
+ },
+ {
+ "epoch": 3.114080164439877,
+ "grad_norm": 4.815010070800781,
+ "learning_rate": 7.264355362946913e-06,
+ "loss": 0.2619,
+ "step": 3030
+ },
+ {
+ "epoch": 3.1243576567317572,
+ "grad_norm": 4.485762596130371,
+ "learning_rate": 7.253521126760564e-06,
+ "loss": 0.2913,
+ "step": 3040
+ },
+ {
+ "epoch": 3.134635149023638,
+ "grad_norm": 3.712749719619751,
+ "learning_rate": 7.242686890574215e-06,
+ "loss": 0.3216,
+ "step": 3050
+ },
+ {
+ "epoch": 3.144912641315519,
+ "grad_norm": 3.5483086109161377,
+ "learning_rate": 7.231852654387866e-06,
+ "loss": 0.2683,
+ "step": 3060
+ },
+ {
+ "epoch": 3.1551901336074,
+ "grad_norm": 4.34190034866333,
+ "learning_rate": 7.221018418201517e-06,
+ "loss": 0.2582,
+ "step": 3070
+ },
+ {
+ "epoch": 3.1654676258992804,
+ "grad_norm": 3.329151153564453,
+ "learning_rate": 7.210184182015168e-06,
+ "loss": 0.3271,
+ "step": 3080
+ },
+ {
+ "epoch": 3.1757451181911613,
+ "grad_norm": 3.318019151687622,
+ "learning_rate": 7.199349945828819e-06,
+ "loss": 0.2994,
+ "step": 3090
+ },
+ {
+ "epoch": 3.1860226104830422,
+ "grad_norm": 3.2192223072052,
+ "learning_rate": 7.188515709642471e-06,
+ "loss": 0.2656,
+ "step": 3100
+ },
+ {
+ "epoch": 3.1963001027749227,
+ "grad_norm": 3.161679267883301,
+ "learning_rate": 7.177681473456122e-06,
+ "loss": 0.2975,
+ "step": 3110
+ },
+ {
+ "epoch": 3.2065775950668036,
+ "grad_norm": 2.8480417728424072,
+ "learning_rate": 7.166847237269773e-06,
+ "loss": 0.3112,
+ "step": 3120
+ },
+ {
+ "epoch": 3.2168550873586845,
+ "grad_norm": 3.912022829055786,
+ "learning_rate": 7.156013001083424e-06,
+ "loss": 0.2526,
+ "step": 3130
+ },
+ {
+ "epoch": 3.2271325796505654,
+ "grad_norm": 3.466198682785034,
+ "learning_rate": 7.145178764897075e-06,
+ "loss": 0.2659,
+ "step": 3140
+ },
+ {
+ "epoch": 3.237410071942446,
+ "grad_norm": 4.21922492980957,
+ "learning_rate": 7.134344528710726e-06,
+ "loss": 0.2879,
+ "step": 3150
+ },
+ {
+ "epoch": 3.247687564234327,
+ "grad_norm": 3.762397527694702,
+ "learning_rate": 7.123510292524377e-06,
+ "loss": 0.2626,
+ "step": 3160
+ },
+ {
+ "epoch": 3.2579650565262077,
+ "grad_norm": 3.112666606903076,
+ "learning_rate": 7.112676056338029e-06,
+ "loss": 0.2293,
+ "step": 3170
+ },
+ {
+ "epoch": 3.2682425488180886,
+ "grad_norm": 4.5521650314331055,
+ "learning_rate": 7.101841820151681e-06,
+ "loss": 0.2594,
+ "step": 3180
+ },
+ {
+ "epoch": 3.278520041109969,
+ "grad_norm": 3.388822555541992,
+ "learning_rate": 7.091007583965332e-06,
+ "loss": 0.2543,
+ "step": 3190
+ },
+ {
+ "epoch": 3.28879753340185,
+ "grad_norm": 2.899162530899048,
+ "learning_rate": 7.080173347778983e-06,
+ "loss": 0.258,
+ "step": 3200
+ },
+ {
+ "epoch": 3.299075025693731,
+ "grad_norm": 2.9126293659210205,
+ "learning_rate": 7.069339111592634e-06,
+ "loss": 0.2437,
+ "step": 3210
+ },
+ {
+ "epoch": 3.3093525179856114,
+ "grad_norm": 4.10038423538208,
+ "learning_rate": 7.058504875406285e-06,
+ "loss": 0.2757,
+ "step": 3220
+ },
+ {
+ "epoch": 3.3196300102774923,
+ "grad_norm": 2.4944069385528564,
+ "learning_rate": 7.047670639219936e-06,
+ "loss": 0.2908,
+ "step": 3230
+ },
+ {
+ "epoch": 3.329907502569373,
+ "grad_norm": 3.2674710750579834,
+ "learning_rate": 7.036836403033587e-06,
+ "loss": 0.269,
+ "step": 3240
+ },
+ {
+ "epoch": 3.3401849948612536,
+ "grad_norm": 3.861447334289551,
+ "learning_rate": 7.026002166847238e-06,
+ "loss": 0.3253,
+ "step": 3250
+ },
+ {
+ "epoch": 3.3504624871531345,
+ "grad_norm": 4.241086483001709,
+ "learning_rate": 7.015167930660889e-06,
+ "loss": 0.3116,
+ "step": 3260
+ },
+ {
+ "epoch": 3.3607399794450155,
+ "grad_norm": 3.33585262298584,
+ "learning_rate": 7.00433369447454e-06,
+ "loss": 0.2506,
+ "step": 3270
+ },
+ {
+ "epoch": 3.3710174717368964,
+ "grad_norm": 4.410299777984619,
+ "learning_rate": 6.993499458288191e-06,
+ "loss": 0.2116,
+ "step": 3280
+ },
+ {
+ "epoch": 3.381294964028777,
+ "grad_norm": 2.706984281539917,
+ "learning_rate": 6.982665222101842e-06,
+ "loss": 0.2611,
+ "step": 3290
+ },
+ {
+ "epoch": 3.3915724563206577,
+ "grad_norm": 3.3027334213256836,
+ "learning_rate": 6.9718309859154935e-06,
+ "loss": 0.2962,
+ "step": 3300
+ },
+ {
+ "epoch": 3.4018499486125386,
+ "grad_norm": 4.3007354736328125,
+ "learning_rate": 6.9609967497291445e-06,
+ "loss": 0.245,
+ "step": 3310
+ },
+ {
+ "epoch": 3.4121274409044196,
+ "grad_norm": 4.280706405639648,
+ "learning_rate": 6.9501625135427955e-06,
+ "loss": 0.3149,
+ "step": 3320
+ },
+ {
+ "epoch": 3.4224049331963,
+ "grad_norm": 3.1681008338928223,
+ "learning_rate": 6.939328277356447e-06,
+ "loss": 0.2401,
+ "step": 3330
+ },
+ {
+ "epoch": 3.432682425488181,
+ "grad_norm": 4.223625183105469,
+ "learning_rate": 6.928494041170098e-06,
+ "loss": 0.2456,
+ "step": 3340
+ },
+ {
+ "epoch": 3.442959917780062,
+ "grad_norm": 3.4283297061920166,
+ "learning_rate": 6.917659804983749e-06,
+ "loss": 0.2488,
+ "step": 3350
+ },
+ {
+ "epoch": 3.4532374100719423,
+ "grad_norm": 4.3228631019592285,
+ "learning_rate": 6.9068255687974e-06,
+ "loss": 0.2837,
+ "step": 3360
+ },
+ {
+ "epoch": 3.463514902363823,
+ "grad_norm": 2.614888906478882,
+ "learning_rate": 6.895991332611051e-06,
+ "loss": 0.2818,
+ "step": 3370
+ },
+ {
+ "epoch": 3.473792394655704,
+ "grad_norm": 4.487213611602783,
+ "learning_rate": 6.885157096424703e-06,
+ "loss": 0.236,
+ "step": 3380
+ },
+ {
+ "epoch": 3.4840698869475846,
+ "grad_norm": 2.5529470443725586,
+ "learning_rate": 6.874322860238354e-06,
+ "loss": 0.3254,
+ "step": 3390
+ },
+ {
+ "epoch": 3.4943473792394655,
+ "grad_norm": 3.5503623485565186,
+ "learning_rate": 6.863488624052005e-06,
+ "loss": 0.1991,
+ "step": 3400
+ },
+ {
+ "epoch": 3.5046248715313464,
+ "grad_norm": 3.0319414138793945,
+ "learning_rate": 6.852654387865656e-06,
+ "loss": 0.2483,
+ "step": 3410
+ },
+ {
+ "epoch": 3.5149023638232273,
+ "grad_norm": 3.8595309257507324,
+ "learning_rate": 6.841820151679307e-06,
+ "loss": 0.2865,
+ "step": 3420
+ },
+ {
+ "epoch": 3.5251798561151078,
+ "grad_norm": 4.65828800201416,
+ "learning_rate": 6.830985915492958e-06,
+ "loss": 0.2457,
+ "step": 3430
+ },
+ {
+ "epoch": 3.5354573484069887,
+ "grad_norm": 4.1883015632629395,
+ "learning_rate": 6.820151679306609e-06,
+ "loss": 0.2911,
+ "step": 3440
+ },
+ {
+ "epoch": 3.5457348406988696,
+ "grad_norm": 2.8787269592285156,
+ "learning_rate": 6.80931744312026e-06,
+ "loss": 0.23,
+ "step": 3450
+ },
+ {
+ "epoch": 3.5560123329907505,
+ "grad_norm": 3.850490093231201,
+ "learning_rate": 6.798483206933911e-06,
+ "loss": 0.2203,
+ "step": 3460
+ },
+ {
+ "epoch": 3.566289825282631,
+ "grad_norm": 4.513101577758789,
+ "learning_rate": 6.787648970747562e-06,
+ "loss": 0.2693,
+ "step": 3470
+ },
+ {
+ "epoch": 3.576567317574512,
+ "grad_norm": 3.458218574523926,
+ "learning_rate": 6.776814734561215e-06,
+ "loss": 0.2462,
+ "step": 3480
+ },
+ {
+ "epoch": 3.5868448098663928,
+ "grad_norm": 2.8902974128723145,
+ "learning_rate": 6.765980498374866e-06,
+ "loss": 0.2759,
+ "step": 3490
+ },
+ {
+ "epoch": 3.597122302158273,
+ "grad_norm": 4.30615234375,
+ "learning_rate": 6.755146262188517e-06,
+ "loss": 0.3263,
+ "step": 3500
+ },
+ {
+ "epoch": 3.607399794450154,
+ "grad_norm": 2.434847116470337,
+ "learning_rate": 6.744312026002168e-06,
+ "loss": 0.2939,
+ "step": 3510
+ },
+ {
+ "epoch": 3.617677286742035,
+ "grad_norm": 3.484827756881714,
+ "learning_rate": 6.733477789815819e-06,
+ "loss": 0.2583,
+ "step": 3520
+ },
+ {
+ "epoch": 3.6279547790339155,
+ "grad_norm": 3.158450126647949,
+ "learning_rate": 6.72264355362947e-06,
+ "loss": 0.2121,
+ "step": 3530
+ },
+ {
+ "epoch": 3.6382322713257964,
+ "grad_norm": 3.7957651615142822,
+ "learning_rate": 6.711809317443121e-06,
+ "loss": 0.2668,
+ "step": 3540
+ },
+ {
+ "epoch": 3.6485097636176773,
+ "grad_norm": 3.237265110015869,
+ "learning_rate": 6.700975081256772e-06,
+ "loss": 0.217,
+ "step": 3550
+ },
+ {
+ "epoch": 3.6587872559095582,
+ "grad_norm": 3.7385904788970947,
+ "learning_rate": 6.690140845070423e-06,
+ "loss": 0.2668,
+ "step": 3560
+ },
+ {
+ "epoch": 3.6690647482014387,
+ "grad_norm": 3.3100502490997314,
+ "learning_rate": 6.6793066088840745e-06,
+ "loss": 0.279,
+ "step": 3570
+ },
+ {
+ "epoch": 3.6793422404933196,
+ "grad_norm": 2.7477927207946777,
+ "learning_rate": 6.6684723726977255e-06,
+ "loss": 0.3294,
+ "step": 3580
+ },
+ {
+ "epoch": 3.6896197327852005,
+ "grad_norm": 3.710700750350952,
+ "learning_rate": 6.6576381365113765e-06,
+ "loss": 0.3125,
+ "step": 3590
+ },
+ {
+ "epoch": 3.6998972250770814,
+ "grad_norm": 2.787705898284912,
+ "learning_rate": 6.6468039003250275e-06,
+ "loss": 0.2507,
+ "step": 3600
+ },
+ {
+ "epoch": 3.710174717368962,
+ "grad_norm": 2.693133592605591,
+ "learning_rate": 6.6359696641386785e-06,
+ "loss": 0.2254,
+ "step": 3610
+ },
+ {
+ "epoch": 3.720452209660843,
+ "grad_norm": 3.121232509613037,
+ "learning_rate": 6.6251354279523295e-06,
+ "loss": 0.2866,
+ "step": 3620
+ },
+ {
+ "epoch": 3.7307297019527237,
+ "grad_norm": 7.0685224533081055,
+ "learning_rate": 6.6143011917659805e-06,
+ "loss": 0.2865,
+ "step": 3630
+ },
+ {
+ "epoch": 3.741007194244604,
+ "grad_norm": 3.528265953063965,
+ "learning_rate": 6.603466955579632e-06,
+ "loss": 0.2963,
+ "step": 3640
+ },
+ {
+ "epoch": 3.751284686536485,
+ "grad_norm": 2.5636558532714844,
+ "learning_rate": 6.592632719393284e-06,
+ "loss": 0.2784,
+ "step": 3650
+ },
+ {
+ "epoch": 3.761562178828366,
+ "grad_norm": 3.7930173873901367,
+ "learning_rate": 6.581798483206935e-06,
+ "loss": 0.2101,
+ "step": 3660
+ },
+ {
+ "epoch": 3.7718396711202464,
+ "grad_norm": 2.674428939819336,
+ "learning_rate": 6.570964247020586e-06,
+ "loss": 0.2337,
+ "step": 3670
+ },
+ {
+ "epoch": 3.7821171634121273,
+ "grad_norm": 3.629955768585205,
+ "learning_rate": 6.560130010834237e-06,
+ "loss": 0.2849,
+ "step": 3680
+ },
+ {
+ "epoch": 3.7923946557040082,
+ "grad_norm": 2.831402063369751,
+ "learning_rate": 6.549295774647888e-06,
+ "loss": 0.248,
+ "step": 3690
+ },
+ {
+ "epoch": 3.802672147995889,
+ "grad_norm": 3.889512300491333,
+ "learning_rate": 6.538461538461539e-06,
+ "loss": 0.367,
+ "step": 3700
+ },
+ {
+ "epoch": 3.81294964028777,
+ "grad_norm": 3.9846222400665283,
+ "learning_rate": 6.52762730227519e-06,
+ "loss": 0.3112,
+ "step": 3710
+ },
+ {
+ "epoch": 3.8232271325796505,
+ "grad_norm": 3.839672327041626,
+ "learning_rate": 6.516793066088841e-06,
+ "loss": 0.3242,
+ "step": 3720
+ },
+ {
+ "epoch": 3.8335046248715314,
+ "grad_norm": 3.0426747798919678,
+ "learning_rate": 6.505958829902492e-06,
+ "loss": 0.2678,
+ "step": 3730
+ },
+ {
+ "epoch": 3.8437821171634123,
+ "grad_norm": 3.6387476921081543,
+ "learning_rate": 6.495124593716143e-06,
+ "loss": 0.2387,
+ "step": 3740
+ },
+ {
+ "epoch": 3.854059609455293,
+ "grad_norm": 4.004968166351318,
+ "learning_rate": 6.484290357529794e-06,
+ "loss": 0.2768,
+ "step": 3750
+ },
+ {
+ "epoch": 3.8643371017471737,
+ "grad_norm": 3.176748752593994,
+ "learning_rate": 6.473456121343445e-06,
+ "loss": 0.2588,
+ "step": 3760
+ },
+ {
+ "epoch": 3.8746145940390546,
+ "grad_norm": 4.246847152709961,
+ "learning_rate": 6.462621885157097e-06,
+ "loss": 0.3123,
+ "step": 3770
+ },
+ {
+ "epoch": 3.884892086330935,
+ "grad_norm": 2.7734780311584473,
+ "learning_rate": 6.451787648970748e-06,
+ "loss": 0.2182,
+ "step": 3780
+ },
+ {
+ "epoch": 3.895169578622816,
+ "grad_norm": 2.851536512374878,
+ "learning_rate": 6.440953412784399e-06,
+ "loss": 0.28,
+ "step": 3790
+ },
+ {
+ "epoch": 3.905447070914697,
+ "grad_norm": 4.055552005767822,
+ "learning_rate": 6.430119176598051e-06,
+ "loss": 0.2843,
+ "step": 3800
+ },
+ {
+ "epoch": 3.9157245632065774,
+ "grad_norm": 3.3340227603912354,
+ "learning_rate": 6.419284940411702e-06,
+ "loss": 0.1989,
+ "step": 3810
+ },
+ {
+ "epoch": 3.9260020554984583,
+ "grad_norm": 3.8631956577301025,
+ "learning_rate": 6.408450704225353e-06,
+ "loss": 0.3183,
+ "step": 3820
+ },
+ {
+ "epoch": 3.936279547790339,
+ "grad_norm": 3.597165107727051,
+ "learning_rate": 6.397616468039004e-06,
+ "loss": 0.2707,
+ "step": 3830
+ },
+ {
+ "epoch": 3.94655704008222,
+ "grad_norm": 4.144725799560547,
+ "learning_rate": 6.386782231852655e-06,
+ "loss": 0.2473,
+ "step": 3840
+ },
+ {
+ "epoch": 3.956834532374101,
+ "grad_norm": 3.035017967224121,
+ "learning_rate": 6.3759479956663066e-06,
+ "loss": 0.2145,
+ "step": 3850
+ },
+ {
+ "epoch": 3.9671120246659815,
+ "grad_norm": 4.453218936920166,
+ "learning_rate": 6.3651137594799575e-06,
+ "loss": 0.2232,
+ "step": 3860
+ },
+ {
+ "epoch": 3.9773895169578624,
+ "grad_norm": 3.9047586917877197,
+ "learning_rate": 6.3542795232936085e-06,
+ "loss": 0.2968,
+ "step": 3870
+ },
+ {
+ "epoch": 3.9876670092497433,
+ "grad_norm": 3.47819447517395,
+ "learning_rate": 6.3434452871072595e-06,
+ "loss": 0.2044,
+ "step": 3880
+ },
+ {
+ "epoch": 3.9979445015416237,
+ "grad_norm": 2.6448206901550293,
+ "learning_rate": 6.3326110509209105e-06,
+ "loss": 0.3144,
+ "step": 3890
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 0.34360429644584656,
+ "eval_runtime": 1328.4973,
+ "eval_samples_per_second": 4.417,
+ "eval_steps_per_second": 0.139,
+ "eval_wer": 0.35935919605859895,
+ "step": 3892
+ },
+ {
+ "epoch": 4.008221993833504,
+ "grad_norm": 2.355408191680908,
+ "learning_rate": 6.3217768147345615e-06,
+ "loss": 0.2327,
+ "step": 3900
+ },
+ {
+ "epoch": 4.0184994861253855,
+ "grad_norm": 5.050731658935547,
+ "learning_rate": 6.3109425785482125e-06,
+ "loss": 0.2291,
+ "step": 3910
+ },
+ {
+ "epoch": 4.028776978417266,
+ "grad_norm": 3.9454739093780518,
+ "learning_rate": 6.3001083423618635e-06,
+ "loss": 0.3174,
+ "step": 3920
+ },
+ {
+ "epoch": 4.039054470709147,
+ "grad_norm": 3.784989356994629,
+ "learning_rate": 6.2892741061755145e-06,
+ "loss": 0.2269,
+ "step": 3930
+ },
+ {
+ "epoch": 4.049331963001028,
+ "grad_norm": 2.7388339042663574,
+ "learning_rate": 6.2784398699891655e-06,
+ "loss": 0.2213,
+ "step": 3940
+ },
+ {
+ "epoch": 4.059609455292908,
+ "grad_norm": 3.768240451812744,
+ "learning_rate": 6.267605633802818e-06,
+ "loss": 0.2409,
+ "step": 3950
+ },
+ {
+ "epoch": 4.06988694758479,
+ "grad_norm": 4.0260233879089355,
+ "learning_rate": 6.256771397616469e-06,
+ "loss": 0.1907,
+ "step": 3960
+ },
+ {
+ "epoch": 4.08016443987667,
+ "grad_norm": 3.6307528018951416,
+ "learning_rate": 6.24593716143012e-06,
+ "loss": 0.2457,
+ "step": 3970
+ },
+ {
+ "epoch": 4.090441932168551,
+ "grad_norm": 3.1473422050476074,
+ "learning_rate": 6.235102925243771e-06,
+ "loss": 0.2211,
+ "step": 3980
+ },
+ {
+ "epoch": 4.100719424460432,
+ "grad_norm": 3.6961750984191895,
+ "learning_rate": 6.224268689057422e-06,
+ "loss": 0.2347,
+ "step": 3990
+ },
+ {
+ "epoch": 4.110996916752312,
+ "grad_norm": 2.0914347171783447,
+ "learning_rate": 6.213434452871073e-06,
+ "loss": 0.289,
+ "step": 4000
+ },
+ {
+ "epoch": 4.121274409044193,
+ "grad_norm": 3.504941463470459,
+ "learning_rate": 6.202600216684724e-06,
+ "loss": 0.2985,
+ "step": 4010
+ },
+ {
+ "epoch": 4.131551901336074,
+ "grad_norm": 4.29639196395874,
+ "learning_rate": 6.191765980498375e-06,
+ "loss": 0.2286,
+ "step": 4020
+ },
+ {
+ "epoch": 4.141829393627955,
+ "grad_norm": 2.81628155708313,
+ "learning_rate": 6.180931744312026e-06,
+ "loss": 0.2581,
+ "step": 4030
+ },
+ {
+ "epoch": 4.152106885919835,
+ "grad_norm": 3.66825008392334,
+ "learning_rate": 6.170097508125678e-06,
+ "loss": 0.1727,
+ "step": 4040
+ },
+ {
+ "epoch": 4.1623843782117165,
+ "grad_norm": 4.361361026763916,
+ "learning_rate": 6.159263271939329e-06,
+ "loss": 0.2076,
+ "step": 4050
+ },
+ {
+ "epoch": 4.172661870503597,
+ "grad_norm": 3.2681326866149902,
+ "learning_rate": 6.14842903575298e-06,
+ "loss": 0.2044,
+ "step": 4060
+ },
+ {
+ "epoch": 4.182939362795478,
+ "grad_norm": 3.082566499710083,
+ "learning_rate": 6.137594799566631e-06,
+ "loss": 0.2175,
+ "step": 4070
+ },
+ {
+ "epoch": 4.193216855087359,
+ "grad_norm": 4.153407573699951,
+ "learning_rate": 6.126760563380282e-06,
+ "loss": 0.2607,
+ "step": 4080
+ },
+ {
+ "epoch": 4.203494347379239,
+ "grad_norm": 3.0195565223693848,
+ "learning_rate": 6.115926327193933e-06,
+ "loss": 0.2601,
+ "step": 4090
+ },
+ {
+ "epoch": 4.213771839671121,
+ "grad_norm": 3.5535833835601807,
+ "learning_rate": 6.105092091007584e-06,
+ "loss": 0.2432,
+ "step": 4100
+ },
+ {
+ "epoch": 4.224049331963001,
+ "grad_norm": 3.4361705780029297,
+ "learning_rate": 6.094257854821236e-06,
+ "loss": 0.1582,
+ "step": 4110
+ },
+ {
+ "epoch": 4.2343268242548815,
+ "grad_norm": 4.60875940322876,
+ "learning_rate": 6.083423618634888e-06,
+ "loss": 0.2713,
+ "step": 4120
+ },
+ {
+ "epoch": 4.244604316546763,
+ "grad_norm": 2.7858967781066895,
+ "learning_rate": 6.072589382448539e-06,
+ "loss": 0.2475,
+ "step": 4130
+ },
+ {
+ "epoch": 4.254881808838643,
+ "grad_norm": 3.1303088665008545,
+ "learning_rate": 6.06175514626219e-06,
+ "loss": 0.2682,
+ "step": 4140
+ },
+ {
+ "epoch": 4.265159301130524,
+ "grad_norm": 4.026236057281494,
+ "learning_rate": 6.0509209100758406e-06,
+ "loss": 0.2533,
+ "step": 4150
+ },
+ {
+ "epoch": 4.275436793422405,
+ "grad_norm": 2.858705520629883,
+ "learning_rate": 6.0400866738894916e-06,
+ "loss": 0.255,
+ "step": 4160
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 2.965857744216919,
+ "learning_rate": 6.0292524377031426e-06,
+ "loss": 0.2476,
+ "step": 4170
+ },
+ {
+ "epoch": 4.295991778006167,
+ "grad_norm": 4.407792568206787,
+ "learning_rate": 6.0184182015167935e-06,
+ "loss": 0.2316,
+ "step": 4180
+ },
+ {
+ "epoch": 4.306269270298047,
+ "grad_norm": 2.92669939994812,
+ "learning_rate": 6.0075839653304445e-06,
+ "loss": 0.2162,
+ "step": 4190
+ },
+ {
+ "epoch": 4.316546762589928,
+ "grad_norm": 3.5979628562927246,
+ "learning_rate": 5.9967497291440955e-06,
+ "loss": 0.3606,
+ "step": 4200
+ },
+ {
+ "epoch": 4.326824254881809,
+ "grad_norm": 2.726447343826294,
+ "learning_rate": 5.9859154929577465e-06,
+ "loss": 0.205,
+ "step": 4210
+ },
+ {
+ "epoch": 4.33710174717369,
+ "grad_norm": 3.386230945587158,
+ "learning_rate": 5.9750812567713975e-06,
+ "loss": 0.2342,
+ "step": 4220
+ },
+ {
+ "epoch": 4.34737923946557,
+ "grad_norm": 3.2639143466949463,
+ "learning_rate": 5.9642470205850485e-06,
+ "loss": 0.2644,
+ "step": 4230
+ },
+ {
+ "epoch": 4.3576567317574515,
+ "grad_norm": 2.778188467025757,
+ "learning_rate": 5.9534127843987e-06,
+ "loss": 0.2256,
+ "step": 4240
+ },
+ {
+ "epoch": 4.367934224049332,
+ "grad_norm": 4.341221809387207,
+ "learning_rate": 5.942578548212351e-06,
+ "loss": 0.2249,
+ "step": 4250
+ },
+ {
+ "epoch": 4.378211716341212,
+ "grad_norm": 4.01497745513916,
+ "learning_rate": 5.931744312026003e-06,
+ "loss": 0.2355,
+ "step": 4260
+ },
+ {
+ "epoch": 4.388489208633094,
+ "grad_norm": 2.9536073207855225,
+ "learning_rate": 5.920910075839654e-06,
+ "loss": 0.2209,
+ "step": 4270
+ },
+ {
+ "epoch": 4.398766700924974,
+ "grad_norm": 2.984705686569214,
+ "learning_rate": 5.910075839653305e-06,
+ "loss": 0.2678,
+ "step": 4280
+ },
+ {
+ "epoch": 4.409044193216855,
+ "grad_norm": 2.538471221923828,
+ "learning_rate": 5.899241603466956e-06,
+ "loss": 0.234,
+ "step": 4290
+ },
+ {
+ "epoch": 4.419321685508736,
+ "grad_norm": 2.6294751167297363,
+ "learning_rate": 5.888407367280607e-06,
+ "loss": 0.2502,
+ "step": 4300
+ },
+ {
+ "epoch": 4.4295991778006165,
+ "grad_norm": 3.243605613708496,
+ "learning_rate": 5.877573131094258e-06,
+ "loss": 0.2293,
+ "step": 4310
+ },
+ {
+ "epoch": 4.439876670092497,
+ "grad_norm": 4.492715835571289,
+ "learning_rate": 5.86673889490791e-06,
+ "loss": 0.2296,
+ "step": 4320
+ },
+ {
+ "epoch": 4.450154162384378,
+ "grad_norm": 3.0587844848632812,
+ "learning_rate": 5.855904658721561e-06,
+ "loss": 0.219,
+ "step": 4330
+ },
+ {
+ "epoch": 4.460431654676259,
+ "grad_norm": 3.431396722793579,
+ "learning_rate": 5.845070422535212e-06,
+ "loss": 0.2164,
+ "step": 4340
+ },
+ {
+ "epoch": 4.47070914696814,
+ "grad_norm": 3.3825416564941406,
+ "learning_rate": 5.834236186348863e-06,
+ "loss": 0.2235,
+ "step": 4350
+ },
+ {
+ "epoch": 4.480986639260021,
+ "grad_norm": 3.5739195346832275,
+ "learning_rate": 5.823401950162514e-06,
+ "loss": 0.3161,
+ "step": 4360
+ },
+ {
+ "epoch": 4.491264131551901,
+ "grad_norm": 3.242276430130005,
+ "learning_rate": 5.812567713976165e-06,
+ "loss": 0.2277,
+ "step": 4370
+ },
+ {
+ "epoch": 4.501541623843782,
+ "grad_norm": 3.149808406829834,
+ "learning_rate": 5.801733477789816e-06,
+ "loss": 0.3057,
+ "step": 4380
+ },
+ {
+ "epoch": 4.511819116135663,
+ "grad_norm": 3.0861527919769287,
+ "learning_rate": 5.790899241603467e-06,
+ "loss": 0.2673,
+ "step": 4390
+ },
+ {
+ "epoch": 4.522096608427543,
+ "grad_norm": 2.8522121906280518,
+ "learning_rate": 5.780065005417118e-06,
+ "loss": 0.2213,
+ "step": 4400
+ },
+ {
+ "epoch": 4.532374100719425,
+ "grad_norm": 3.0801758766174316,
+ "learning_rate": 5.769230769230769e-06,
+ "loss": 0.2283,
+ "step": 4410
+ },
+ {
+ "epoch": 4.542651593011305,
+ "grad_norm": 4.235361576080322,
+ "learning_rate": 5.758396533044422e-06,
+ "loss": 0.2484,
+ "step": 4420
+ },
+ {
+ "epoch": 4.552929085303186,
+ "grad_norm": 3.881052017211914,
+ "learning_rate": 5.747562296858073e-06,
+ "loss": 0.2577,
+ "step": 4430
+ },
+ {
+ "epoch": 4.563206577595067,
+ "grad_norm": 4.833401203155518,
+ "learning_rate": 5.736728060671724e-06,
+ "loss": 0.2614,
+ "step": 4440
+ },
+ {
+ "epoch": 4.5734840698869474,
+ "grad_norm": 3.7018377780914307,
+ "learning_rate": 5.725893824485375e-06,
+ "loss": 0.2539,
+ "step": 4450
+ },
+ {
+ "epoch": 4.583761562178829,
+ "grad_norm": 3.454493522644043,
+ "learning_rate": 5.715059588299026e-06,
+ "loss": 0.2393,
+ "step": 4460
+ },
+ {
+ "epoch": 4.594039054470709,
+ "grad_norm": 3.1760177612304688,
+ "learning_rate": 5.7042253521126766e-06,
+ "loss": 0.2756,
+ "step": 4470
+ },
+ {
+ "epoch": 4.60431654676259,
+ "grad_norm": 3.2941226959228516,
+ "learning_rate": 5.6933911159263276e-06,
+ "loss": 0.221,
+ "step": 4480
+ },
+ {
+ "epoch": 4.614594039054471,
+ "grad_norm": 2.6715846061706543,
+ "learning_rate": 5.6825568797399786e-06,
+ "loss": 0.1891,
+ "step": 4490
+ },
+ {
+ "epoch": 4.6248715313463515,
+ "grad_norm": 4.533013343811035,
+ "learning_rate": 5.6717226435536295e-06,
+ "loss": 0.2153,
+ "step": 4500
+ },
+ {
+ "epoch": 4.635149023638232,
+ "grad_norm": 3.7271065711975098,
+ "learning_rate": 5.6608884073672805e-06,
+ "loss": 0.1851,
+ "step": 4510
+ },
+ {
+ "epoch": 4.645426515930113,
+ "grad_norm": 3.6285195350646973,
+ "learning_rate": 5.650054171180932e-06,
+ "loss": 0.2545,
+ "step": 4520
+ },
+ {
+ "epoch": 4.655704008221994,
+ "grad_norm": 2.942715883255005,
+ "learning_rate": 5.639219934994583e-06,
+ "loss": 0.2742,
+ "step": 4530
+ },
+ {
+ "epoch": 4.665981500513874,
+ "grad_norm": 3.835853099822998,
+ "learning_rate": 5.628385698808234e-06,
+ "loss": 0.26,
+ "step": 4540
+ },
+ {
+ "epoch": 4.676258992805756,
+ "grad_norm": 2.985145092010498,
+ "learning_rate": 5.617551462621885e-06,
+ "loss": 0.1806,
+ "step": 4550
+ },
+ {
+ "epoch": 4.686536485097636,
+ "grad_norm": 2.787224531173706,
+ "learning_rate": 5.606717226435536e-06,
+ "loss": 0.2763,
+ "step": 4560
+ },
+ {
+ "epoch": 4.6968139773895174,
+ "grad_norm": 3.8157551288604736,
+ "learning_rate": 5.595882990249187e-06,
+ "loss": 0.2892,
+ "step": 4570
+ },
+ {
+ "epoch": 4.707091469681398,
+ "grad_norm": 3.440402030944824,
+ "learning_rate": 5.585048754062839e-06,
+ "loss": 0.2788,
+ "step": 4580
+ },
+ {
+ "epoch": 4.717368961973278,
+ "grad_norm": 4.048103332519531,
+ "learning_rate": 5.57421451787649e-06,
+ "loss": 0.265,
+ "step": 4590
+ },
+ {
+ "epoch": 4.727646454265159,
+ "grad_norm": 2.923424482345581,
+ "learning_rate": 5.563380281690142e-06,
+ "loss": 0.2165,
+ "step": 4600
+ },
+ {
+ "epoch": 4.73792394655704,
+ "grad_norm": 2.8023345470428467,
+ "learning_rate": 5.552546045503793e-06,
+ "loss": 0.1895,
+ "step": 4610
+ },
+ {
+ "epoch": 4.748201438848921,
+ "grad_norm": 2.900959014892578,
+ "learning_rate": 5.541711809317444e-06,
+ "loss": 0.195,
+ "step": 4620
+ },
+ {
+ "epoch": 4.758478931140802,
+ "grad_norm": 3.2633719444274902,
+ "learning_rate": 5.530877573131095e-06,
+ "loss": 0.2658,
+ "step": 4630
+ },
+ {
+ "epoch": 4.7687564234326825,
+ "grad_norm": 3.7170844078063965,
+ "learning_rate": 5.520043336944746e-06,
+ "loss": 0.2247,
+ "step": 4640
+ },
+ {
+ "epoch": 4.779033915724563,
+ "grad_norm": 3.317171573638916,
+ "learning_rate": 5.509209100758397e-06,
+ "loss": 0.2497,
+ "step": 4650
+ },
+ {
+ "epoch": 4.789311408016444,
+ "grad_norm": 3.8781418800354004,
+ "learning_rate": 5.498374864572048e-06,
+ "loss": 0.218,
+ "step": 4660
+ },
+ {
+ "epoch": 4.799588900308325,
+ "grad_norm": 3.596952438354492,
+ "learning_rate": 5.487540628385699e-06,
+ "loss": 0.2871,
+ "step": 4670
+ },
+ {
+ "epoch": 4.809866392600205,
+ "grad_norm": 3.8521931171417236,
+ "learning_rate": 5.47670639219935e-06,
+ "loss": 0.2784,
+ "step": 4680
+ },
+ {
+ "epoch": 4.820143884892087,
+ "grad_norm": 3.562053680419922,
+ "learning_rate": 5.465872156013001e-06,
+ "loss": 0.2502,
+ "step": 4690
+ },
+ {
+ "epoch": 4.830421377183967,
+ "grad_norm": 3.1473138332366943,
+ "learning_rate": 5.455037919826652e-06,
+ "loss": 0.1935,
+ "step": 4700
+ },
+ {
+ "epoch": 4.8406988694758475,
+ "grad_norm": 3.755488157272339,
+ "learning_rate": 5.444203683640304e-06,
+ "loss": 0.2786,
+ "step": 4710
+ },
+ {
+ "epoch": 4.850976361767729,
+ "grad_norm": 2.7507431507110596,
+ "learning_rate": 5.433369447453955e-06,
+ "loss": 0.1997,
+ "step": 4720
+ },
+ {
+ "epoch": 4.861253854059609,
+ "grad_norm": 2.5082316398620605,
+ "learning_rate": 5.422535211267607e-06,
+ "loss": 0.1983,
+ "step": 4730
+ },
+ {
+ "epoch": 4.871531346351491,
+ "grad_norm": 4.426095485687256,
+ "learning_rate": 5.411700975081258e-06,
+ "loss": 0.2835,
+ "step": 4740
+ },
+ {
+ "epoch": 4.881808838643371,
+ "grad_norm": 3.8168785572052,
+ "learning_rate": 5.400866738894909e-06,
+ "loss": 0.197,
+ "step": 4750
+ },
+ {
+ "epoch": 4.892086330935252,
+ "grad_norm": 3.388019561767578,
+ "learning_rate": 5.39003250270856e-06,
+ "loss": 0.2651,
+ "step": 4760
+ },
+ {
+ "epoch": 4.902363823227133,
+ "grad_norm": 4.148802757263184,
+ "learning_rate": 5.379198266522211e-06,
+ "loss": 0.267,
+ "step": 4770
+ },
+ {
+ "epoch": 4.912641315519013,
+ "grad_norm": 4.763253211975098,
+ "learning_rate": 5.368364030335862e-06,
+ "loss": 0.2307,
+ "step": 4780
+ },
+ {
+ "epoch": 4.922918807810894,
+ "grad_norm": 5.207771301269531,
+ "learning_rate": 5.357529794149513e-06,
+ "loss": 0.2559,
+ "step": 4790
+ },
+ {
+ "epoch": 4.933196300102775,
+ "grad_norm": 2.9947853088378906,
+ "learning_rate": 5.346695557963164e-06,
+ "loss": 0.1951,
+ "step": 4800
+ },
+ {
+ "epoch": 4.943473792394656,
+ "grad_norm": 3.326383113861084,
+ "learning_rate": 5.335861321776815e-06,
+ "loss": 0.2096,
+ "step": 4810
+ },
+ {
+ "epoch": 4.953751284686536,
+ "grad_norm": 4.471996307373047,
+ "learning_rate": 5.325027085590466e-06,
+ "loss": 0.2297,
+ "step": 4820
+ },
+ {
+ "epoch": 4.9640287769784175,
+ "grad_norm": 2.7022922039031982,
+ "learning_rate": 5.314192849404117e-06,
+ "loss": 0.1895,
+ "step": 4830
+ },
+ {
+ "epoch": 4.974306269270298,
+ "grad_norm": 2.3356800079345703,
+ "learning_rate": 5.303358613217768e-06,
+ "loss": 0.1827,
+ "step": 4840
+ },
+ {
+ "epoch": 4.984583761562179,
+ "grad_norm": 3.5485782623291016,
+ "learning_rate": 5.292524377031419e-06,
+ "loss": 0.2314,
+ "step": 4850
+ },
+ {
+ "epoch": 4.99486125385406,
+ "grad_norm": 4.482807159423828,
+ "learning_rate": 5.28169014084507e-06,
+ "loss": 0.272,
+ "step": 4860
+ },
+ {
+ "epoch": 5.0,
+ "eval_loss": 0.3424507975578308,
+ "eval_runtime": 2155.9454,
+ "eval_samples_per_second": 2.722,
+ "eval_steps_per_second": 0.085,
+ "eval_wer": 0.3638944174361317,
+ "step": 4865
+ },
+ {
+ "epoch": 5.00513874614594,
+ "grad_norm": 3.5453717708587646,
+ "learning_rate": 5.270855904658721e-06,
+ "loss": 0.1935,
+ "step": 4870
+ },
+ {
+ "epoch": 5.015416238437822,
+ "grad_norm": 2.4929885864257812,
+ "learning_rate": 5.260021668472372e-06,
+ "loss": 0.2384,
+ "step": 4880
+ },
+ {
+ "epoch": 5.025693730729702,
+ "grad_norm": 4.065361499786377,
+ "learning_rate": 5.249187432286025e-06,
+ "loss": 0.1792,
+ "step": 4890
+ },
+ {
+ "epoch": 5.0359712230215825,
+ "grad_norm": 2.583575487136841,
+ "learning_rate": 5.238353196099676e-06,
+ "loss": 0.2077,
+ "step": 4900
+ },
+ {
+ "epoch": 5.046248715313464,
+ "grad_norm": 2.885948896408081,
+ "learning_rate": 5.227518959913327e-06,
+ "loss": 0.1988,
+ "step": 4910
+ },
+ {
+ "epoch": 5.056526207605344,
+ "grad_norm": 3.0783309936523438,
+ "learning_rate": 5.216684723726978e-06,
+ "loss": 0.1974,
+ "step": 4920
+ },
+ {
+ "epoch": 5.066803699897225,
+ "grad_norm": 2.903958559036255,
+ "learning_rate": 5.205850487540629e-06,
+ "loss": 0.2528,
+ "step": 4930
+ },
+ {
+ "epoch": 5.077081192189106,
+ "grad_norm": 4.175934791564941,
+ "learning_rate": 5.19501625135428e-06,
+ "loss": 0.2178,
+ "step": 4940
+ },
+ {
+ "epoch": 5.087358684480987,
+ "grad_norm": 2.6728291511535645,
+ "learning_rate": 5.184182015167931e-06,
+ "loss": 0.2193,
+ "step": 4950
+ },
+ {
+ "epoch": 5.097636176772867,
+ "grad_norm": 2.6697630882263184,
+ "learning_rate": 5.173347778981582e-06,
+ "loss": 0.1994,
+ "step": 4960
+ },
+ {
+ "epoch": 5.107913669064748,
+ "grad_norm": 3.4050991535186768,
+ "learning_rate": 5.162513542795233e-06,
+ "loss": 0.2599,
+ "step": 4970
+ },
+ {
+ "epoch": 5.118191161356629,
+ "grad_norm": 4.359245300292969,
+ "learning_rate": 5.151679306608884e-06,
+ "loss": 0.1863,
+ "step": 4980
+ },
+ {
+ "epoch": 5.128468653648509,
+ "grad_norm": 4.7175726890563965,
+ "learning_rate": 5.140845070422536e-06,
+ "loss": 0.2285,
+ "step": 4990
+ },
+ {
+ "epoch": 5.138746145940391,
+ "grad_norm": 3.808244228363037,
+ "learning_rate": 5.130010834236187e-06,
+ "loss": 0.2287,
+ "step": 5000
+ },
+ {
+ "epoch": 5.149023638232271,
+ "grad_norm": 2.721421957015991,
+ "learning_rate": 5.119176598049838e-06,
+ "loss": 0.1837,
+ "step": 5010
+ },
+ {
+ "epoch": 5.1593011305241525,
+ "grad_norm": 3.716888427734375,
+ "learning_rate": 5.108342361863489e-06,
+ "loss": 0.1911,
+ "step": 5020
+ },
+ {
+ "epoch": 5.169578622816033,
+ "grad_norm": 2.224301338195801,
+ "learning_rate": 5.09750812567714e-06,
+ "loss": 0.2105,
+ "step": 5030
+ },
+ {
+ "epoch": 5.179856115107913,
+ "grad_norm": 2.7661211490631104,
+ "learning_rate": 5.086673889490791e-06,
+ "loss": 0.1896,
+ "step": 5040
+ },
+ {
+ "epoch": 5.190133607399795,
+ "grad_norm": 2.1640748977661133,
+ "learning_rate": 5.075839653304443e-06,
+ "loss": 0.2515,
+ "step": 5050
+ },
+ {
+ "epoch": 5.200411099691675,
+ "grad_norm": 2.9602606296539307,
+ "learning_rate": 5.065005417118094e-06,
+ "loss": 0.2077,
+ "step": 5060
+ },
+ {
+ "epoch": 5.210688591983556,
+ "grad_norm": 3.2131927013397217,
+ "learning_rate": 5.0541711809317454e-06,
+ "loss": 0.1956,
+ "step": 5070
+ },
+ {
+ "epoch": 5.220966084275437,
+ "grad_norm": 4.2276105880737305,
+ "learning_rate": 5.0433369447453964e-06,
+ "loss": 0.2595,
+ "step": 5080
+ },
+ {
+ "epoch": 5.2312435765673175,
+ "grad_norm": 2.062800645828247,
+ "learning_rate": 5.0325027085590474e-06,
+ "loss": 0.2245,
+ "step": 5090
+ },
+ {
+ "epoch": 5.241521068859198,
+ "grad_norm": 3.1266229152679443,
+ "learning_rate": 5.0216684723726984e-06,
+ "loss": 0.222,
+ "step": 5100
+ },
+ {
+ "epoch": 5.251798561151079,
+ "grad_norm": 2.811793327331543,
+ "learning_rate": 5.010834236186349e-06,
+ "loss": 0.2297,
+ "step": 5110
+ },
+ {
+ "epoch": 5.26207605344296,
+ "grad_norm": 3.1398720741271973,
+ "learning_rate": 5e-06,
+ "loss": 0.1736,
+ "step": 5120
+ },
+ {
+ "epoch": 5.272353545734841,
+ "grad_norm": 3.829897880554199,
+ "learning_rate": 4.989165763813651e-06,
+ "loss": 0.2096,
+ "step": 5130
+ },
+ {
+ "epoch": 5.282631038026722,
+ "grad_norm": 3.5916318893432617,
+ "learning_rate": 4.978331527627302e-06,
+ "loss": 0.2428,
+ "step": 5140
+ },
+ {
+ "epoch": 5.292908530318602,
+ "grad_norm": 2.3700063228607178,
+ "learning_rate": 4.967497291440953e-06,
+ "loss": 0.1932,
+ "step": 5150
+ },
+ {
+ "epoch": 5.303186022610483,
+ "grad_norm": 3.2748498916625977,
+ "learning_rate": 4.956663055254605e-06,
+ "loss": 0.2216,
+ "step": 5160
+ },
+ {
+ "epoch": 5.313463514902364,
+ "grad_norm": 2.50570011138916,
+ "learning_rate": 4.945828819068256e-06,
+ "loss": 0.219,
+ "step": 5170
+ },
+ {
+ "epoch": 5.323741007194244,
+ "grad_norm": 3.0017030239105225,
+ "learning_rate": 4.934994582881907e-06,
+ "loss": 0.231,
+ "step": 5180
+ },
+ {
+ "epoch": 5.334018499486126,
+ "grad_norm": 2.34260630607605,
+ "learning_rate": 4.924160346695558e-06,
+ "loss": 0.2528,
+ "step": 5190
+ },
+ {
+ "epoch": 5.344295991778006,
+ "grad_norm": 5.0798444747924805,
+ "learning_rate": 4.913326110509209e-06,
+ "loss": 0.2046,
+ "step": 5200
+ },
+ {
+ "epoch": 5.354573484069887,
+ "grad_norm": 3.9050886631011963,
+ "learning_rate": 4.90249187432286e-06,
+ "loss": 0.2026,
+ "step": 5210
+ },
+ {
+ "epoch": 5.364850976361768,
+ "grad_norm": 4.162816524505615,
+ "learning_rate": 4.891657638136512e-06,
+ "loss": 0.2601,
+ "step": 5220
+ },
+ {
+ "epoch": 5.3751284686536485,
+ "grad_norm": 3.383274555206299,
+ "learning_rate": 4.880823401950163e-06,
+ "loss": 0.1653,
+ "step": 5230
+ },
+ {
+ "epoch": 5.385405960945529,
+ "grad_norm": 2.9354922771453857,
+ "learning_rate": 4.869989165763814e-06,
+ "loss": 0.2238,
+ "step": 5240
+ },
+ {
+ "epoch": 5.39568345323741,
+ "grad_norm": 2.3812334537506104,
+ "learning_rate": 4.859154929577465e-06,
+ "loss": 0.2059,
+ "step": 5250
+ },
+ {
+ "epoch": 5.405960945529291,
+ "grad_norm": 2.6520116329193115,
+ "learning_rate": 4.848320693391117e-06,
+ "loss": 0.2624,
+ "step": 5260
+ },
+ {
+ "epoch": 5.416238437821171,
+ "grad_norm": 3.0220510959625244,
+ "learning_rate": 4.837486457204768e-06,
+ "loss": 0.2858,
+ "step": 5270
+ },
+ {
+ "epoch": 5.4265159301130526,
+ "grad_norm": 3.373061180114746,
+ "learning_rate": 4.826652221018419e-06,
+ "loss": 0.2445,
+ "step": 5280
+ },
+ {
+ "epoch": 5.436793422404933,
+ "grad_norm": 3.253922462463379,
+ "learning_rate": 4.81581798483207e-06,
+ "loss": 0.2465,
+ "step": 5290
+ },
+ {
+ "epoch": 5.447070914696814,
+ "grad_norm": 3.8040966987609863,
+ "learning_rate": 4.804983748645721e-06,
+ "loss": 0.2157,
+ "step": 5300
+ },
+ {
+ "epoch": 5.457348406988695,
+ "grad_norm": 2.812648057937622,
+ "learning_rate": 4.794149512459372e-06,
+ "loss": 0.2039,
+ "step": 5310
+ },
+ {
+ "epoch": 5.467625899280575,
+ "grad_norm": 3.9564015865325928,
+ "learning_rate": 4.783315276273024e-06,
+ "loss": 0.1993,
+ "step": 5320
+ },
+ {
+ "epoch": 5.477903391572457,
+ "grad_norm": 2.991647481918335,
+ "learning_rate": 4.772481040086675e-06,
+ "loss": 0.1606,
+ "step": 5330
+ },
+ {
+ "epoch": 5.488180883864337,
+ "grad_norm": 3.1116158962249756,
+ "learning_rate": 4.761646803900326e-06,
+ "loss": 0.2625,
+ "step": 5340
+ },
+ {
+ "epoch": 5.498458376156218,
+ "grad_norm": 4.179168224334717,
+ "learning_rate": 4.750812567713977e-06,
+ "loss": 0.2525,
+ "step": 5350
+ },
+ {
+ "epoch": 5.508735868448099,
+ "grad_norm": 3.158571481704712,
+ "learning_rate": 4.739978331527628e-06,
+ "loss": 0.2093,
+ "step": 5360
+ },
+ {
+ "epoch": 5.519013360739979,
+ "grad_norm": 3.2934162616729736,
+ "learning_rate": 4.729144095341279e-06,
+ "loss": 0.2581,
+ "step": 5370
+ },
+ {
+ "epoch": 5.52929085303186,
+ "grad_norm": 2.104811668395996,
+ "learning_rate": 4.71830985915493e-06,
+ "loss": 0.1754,
+ "step": 5380
+ },
+ {
+ "epoch": 5.539568345323741,
+ "grad_norm": 2.4097349643707275,
+ "learning_rate": 4.707475622968581e-06,
+ "loss": 0.1439,
+ "step": 5390
+ },
+ {
+ "epoch": 5.549845837615622,
+ "grad_norm": 3.515024423599243,
+ "learning_rate": 4.6966413867822324e-06,
+ "loss": 0.1735,
+ "step": 5400
+ },
+ {
+ "epoch": 5.560123329907503,
+ "grad_norm": 3.5324950218200684,
+ "learning_rate": 4.6858071505958834e-06,
+ "loss": 0.2659,
+ "step": 5410
+ },
+ {
+ "epoch": 5.5704008221993835,
+ "grad_norm": 2.9916086196899414,
+ "learning_rate": 4.674972914409534e-06,
+ "loss": 0.2829,
+ "step": 5420
+ },
+ {
+ "epoch": 5.580678314491264,
+ "grad_norm": 3.8028905391693115,
+ "learning_rate": 4.664138678223185e-06,
+ "loss": 0.2463,
+ "step": 5430
+ },
+ {
+ "epoch": 5.590955806783145,
+ "grad_norm": 3.8579933643341064,
+ "learning_rate": 4.653304442036836e-06,
+ "loss": 0.2034,
+ "step": 5440
+ },
+ {
+ "epoch": 5.601233299075026,
+ "grad_norm": 2.935047149658203,
+ "learning_rate": 4.642470205850487e-06,
+ "loss": 0.1735,
+ "step": 5450
+ },
+ {
+ "epoch": 5.611510791366906,
+ "grad_norm": 3.832740068435669,
+ "learning_rate": 4.631635969664139e-06,
+ "loss": 0.2052,
+ "step": 5460
+ },
+ {
+ "epoch": 5.621788283658788,
+ "grad_norm": 3.1037473678588867,
+ "learning_rate": 4.62080173347779e-06,
+ "loss": 0.1715,
+ "step": 5470
+ },
+ {
+ "epoch": 5.632065775950668,
+ "grad_norm": 3.4924092292785645,
+ "learning_rate": 4.609967497291441e-06,
+ "loss": 0.1661,
+ "step": 5480
+ },
+ {
+ "epoch": 5.6423432682425485,
+ "grad_norm": 3.0313289165496826,
+ "learning_rate": 4.599133261105092e-06,
+ "loss": 0.2318,
+ "step": 5490
+ },
+ {
+ "epoch": 5.65262076053443,
+ "grad_norm": 2.711442708969116,
+ "learning_rate": 4.588299024918744e-06,
+ "loss": 0.203,
+ "step": 5500
+ },
+ {
+ "epoch": 5.66289825282631,
+ "grad_norm": 3.3626229763031006,
+ "learning_rate": 4.577464788732395e-06,
+ "loss": 0.1957,
+ "step": 5510
+ },
+ {
+ "epoch": 5.673175745118191,
+ "grad_norm": 5.538825988769531,
+ "learning_rate": 4.566630552546046e-06,
+ "loss": 0.2218,
+ "step": 5520
+ },
+ {
+ "epoch": 5.683453237410072,
+ "grad_norm": 4.4782280921936035,
+ "learning_rate": 4.555796316359697e-06,
+ "loss": 0.2317,
+ "step": 5530
+ },
+ {
+ "epoch": 5.693730729701953,
+ "grad_norm": 3.762585401535034,
+ "learning_rate": 4.544962080173348e-06,
+ "loss": 0.2034,
+ "step": 5540
+ },
+ {
+ "epoch": 5.704008221993833,
+ "grad_norm": 2.648967981338501,
+ "learning_rate": 4.534127843986999e-06,
+ "loss": 0.2342,
+ "step": 5550
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 3.2710864543914795,
+ "learning_rate": 4.523293607800651e-06,
+ "loss": 0.2271,
+ "step": 5560
+ },
+ {
+ "epoch": 5.724563206577595,
+ "grad_norm": 3.4262428283691406,
+ "learning_rate": 4.512459371614302e-06,
+ "loss": 0.2354,
+ "step": 5570
+ },
+ {
+ "epoch": 5.734840698869476,
+ "grad_norm": 4.622767925262451,
+ "learning_rate": 4.501625135427953e-06,
+ "loss": 0.2362,
+ "step": 5580
+ },
+ {
+ "epoch": 5.745118191161357,
+ "grad_norm": 2.523181915283203,
+ "learning_rate": 4.490790899241604e-06,
+ "loss": 0.277,
+ "step": 5590
+ },
+ {
+ "epoch": 5.755395683453237,
+ "grad_norm": 3.465257167816162,
+ "learning_rate": 4.479956663055255e-06,
+ "loss": 0.1887,
+ "step": 5600
+ },
+ {
+ "epoch": 5.7656731757451185,
+ "grad_norm": 3.522796869277954,
+ "learning_rate": 4.469122426868906e-06,
+ "loss": 0.2124,
+ "step": 5610
+ },
+ {
+ "epoch": 5.775950668036999,
+ "grad_norm": 2.6344645023345947,
+ "learning_rate": 4.458288190682557e-06,
+ "loss": 0.1447,
+ "step": 5620
+ },
+ {
+ "epoch": 5.786228160328879,
+ "grad_norm": 4.947503566741943,
+ "learning_rate": 4.447453954496209e-06,
+ "loss": 0.2845,
+ "step": 5630
+ },
+ {
+ "epoch": 5.796505652620761,
+ "grad_norm": 2.601114511489868,
+ "learning_rate": 4.43661971830986e-06,
+ "loss": 0.225,
+ "step": 5640
+ },
+ {
+ "epoch": 5.806783144912641,
+ "grad_norm": 2.1763648986816406,
+ "learning_rate": 4.425785482123511e-06,
+ "loss": 0.2572,
+ "step": 5650
+ },
+ {
+ "epoch": 5.817060637204522,
+ "grad_norm": 3.327956199645996,
+ "learning_rate": 4.414951245937162e-06,
+ "loss": 0.252,
+ "step": 5660
+ },
+ {
+ "epoch": 5.827338129496403,
+ "grad_norm": 3.0444459915161133,
+ "learning_rate": 4.404117009750813e-06,
+ "loss": 0.202,
+ "step": 5670
+ },
+ {
+ "epoch": 5.8376156217882835,
+ "grad_norm": 3.5194921493530273,
+ "learning_rate": 4.393282773564464e-06,
+ "loss": 0.1841,
+ "step": 5680
+ },
+ {
+ "epoch": 5.847893114080165,
+ "grad_norm": 6.3386759757995605,
+ "learning_rate": 4.3824485373781155e-06,
+ "loss": 0.2575,
+ "step": 5690
+ },
+ {
+ "epoch": 5.858170606372045,
+ "grad_norm": 2.679220676422119,
+ "learning_rate": 4.3716143011917665e-06,
+ "loss": 0.2159,
+ "step": 5700
+ },
+ {
+ "epoch": 5.868448098663926,
+ "grad_norm": 3.0586998462677,
+ "learning_rate": 4.3607800650054174e-06,
+ "loss": 0.2096,
+ "step": 5710
+ },
+ {
+ "epoch": 5.878725590955807,
+ "grad_norm": 3.4423577785491943,
+ "learning_rate": 4.3499458288190684e-06,
+ "loss": 0.2255,
+ "step": 5720
+ },
+ {
+ "epoch": 5.889003083247688,
+ "grad_norm": 2.7511608600616455,
+ "learning_rate": 4.33911159263272e-06,
+ "loss": 0.1964,
+ "step": 5730
+ },
+ {
+ "epoch": 5.899280575539568,
+ "grad_norm": 2.233632802963257,
+ "learning_rate": 4.328277356446371e-06,
+ "loss": 0.197,
+ "step": 5740
+ },
+ {
+ "epoch": 5.909558067831449,
+ "grad_norm": 2.6019561290740967,
+ "learning_rate": 4.317443120260022e-06,
+ "loss": 0.167,
+ "step": 5750
+ },
+ {
+ "epoch": 5.91983556012333,
+ "grad_norm": 2.9982266426086426,
+ "learning_rate": 4.306608884073673e-06,
+ "loss": 0.2546,
+ "step": 5760
+ },
+ {
+ "epoch": 5.93011305241521,
+ "grad_norm": 2.8179750442504883,
+ "learning_rate": 4.295774647887324e-06,
+ "loss": 0.2555,
+ "step": 5770
+ },
+ {
+ "epoch": 5.940390544707092,
+ "grad_norm": 3.8446030616760254,
+ "learning_rate": 4.284940411700975e-06,
+ "loss": 0.2172,
+ "step": 5780
+ },
+ {
+ "epoch": 5.950668036998972,
+ "grad_norm": 3.377340078353882,
+ "learning_rate": 4.274106175514627e-06,
+ "loss": 0.2168,
+ "step": 5790
+ },
+ {
+ "epoch": 5.9609455292908535,
+ "grad_norm": 4.0742411613464355,
+ "learning_rate": 4.263271939328278e-06,
+ "loss": 0.2135,
+ "step": 5800
+ },
+ {
+ "epoch": 5.971223021582734,
+ "grad_norm": 2.8021926879882812,
+ "learning_rate": 4.252437703141929e-06,
+ "loss": 0.2091,
+ "step": 5810
+ },
+ {
+ "epoch": 5.9815005138746145,
+ "grad_norm": 2.8668556213378906,
+ "learning_rate": 4.24160346695558e-06,
+ "loss": 0.1975,
+ "step": 5820
+ },
+ {
+ "epoch": 5.991778006166495,
+ "grad_norm": 3.0243079662323,
+ "learning_rate": 4.230769230769231e-06,
+ "loss": 0.2246,
+ "step": 5830
+ },
+ {
+ "epoch": 6.0,
+ "eval_loss": 0.3370836079120636,
+ "eval_runtime": 1607.0593,
+ "eval_samples_per_second": 3.651,
+ "eval_steps_per_second": 0.114,
+ "eval_wer": 0.3341055173088845,
+ "step": 5838
+ },
+ {
+ "epoch": 6.002055498458376,
+ "grad_norm": 4.44910192489624,
+ "learning_rate": 4.219934994582882e-06,
+ "loss": 0.2229,
+ "step": 5840
+ },
+ {
+ "epoch": 6.012332990750257,
+ "grad_norm": 4.300351142883301,
+ "learning_rate": 4.209100758396533e-06,
+ "loss": 0.2699,
+ "step": 5850
+ },
+ {
+ "epoch": 6.022610483042138,
+ "grad_norm": 2.6255762577056885,
+ "learning_rate": 4.198266522210184e-06,
+ "loss": 0.2489,
+ "step": 5860
+ },
+ {
+ "epoch": 6.0328879753340185,
+ "grad_norm": 3.224179267883301,
+ "learning_rate": 4.187432286023836e-06,
+ "loss": 0.2066,
+ "step": 5870
+ },
+ {
+ "epoch": 6.043165467625899,
+ "grad_norm": 2.1570770740509033,
+ "learning_rate": 4.176598049837487e-06,
+ "loss": 0.1952,
+ "step": 5880
+ },
+ {
+ "epoch": 6.05344295991778,
+ "grad_norm": 2.8564834594726562,
+ "learning_rate": 4.165763813651138e-06,
+ "loss": 0.1647,
+ "step": 5890
+ },
+ {
+ "epoch": 6.063720452209661,
+ "grad_norm": 3.121005058288574,
+ "learning_rate": 4.154929577464789e-06,
+ "loss": 0.1846,
+ "step": 5900
+ },
+ {
+ "epoch": 6.073997944501541,
+ "grad_norm": 2.7559916973114014,
+ "learning_rate": 4.14409534127844e-06,
+ "loss": 0.2056,
+ "step": 5910
+ },
+ {
+ "epoch": 6.084275436793423,
+ "grad_norm": 2.2764577865600586,
+ "learning_rate": 4.133261105092091e-06,
+ "loss": 0.1827,
+ "step": 5920
+ },
+ {
+ "epoch": 6.094552929085303,
+ "grad_norm": 3.273794412612915,
+ "learning_rate": 4.122426868905743e-06,
+ "loss": 0.2004,
+ "step": 5930
+ },
+ {
+ "epoch": 6.104830421377184,
+ "grad_norm": 3.497180461883545,
+ "learning_rate": 4.111592632719394e-06,
+ "loss": 0.1765,
+ "step": 5940
+ },
+ {
+ "epoch": 6.115107913669065,
+ "grad_norm": 2.506742238998413,
+ "learning_rate": 4.100758396533045e-06,
+ "loss": 0.1928,
+ "step": 5950
+ },
+ {
+ "epoch": 6.125385405960945,
+ "grad_norm": 4.537923812866211,
+ "learning_rate": 4.089924160346696e-06,
+ "loss": 0.2638,
+ "step": 5960
+ },
+ {
+ "epoch": 6.135662898252827,
+ "grad_norm": 4.140702247619629,
+ "learning_rate": 4.0790899241603475e-06,
+ "loss": 0.2831,
+ "step": 5970
+ },
+ {
+ "epoch": 6.145940390544707,
+ "grad_norm": 3.754379987716675,
+ "learning_rate": 4.0682556879739985e-06,
+ "loss": 0.2594,
+ "step": 5980
+ },
+ {
+ "epoch": 6.156217882836588,
+ "grad_norm": 4.139322757720947,
+ "learning_rate": 4.0574214517876495e-06,
+ "loss": 0.154,
+ "step": 5990
+ },
+ {
+ "epoch": 6.166495375128469,
+ "grad_norm": 2.700397491455078,
+ "learning_rate": 4.0465872156013005e-06,
+ "loss": 0.1733,
+ "step": 6000
+ },
+ {
+ "epoch": 6.1767728674203495,
+ "grad_norm": 4.563075542449951,
+ "learning_rate": 4.0357529794149515e-06,
+ "loss": 0.1818,
+ "step": 6010
+ },
+ {
+ "epoch": 6.18705035971223,
+ "grad_norm": 3.9990346431732178,
+ "learning_rate": 4.024918743228603e-06,
+ "loss": 0.1735,
+ "step": 6020
+ },
+ {
+ "epoch": 6.197327852004111,
+ "grad_norm": 3.266754627227783,
+ "learning_rate": 4.014084507042254e-06,
+ "loss": 0.2065,
+ "step": 6030
+ },
+ {
+ "epoch": 6.207605344295992,
+ "grad_norm": 2.936103105545044,
+ "learning_rate": 4.003250270855905e-06,
+ "loss": 0.2146,
+ "step": 6040
+ },
+ {
+ "epoch": 6.217882836587872,
+ "grad_norm": 3.2443127632141113,
+ "learning_rate": 3.992416034669556e-06,
+ "loss": 0.2163,
+ "step": 6050
+ },
+ {
+ "epoch": 6.228160328879754,
+ "grad_norm": 1.8829902410507202,
+ "learning_rate": 3.981581798483207e-06,
+ "loss": 0.2112,
+ "step": 6060
+ },
+ {
+ "epoch": 6.238437821171634,
+ "grad_norm": 3.7794463634490967,
+ "learning_rate": 3.970747562296858e-06,
+ "loss": 0.2099,
+ "step": 6070
+ },
+ {
+ "epoch": 6.2487153134635145,
+ "grad_norm": 3.4124205112457275,
+ "learning_rate": 3.959913326110509e-06,
+ "loss": 0.2275,
+ "step": 6080
+ },
+ {
+ "epoch": 6.258992805755396,
+ "grad_norm": 3.0793240070343018,
+ "learning_rate": 3.94907908992416e-06,
+ "loss": 0.2107,
+ "step": 6090
+ },
+ {
+ "epoch": 6.269270298047276,
+ "grad_norm": 2.9140002727508545,
+ "learning_rate": 3.938244853737812e-06,
+ "loss": 0.2207,
+ "step": 6100
+ },
+ {
+ "epoch": 6.279547790339157,
+ "grad_norm": 4.15077543258667,
+ "learning_rate": 3.927410617551463e-06,
+ "loss": 0.2423,
+ "step": 6110
+ },
+ {
+ "epoch": 6.289825282631038,
+ "grad_norm": 3.4205381870269775,
+ "learning_rate": 3.916576381365114e-06,
+ "loss": 0.2209,
+ "step": 6120
+ },
+ {
+ "epoch": 6.300102774922919,
+ "grad_norm": 3.495804786682129,
+ "learning_rate": 3.905742145178765e-06,
+ "loss": 0.2076,
+ "step": 6130
+ },
+ {
+ "epoch": 6.3103802672148,
+ "grad_norm": 2.691032886505127,
+ "learning_rate": 3.894907908992416e-06,
+ "loss": 0.1965,
+ "step": 6140
+ },
+ {
+ "epoch": 6.32065775950668,
+ "grad_norm": 3.958749771118164,
+ "learning_rate": 3.884073672806067e-06,
+ "loss": 0.2056,
+ "step": 6150
+ },
+ {
+ "epoch": 6.330935251798561,
+ "grad_norm": 2.556640386581421,
+ "learning_rate": 3.873239436619718e-06,
+ "loss": 0.215,
+ "step": 6160
+ },
+ {
+ "epoch": 6.341212744090442,
+ "grad_norm": 2.6547491550445557,
+ "learning_rate": 3.86240520043337e-06,
+ "loss": 0.1615,
+ "step": 6170
+ },
+ {
+ "epoch": 6.351490236382323,
+ "grad_norm": 2.9845190048217773,
+ "learning_rate": 3.851570964247021e-06,
+ "loss": 0.2045,
+ "step": 6180
+ },
+ {
+ "epoch": 6.361767728674203,
+ "grad_norm": 3.978686571121216,
+ "learning_rate": 3.840736728060672e-06,
+ "loss": 0.2107,
+ "step": 6190
+ },
+ {
+ "epoch": 6.3720452209660845,
+ "grad_norm": 2.142049551010132,
+ "learning_rate": 3.829902491874323e-06,
+ "loss": 0.2276,
+ "step": 6200
+ },
+ {
+ "epoch": 6.382322713257965,
+ "grad_norm": 2.729975938796997,
+ "learning_rate": 3.819068255687975e-06,
+ "loss": 0.2222,
+ "step": 6210
+ },
+ {
+ "epoch": 6.392600205549845,
+ "grad_norm": 3.538694381713867,
+ "learning_rate": 3.8082340195016253e-06,
+ "loss": 0.208,
+ "step": 6220
+ },
+ {
+ "epoch": 6.402877697841727,
+ "grad_norm": 2.490054130554199,
+ "learning_rate": 3.7973997833152767e-06,
+ "loss": 0.2149,
+ "step": 6230
+ },
+ {
+ "epoch": 6.413155190133607,
+ "grad_norm": 3.753293514251709,
+ "learning_rate": 3.7865655471289277e-06,
+ "loss": 0.1673,
+ "step": 6240
+ },
+ {
+ "epoch": 6.423432682425489,
+ "grad_norm": 3.622450828552246,
+ "learning_rate": 3.7757313109425787e-06,
+ "loss": 0.2291,
+ "step": 6250
+ },
+ {
+ "epoch": 6.433710174717369,
+ "grad_norm": 4.3948187828063965,
+ "learning_rate": 3.76489707475623e-06,
+ "loss": 0.2168,
+ "step": 6260
+ },
+ {
+ "epoch": 6.4439876670092495,
+ "grad_norm": 3.6386053562164307,
+ "learning_rate": 3.7540628385698815e-06,
+ "loss": 0.2269,
+ "step": 6270
+ },
+ {
+ "epoch": 6.454265159301131,
+ "grad_norm": 2.8120999336242676,
+ "learning_rate": 3.7432286023835325e-06,
+ "loss": 0.2137,
+ "step": 6280
+ },
+ {
+ "epoch": 6.464542651593011,
+ "grad_norm": 4.64008092880249,
+ "learning_rate": 3.7323943661971835e-06,
+ "loss": 0.2356,
+ "step": 6290
+ },
+ {
+ "epoch": 6.474820143884892,
+ "grad_norm": 3.2172248363494873,
+ "learning_rate": 3.7215601300108345e-06,
+ "loss": 0.2019,
+ "step": 6300
+ },
+ {
+ "epoch": 6.485097636176773,
+ "grad_norm": 3.1428282260894775,
+ "learning_rate": 3.7107258938244855e-06,
+ "loss": 0.1808,
+ "step": 6310
+ },
+ {
+ "epoch": 6.495375128468654,
+ "grad_norm": 4.095731735229492,
+ "learning_rate": 3.6998916576381365e-06,
+ "loss": 0.2001,
+ "step": 6320
+ },
+ {
+ "epoch": 6.505652620760534,
+ "grad_norm": 2.5641703605651855,
+ "learning_rate": 3.689057421451788e-06,
+ "loss": 0.2002,
+ "step": 6330
+ },
+ {
+ "epoch": 6.515930113052415,
+ "grad_norm": 2.615081787109375,
+ "learning_rate": 3.6782231852654393e-06,
+ "loss": 0.1668,
+ "step": 6340
+ },
+ {
+ "epoch": 6.526207605344296,
+ "grad_norm": 3.6635892391204834,
+ "learning_rate": 3.6673889490790903e-06,
+ "loss": 0.2371,
+ "step": 6350
+ },
+ {
+ "epoch": 6.536485097636177,
+ "grad_norm": 4.5991950035095215,
+ "learning_rate": 3.6565547128927413e-06,
+ "loss": 0.1814,
+ "step": 6360
+ },
+ {
+ "epoch": 6.546762589928058,
+ "grad_norm": 3.3164796829223633,
+ "learning_rate": 3.6457204767063927e-06,
+ "loss": 0.1804,
+ "step": 6370
+ },
+ {
+ "epoch": 6.557040082219938,
+ "grad_norm": 2.7094385623931885,
+ "learning_rate": 3.6348862405200437e-06,
+ "loss": 0.2325,
+ "step": 6380
+ },
+ {
+ "epoch": 6.567317574511819,
+ "grad_norm": 3.2538888454437256,
+ "learning_rate": 3.6240520043336947e-06,
+ "loss": 0.1925,
+ "step": 6390
+ },
+ {
+ "epoch": 6.5775950668037,
+ "grad_norm": 4.265585422515869,
+ "learning_rate": 3.6132177681473457e-06,
+ "loss": 0.2064,
+ "step": 6400
+ },
+ {
+ "epoch": 6.5878725590955804,
+ "grad_norm": 3.4931657314300537,
+ "learning_rate": 3.6023835319609967e-06,
+ "loss": 0.2144,
+ "step": 6410
+ },
+ {
+ "epoch": 6.598150051387462,
+ "grad_norm": 3.760213851928711,
+ "learning_rate": 3.5915492957746485e-06,
+ "loss": 0.1294,
+ "step": 6420
+ },
+ {
+ "epoch": 6.608427543679342,
+ "grad_norm": 2.018725872039795,
+ "learning_rate": 3.5807150595882995e-06,
+ "loss": 0.2067,
+ "step": 6430
+ },
+ {
+ "epoch": 6.618705035971223,
+ "grad_norm": 3.8554389476776123,
+ "learning_rate": 3.5698808234019505e-06,
+ "loss": 0.1834,
+ "step": 6440
+ },
+ {
+ "epoch": 6.628982528263104,
+ "grad_norm": 2.937488555908203,
+ "learning_rate": 3.5590465872156015e-06,
+ "loss": 0.193,
+ "step": 6450
+ },
+ {
+ "epoch": 6.6392600205549845,
+ "grad_norm": 3.228877067565918,
+ "learning_rate": 3.5482123510292525e-06,
+ "loss": 0.2365,
+ "step": 6460
+ },
+ {
+ "epoch": 6.649537512846865,
+ "grad_norm": 2.6487700939178467,
+ "learning_rate": 3.537378114842904e-06,
+ "loss": 0.2074,
+ "step": 6470
+ },
+ {
+ "epoch": 6.659815005138746,
+ "grad_norm": 1.9501376152038574,
+ "learning_rate": 3.526543878656555e-06,
+ "loss": 0.1831,
+ "step": 6480
+ },
+ {
+ "epoch": 6.670092497430627,
+ "grad_norm": 3.428683280944824,
+ "learning_rate": 3.5157096424702063e-06,
+ "loss": 0.1707,
+ "step": 6490
+ },
+ {
+ "epoch": 6.680369989722507,
+ "grad_norm": 4.162604808807373,
+ "learning_rate": 3.5048754062838573e-06,
+ "loss": 0.1814,
+ "step": 6500
+ },
+ {
+ "epoch": 6.690647482014389,
+ "grad_norm": 2.8636157512664795,
+ "learning_rate": 3.4940411700975087e-06,
+ "loss": 0.2169,
+ "step": 6510
+ },
+ {
+ "epoch": 6.700924974306269,
+ "grad_norm": 3.628262519836426,
+ "learning_rate": 3.4832069339111597e-06,
+ "loss": 0.2447,
+ "step": 6520
+ },
+ {
+ "epoch": 6.7112024665981505,
+ "grad_norm": 3.1727042198181152,
+ "learning_rate": 3.4723726977248107e-06,
+ "loss": 0.2149,
+ "step": 6530
+ },
+ {
+ "epoch": 6.721479958890031,
+ "grad_norm": 4.583132743835449,
+ "learning_rate": 3.4615384615384617e-06,
+ "loss": 0.1968,
+ "step": 6540
+ },
+ {
+ "epoch": 6.731757451181911,
+ "grad_norm": 2.8516876697540283,
+ "learning_rate": 3.4507042253521127e-06,
+ "loss": 0.1502,
+ "step": 6550
+ },
+ {
+ "epoch": 6.742034943473793,
+ "grad_norm": 3.4025397300720215,
+ "learning_rate": 3.4398699891657637e-06,
+ "loss": 0.2249,
+ "step": 6560
+ },
+ {
+ "epoch": 6.752312435765673,
+ "grad_norm": 2.9527840614318848,
+ "learning_rate": 3.4290357529794155e-06,
+ "loss": 0.1757,
+ "step": 6570
+ },
+ {
+ "epoch": 6.762589928057554,
+ "grad_norm": 3.4826791286468506,
+ "learning_rate": 3.4182015167930665e-06,
+ "loss": 0.1957,
+ "step": 6580
+ },
+ {
+ "epoch": 6.772867420349435,
+ "grad_norm": 2.293030023574829,
+ "learning_rate": 3.4073672806067175e-06,
+ "loss": 0.2031,
+ "step": 6590
+ },
+ {
+ "epoch": 6.7831449126413155,
+ "grad_norm": 2.7427804470062256,
+ "learning_rate": 3.3965330444203685e-06,
+ "loss": 0.1944,
+ "step": 6600
+ },
+ {
+ "epoch": 6.793422404933196,
+ "grad_norm": 2.6534154415130615,
+ "learning_rate": 3.38569880823402e-06,
+ "loss": 0.2157,
+ "step": 6610
+ },
+ {
+ "epoch": 6.803699897225077,
+ "grad_norm": 3.0126819610595703,
+ "learning_rate": 3.374864572047671e-06,
+ "loss": 0.204,
+ "step": 6620
+ },
+ {
+ "epoch": 6.813977389516958,
+ "grad_norm": 2.957345724105835,
+ "learning_rate": 3.364030335861322e-06,
+ "loss": 0.1765,
+ "step": 6630
+ },
+ {
+ "epoch": 6.824254881808839,
+ "grad_norm": 3.7311325073242188,
+ "learning_rate": 3.353196099674973e-06,
+ "loss": 0.2501,
+ "step": 6640
+ },
+ {
+ "epoch": 6.83453237410072,
+ "grad_norm": 3.535660743713379,
+ "learning_rate": 3.3423618634886247e-06,
+ "loss": 0.2223,
+ "step": 6650
+ },
+ {
+ "epoch": 6.8448098663926,
+ "grad_norm": 2.5555760860443115,
+ "learning_rate": 3.3315276273022757e-06,
+ "loss": 0.1979,
+ "step": 6660
+ },
+ {
+ "epoch": 6.8550873586844805,
+ "grad_norm": 3.8657376766204834,
+ "learning_rate": 3.3206933911159267e-06,
+ "loss": 0.1537,
+ "step": 6670
+ },
+ {
+ "epoch": 6.865364850976362,
+ "grad_norm": 3.7345712184906006,
+ "learning_rate": 3.3098591549295777e-06,
+ "loss": 0.1761,
+ "step": 6680
+ },
+ {
+ "epoch": 6.875642343268242,
+ "grad_norm": 3.04667329788208,
+ "learning_rate": 3.2990249187432287e-06,
+ "loss": 0.2655,
+ "step": 6690
+ },
+ {
+ "epoch": 6.885919835560124,
+ "grad_norm": 2.715324640274048,
+ "learning_rate": 3.2881906825568797e-06,
+ "loss": 0.2037,
+ "step": 6700
+ },
+ {
+ "epoch": 6.896197327852004,
+ "grad_norm": 3.4638121128082275,
+ "learning_rate": 3.277356446370531e-06,
+ "loss": 0.2189,
+ "step": 6710
+ },
+ {
+ "epoch": 6.906474820143885,
+ "grad_norm": 1.9942344427108765,
+ "learning_rate": 3.266522210184182e-06,
+ "loss": 0.2246,
+ "step": 6720
+ },
+ {
+ "epoch": 6.916752312435766,
+ "grad_norm": 4.648674011230469,
+ "learning_rate": 3.2556879739978335e-06,
+ "loss": 0.1811,
+ "step": 6730
+ },
+ {
+ "epoch": 6.927029804727646,
+ "grad_norm": 3.068770170211792,
+ "learning_rate": 3.2448537378114845e-06,
+ "loss": 0.1767,
+ "step": 6740
+ },
+ {
+ "epoch": 6.937307297019527,
+ "grad_norm": 4.408273696899414,
+ "learning_rate": 3.234019501625136e-06,
+ "loss": 0.1555,
+ "step": 6750
+ },
+ {
+ "epoch": 6.947584789311408,
+ "grad_norm": 2.8085215091705322,
+ "learning_rate": 3.223185265438787e-06,
+ "loss": 0.1882,
+ "step": 6760
+ },
+ {
+ "epoch": 6.957862281603289,
+ "grad_norm": 2.6439192295074463,
+ "learning_rate": 3.212351029252438e-06,
+ "loss": 0.188,
+ "step": 6770
+ },
+ {
+ "epoch": 6.968139773895169,
+ "grad_norm": 3.1821489334106445,
+ "learning_rate": 3.201516793066089e-06,
+ "loss": 0.1666,
+ "step": 6780
+ },
+ {
+ "epoch": 6.9784172661870505,
+ "grad_norm": 4.910435199737549,
+ "learning_rate": 3.19068255687974e-06,
+ "loss": 0.2278,
+ "step": 6790
+ },
+ {
+ "epoch": 6.988694758478931,
+ "grad_norm": 3.4465863704681396,
+ "learning_rate": 3.1798483206933913e-06,
+ "loss": 0.2256,
+ "step": 6800
+ },
+ {
+ "epoch": 6.998972250770812,
+ "grad_norm": 4.111727237701416,
+ "learning_rate": 3.1690140845070427e-06,
+ "loss": 0.1541,
+ "step": 6810
+ },
+ {
+ "epoch": 7.0,
+ "eval_loss": 0.3404325246810913,
+ "eval_runtime": 1214.8326,
+ "eval_samples_per_second": 4.83,
+ "eval_steps_per_second": 0.151,
+ "eval_wer": 0.3377271689125257,
+ "step": 6811
+ },
+ {
+ "epoch": 7.009249743062693,
+ "grad_norm": 2.5531973838806152,
+ "learning_rate": 3.1581798483206937e-06,
+ "loss": 0.1542,
+ "step": 6820
+ },
+ {
+ "epoch": 7.019527235354573,
+ "grad_norm": 2.626990556716919,
+ "learning_rate": 3.1473456121343447e-06,
+ "loss": 0.2472,
+ "step": 6830
+ },
+ {
+ "epoch": 7.029804727646455,
+ "grad_norm": 3.149609327316284,
+ "learning_rate": 3.136511375947996e-06,
+ "loss": 0.1635,
+ "step": 6840
+ },
+ {
+ "epoch": 7.040082219938335,
+ "grad_norm": 2.6103062629699707,
+ "learning_rate": 3.125677139761647e-06,
+ "loss": 0.1614,
+ "step": 6850
+ },
+ {
+ "epoch": 7.0503597122302155,
+ "grad_norm": 3.6957461833953857,
+ "learning_rate": 3.114842903575298e-06,
+ "loss": 0.2344,
+ "step": 6860
+ },
+ {
+ "epoch": 7.060637204522097,
+ "grad_norm": 3.363518238067627,
+ "learning_rate": 3.104008667388949e-06,
+ "loss": 0.1342,
+ "step": 6870
+ },
+ {
+ "epoch": 7.070914696813977,
+ "grad_norm": 4.158355236053467,
+ "learning_rate": 3.093174431202601e-06,
+ "loss": 0.2523,
+ "step": 6880
+ },
+ {
+ "epoch": 7.081192189105858,
+ "grad_norm": 2.6004796028137207,
+ "learning_rate": 3.082340195016252e-06,
+ "loss": 0.1161,
+ "step": 6890
+ },
+ {
+ "epoch": 7.091469681397739,
+ "grad_norm": 2.7090814113616943,
+ "learning_rate": 3.071505958829903e-06,
+ "loss": 0.1893,
+ "step": 6900
+ },
+ {
+ "epoch": 7.10174717368962,
+ "grad_norm": 3.5262081623077393,
+ "learning_rate": 3.060671722643554e-06,
+ "loss": 0.2165,
+ "step": 6910
+ },
+ {
+ "epoch": 7.112024665981501,
+ "grad_norm": 4.6180267333984375,
+ "learning_rate": 3.049837486457205e-06,
+ "loss": 0.1831,
+ "step": 6920
+ },
+ {
+ "epoch": 7.122302158273381,
+ "grad_norm": 3.9975624084472656,
+ "learning_rate": 3.039003250270856e-06,
+ "loss": 0.175,
+ "step": 6930
+ },
+ {
+ "epoch": 7.132579650565262,
+ "grad_norm": 4.310418605804443,
+ "learning_rate": 3.0281690140845073e-06,
+ "loss": 0.2098,
+ "step": 6940
+ },
+ {
+ "epoch": 7.142857142857143,
+ "grad_norm": 2.2856907844543457,
+ "learning_rate": 3.0173347778981583e-06,
+ "loss": 0.189,
+ "step": 6950
+ },
+ {
+ "epoch": 7.153134635149024,
+ "grad_norm": 3.758925199508667,
+ "learning_rate": 3.0065005417118097e-06,
+ "loss": 0.2086,
+ "step": 6960
+ },
+ {
+ "epoch": 7.163412127440904,
+ "grad_norm": 4.2040252685546875,
+ "learning_rate": 2.9956663055254607e-06,
+ "loss": 0.2719,
+ "step": 6970
+ },
+ {
+ "epoch": 7.1736896197327855,
+ "grad_norm": 3.452918767929077,
+ "learning_rate": 2.984832069339112e-06,
+ "loss": 0.1471,
+ "step": 6980
+ },
+ {
+ "epoch": 7.183967112024666,
+ "grad_norm": 3.493724822998047,
+ "learning_rate": 2.973997833152763e-06,
+ "loss": 0.1716,
+ "step": 6990
+ },
+ {
+ "epoch": 7.194244604316546,
+ "grad_norm": 2.078178882598877,
+ "learning_rate": 2.963163596966414e-06,
+ "loss": 0.2764,
+ "step": 7000
+ },
+ {
+ "epoch": 7.204522096608428,
+ "grad_norm": 3.1027865409851074,
+ "learning_rate": 2.952329360780065e-06,
+ "loss": 0.22,
+ "step": 7010
+ },
+ {
+ "epoch": 7.214799588900308,
+ "grad_norm": 3.700126886367798,
+ "learning_rate": 2.941495124593716e-06,
+ "loss": 0.1683,
+ "step": 7020
+ },
+ {
+ "epoch": 7.225077081192189,
+ "grad_norm": 3.756478786468506,
+ "learning_rate": 2.930660888407367e-06,
+ "loss": 0.2066,
+ "step": 7030
+ },
+ {
+ "epoch": 7.23535457348407,
+ "grad_norm": 3.128451108932495,
+ "learning_rate": 2.919826652221019e-06,
+ "loss": 0.1784,
+ "step": 7040
+ },
+ {
+ "epoch": 7.2456320657759505,
+ "grad_norm": 3.994805097579956,
+ "learning_rate": 2.90899241603467e-06,
+ "loss": 0.157,
+ "step": 7050
+ },
+ {
+ "epoch": 7.255909558067831,
+ "grad_norm": 4.610233783721924,
+ "learning_rate": 2.898158179848321e-06,
+ "loss": 0.2441,
+ "step": 7060
+ },
+ {
+ "epoch": 7.266187050359712,
+ "grad_norm": 2.6469390392303467,
+ "learning_rate": 2.887323943661972e-06,
+ "loss": 0.1671,
+ "step": 7070
+ },
+ {
+ "epoch": 7.276464542651593,
+ "grad_norm": 2.4186360836029053,
+ "learning_rate": 2.8764897074756233e-06,
+ "loss": 0.1811,
+ "step": 7080
+ },
+ {
+ "epoch": 7.286742034943474,
+ "grad_norm": 1.9925665855407715,
+ "learning_rate": 2.8656554712892743e-06,
+ "loss": 0.222,
+ "step": 7090
+ },
+ {
+ "epoch": 7.297019527235355,
+ "grad_norm": 3.5605309009552,
+ "learning_rate": 2.8548212351029253e-06,
+ "loss": 0.197,
+ "step": 7100
+ },
+ {
+ "epoch": 7.307297019527235,
+ "grad_norm": 2.999796152114868,
+ "learning_rate": 2.8439869989165763e-06,
+ "loss": 0.2111,
+ "step": 7110
+ },
+ {
+ "epoch": 7.3175745118191164,
+ "grad_norm": 4.410515308380127,
+ "learning_rate": 2.833152762730228e-06,
+ "loss": 0.1883,
+ "step": 7120
+ },
+ {
+ "epoch": 7.327852004110997,
+ "grad_norm": 2.7855818271636963,
+ "learning_rate": 2.822318526543879e-06,
+ "loss": 0.2016,
+ "step": 7130
+ },
+ {
+ "epoch": 7.338129496402877,
+ "grad_norm": 3.494032859802246,
+ "learning_rate": 2.81148429035753e-06,
+ "loss": 0.2146,
+ "step": 7140
+ },
+ {
+ "epoch": 7.348406988694759,
+ "grad_norm": 2.8652873039245605,
+ "learning_rate": 2.800650054171181e-06,
+ "loss": 0.1437,
+ "step": 7150
+ },
+ {
+ "epoch": 7.358684480986639,
+ "grad_norm": 2.307541847229004,
+ "learning_rate": 2.789815817984832e-06,
+ "loss": 0.1561,
+ "step": 7160
+ },
+ {
+ "epoch": 7.36896197327852,
+ "grad_norm": 2.4155330657958984,
+ "learning_rate": 2.778981581798483e-06,
+ "loss": 0.1579,
+ "step": 7170
+ },
+ {
+ "epoch": 7.379239465570401,
+ "grad_norm": 2.9460861682891846,
+ "learning_rate": 2.7681473456121345e-06,
+ "loss": 0.1708,
+ "step": 7180
+ },
+ {
+ "epoch": 7.3895169578622815,
+ "grad_norm": 3.845841407775879,
+ "learning_rate": 2.7573131094257855e-06,
+ "loss": 0.2023,
+ "step": 7190
+ },
+ {
+ "epoch": 7.399794450154163,
+ "grad_norm": 3.194304943084717,
+ "learning_rate": 2.746478873239437e-06,
+ "loss": 0.187,
+ "step": 7200
+ },
+ {
+ "epoch": 7.410071942446043,
+ "grad_norm": 3.090686559677124,
+ "learning_rate": 2.735644637053088e-06,
+ "loss": 0.2484,
+ "step": 7210
+ },
+ {
+ "epoch": 7.420349434737924,
+ "grad_norm": 2.5463943481445312,
+ "learning_rate": 2.7248104008667394e-06,
+ "loss": 0.1819,
+ "step": 7220
+ },
+ {
+ "epoch": 7.430626927029805,
+ "grad_norm": 3.6043484210968018,
+ "learning_rate": 2.7139761646803903e-06,
+ "loss": 0.178,
+ "step": 7230
+ },
+ {
+ "epoch": 7.440904419321686,
+ "grad_norm": 2.053682804107666,
+ "learning_rate": 2.7031419284940413e-06,
+ "loss": 0.1712,
+ "step": 7240
+ },
+ {
+ "epoch": 7.451181911613566,
+ "grad_norm": 3.977004051208496,
+ "learning_rate": 2.6923076923076923e-06,
+ "loss": 0.1217,
+ "step": 7250
+ },
+ {
+ "epoch": 7.461459403905447,
+ "grad_norm": 3.555269479751587,
+ "learning_rate": 2.6814734561213433e-06,
+ "loss": 0.1494,
+ "step": 7260
+ },
+ {
+ "epoch": 7.471736896197328,
+ "grad_norm": 3.3504462242126465,
+ "learning_rate": 2.6706392199349947e-06,
+ "loss": 0.2055,
+ "step": 7270
+ },
+ {
+ "epoch": 7.482014388489208,
+ "grad_norm": 3.7585649490356445,
+ "learning_rate": 2.659804983748646e-06,
+ "loss": 0.2169,
+ "step": 7280
+ },
+ {
+ "epoch": 7.49229188078109,
+ "grad_norm": 3.8398501873016357,
+ "learning_rate": 2.648970747562297e-06,
+ "loss": 0.2372,
+ "step": 7290
+ },
+ {
+ "epoch": 7.50256937307297,
+ "grad_norm": 3.8778791427612305,
+ "learning_rate": 2.638136511375948e-06,
+ "loss": 0.2498,
+ "step": 7300
+ },
+ {
+ "epoch": 7.5128468653648515,
+ "grad_norm": 4.305149555206299,
+ "learning_rate": 2.6273022751895996e-06,
+ "loss": 0.2394,
+ "step": 7310
+ },
+ {
+ "epoch": 7.523124357656732,
+ "grad_norm": 2.9688165187835693,
+ "learning_rate": 2.6164680390032506e-06,
+ "loss": 0.2236,
+ "step": 7320
+ },
+ {
+ "epoch": 7.533401849948612,
+ "grad_norm": 3.433166265487671,
+ "learning_rate": 2.6056338028169015e-06,
+ "loss": 0.252,
+ "step": 7330
+ },
+ {
+ "epoch": 7.543679342240493,
+ "grad_norm": 6.375361919403076,
+ "learning_rate": 2.5947995666305525e-06,
+ "loss": 0.1974,
+ "step": 7340
+ },
+ {
+ "epoch": 7.553956834532374,
+ "grad_norm": 3.1338272094726562,
+ "learning_rate": 2.5839653304442044e-06,
+ "loss": 0.1581,
+ "step": 7350
+ },
+ {
+ "epoch": 7.564234326824255,
+ "grad_norm": 3.2185239791870117,
+ "learning_rate": 2.5731310942578554e-06,
+ "loss": 0.2223,
+ "step": 7360
+ },
+ {
+ "epoch": 7.574511819116136,
+ "grad_norm": 3.0594232082366943,
+ "learning_rate": 2.5622968580715064e-06,
+ "loss": 0.1841,
+ "step": 7370
+ },
+ {
+ "epoch": 7.5847893114080165,
+ "grad_norm": 3.161983013153076,
+ "learning_rate": 2.5514626218851574e-06,
+ "loss": 0.168,
+ "step": 7380
+ },
+ {
+ "epoch": 7.595066803699897,
+ "grad_norm": 3.0678579807281494,
+ "learning_rate": 2.5406283856988083e-06,
+ "loss": 0.1824,
+ "step": 7390
+ },
+ {
+ "epoch": 7.605344295991778,
+ "grad_norm": 2.438215494155884,
+ "learning_rate": 2.5297941495124593e-06,
+ "loss": 0.1585,
+ "step": 7400
+ },
+ {
+ "epoch": 7.615621788283659,
+ "grad_norm": 3.349719285964966,
+ "learning_rate": 2.5189599133261108e-06,
+ "loss": 0.1897,
+ "step": 7410
+ },
+ {
+ "epoch": 7.625899280575539,
+ "grad_norm": 2.981184720993042,
+ "learning_rate": 2.5081256771397617e-06,
+ "loss": 0.1652,
+ "step": 7420
+ },
+ {
+ "epoch": 7.636176772867421,
+ "grad_norm": 3.5360848903656006,
+ "learning_rate": 2.497291440953413e-06,
+ "loss": 0.1877,
+ "step": 7430
+ },
+ {
+ "epoch": 7.646454265159301,
+ "grad_norm": 3.8278608322143555,
+ "learning_rate": 2.486457204767064e-06,
+ "loss": 0.2073,
+ "step": 7440
+ },
+ {
+ "epoch": 7.6567317574511815,
+ "grad_norm": 3.7580535411834717,
+ "learning_rate": 2.4756229685807156e-06,
+ "loss": 0.2043,
+ "step": 7450
+ },
+ {
+ "epoch": 7.667009249743063,
+ "grad_norm": 2.7531728744506836,
+ "learning_rate": 2.4647887323943666e-06,
+ "loss": 0.2109,
+ "step": 7460
+ },
+ {
+ "epoch": 7.677286742034943,
+ "grad_norm": 3.5091471672058105,
+ "learning_rate": 2.4539544962080176e-06,
+ "loss": 0.2381,
+ "step": 7470
+ },
+ {
+ "epoch": 7.687564234326825,
+ "grad_norm": 3.000122547149658,
+ "learning_rate": 2.4431202600216686e-06,
+ "loss": 0.2226,
+ "step": 7480
+ },
+ {
+ "epoch": 7.697841726618705,
+ "grad_norm": 2.8081085681915283,
+ "learning_rate": 2.43228602383532e-06,
+ "loss": 0.1817,
+ "step": 7490
+ },
+ {
+ "epoch": 7.708119218910586,
+ "grad_norm": 3.1888771057128906,
+ "learning_rate": 2.421451787648971e-06,
+ "loss": 0.1591,
+ "step": 7500
+ },
+ {
+ "epoch": 7.718396711202467,
+ "grad_norm": 2.8959813117980957,
+ "learning_rate": 2.410617551462622e-06,
+ "loss": 0.1577,
+ "step": 7510
+ },
+ {
+ "epoch": 7.728674203494347,
+ "grad_norm": 3.66943359375,
+ "learning_rate": 2.399783315276273e-06,
+ "loss": 0.2263,
+ "step": 7520
+ },
+ {
+ "epoch": 7.738951695786228,
+ "grad_norm": 2.4040560722351074,
+ "learning_rate": 2.3889490790899244e-06,
+ "loss": 0.1604,
+ "step": 7530
+ },
+ {
+ "epoch": 7.749229188078109,
+ "grad_norm": 3.37458872795105,
+ "learning_rate": 2.3781148429035754e-06,
+ "loss": 0.1309,
+ "step": 7540
+ },
+ {
+ "epoch": 7.75950668036999,
+ "grad_norm": 3.064039707183838,
+ "learning_rate": 2.3672806067172268e-06,
+ "loss": 0.1594,
+ "step": 7550
+ },
+ {
+ "epoch": 7.76978417266187,
+ "grad_norm": 3.4622888565063477,
+ "learning_rate": 2.3564463705308778e-06,
+ "loss": 0.1117,
+ "step": 7560
+ },
+ {
+ "epoch": 7.7800616649537515,
+ "grad_norm": 4.166457176208496,
+ "learning_rate": 2.345612134344529e-06,
+ "loss": 0.1856,
+ "step": 7570
+ },
+ {
+ "epoch": 7.790339157245632,
+ "grad_norm": 4.1613054275512695,
+ "learning_rate": 2.33477789815818e-06,
+ "loss": 0.1904,
+ "step": 7580
+ },
+ {
+ "epoch": 7.800616649537513,
+ "grad_norm": 2.1978330612182617,
+ "learning_rate": 2.323943661971831e-06,
+ "loss": 0.1733,
+ "step": 7590
+ },
+ {
+ "epoch": 7.810894141829394,
+ "grad_norm": 3.197047472000122,
+ "learning_rate": 2.313109425785482e-06,
+ "loss": 0.2034,
+ "step": 7600
+ },
+ {
+ "epoch": 7.821171634121274,
+ "grad_norm": 3.35565447807312,
+ "learning_rate": 2.3022751895991336e-06,
+ "loss": 0.2322,
+ "step": 7610
+ },
+ {
+ "epoch": 7.831449126413155,
+ "grad_norm": 2.9416370391845703,
+ "learning_rate": 2.2914409534127846e-06,
+ "loss": 0.1569,
+ "step": 7620
+ },
+ {
+ "epoch": 7.841726618705036,
+ "grad_norm": 3.303370952606201,
+ "learning_rate": 2.2806067172264356e-06,
+ "loss": 0.1698,
+ "step": 7630
+ },
+ {
+ "epoch": 7.8520041109969165,
+ "grad_norm": 3.4625742435455322,
+ "learning_rate": 2.2697724810400866e-06,
+ "loss": 0.1802,
+ "step": 7640
+ },
+ {
+ "epoch": 7.862281603288798,
+ "grad_norm": 2.0214016437530518,
+ "learning_rate": 2.258938244853738e-06,
+ "loss": 0.1632,
+ "step": 7650
+ },
+ {
+ "epoch": 7.872559095580678,
+ "grad_norm": 4.070474624633789,
+ "learning_rate": 2.248104008667389e-06,
+ "loss": 0.211,
+ "step": 7660
+ },
+ {
+ "epoch": 7.882836587872559,
+ "grad_norm": 4.477987766265869,
+ "learning_rate": 2.2372697724810404e-06,
+ "loss": 0.2202,
+ "step": 7670
+ },
+ {
+ "epoch": 7.89311408016444,
+ "grad_norm": 3.7259104251861572,
+ "learning_rate": 2.2264355362946914e-06,
+ "loss": 0.2256,
+ "step": 7680
+ },
+ {
+ "epoch": 7.903391572456321,
+ "grad_norm": 2.9181573390960693,
+ "learning_rate": 2.2156013001083428e-06,
+ "loss": 0.1601,
+ "step": 7690
+ },
+ {
+ "epoch": 7.913669064748201,
+ "grad_norm": 5.0646138191223145,
+ "learning_rate": 2.2047670639219938e-06,
+ "loss": 0.19,
+ "step": 7700
+ },
+ {
+ "epoch": 7.923946557040082,
+ "grad_norm": 2.449718952178955,
+ "learning_rate": 2.1939328277356448e-06,
+ "loss": 0.1684,
+ "step": 7710
+ },
+ {
+ "epoch": 7.934224049331963,
+ "grad_norm": 2.8502633571624756,
+ "learning_rate": 2.1830985915492958e-06,
+ "loss": 0.1802,
+ "step": 7720
+ },
+ {
+ "epoch": 7.944501541623843,
+ "grad_norm": 3.0859627723693848,
+ "learning_rate": 2.172264355362947e-06,
+ "loss": 0.2066,
+ "step": 7730
+ },
+ {
+ "epoch": 7.954779033915725,
+ "grad_norm": 3.228625774383545,
+ "learning_rate": 2.161430119176598e-06,
+ "loss": 0.1639,
+ "step": 7740
+ },
+ {
+ "epoch": 7.965056526207605,
+ "grad_norm": 4.516101360321045,
+ "learning_rate": 2.150595882990249e-06,
+ "loss": 0.2496,
+ "step": 7750
+ },
+ {
+ "epoch": 7.9753340184994865,
+ "grad_norm": 3.3014371395111084,
+ "learning_rate": 2.1397616468039006e-06,
+ "loss": 0.1935,
+ "step": 7760
+ },
+ {
+ "epoch": 7.985611510791367,
+ "grad_norm": 3.219586133956909,
+ "learning_rate": 2.1289274106175516e-06,
+ "loss": 0.1628,
+ "step": 7770
+ },
+ {
+ "epoch": 7.9958890030832475,
+ "grad_norm": 2.924616813659668,
+ "learning_rate": 2.1180931744312026e-06,
+ "loss": 0.1387,
+ "step": 7780
+ },
+ {
+ "epoch": 8.0,
+ "eval_loss": 0.337012380361557,
+ "eval_runtime": 1074.5571,
+ "eval_samples_per_second": 5.461,
+ "eval_steps_per_second": 0.171,
+ "eval_wer": 0.31961891089431954,
+ "step": 7784
+ },
+ {
+ "epoch": 8.006166495375128,
+ "grad_norm": 3.2360055446624756,
+ "learning_rate": 2.107258938244854e-06,
+ "loss": 0.1754,
+ "step": 7790
+ },
+ {
+ "epoch": 8.016443987667008,
+ "grad_norm": 3.3808040618896484,
+ "learning_rate": 2.096424702058505e-06,
+ "loss": 0.1478,
+ "step": 7800
+ },
+ {
+ "epoch": 8.02672147995889,
+ "grad_norm": 3.0137150287628174,
+ "learning_rate": 2.0855904658721564e-06,
+ "loss": 0.16,
+ "step": 7810
+ },
+ {
+ "epoch": 8.036998972250771,
+ "grad_norm": 3.083184003829956,
+ "learning_rate": 2.0747562296858074e-06,
+ "loss": 0.2265,
+ "step": 7820
+ },
+ {
+ "epoch": 8.047276464542652,
+ "grad_norm": 3.101036310195923,
+ "learning_rate": 2.0639219934994584e-06,
+ "loss": 0.2517,
+ "step": 7830
+ },
+ {
+ "epoch": 8.057553956834532,
+ "grad_norm": 2.7504403591156006,
+ "learning_rate": 2.05308775731311e-06,
+ "loss": 0.147,
+ "step": 7840
+ },
+ {
+ "epoch": 8.067831449126412,
+ "grad_norm": 2.818974733352661,
+ "learning_rate": 2.0422535211267608e-06,
+ "loss": 0.1647,
+ "step": 7850
+ },
+ {
+ "epoch": 8.078108941418295,
+ "grad_norm": 2.7966461181640625,
+ "learning_rate": 2.0314192849404118e-06,
+ "loss": 0.1488,
+ "step": 7860
+ },
+ {
+ "epoch": 8.088386433710175,
+ "grad_norm": 2.811796188354492,
+ "learning_rate": 2.0205850487540628e-06,
+ "loss": 0.1302,
+ "step": 7870
+ },
+ {
+ "epoch": 8.098663926002056,
+ "grad_norm": 3.827772617340088,
+ "learning_rate": 2.009750812567714e-06,
+ "loss": 0.2351,
+ "step": 7880
+ },
+ {
+ "epoch": 8.108941418293936,
+ "grad_norm": 2.867687463760376,
+ "learning_rate": 1.998916576381365e-06,
+ "loss": 0.1956,
+ "step": 7890
+ },
+ {
+ "epoch": 8.119218910585817,
+ "grad_norm": 4.618106365203857,
+ "learning_rate": 1.9880823401950166e-06,
+ "loss": 0.24,
+ "step": 7900
+ },
+ {
+ "epoch": 8.129496402877697,
+ "grad_norm": 4.213103771209717,
+ "learning_rate": 1.9772481040086676e-06,
+ "loss": 0.1727,
+ "step": 7910
+ },
+ {
+ "epoch": 8.13977389516958,
+ "grad_norm": 2.7897140979766846,
+ "learning_rate": 1.966413867822319e-06,
+ "loss": 0.1638,
+ "step": 7920
+ },
+ {
+ "epoch": 8.15005138746146,
+ "grad_norm": 4.621983528137207,
+ "learning_rate": 1.95557963163597e-06,
+ "loss": 0.2088,
+ "step": 7930
+ },
+ {
+ "epoch": 8.16032887975334,
+ "grad_norm": 4.140936374664307,
+ "learning_rate": 1.944745395449621e-06,
+ "loss": 0.1728,
+ "step": 7940
+ },
+ {
+ "epoch": 8.17060637204522,
+ "grad_norm": 4.0776872634887695,
+ "learning_rate": 1.933911159263272e-06,
+ "loss": 0.1286,
+ "step": 7950
+ },
+ {
+ "epoch": 8.180883864337101,
+ "grad_norm": 2.705868721008301,
+ "learning_rate": 1.9230769230769234e-06,
+ "loss": 0.1931,
+ "step": 7960
+ },
+ {
+ "epoch": 8.191161356628983,
+ "grad_norm": 1.8853975534439087,
+ "learning_rate": 1.9122426868905744e-06,
+ "loss": 0.1576,
+ "step": 7970
+ },
+ {
+ "epoch": 8.201438848920864,
+ "grad_norm": 2.564020872116089,
+ "learning_rate": 1.9014084507042254e-06,
+ "loss": 0.1561,
+ "step": 7980
+ },
+ {
+ "epoch": 8.211716341212744,
+ "grad_norm": 2.386718988418579,
+ "learning_rate": 1.8905742145178766e-06,
+ "loss": 0.1759,
+ "step": 7990
+ },
+ {
+ "epoch": 8.221993833504625,
+ "grad_norm": 2.7982938289642334,
+ "learning_rate": 1.8797399783315278e-06,
+ "loss": 0.1265,
+ "step": 8000
+ },
+ {
+ "epoch": 8.232271325796505,
+ "grad_norm": 3.071965456008911,
+ "learning_rate": 1.868905742145179e-06,
+ "loss": 0.1538,
+ "step": 8010
+ },
+ {
+ "epoch": 8.242548818088386,
+ "grad_norm": 3.2080135345458984,
+ "learning_rate": 1.85807150595883e-06,
+ "loss": 0.1783,
+ "step": 8020
+ },
+ {
+ "epoch": 8.252826310380268,
+ "grad_norm": 3.6915481090545654,
+ "learning_rate": 1.847237269772481e-06,
+ "loss": 0.1344,
+ "step": 8030
+ },
+ {
+ "epoch": 8.263103802672148,
+ "grad_norm": 2.8961260318756104,
+ "learning_rate": 1.8364030335861324e-06,
+ "loss": 0.2134,
+ "step": 8040
+ },
+ {
+ "epoch": 8.273381294964029,
+ "grad_norm": 2.1078479290008545,
+ "learning_rate": 1.8255687973997834e-06,
+ "loss": 0.1981,
+ "step": 8050
+ },
+ {
+ "epoch": 8.28365878725591,
+ "grad_norm": 3.1685969829559326,
+ "learning_rate": 1.8147345612134346e-06,
+ "loss": 0.2123,
+ "step": 8060
+ },
+ {
+ "epoch": 8.29393627954779,
+ "grad_norm": 2.517881155014038,
+ "learning_rate": 1.8039003250270856e-06,
+ "loss": 0.1796,
+ "step": 8070
+ },
+ {
+ "epoch": 8.30421377183967,
+ "grad_norm": 3.021322250366211,
+ "learning_rate": 1.793066088840737e-06,
+ "loss": 0.1674,
+ "step": 8080
+ },
+ {
+ "epoch": 8.314491264131552,
+ "grad_norm": 2.5098516941070557,
+ "learning_rate": 1.782231852654388e-06,
+ "loss": 0.1511,
+ "step": 8090
+ },
+ {
+ "epoch": 8.324768756423433,
+ "grad_norm": 1.9316967725753784,
+ "learning_rate": 1.7713976164680392e-06,
+ "loss": 0.1702,
+ "step": 8100
+ },
+ {
+ "epoch": 8.335046248715313,
+ "grad_norm": 4.33780574798584,
+ "learning_rate": 1.7605633802816902e-06,
+ "loss": 0.2348,
+ "step": 8110
+ },
+ {
+ "epoch": 8.345323741007194,
+ "grad_norm": 2.2334718704223633,
+ "learning_rate": 1.7497291440953416e-06,
+ "loss": 0.2045,
+ "step": 8120
+ },
+ {
+ "epoch": 8.355601233299074,
+ "grad_norm": 4.0230817794799805,
+ "learning_rate": 1.7388949079089926e-06,
+ "loss": 0.1708,
+ "step": 8130
+ },
+ {
+ "epoch": 8.365878725590957,
+ "grad_norm": 4.178952693939209,
+ "learning_rate": 1.7280606717226436e-06,
+ "loss": 0.2161,
+ "step": 8140
+ },
+ {
+ "epoch": 8.376156217882837,
+ "grad_norm": 4.08049201965332,
+ "learning_rate": 1.7172264355362948e-06,
+ "loss": 0.1663,
+ "step": 8150
+ },
+ {
+ "epoch": 8.386433710174718,
+ "grad_norm": 2.8087127208709717,
+ "learning_rate": 1.706392199349946e-06,
+ "loss": 0.1395,
+ "step": 8160
+ },
+ {
+ "epoch": 8.396711202466598,
+ "grad_norm": 2.3792827129364014,
+ "learning_rate": 1.6955579631635972e-06,
+ "loss": 0.1782,
+ "step": 8170
+ },
+ {
+ "epoch": 8.406988694758478,
+ "grad_norm": 2.47105073928833,
+ "learning_rate": 1.6847237269772482e-06,
+ "loss": 0.1244,
+ "step": 8180
+ },
+ {
+ "epoch": 8.417266187050359,
+ "grad_norm": 2.740131139755249,
+ "learning_rate": 1.6738894907908992e-06,
+ "loss": 0.1589,
+ "step": 8190
+ },
+ {
+ "epoch": 8.427543679342241,
+ "grad_norm": 3.5961389541625977,
+ "learning_rate": 1.6630552546045506e-06,
+ "loss": 0.2159,
+ "step": 8200
+ },
+ {
+ "epoch": 8.437821171634122,
+ "grad_norm": 2.2895331382751465,
+ "learning_rate": 1.6522210184182016e-06,
+ "loss": 0.2396,
+ "step": 8210
+ },
+ {
+ "epoch": 8.448098663926002,
+ "grad_norm": 2.7489428520202637,
+ "learning_rate": 1.6413867822318528e-06,
+ "loss": 0.1571,
+ "step": 8220
+ },
+ {
+ "epoch": 8.458376156217883,
+ "grad_norm": 2.2983639240264893,
+ "learning_rate": 1.630552546045504e-06,
+ "loss": 0.1638,
+ "step": 8230
+ },
+ {
+ "epoch": 8.468653648509763,
+ "grad_norm": 4.014866352081299,
+ "learning_rate": 1.6197183098591552e-06,
+ "loss": 0.1661,
+ "step": 8240
+ },
+ {
+ "epoch": 8.478931140801645,
+ "grad_norm": 3.341618299484253,
+ "learning_rate": 1.6088840736728062e-06,
+ "loss": 0.1608,
+ "step": 8250
+ },
+ {
+ "epoch": 8.489208633093526,
+ "grad_norm": 2.8683981895446777,
+ "learning_rate": 1.5980498374864572e-06,
+ "loss": 0.1725,
+ "step": 8260
+ },
+ {
+ "epoch": 8.499486125385406,
+ "grad_norm": 2.0408174991607666,
+ "learning_rate": 1.5872156013001086e-06,
+ "loss": 0.1465,
+ "step": 8270
+ },
+ {
+ "epoch": 8.509763617677287,
+ "grad_norm": 3.999898910522461,
+ "learning_rate": 1.5763813651137596e-06,
+ "loss": 0.2527,
+ "step": 8280
+ },
+ {
+ "epoch": 8.520041109969167,
+ "grad_norm": 1.6335963010787964,
+ "learning_rate": 1.5655471289274108e-06,
+ "loss": 0.2212,
+ "step": 8290
+ },
+ {
+ "epoch": 8.530318602261048,
+ "grad_norm": 3.973618268966675,
+ "learning_rate": 1.5547128927410618e-06,
+ "loss": 0.2156,
+ "step": 8300
+ },
+ {
+ "epoch": 8.54059609455293,
+ "grad_norm": 3.429445743560791,
+ "learning_rate": 1.5438786565547132e-06,
+ "loss": 0.1736,
+ "step": 8310
+ },
+ {
+ "epoch": 8.55087358684481,
+ "grad_norm": 3.0188443660736084,
+ "learning_rate": 1.5330444203683642e-06,
+ "loss": 0.2454,
+ "step": 8320
+ },
+ {
+ "epoch": 8.56115107913669,
+ "grad_norm": 3.69152569770813,
+ "learning_rate": 1.5222101841820152e-06,
+ "loss": 0.1734,
+ "step": 8330
+ },
+ {
+ "epoch": 8.571428571428571,
+ "grad_norm": 2.118117570877075,
+ "learning_rate": 1.5113759479956664e-06,
+ "loss": 0.1258,
+ "step": 8340
+ },
+ {
+ "epoch": 8.581706063720452,
+ "grad_norm": 3.0305957794189453,
+ "learning_rate": 1.5005417118093176e-06,
+ "loss": 0.1582,
+ "step": 8350
+ },
+ {
+ "epoch": 8.591983556012334,
+ "grad_norm": 3.0334131717681885,
+ "learning_rate": 1.4897074756229688e-06,
+ "loss": 0.2074,
+ "step": 8360
+ },
+ {
+ "epoch": 8.602261048304214,
+ "grad_norm": 4.964859962463379,
+ "learning_rate": 1.4788732394366198e-06,
+ "loss": 0.13,
+ "step": 8370
+ },
+ {
+ "epoch": 8.612538540596095,
+ "grad_norm": 2.461299180984497,
+ "learning_rate": 1.4680390032502708e-06,
+ "loss": 0.1735,
+ "step": 8380
+ },
+ {
+ "epoch": 8.622816032887975,
+ "grad_norm": 3.70139217376709,
+ "learning_rate": 1.4572047670639222e-06,
+ "loss": 0.204,
+ "step": 8390
+ },
+ {
+ "epoch": 8.633093525179856,
+ "grad_norm": 3.2094924449920654,
+ "learning_rate": 1.4463705308775732e-06,
+ "loss": 0.1603,
+ "step": 8400
+ },
+ {
+ "epoch": 8.643371017471736,
+ "grad_norm": 2.1742327213287354,
+ "learning_rate": 1.4355362946912244e-06,
+ "loss": 0.1881,
+ "step": 8410
+ },
+ {
+ "epoch": 8.653648509763618,
+ "grad_norm": 4.349330425262451,
+ "learning_rate": 1.4247020585048754e-06,
+ "loss": 0.2027,
+ "step": 8420
+ },
+ {
+ "epoch": 8.663926002055499,
+ "grad_norm": 2.7261245250701904,
+ "learning_rate": 1.4138678223185268e-06,
+ "loss": 0.1838,
+ "step": 8430
+ },
+ {
+ "epoch": 8.67420349434738,
+ "grad_norm": 3.8358700275421143,
+ "learning_rate": 1.4030335861321778e-06,
+ "loss": 0.164,
+ "step": 8440
+ },
+ {
+ "epoch": 8.68448098663926,
+ "grad_norm": 3.345456600189209,
+ "learning_rate": 1.3921993499458288e-06,
+ "loss": 0.2412,
+ "step": 8450
+ },
+ {
+ "epoch": 8.69475847893114,
+ "grad_norm": 4.2143235206604,
+ "learning_rate": 1.38136511375948e-06,
+ "loss": 0.1478,
+ "step": 8460
+ },
+ {
+ "epoch": 8.70503597122302,
+ "grad_norm": 4.512454032897949,
+ "learning_rate": 1.3705308775731312e-06,
+ "loss": 0.2299,
+ "step": 8470
+ },
+ {
+ "epoch": 8.715313463514903,
+ "grad_norm": 4.468479156494141,
+ "learning_rate": 1.3596966413867824e-06,
+ "loss": 0.2131,
+ "step": 8480
+ },
+ {
+ "epoch": 8.725590955806783,
+ "grad_norm": 2.5895509719848633,
+ "learning_rate": 1.3488624052004334e-06,
+ "loss": 0.1332,
+ "step": 8490
+ },
+ {
+ "epoch": 8.735868448098664,
+ "grad_norm": 3.56890606880188,
+ "learning_rate": 1.3380281690140844e-06,
+ "loss": 0.2086,
+ "step": 8500
+ },
+ {
+ "epoch": 8.746145940390544,
+ "grad_norm": 2.2950034141540527,
+ "learning_rate": 1.3271939328277358e-06,
+ "loss": 0.1424,
+ "step": 8510
+ },
+ {
+ "epoch": 8.756423432682425,
+ "grad_norm": 3.012187957763672,
+ "learning_rate": 1.3163596966413868e-06,
+ "loss": 0.1308,
+ "step": 8520
+ },
+ {
+ "epoch": 8.766700924974307,
+ "grad_norm": 3.362175464630127,
+ "learning_rate": 1.305525460455038e-06,
+ "loss": 0.1948,
+ "step": 8530
+ },
+ {
+ "epoch": 8.776978417266188,
+ "grad_norm": 2.3637940883636475,
+ "learning_rate": 1.294691224268689e-06,
+ "loss": 0.1984,
+ "step": 8540
+ },
+ {
+ "epoch": 8.787255909558068,
+ "grad_norm": 2.958972215652466,
+ "learning_rate": 1.2838569880823404e-06,
+ "loss": 0.2032,
+ "step": 8550
+ },
+ {
+ "epoch": 8.797533401849948,
+ "grad_norm": 3.2505674362182617,
+ "learning_rate": 1.2730227518959914e-06,
+ "loss": 0.205,
+ "step": 8560
+ },
+ {
+ "epoch": 8.807810894141829,
+ "grad_norm": 2.259211301803589,
+ "learning_rate": 1.2621885157096424e-06,
+ "loss": 0.1454,
+ "step": 8570
+ },
+ {
+ "epoch": 8.81808838643371,
+ "grad_norm": 2.959885358810425,
+ "learning_rate": 1.2513542795232936e-06,
+ "loss": 0.1514,
+ "step": 8580
+ },
+ {
+ "epoch": 8.828365878725592,
+ "grad_norm": 3.410799980163574,
+ "learning_rate": 1.2405200433369448e-06,
+ "loss": 0.1741,
+ "step": 8590
+ },
+ {
+ "epoch": 8.838643371017472,
+ "grad_norm": 2.8411593437194824,
+ "learning_rate": 1.229685807150596e-06,
+ "loss": 0.1531,
+ "step": 8600
+ },
+ {
+ "epoch": 8.848920863309353,
+ "grad_norm": 4.301185607910156,
+ "learning_rate": 1.218851570964247e-06,
+ "loss": 0.167,
+ "step": 8610
+ },
+ {
+ "epoch": 8.859198355601233,
+ "grad_norm": 3.080378770828247,
+ "learning_rate": 1.2080173347778982e-06,
+ "loss": 0.1387,
+ "step": 8620
+ },
+ {
+ "epoch": 8.869475847893113,
+ "grad_norm": 2.687458038330078,
+ "learning_rate": 1.1971830985915492e-06,
+ "loss": 0.1533,
+ "step": 8630
+ },
+ {
+ "epoch": 8.879753340184994,
+ "grad_norm": 1.749852180480957,
+ "learning_rate": 1.1863488624052004e-06,
+ "loss": 0.1726,
+ "step": 8640
+ },
+ {
+ "epoch": 8.890030832476876,
+ "grad_norm": 3.142988920211792,
+ "learning_rate": 1.1755146262188516e-06,
+ "loss": 0.1886,
+ "step": 8650
+ },
+ {
+ "epoch": 8.900308324768757,
+ "grad_norm": 3.1398189067840576,
+ "learning_rate": 1.1646803900325028e-06,
+ "loss": 0.1674,
+ "step": 8660
+ },
+ {
+ "epoch": 8.910585817060637,
+ "grad_norm": 4.18948221206665,
+ "learning_rate": 1.153846153846154e-06,
+ "loss": 0.1637,
+ "step": 8670
+ },
+ {
+ "epoch": 8.920863309352518,
+ "grad_norm": 2.723585844039917,
+ "learning_rate": 1.143011917659805e-06,
+ "loss": 0.1851,
+ "step": 8680
+ },
+ {
+ "epoch": 8.931140801644398,
+ "grad_norm": 2.2024054527282715,
+ "learning_rate": 1.1321776814734562e-06,
+ "loss": 0.2011,
+ "step": 8690
+ },
+ {
+ "epoch": 8.94141829393628,
+ "grad_norm": 3.5099658966064453,
+ "learning_rate": 1.1224268689057424e-06,
+ "loss": 0.2067,
+ "step": 8700
+ },
+ {
+ "epoch": 8.95169578622816,
+ "grad_norm": 3.7603750228881836,
+ "learning_rate": 1.1115926327193934e-06,
+ "loss": 0.2005,
+ "step": 8710
+ },
+ {
+ "epoch": 8.961973278520041,
+ "grad_norm": 2.5853493213653564,
+ "learning_rate": 1.1007583965330446e-06,
+ "loss": 0.1571,
+ "step": 8720
+ },
+ {
+ "epoch": 8.972250770811922,
+ "grad_norm": 3.06177020072937,
+ "learning_rate": 1.0899241603466956e-06,
+ "loss": 0.1363,
+ "step": 8730
+ },
+ {
+ "epoch": 8.982528263103802,
+ "grad_norm": 2.5474419593811035,
+ "learning_rate": 1.0790899241603468e-06,
+ "loss": 0.1746,
+ "step": 8740
+ },
+ {
+ "epoch": 8.992805755395683,
+ "grad_norm": 3.2577226161956787,
+ "learning_rate": 1.068255687973998e-06,
+ "loss": 0.1554,
+ "step": 8750
+ },
+ {
+ "epoch": 9.0,
+ "eval_loss": 0.338668555021286,
+ "eval_runtime": 1131.2123,
+ "eval_samples_per_second": 5.187,
+ "eval_steps_per_second": 0.163,
+ "eval_wer": 0.3113152141994845,
+ "step": 8757
+ },
+ {
+ "epoch": 9.003083247687565,
+ "grad_norm": 3.3654706478118896,
+ "learning_rate": 1.0574214517876492e-06,
+ "loss": 0.1761,
+ "step": 8760
+ },
+ {
+ "epoch": 9.013360739979445,
+ "grad_norm": 3.2227976322174072,
+ "learning_rate": 1.0465872156013002e-06,
+ "loss": 0.152,
+ "step": 8770
+ },
+ {
+ "epoch": 9.023638232271326,
+ "grad_norm": 3.463777542114258,
+ "learning_rate": 1.0357529794149514e-06,
+ "loss": 0.1545,
+ "step": 8780
+ },
+ {
+ "epoch": 9.033915724563206,
+ "grad_norm": 2.3301193714141846,
+ "learning_rate": 1.0249187432286024e-06,
+ "loss": 0.1805,
+ "step": 8790
+ },
+ {
+ "epoch": 9.044193216855087,
+ "grad_norm": 3.6022043228149414,
+ "learning_rate": 1.0140845070422536e-06,
+ "loss": 0.1652,
+ "step": 8800
+ },
+ {
+ "epoch": 9.054470709146969,
+ "grad_norm": 1.9217338562011719,
+ "learning_rate": 1.0032502708559048e-06,
+ "loss": 0.1265,
+ "step": 8810
+ },
+ {
+ "epoch": 9.06474820143885,
+ "grad_norm": 2.3764290809631348,
+ "learning_rate": 9.92416034669556e-07,
+ "loss": 0.152,
+ "step": 8820
+ },
+ {
+ "epoch": 9.07502569373073,
+ "grad_norm": 3.19063663482666,
+ "learning_rate": 9.81581798483207e-07,
+ "loss": 0.2095,
+ "step": 8830
+ },
+ {
+ "epoch": 9.08530318602261,
+ "grad_norm": 3.6087141036987305,
+ "learning_rate": 9.707475622968582e-07,
+ "loss": 0.1757,
+ "step": 8840
+ },
+ {
+ "epoch": 9.09558067831449,
+ "grad_norm": 3.343388080596924,
+ "learning_rate": 9.599133261105092e-07,
+ "loss": 0.1481,
+ "step": 8850
+ },
+ {
+ "epoch": 9.105858170606371,
+ "grad_norm": 3.509066581726074,
+ "learning_rate": 9.490790899241605e-07,
+ "loss": 0.1844,
+ "step": 8860
+ },
+ {
+ "epoch": 9.116135662898253,
+ "grad_norm": 5.043178081512451,
+ "learning_rate": 9.382448537378115e-07,
+ "loss": 0.1882,
+ "step": 8870
+ },
+ {
+ "epoch": 9.126413155190134,
+ "grad_norm": 2.8442821502685547,
+ "learning_rate": 9.274106175514627e-07,
+ "loss": 0.1949,
+ "step": 8880
+ },
+ {
+ "epoch": 9.136690647482014,
+ "grad_norm": 4.197086334228516,
+ "learning_rate": 9.165763813651138e-07,
+ "loss": 0.1897,
+ "step": 8890
+ },
+ {
+ "epoch": 9.146968139773895,
+ "grad_norm": 4.18369722366333,
+ "learning_rate": 9.05742145178765e-07,
+ "loss": 0.1915,
+ "step": 8900
+ },
+ {
+ "epoch": 9.157245632065775,
+ "grad_norm": 5.1201395988464355,
+ "learning_rate": 8.949079089924161e-07,
+ "loss": 0.1678,
+ "step": 8910
+ },
+ {
+ "epoch": 9.167523124357658,
+ "grad_norm": 3.6054434776306152,
+ "learning_rate": 8.840736728060673e-07,
+ "loss": 0.1345,
+ "step": 8920
+ },
+ {
+ "epoch": 9.177800616649538,
+ "grad_norm": 1.9621152877807617,
+ "learning_rate": 8.732394366197183e-07,
+ "loss": 0.1695,
+ "step": 8930
+ },
+ {
+ "epoch": 9.188078108941419,
+ "grad_norm": 2.4010167121887207,
+ "learning_rate": 8.624052004333695e-07,
+ "loss": 0.198,
+ "step": 8940
+ },
+ {
+ "epoch": 9.198355601233299,
+ "grad_norm": 4.3703837394714355,
+ "learning_rate": 8.515709642470206e-07,
+ "loss": 0.1694,
+ "step": 8950
+ },
+ {
+ "epoch": 9.20863309352518,
+ "grad_norm": 3.70489239692688,
+ "learning_rate": 8.407367280606718e-07,
+ "loss": 0.1332,
+ "step": 8960
+ },
+ {
+ "epoch": 9.21891058581706,
+ "grad_norm": 2.4999241828918457,
+ "learning_rate": 8.299024918743229e-07,
+ "loss": 0.1845,
+ "step": 8970
+ },
+ {
+ "epoch": 9.229188078108942,
+ "grad_norm": 3.0255820751190186,
+ "learning_rate": 8.190682556879741e-07,
+ "loss": 0.1724,
+ "step": 8980
+ },
+ {
+ "epoch": 9.239465570400823,
+ "grad_norm": 4.293249130249023,
+ "learning_rate": 8.082340195016251e-07,
+ "loss": 0.1723,
+ "step": 8990
+ },
+ {
+ "epoch": 9.249743062692703,
+ "grad_norm": 3.077747106552124,
+ "learning_rate": 7.973997833152763e-07,
+ "loss": 0.13,
+ "step": 9000
+ },
+ {
+ "epoch": 9.260020554984584,
+ "grad_norm": 3.6788992881774902,
+ "learning_rate": 7.865655471289274e-07,
+ "loss": 0.1805,
+ "step": 9010
+ },
+ {
+ "epoch": 9.270298047276464,
+ "grad_norm": 2.088778257369995,
+ "learning_rate": 7.757313109425786e-07,
+ "loss": 0.2132,
+ "step": 9020
+ },
+ {
+ "epoch": 9.280575539568344,
+ "grad_norm": 3.2747962474823,
+ "learning_rate": 7.648970747562297e-07,
+ "loss": 0.2321,
+ "step": 9030
+ },
+ {
+ "epoch": 9.290853031860227,
+ "grad_norm": 2.9871788024902344,
+ "learning_rate": 7.540628385698809e-07,
+ "loss": 0.2326,
+ "step": 9040
+ },
+ {
+ "epoch": 9.301130524152107,
+ "grad_norm": 3.4682159423828125,
+ "learning_rate": 7.432286023835321e-07,
+ "loss": 0.1606,
+ "step": 9050
+ },
+ {
+ "epoch": 9.311408016443988,
+ "grad_norm": 2.0525877475738525,
+ "learning_rate": 7.323943661971832e-07,
+ "loss": 0.1504,
+ "step": 9060
+ },
+ {
+ "epoch": 9.321685508735868,
+ "grad_norm": 2.734421491622925,
+ "learning_rate": 7.215601300108344e-07,
+ "loss": 0.2069,
+ "step": 9070
+ },
+ {
+ "epoch": 9.331963001027749,
+ "grad_norm": 2.3396363258361816,
+ "learning_rate": 7.107258938244854e-07,
+ "loss": 0.1294,
+ "step": 9080
+ },
+ {
+ "epoch": 9.34224049331963,
+ "grad_norm": 3.383275032043457,
+ "learning_rate": 6.998916576381366e-07,
+ "loss": 0.1732,
+ "step": 9090
+ },
+ {
+ "epoch": 9.352517985611511,
+ "grad_norm": 3.2529306411743164,
+ "learning_rate": 6.890574214517877e-07,
+ "loss": 0.2062,
+ "step": 9100
+ },
+ {
+ "epoch": 9.362795477903392,
+ "grad_norm": 2.869129180908203,
+ "learning_rate": 6.782231852654389e-07,
+ "loss": 0.1562,
+ "step": 9110
+ },
+ {
+ "epoch": 9.373072970195272,
+ "grad_norm": 2.590285301208496,
+ "learning_rate": 6.6738894907909e-07,
+ "loss": 0.1701,
+ "step": 9120
+ },
+ {
+ "epoch": 9.383350462487153,
+ "grad_norm": 2.4754040241241455,
+ "learning_rate": 6.565547128927412e-07,
+ "loss": 0.1178,
+ "step": 9130
+ },
+ {
+ "epoch": 9.393627954779033,
+ "grad_norm": 3.87373948097229,
+ "learning_rate": 6.457204767063922e-07,
+ "loss": 0.1609,
+ "step": 9140
+ },
+ {
+ "epoch": 9.403905447070915,
+ "grad_norm": 2.705742359161377,
+ "learning_rate": 6.348862405200434e-07,
+ "loss": 0.1613,
+ "step": 9150
+ },
+ {
+ "epoch": 9.414182939362796,
+ "grad_norm": 2.920018434524536,
+ "learning_rate": 6.240520043336945e-07,
+ "loss": 0.1814,
+ "step": 9160
+ },
+ {
+ "epoch": 9.424460431654676,
+ "grad_norm": 4.477851390838623,
+ "learning_rate": 6.132177681473456e-07,
+ "loss": 0.202,
+ "step": 9170
+ },
+ {
+ "epoch": 9.434737923946557,
+ "grad_norm": 2.75160551071167,
+ "learning_rate": 6.023835319609968e-07,
+ "loss": 0.1518,
+ "step": 9180
+ },
+ {
+ "epoch": 9.445015416238437,
+ "grad_norm": 3.1380374431610107,
+ "learning_rate": 5.915492957746479e-07,
+ "loss": 0.18,
+ "step": 9190
+ },
+ {
+ "epoch": 9.45529290853032,
+ "grad_norm": 3.3420140743255615,
+ "learning_rate": 5.80715059588299e-07,
+ "loss": 0.1787,
+ "step": 9200
+ },
+ {
+ "epoch": 9.4655704008222,
+ "grad_norm": 2.556490182876587,
+ "learning_rate": 5.698808234019502e-07,
+ "loss": 0.191,
+ "step": 9210
+ },
+ {
+ "epoch": 9.47584789311408,
+ "grad_norm": 3.437117576599121,
+ "learning_rate": 5.590465872156013e-07,
+ "loss": 0.1654,
+ "step": 9220
+ },
+ {
+ "epoch": 9.48612538540596,
+ "grad_norm": 3.4692296981811523,
+ "learning_rate": 5.482123510292524e-07,
+ "loss": 0.2181,
+ "step": 9230
+ },
+ {
+ "epoch": 9.496402877697841,
+ "grad_norm": 3.6604814529418945,
+ "learning_rate": 5.373781148429036e-07,
+ "loss": 0.1442,
+ "step": 9240
+ },
+ {
+ "epoch": 9.506680369989722,
+ "grad_norm": 3.1510462760925293,
+ "learning_rate": 5.265438786565547e-07,
+ "loss": 0.1292,
+ "step": 9250
+ },
+ {
+ "epoch": 9.516957862281604,
+ "grad_norm": 3.3884165287017822,
+ "learning_rate": 5.157096424702059e-07,
+ "loss": 0.1995,
+ "step": 9260
+ },
+ {
+ "epoch": 9.527235354573484,
+ "grad_norm": 4.259904384613037,
+ "learning_rate": 5.04875406283857e-07,
+ "loss": 0.1732,
+ "step": 9270
+ },
+ {
+ "epoch": 9.537512846865365,
+ "grad_norm": 2.8383660316467285,
+ "learning_rate": 4.940411700975082e-07,
+ "loss": 0.1649,
+ "step": 9280
+ },
+ {
+ "epoch": 9.547790339157245,
+ "grad_norm": 3.905196189880371,
+ "learning_rate": 4.832069339111593e-07,
+ "loss": 0.142,
+ "step": 9290
+ },
+ {
+ "epoch": 9.558067831449126,
+ "grad_norm": 4.23677396774292,
+ "learning_rate": 4.7237269772481046e-07,
+ "loss": 0.1343,
+ "step": 9300
+ },
+ {
+ "epoch": 9.568345323741006,
+ "grad_norm": 3.761404275894165,
+ "learning_rate": 4.615384615384616e-07,
+ "loss": 0.1493,
+ "step": 9310
+ },
+ {
+ "epoch": 9.578622816032889,
+ "grad_norm": 3.2816426753997803,
+ "learning_rate": 4.507042253521127e-07,
+ "loss": 0.1869,
+ "step": 9320
+ },
+ {
+ "epoch": 9.588900308324769,
+ "grad_norm": 2.690589666366577,
+ "learning_rate": 4.3986998916576387e-07,
+ "loss": 0.2415,
+ "step": 9330
+ },
+ {
+ "epoch": 9.59917780061665,
+ "grad_norm": 3.552229642868042,
+ "learning_rate": 4.29035752979415e-07,
+ "loss": 0.1602,
+ "step": 9340
+ },
+ {
+ "epoch": 9.60945529290853,
+ "grad_norm": 2.21655535697937,
+ "learning_rate": 4.1820151679306617e-07,
+ "loss": 0.1942,
+ "step": 9350
+ },
+ {
+ "epoch": 9.61973278520041,
+ "grad_norm": 2.400130271911621,
+ "learning_rate": 4.0736728060671727e-07,
+ "loss": 0.1857,
+ "step": 9360
+ },
+ {
+ "epoch": 9.630010277492293,
+ "grad_norm": 3.0297770500183105,
+ "learning_rate": 3.965330444203684e-07,
+ "loss": 0.162,
+ "step": 9370
+ },
+ {
+ "epoch": 9.640287769784173,
+ "grad_norm": 2.344736099243164,
+ "learning_rate": 3.8569880823401957e-07,
+ "loss": 0.1301,
+ "step": 9380
+ },
+ {
+ "epoch": 9.650565262076054,
+ "grad_norm": 2.695235013961792,
+ "learning_rate": 3.7486457204767067e-07,
+ "loss": 0.1601,
+ "step": 9390
+ },
+ {
+ "epoch": 9.660842754367934,
+ "grad_norm": 4.2191243171691895,
+ "learning_rate": 3.640303358613218e-07,
+ "loss": 0.2298,
+ "step": 9400
+ },
+ {
+ "epoch": 9.671120246659815,
+ "grad_norm": 2.826716184616089,
+ "learning_rate": 3.5319609967497297e-07,
+ "loss": 0.2066,
+ "step": 9410
+ },
+ {
+ "epoch": 9.681397738951695,
+ "grad_norm": 2.636258363723755,
+ "learning_rate": 3.4236186348862407e-07,
+ "loss": 0.2013,
+ "step": 9420
+ },
+ {
+ "epoch": 9.691675231243577,
+ "grad_norm": 4.043715953826904,
+ "learning_rate": 3.315276273022752e-07,
+ "loss": 0.1886,
+ "step": 9430
+ },
+ {
+ "epoch": 9.701952723535458,
+ "grad_norm": 3.714195728302002,
+ "learning_rate": 3.2069339111592637e-07,
+ "loss": 0.209,
+ "step": 9440
+ },
+ {
+ "epoch": 9.712230215827338,
+ "grad_norm": 2.3214926719665527,
+ "learning_rate": 3.0985915492957747e-07,
+ "loss": 0.1636,
+ "step": 9450
+ },
+ {
+ "epoch": 9.722507708119219,
+ "grad_norm": 3.8343677520751953,
+ "learning_rate": 2.990249187432286e-07,
+ "loss": 0.2022,
+ "step": 9460
+ },
+ {
+ "epoch": 9.732785200411099,
+ "grad_norm": 3.7999889850616455,
+ "learning_rate": 2.881906825568798e-07,
+ "loss": 0.1934,
+ "step": 9470
+ },
+ {
+ "epoch": 9.743062692702981,
+ "grad_norm": 3.770751953125,
+ "learning_rate": 2.773564463705309e-07,
+ "loss": 0.1713,
+ "step": 9480
+ },
+ {
+ "epoch": 9.753340184994862,
+ "grad_norm": 4.560354709625244,
+ "learning_rate": 2.66522210184182e-07,
+ "loss": 0.1804,
+ "step": 9490
+ },
+ {
+ "epoch": 9.763617677286742,
+ "grad_norm": 2.4577202796936035,
+ "learning_rate": 2.556879739978332e-07,
+ "loss": 0.1585,
+ "step": 9500
+ },
+ {
+ "epoch": 9.773895169578623,
+ "grad_norm": 3.556216239929199,
+ "learning_rate": 2.4485373781148433e-07,
+ "loss": 0.1846,
+ "step": 9510
+ },
+ {
+ "epoch": 9.784172661870503,
+ "grad_norm": 3.2222604751586914,
+ "learning_rate": 2.3401950162513545e-07,
+ "loss": 0.1611,
+ "step": 9520
+ },
+ {
+ "epoch": 9.794450154162384,
+ "grad_norm": 2.583037853240967,
+ "learning_rate": 2.2318526543878658e-07,
+ "loss": 0.1712,
+ "step": 9530
+ },
+ {
+ "epoch": 9.804727646454266,
+ "grad_norm": 2.7843735218048096,
+ "learning_rate": 2.123510292524377e-07,
+ "loss": 0.1325,
+ "step": 9540
+ },
+ {
+ "epoch": 9.815005138746146,
+ "grad_norm": 2.146963357925415,
+ "learning_rate": 2.0151679306608885e-07,
+ "loss": 0.1508,
+ "step": 9550
+ },
+ {
+ "epoch": 9.825282631038027,
+ "grad_norm": 3.2385213375091553,
+ "learning_rate": 1.9068255687973998e-07,
+ "loss": 0.1541,
+ "step": 9560
+ },
+ {
+ "epoch": 9.835560123329907,
+ "grad_norm": 3.864046573638916,
+ "learning_rate": 1.798483206933911e-07,
+ "loss": 0.1544,
+ "step": 9570
+ },
+ {
+ "epoch": 9.845837615621788,
+ "grad_norm": 3.785207748413086,
+ "learning_rate": 1.6901408450704225e-07,
+ "loss": 0.1882,
+ "step": 9580
+ },
+ {
+ "epoch": 9.85611510791367,
+ "grad_norm": 3.159970760345459,
+ "learning_rate": 1.5817984832069343e-07,
+ "loss": 0.1798,
+ "step": 9590
+ },
+ {
+ "epoch": 9.86639260020555,
+ "grad_norm": 3.8631491661071777,
+ "learning_rate": 1.4734561213434453e-07,
+ "loss": 0.1657,
+ "step": 9600
+ },
+ {
+ "epoch": 9.87667009249743,
+ "grad_norm": 2.512830972671509,
+ "learning_rate": 1.3651137594799568e-07,
+ "loss": 0.1679,
+ "step": 9610
+ },
+ {
+ "epoch": 9.886947584789311,
+ "grad_norm": 3.2564988136291504,
+ "learning_rate": 1.256771397616468e-07,
+ "loss": 0.2286,
+ "step": 9620
+ },
+ {
+ "epoch": 9.897225077081192,
+ "grad_norm": 2.4131650924682617,
+ "learning_rate": 1.1484290357529795e-07,
+ "loss": 0.164,
+ "step": 9630
+ },
+ {
+ "epoch": 9.907502569373072,
+ "grad_norm": 4.661231517791748,
+ "learning_rate": 1.040086673889491e-07,
+ "loss": 0.1114,
+ "step": 9640
+ },
+ {
+ "epoch": 9.917780061664955,
+ "grad_norm": 3.26019024848938,
+ "learning_rate": 9.317443120260024e-08,
+ "loss": 0.1821,
+ "step": 9650
+ },
+ {
+ "epoch": 9.928057553956835,
+ "grad_norm": 3.3677456378936768,
+ "learning_rate": 8.234019501625136e-08,
+ "loss": 0.1456,
+ "step": 9660
+ },
+ {
+ "epoch": 9.938335046248715,
+ "grad_norm": 3.299299716949463,
+ "learning_rate": 7.15059588299025e-08,
+ "loss": 0.191,
+ "step": 9670
+ },
+ {
+ "epoch": 9.948612538540596,
+ "grad_norm": 2.5458364486694336,
+ "learning_rate": 6.067172264355364e-08,
+ "loss": 0.2054,
+ "step": 9680
+ },
+ {
+ "epoch": 9.958890030832476,
+ "grad_norm": 4.219174385070801,
+ "learning_rate": 4.983748645720477e-08,
+ "loss": 0.206,
+ "step": 9690
+ },
+ {
+ "epoch": 9.969167523124357,
+ "grad_norm": 1.5195386409759521,
+ "learning_rate": 3.9003250270855906e-08,
+ "loss": 0.1463,
+ "step": 9700
+ },
+ {
+ "epoch": 9.979445015416239,
+ "grad_norm": 3.900343418121338,
+ "learning_rate": 2.8169014084507045e-08,
+ "loss": 0.1477,
+ "step": 9710
+ },
+ {
+ "epoch": 9.98972250770812,
+ "grad_norm": 4.024109363555908,
+ "learning_rate": 1.7334777898158183e-08,
+ "loss": 0.2275,
+ "step": 9720
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 2.2625277042388916,
+ "learning_rate": 6.5005417118093186e-09,
+ "loss": 0.1692,
+ "step": 9730
+ },
+ {
+ "epoch": 10.0,
+ "eval_loss": 0.33747920393943787,
+ "eval_runtime": 1209.0372,
+ "eval_samples_per_second": 4.853,
+ "eval_steps_per_second": 0.152,
+ "eval_wer": 0.3192600084831479,
+ "step": 9730
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 9730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 4.03896185545923e+19,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+}
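
The trainer state above logs one record every 10 optimizer steps (`logging_steps: 10`) plus one eval record per epoch, all inside a `log_history` array. A minimal sketch for pulling the per-epoch eval curve back out of it; the exact checkpoint path is assumed from the surrounding diff:

```python
import json

# Assumed path: the trainer_state.json shown in this diff's checkpoint directory.
with open("whisper/whisper_base_finetuned/checkpoint-9730/trainer_state.json") as f:
    state = json.load(f)

# Eval entries are the log_history records that carry an "eval_wer" key;
# step entries carry "loss"/"grad_norm" instead.
for e in (r for r in state["log_history"] if "eval_wer" in r):
    print(f"epoch {e['epoch']}: eval_loss={e['eval_loss']:.4f} wer={e['eval_wer']:.4f}")
```

Run against the file above, this should print the same ten epoch rows summarized in the README table (ending at WER 0.3193 for epoch 10).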
diff --git a/whisper/whisper_base_finetuned/checkpoint-9730/training_args.bin b/whisper/whisper_base_finetuned/checkpoint-9730/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cec0038665d32391824dfe472a35578679380744
--- /dev/null
+++ b/whisper/whisper_base_finetuned/checkpoint-9730/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e2cc97af116b33d30c72667d46ddd426569f5f483ad8392e19d95860dfcc43
+size 5240
diff --git a/whisper/whisper_base_finetuned/config.json b/whisper/whisper_base_finetuned/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..91728b7bc6c3a43bb11e0d161949a286ca009408
--- /dev/null
+++ b/whisper/whisper_base_finetuned/config.json
@@ -0,0 +1,52 @@
+{
+ "_name_or_path": "whisper_base_finetuned",
+ "activation_dropout": 0.0,
+ "activation_function": "gelu",
+ "apply_spec_augment": true,
+ "architectures": [
+ "WhisperForConditionalGeneration"
+ ],
+ "attention_dropout": 0.0,
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "classifier_proj_size": 256,
+ "d_model": 512,
+ "decoder_attention_heads": 8,
+ "decoder_ffn_dim": 2048,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 6,
+ "decoder_start_token_id": 50258,
+ "dropout": 0.0,
+ "encoder_attention_heads": 8,
+ "encoder_ffn_dim": 2048,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 6,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.05,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "max_length": 448,
+ "max_source_positions": 1500,
+ "max_target_positions": 448,
+ "median_filter_width": 7,
+ "model_type": "whisper",
+ "num_hidden_layers": 6,
+ "num_mel_bins": 80,
+ "pad_token_id": 50257,
+ "scale_embedding": false,
+ "suppress_tokens": [],
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.0.dev0",
+ "use_cache": true,
+ "use_weighted_layer_sum": false,
+ "vocab_size": 51865
+}
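
With this config.json next to model.safetensors and the preprocessor/generation configs, the directory should load directly through Transformers. A minimal sketch, assuming the repository root is the working directory, the LFS-tracked weights have been fetched, and tokenizer files (not shown in this excerpt) are also present:

```python
from transformers import WhisperForConditionalGeneration, WhisperProcessor

model_dir = "whisper/whisper_base_finetuned"  # layout as committed in this diff
model = WhisperForConditionalGeneration.from_pretrained(model_dir)
processor = WhisperProcessor.from_pretrained(model_dir)

# Sanity check against config.json above: d_model 512, 6 encoder layers.
print(model.config.d_model, model.config.encoder_layers)
```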
diff --git a/whisper/whisper_base_finetuned/generation_config.json b/whisper/whisper_base_finetuned/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ce877d310342bb057324d0dfcf6f83dc6055c1a
--- /dev/null
+++ b/whisper/whisper_base_finetuned/generation_config.json
@@ -0,0 +1,256 @@
+{
+ "alignment_heads": [
+ [
+ 3,
+ 1
+ ],
+ [
+ 4,
+ 2
+ ],
+ [
+ 4,
+ 3
+ ],
+ [
+ 4,
+ 7
+ ],
+ [
+ 5,
+ 1
+ ],
+ [
+ 5,
+ 2
+ ],
+ [
+ 5,
+ 4
+ ],
+ [
+ 5,
+ 6
+ ]
+ ],
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "decoder_start_token_id": 50258,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": [
+ [
+ 1,
+ null
+ ],
+ [
+ 2,
+ 50359
+ ]
+ ],
+ "is_multilingual": true,
+ "lang_to_id": {
+ "<|af|>": 50327,
+ "<|am|>": 50334,
+ "<|ar|>": 50272,
+ "<|as|>": 50350,
+ "<|az|>": 50304,
+ "<|ba|>": 50355,
+ "<|be|>": 50330,
+ "<|bg|>": 50292,
+ "<|bn|>": 50302,
+ "<|bo|>": 50347,
+ "<|br|>": 50309,
+ "<|bs|>": 50315,
+ "<|ca|>": 50270,
+ "<|cs|>": 50283,
+ "<|cy|>": 50297,
+ "<|da|>": 50285,
+ "<|de|>": 50261,
+ "<|el|>": 50281,
+ "<|en|>": 50259,
+ "<|es|>": 50262,
+ "<|et|>": 50307,
+ "<|eu|>": 50310,
+ "<|fa|>": 50300,
+ "<|fi|>": 50277,
+ "<|fo|>": 50338,
+ "<|fr|>": 50265,
+ "<|gl|>": 50319,
+ "<|gu|>": 50333,
+ "<|haw|>": 50352,
+ "<|ha|>": 50354,
+ "<|he|>": 50279,
+ "<|hi|>": 50276,
+ "<|hr|>": 50291,
+ "<|ht|>": 50339,
+ "<|hu|>": 50286,
+ "<|hy|>": 50312,
+ "<|id|>": 50275,
+ "<|is|>": 50311,
+ "<|it|>": 50274,
+ "<|ja|>": 50266,
+ "<|jw|>": 50356,
+ "<|ka|>": 50329,
+ "<|kk|>": 50316,
+ "<|km|>": 50323,
+ "<|kn|>": 50306,
+ "<|ko|>": 50264,
+ "<|la|>": 50294,
+ "<|lb|>": 50345,
+ "<|ln|>": 50353,
+ "<|lo|>": 50336,
+ "<|lt|>": 50293,
+ "<|lv|>": 50301,
+ "<|mg|>": 50349,
+ "<|mi|>": 50295,
+ "<|mk|>": 50308,
+ "<|ml|>": 50296,
+ "<|mn|>": 50314,
+ "<|mr|>": 50320,
+ "<|ms|>": 50282,
+ "<|mt|>": 50343,
+ "<|my|>": 50346,
+ "<|ne|>": 50313,
+ "<|nl|>": 50271,
+ "<|nn|>": 50342,
+ "<|no|>": 50288,
+ "<|oc|>": 50328,
+ "<|pa|>": 50321,
+ "<|pl|>": 50269,
+ "<|ps|>": 50340,
+ "<|pt|>": 50267,
+ "<|ro|>": 50284,
+ "<|ru|>": 50263,
+ "<|sa|>": 50344,
+ "<|sd|>": 50332,
+ "<|si|>": 50322,
+ "<|sk|>": 50298,
+ "<|sl|>": 50305,
+ "<|sn|>": 50324,
+ "<|so|>": 50326,
+ "<|sq|>": 50317,
+ "<|sr|>": 50303,
+ "<|su|>": 50357,
+ "<|sv|>": 50273,
+ "<|sw|>": 50318,
+ "<|ta|>": 50287,
+ "<|te|>": 50299,
+ "<|tg|>": 50331,
+ "<|th|>": 50289,
+ "<|tk|>": 50341,
+ "<|tl|>": 50348,
+ "<|tr|>": 50268,
+ "<|tt|>": 50351,
+ "<|uk|>": 50280,
+ "<|ur|>": 50290,
+ "<|uz|>": 50337,
+ "<|vi|>": 50278,
+ "<|yi|>": 50335,
+ "<|yo|>": 50325,
+ "<|zh|>": 50260
+ },
+ "max_initial_timestamp_index": 50,
+ "max_length": 448,
+ "no_timestamps_token_id": 50363,
+ "pad_token_id": 50257,
+ "prev_sot_token_id": 50361,
+ "return_timestamps": false,
+ "suppress_tokens": [
+ 1,
+ 2,
+ 7,
+ 8,
+ 9,
+ 10,
+ 14,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 31,
+ 58,
+ 59,
+ 60,
+ 61,
+ 62,
+ 63,
+ 90,
+ 91,
+ 92,
+ 93,
+ 359,
+ 503,
+ 522,
+ 542,
+ 873,
+ 893,
+ 902,
+ 918,
+ 922,
+ 931,
+ 1350,
+ 1853,
+ 1982,
+ 2460,
+ 2627,
+ 3246,
+ 3253,
+ 3268,
+ 3536,
+ 3846,
+ 3961,
+ 4183,
+ 4667,
+ 6585,
+ 6647,
+ 7273,
+ 9061,
+ 9383,
+ 10428,
+ 10929,
+ 11938,
+ 12033,
+ 12331,
+ 12562,
+ 13793,
+ 14157,
+ 14635,
+ 15265,
+ 15618,
+ 16553,
+ 16604,
+ 18362,
+ 18956,
+ 20075,
+ 21675,
+ 22520,
+ 26130,
+ 26161,
+ 26435,
+ 28279,
+ 29464,
+ 31650,
+ 32302,
+ 32470,
+ 36865,
+ 42863,
+ 47425,
+ 49870,
+ 50254,
+ 50258,
+ 50358,
+ 50359,
+ 50360,
+ 50361,
+ 50362
+ ],
+ "task_to_id": {
+ "transcribe": 50359,
+ "translate": 50358
+ },
+ "transformers_version": "4.42.0.dev0"
+}
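
Note that `forced_decoder_ids` above fixes position 2 to the transcribe task token (50359) but leaves position 1, the language token, as `null`, so the language is detected at runtime. To pin transcription to one language, a hedged option is the processor's prompt-ids helper (again assuming tokenizer files ship alongside this config):

```python
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("whisper/whisper_base_finetuned")
prompt_ids = processor.get_decoder_prompt_ids(language="english", task="transcribe")
# Expected per the lang_to_id / task_to_id maps above, e.g.:
# [(1, 50259), (2, 50359), (3, 50363)]  -> <|en|>, transcribe, no-timestamps
print(prompt_ids)
```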
diff --git a/whisper/whisper_base_finetuned/model.safetensors b/whisper/whisper_base_finetuned/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d3ffdfb85687050faeb83213beaa74fff65de794
--- /dev/null
+++ b/whisper/whisper_base_finetuned/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd026348fe37281549fd7c6a808f086ca0fd97d58b2cacc1f727f1b30983206
+size 290403936
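
model.safetensors is committed as a Git LFS pointer (the three-line stub above), not the ~290 MB weight file itself, so a plain checkout needs `git lfs pull` before the model will load. A small sketch for detecting a stub that was checked out without LFS, based only on the pointer format visible here:

```python
def is_lfs_pointer(path: str) -> bool:
    """True if the file is a Git LFS pointer stub rather than the real payload."""
    try:
        with open(path, "rb") as f:
            head = f.read(64)
    except OSError:
        return False
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("whisper/whisper_base_finetuned/model.safetensors"))
```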
diff --git a/whisper/whisper_base_finetuned/preprocessor_config.json b/whisper/whisper_base_finetuned/preprocessor_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..91876762a536a746d268353c5cba57286e76b058
--- /dev/null
+++ b/whisper/whisper_base_finetuned/preprocessor_config.json
@@ -0,0 +1,14 @@
+{
+ "chunk_length": 30,
+ "feature_extractor_type": "WhisperFeatureExtractor",
+ "feature_size": 80,
+ "hop_length": 160,
+ "n_fft": 400,
+ "n_samples": 480000,
+ "nb_max_frames": 3000,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "WhisperProcessor",
+ "return_attention_mask": false,
+ "sampling_rate": 16000
+}
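
The preprocessor numbers are internally consistent: 30 s chunks at 16 kHz give n_samples = 30 x 16000 = 480000, and with hop_length 160 that yields nb_max_frames = 480000 / 160 = 3000 log-mel frames of feature_size 80. A quick check of that arithmetic with the feature extractor itself:

```python
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor(feature_size=80, sampling_rate=16000,
                             hop_length=160, chunk_length=30, n_fft=400)
audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence
feats = fe(audio, sampling_rate=16000, return_tensors="np").input_features
print(feats.shape)  # (1, 80, 3000): padded out to the full 30 s window
```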
diff --git a/whisper/whisper_base_finetuned/training_args.bin b/whisper/whisper_base_finetuned/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cec0038665d32391824dfe472a35578679380744
--- /dev/null
+++ b/whisper/whisper_base_finetuned/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e2cc97af116b33d30c72667d46ddd426569f5f483ad8392e19d95860dfcc43
+size 5240
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/best-train-loss.pt b/zipformer/finetuned/ctc/causal/exp_finetune/best-train-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07dbca08854d656fb0251367591c640c51ff603d
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/best-train-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abeb187cfd750a533d745951ed4f05300ab887a94d12510446173afc9ad64ab9
+size 1062964430
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/best-valid-loss.pt b/zipformer/finetuned/ctc/causal/exp_finetune/best-valid-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d77e0a4e3753c76a3ee54894f5ad9ef6813c4700
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/best-valid-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92908b67b3cd59091848f2ebd72c1e4aad91ff3c0774f27aeada8668ff589fa4
+size 1062964494
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-1.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-1.pt
new file mode 100644
index 0000000000000000000000000000000000000000..40fffa46139aabf4dfe8e5377d3f832affc3824e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-1.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d927f5b0338a62d415c36f3705bde3f3276c88369f904575e83945d138cfef3e
+size 1062960853
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-10.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-10.pt
new file mode 100644
index 0000000000000000000000000000000000000000..04cea4713cc514c906aa4da064d3d001232ee9e0
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-10.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07c28ba01255527170c8d297aafe10678dbf051a263f2eb8ca06442c701f9ef3
+size 1062964046
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-11.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-11.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a669461baa8fbb9bb73aa160bab4c7c0f630a5e0
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-11.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:254bb0451bb8282fbf44b55ae9225a2b933a125f6677aa57a8c1a80426fc3299
+size 1062964110
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-12.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-12.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a8da0d373a5fa8e05aebf0143328bd85b4667ca1
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-12.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51af1f57dbd87eddb1ebeba3b31b845063e4c042fd899f5884dd0a41ff95d5b4
+size 1062964110
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-13.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-13.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b44a655a2c0085324463d42258c1007544c58653
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-13.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c0afecf8875b63321a7c020df9565414884617e54de884959c413f4013673df
+size 1062964174
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-14.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-14.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2267b9d8427ec547006b09f4e04f42a3160224f4
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-14.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e59080fab9bbe2b6e7e7c99d7feab0c5f034097ea7c2c6f025c765430295ee57
+size 1062964238
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-15.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-15.pt
new file mode 100644
index 0000000000000000000000000000000000000000..31ba5cb894a2a702bc6223735dc55aeb6c9d730e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-15.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4655cf2317de278a498cf45e8656f994bf7b48cd131ed70150f1657bd7c38281
+size 1062964238
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-16.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-16.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fc41edd0897203fdf5d360077494730fb1df8a95
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-16.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:425e29b71db27e9b165b0b6b87e65e7e92ff430f1af45dd5244f5d8c70a278ec
+size 1062964302
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-17.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-17.pt
new file mode 100644
index 0000000000000000000000000000000000000000..36cf2a104cffe71b3a62adbd1b27281a26a8e237
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-17.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7635c71701052a4652e68f2e104cf144e2bba02256354e9b9683464cb8888997
+size 1062964366
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-18.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-18.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3cbe966dcd6a8634d080102cfc9f6971097a7889
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-18.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ef656e75a76c9a7823846c9482d96836da5661219f38363748bcabd08cb9049
+size 1062964430
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-19.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-19.pt
new file mode 100644
index 0000000000000000000000000000000000000000..07dbca08854d656fb0251367591c640c51ff603d
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-19.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abeb187cfd750a533d745951ed4f05300ab887a94d12510446173afc9ad64ab9
+size 1062964430
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-2.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-2.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2cc7b54c01a2216413d463c4930f218b61ff8d6a
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-2.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9aac5c412ebfa61e6173739f6adc63283fb6268f87042a68c2579d7aaec4160d
+size 1062960981
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-20.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-20.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d77e0a4e3753c76a3ee54894f5ad9ef6813c4700
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-20.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92908b67b3cd59091848f2ebd72c1e4aad91ff3c0774f27aeada8668ff589fa4
+size 1062964494
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-3.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-3.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4227883ea394fbc49f937943cf8b3d221e1e597d
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-3.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4958378241bdca4e0b09d75953d8c36bf70b1d1f9d7309a857426e8078ef7e75
+size 1062961045
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-4.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-4.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f5bb9205102e76ea2426c85b4c92fb5d759f807d
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-4.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65dc89fda1a1e82c2cf0314218e04f134ed92834f63b80599c9ebdd365170781
+size 1062961045
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-5.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-5.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1f9f8e1ebef2a7916e6547f87bfd11ec007e89dd
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-5.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1467ef51278ca01f91eef1d5d4d57d360d202cba68a1b5c35bfc478e9bf15273
+size 1062961109
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-6.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-6.pt
new file mode 100644
index 0000000000000000000000000000000000000000..805478a06c0859a31be6c633a5341a339f32839e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-6.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2dd8b95f78b2539a4664a439bb2a8cf944fd06851485385105cdf3fd80785ff
+size 1062961173
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-7.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-7.pt
new file mode 100644
index 0000000000000000000000000000000000000000..962089be4133ab8e3f82b53fd2dc3a7fa4776d5e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-7.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9b1237f3dbe638e1975b925a8b7d98bf31503d57a8e2adc9c4df5d28c90b0f0
+size 1062961237
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-8.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-8.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9e0e1cd54231311a4d77433c015251713b120650
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-8.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc920a87e4efc362adfbad874c73c1f90bef5d1340669d1b58a9030b7bbc7e95
+size 1062961237
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/epoch-9.pt b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-9.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ca687b4d4b4b6fa2c4b5b6f53f9fa294381c7882
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/epoch-9.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bd9fe5609b8a747cd7c1e92dbaf30d3d5b9dff5c3124266e0079e01df5657bc
+size 1062961301
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-0
new file mode 100644
index 0000000000000000000000000000000000000000..cbbedfe201c43b9b2841e8a0c2e936a29f544e38
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-0
@@ -0,0 +1,9 @@
+2024-08-29 03:37:48,106 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 03:37:48,411 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-29 03:37:48,411 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 03:37:48,765 INFO [dysarthria_finetune.py:1219] (0/4) (33748090880, 34072559616)
+2024-08-29 03:37:48,771 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-29 03:37:49,594 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2655.int.cedar.computecanada.ca', 'IP address': '172.16.146.92'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 03:37:49,594 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-29 03:37:50,277 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66367431
+2024-08-29 03:37:50,816 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
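
The log above comes from a 4-GPU icefall fine-tuning run (`world_size: 4`, 20 epochs, `keep_last_k: 30`), and the `epoch-*.pt` files earlier in this diff are its outputs. A hedged way to peek inside one without the icefall tooling; the key layout is an assumption based on typical icefall checkpoints, not confirmed by this diff:

```python
import torch

# Path from this diff; map to CPU so no GPU is needed for inspection.
ckpt = torch.load("zipformer/finetuned/ctc/causal/exp_finetune/epoch-20.pt",
                  map_location="cpu")
print(sorted(ckpt.keys()))  # assumed: "model" plus optimizer/scheduler state

# If a "model" state dict is present, its parameter count should match the
# "Number of model parameters: 66367431" line logged above.
if "model" in ckpt:
    print(sum(v.numel() for v in ckpt["model"].values()))
```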
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-1
new file mode 100644
index 0000000000000000000000000000000000000000..883755739d168a9375bd4af105e36153c701cf1a
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-1
@@ -0,0 +1,7 @@
+2024-08-29 03:37:48,477 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-29 03:37:50,829 INFO [dysarthria_finetune.py:1214] (1/4) (33427226624, 34072559616)
+2024-08-29 03:37:50,829 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-29 03:37:51,205 INFO [dysarthria_finetune.py:1219] (1/4) (33427226624, 34072559616)
+2024-08-29 03:37:51,205 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-29 03:37:51,208 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2655.int.cedar.computecanada.ca', 'IP address': '172.16.146.92'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 03:37:51,209 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-2
new file mode 100644
index 0000000000000000000000000000000000000000..f863a9af401ff1ed95184a0a3ac78a1e2f3a9ea5
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-2
@@ -0,0 +1 @@
+2024-08-29 03:37:48,474 INFO [dysarthria_finetune.py:1212] (2/4) Training started
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-3
new file mode 100644
index 0000000000000000000000000000000000000000..55e082fe8f24fd2a6375fc8935f7bb10e4f2abe9
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-03-37-48-3
@@ -0,0 +1,3 @@
+2024-08-29 03:37:48,476 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-29 03:37:55,030 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-29 03:37:55,030 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-0
new file mode 100644
index 0000000000000000000000000000000000000000..2e35d0583622f5a47755e0d015f4b7c5ae7df549
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-0
@@ -0,0 +1,9 @@
+2024-08-29 11:41:33,292 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 11:41:33,521 INFO [dysarthria_finetune.py:1214] (0/4) (33725022208, 34072559616)
+2024-08-29 11:41:33,521 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 11:41:33,990 INFO [dysarthria_finetune.py:1219] (0/4) (33427226624, 34072559616)
+2024-08-29 11:41:33,994 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-29 11:41:36,507 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 11:41:36,507 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-29 11:41:37,205 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66367431
+2024-08-29 11:41:37,767 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
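
Context for readers of these logs: the paired numbers such as `(33725022208, 34072559616)` bracketing the "Empty cache" line are byte counts on a ~32 GB GPU. `dysarthria_finetune.py` is not part of this diff, so the exact calls are an assumption, but the shape of the output matches `torch.cuda.mem_get_info`, which returns `(free_bytes, total_bytes)`. A minimal sketch:

```python
# Hedged sketch of how the "(free, total)" pairs around the cache flush could
# be produced; the training script itself is not included in this diff.
import logging

import torch

logging.basicConfig(level=logging.INFO)


def log_cuda_memory(device: int = 0) -> None:
    # torch.cuda.mem_get_info returns (free_bytes, total_bytes) for the
    # device, matching pairs such as (33725022208, 34072559616) above.
    logging.info("%s", torch.cuda.mem_get_info(device))


if torch.cuda.is_available():
    log_cuda_memory(0)
    logging.info("Empty cache: before and after")
    torch.cuda.empty_cache()  # release cached allocator blocks to the driver
    log_cuda_memory(0)        # free memory can shift slightly, as logged
```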
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-1
new file mode 100644
index 0000000000000000000000000000000000000000..97d11ee5609cb0985a9c7befa5bab0510bb5d49e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-1
@@ -0,0 +1 @@
+2024-08-29 11:41:33,540 INFO [dysarthria_finetune.py:1212] (1/4) Training started
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-2
new file mode 100644
index 0000000000000000000000000000000000000000..b3b694744f34db8f50e3b8bfa4e820227d998971
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-2
@@ -0,0 +1,9 @@
+2024-08-29 11:41:33,436 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-29 11:41:40,957 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-29 11:41:40,957 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-29 11:41:41,322 INFO [dysarthria_finetune.py:1219] (2/4) (33748090880, 34072559616)
+2024-08-29 11:41:41,324 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-29 11:41:41,326 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 11:41:41,327 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-29 11:41:42,016 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-29 11:41:42,016 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-11-41-33-3
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-0
new file mode 100644
index 0000000000000000000000000000000000000000..534bcb2d26134ab69efb738ac67fa381db477111
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-0
@@ -0,0 +1,4 @@
+2024-08-29 20:11:55,398 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 20:11:55,756 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-29 20:11:55,757 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 20:11:56,549 INFO [dysarthria_finetune.py:1219] (0/4) (33106362368, 34072559616)
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-1
new file mode 100644
index 0000000000000000000000000000000000000000..353da3c1bb048f1346a7f6a8105b0aa33e7c7c0f
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-1
@@ -0,0 +1,9 @@
+2024-08-29 20:11:55,762 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-29 20:11:55,804 INFO [dysarthria_finetune.py:1214] (1/4) (33735507968, 34072559616)
+2024-08-29 20:11:55,804 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-29 20:11:56,545 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-29 20:11:58,602 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-29 20:11:58,660 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 20:11:58,660 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-29 20:11:59,356 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66367431
+2024-08-29 20:11:59,357 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-2
new file mode 100644
index 0000000000000000000000000000000000000000..06238258cbb09f462f1f3e280a7e187bdd5711bc
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-2
@@ -0,0 +1,9 @@
+2024-08-29 20:11:55,746 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-29 20:11:58,602 INFO [dysarthria_finetune.py:1214] (2/4) (32783400960, 34072559616)
+2024-08-29 20:11:58,602 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-29 20:11:58,988 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-29 20:11:58,989 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-29 20:11:58,993 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 20:11:58,993 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-29 20:11:59,702 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-29 20:11:59,702 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-3
new file mode 100644
index 0000000000000000000000000000000000000000..5bdc9714ffd5197179650093eae344bd33b71f56
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-29-20-11-55-3
@@ -0,0 +1,9 @@
+2024-08-29 20:11:55,761 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-29 20:11:55,804 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-29 20:11:55,804 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-29 20:11:56,543 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-29 20:11:58,602 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-29 20:11:58,660 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 20:11:58,661 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-29 20:11:59,362 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66367431
+2024-08-29 20:11:59,362 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
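
Each rank logs its full hyperparameter dict on one line, which makes the restarted runs above easy to compare mechanically. The literal contains `inf`, `None`, and `PosixPath(...)`, so `ast.literal_eval` cannot parse it; a hedged utility sketch (not part of the diff) using a restricted `eval` instead:

```python
# Sketch: recover the params dict that dysarthria_finetune.py logs as one
# line, then report which fields differ between two runs.
from pathlib import PurePosixPath


def parse_params_line(line: str) -> dict:
    # The dict starts at the first "{" and runs to the end of the line.
    literal = line[line.index("{"):]
    return eval(  # restricted eval; applied only to our own log files
        literal,
        {"__builtins__": {}},
        {"inf": float("inf"), "PosixPath": PurePosixPath},
    )


def diff_params(a: dict, b: dict) -> dict:
    # Flatten one level (env_info is nested) and keep keys whose values differ.
    def flat(d: dict) -> dict:
        out = {}
        for k, v in d.items():
            if isinstance(v, dict):
                out.update({f"{k}.{sk}": sv for sk, sv in v.items()})
            else:
                out[k] = v
        return out

    fa, fb = flat(a), flat(b)
    return {k: (fa.get(k), fb.get(k)) for k in fa.keys() | fb.keys()
            if fa.get(k) != fb.get(k)}
```

Applied to the runs in this directory, only `env_info.hostname` and `env_info.IP address` change between restarts (cdr2558, cdr2549, cdr2651, ...); every training hyperparameter is identical.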
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-0
new file mode 100644
index 0000000000000000000000000000000000000000..77cd5f5ca9a6f6269455e87446e46e51c4eb1722
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-0
@@ -0,0 +1 @@
+2024-08-30 17:23:52,592 INFO [dysarthria_finetune.py:1212] (0/4) Training started
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-1
new file mode 100644
index 0000000000000000000000000000000000000000..3135f29ace324a73c67f0972e6462c5e050383d6
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-1
@@ -0,0 +1,9 @@
+2024-08-30 17:23:52,733 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 17:23:52,765 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-30 17:23:52,766 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-30 17:23:53,415 INFO [dysarthria_finetune.py:1219] (1/4) (33414643712, 34072559616)
+2024-08-30 17:23:53,415 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-30 17:23:53,662 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 17:23:53,662 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-30 17:23:54,364 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66367431
+2024-08-30 17:23:54,364 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-2
new file mode 100644
index 0000000000000000000000000000000000000000..81b1316b7761eb9a1ed9e33ba0309090ddbb7bad
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-2
@@ -0,0 +1,9 @@
+2024-08-30 17:23:52,703 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 17:23:53,136 INFO [dysarthria_finetune.py:1214] (2/4) (33414643712, 34072559616)
+2024-08-30 17:23:53,136 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 17:23:53,608 INFO [dysarthria_finetune.py:1219] (2/4) (33106362368, 34072559616)
+2024-08-30 17:23:53,609 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 17:23:53,661 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 17:23:53,661 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 17:23:54,373 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-30 17:23:54,373 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-3
new file mode 100644
index 0000000000000000000000000000000000000000..28c09f84ee5056bb158935bb63852af99ff70955
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-17-23-52-3
@@ -0,0 +1,9 @@
+2024-08-30 17:23:52,718 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 17:23:52,720 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 17:23:52,720 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 17:23:53,398 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-30 17:23:53,399 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 17:23:53,661 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 17:23:53,661 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 17:23:54,364 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66367431
+2024-08-30 17:23:54,364 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-33-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-33-0
new file mode 100644
index 0000000000000000000000000000000000000000..671001a017b546d97a6aa9f7b77823a3d206f99e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-33-0
@@ -0,0 +1,9 @@
+2024-08-30 21:11:33,959 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-30 21:11:34,230 INFO [dysarthria_finetune.py:1214] (0/4) (33735507968, 34072559616)
+2024-08-30 21:11:34,231 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-30 21:11:34,950 INFO [dysarthria_finetune.py:1219] (0/4) (33106362368, 34072559616)
+2024-08-30 21:11:34,958 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-30 21:11:34,960 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:11:34,961 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-30 21:11:35,755 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66367431
+2024-08-30 21:11:36,317 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-1
new file mode 100644
index 0000000000000000000000000000000000000000..4d8112b475661e227df6f5e1378ae6f058e78472
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-1
@@ -0,0 +1,9 @@
+2024-08-30 21:11:34,198 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 21:11:34,230 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-30 21:11:34,231 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-30 21:11:34,961 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-30 21:11:34,962 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-30 21:11:34,964 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:11:34,965 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-30 21:11:35,679 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66367431
+2024-08-30 21:11:35,679 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-2
new file mode 100644
index 0000000000000000000000000000000000000000..f2caa03243c75553b575e823459c4b80cc1ed045
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-2
@@ -0,0 +1,9 @@
+2024-08-30 21:11:34,197 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 21:11:35,546 INFO [dysarthria_finetune.py:1214] (2/4) (32783400960, 34072559616)
+2024-08-30 21:11:35,546 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 21:11:35,930 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-30 21:11:35,931 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 21:11:35,933 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:11:35,933 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 21:11:36,632 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-30 21:11:36,632 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-3
new file mode 100644
index 0000000000000000000000000000000000000000..22624786ca583564f188b4d0af7dccf0e9062a90
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-21-11-34-3
@@ -0,0 +1,9 @@
+2024-08-30 21:11:34,184 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 21:11:34,185 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 21:11:34,185 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 21:11:34,945 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-30 21:11:34,946 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 21:11:34,948 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:11:34,948 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 21:11:35,649 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66367431
+2024-08-30 21:11:35,649 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
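
The parameter count logged on every rank (66367431) and the checkpoint load that follows are standard PyTorch idioms. A sketch under that assumption; the script and the icefall checkpoint layout are not shown in this diff, so the `"model"` key is an assumption based on common icefall conventions:

```python
# Hedged sketch of the two steps logged above.
import torch


def count_parameters(model: torch.nn.Module) -> int:
    # The usual idiom behind "Number of model parameters: 66367431".
    return sum(p.numel() for p in model.parameters())


def load_finetune_ckpt(model: torch.nn.Module, ckpt_path: str) -> None:
    # Assumes the checkpoint stores weights under "model"; loading on CPU
    # first avoids a memory spike on cuda:0 when four ranks start together.
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"], strict=True)
```

Here `ckpt_path` would be the `finetune_ckpt` value from the params dict, i.e. `.../ctc/streaming/exp/epoch-20.pt`.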
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-0
new file mode 100644
index 0000000000000000000000000000000000000000..af96a4145393ae81e5e169dab9ce3d8fcd4f1d93
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-0
@@ -0,0 +1 @@
+2024-08-30 22:48:54,417 INFO [dysarthria_finetune.py:1212] (0/4) Training started
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-1
new file mode 100644
index 0000000000000000000000000000000000000000..5a5127ac50027fd70414bb87cea4d9a32ef3fb04
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-1
@@ -0,0 +1,3 @@
+2024-08-30 22:48:54,486 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 22:49:06,853 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-30 22:49:06,853 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-2
new file mode 100644
index 0000000000000000000000000000000000000000..6912371f16a858ad9cae74639067d72890ce1920
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-2
@@ -0,0 +1,9 @@
+2024-08-30 22:48:54,490 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 22:48:54,531 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-30 22:48:54,531 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 22:48:55,055 INFO [dysarthria_finetune.py:1219] (2/4) (33427226624, 34072559616)
+2024-08-30 22:48:55,055 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 22:48:55,099 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2539.int.cedar.computecanada.ca', 'IP address': '172.16.145.232'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 22:48:55,100 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 22:48:55,797 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-30 22:48:55,797 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-3
new file mode 100644
index 0000000000000000000000000000000000000000..9541a6908eddf653acb65bceef27dd6e85f0fca5
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-30-22-48-54-3
@@ -0,0 +1,9 @@
+2024-08-30 22:48:54,484 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 22:48:54,485 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 22:48:54,485 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 22:48:55,049 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-30 22:48:55,050 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 22:48:55,100 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2539.int.cedar.computecanada.ca', 'IP address': '172.16.145.232'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 22:48:55,100 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 22:48:55,797 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66367431
+2024-08-30 22:48:55,797 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-0
new file mode 100644
index 0000000000000000000000000000000000000000..8894177ac7e3525aef1f3ea4945b364d4fe5d4f2
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-0
@@ -0,0 +1,11 @@
+2024-08-31 18:50:37,365 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-31 18:50:37,681 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-31 18:50:37,681 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-31 18:50:38,683 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-31 18:50:41,220 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-31 18:50:41,223 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 18:50:41,224 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-31 18:50:41,941 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66367431
+2024-08-31 18:50:42,495 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 18:50:44,403 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-31 18:50:45,805 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-1
new file mode 100644
index 0000000000000000000000000000000000000000..4363bcf8b0d9610d8ac88b39dfaae5b8888c573f
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-1
@@ -0,0 +1,11 @@
+2024-08-31 18:50:37,683 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-31 18:50:37,726 INFO [dysarthria_finetune.py:1214] (1/4) (33735507968, 34072559616)
+2024-08-31 18:50:37,726 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-31 18:50:38,674 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-31 18:50:38,675 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-31 18:50:38,754 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 18:50:38,754 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-31 18:50:39,457 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66367431
+2024-08-31 18:50:39,457 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 18:50:40,690 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-31 18:50:45,796 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-2
new file mode 100644
index 0000000000000000000000000000000000000000..2dcf96b42efd0b0d62ee043441c70fc7f7c35957
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-2
@@ -0,0 +1,11 @@
+2024-08-31 18:50:37,845 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-31 18:50:37,847 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-31 18:50:37,847 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-31 18:50:38,686 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-31 18:50:38,686 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-31 18:50:38,754 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 18:50:38,754 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-31 18:50:39,432 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-31 18:50:39,432 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 18:50:40,631 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-31 18:50:45,784 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-3
new file mode 100644
index 0000000000000000000000000000000000000000..b8d0019c4d5966502e71323afe8a8ac20ca98335
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-18-50-37-3
@@ -0,0 +1,11 @@
+2024-08-31 18:50:37,695 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-31 18:50:37,726 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-31 18:50:37,726 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-31 18:50:38,683 INFO [dysarthria_finetune.py:1219] (3/4) (32783400960, 34072559616)
+2024-08-31 18:50:38,683 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-31 18:50:38,754 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 18:50:38,754 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-31 18:50:39,454 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66367431
+2024-08-31 18:50:39,454 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 18:50:40,688 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-31 18:50:45,792 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-08-59-0 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-08-59-0
new file mode 100644
index 0000000000000000000000000000000000000000..12892eedc9376dc9f8f90739168e64c62fee2a6e
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-08-59-0
@@ -0,0 +1,551 @@
+2024-08-31 22:08:59,831 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-31 22:09:00,050 INFO [dysarthria_finetune.py:1214] (0/4) (33735507968, 34072559616)
+2024-08-31 22:09:00,050 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-31 22:09:00,558 INFO [dysarthria_finetune.py:1219] (0/4) (33427226624, 34072559616)
+2024-08-31 22:09:00,564 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-31 22:09:01,025 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2653.int.cedar.computecanada.ca', 'IP address': '172.16.146.90'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:09:01,025 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-31 22:09:16,970 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66367431
+2024-08-31 22:10:07,061 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 22:10:51,211 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-31 22:11:01,688 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-31 22:11:01,783 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-31 22:11:17,376 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-31 22:11:18,292 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-31 22:11:18,293 INFO [dysarthria_asr_datamodule.py:501] (0/4) About to get dev cuts
+2024-08-31 22:11:18,428 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-31 22:11:18,749 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-31 22:11:18,749 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:12:59,094 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.27 vs. limit=5.0
+2024-08-31 22:12:59,567 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=15.76 vs. limit=7.5
+2024-08-31 22:13:03,030 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12626MB
+2024-08-31 22:13:04,411 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=18.93 vs. limit=7.5
+2024-08-31 22:13:04,880 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12626MB
+2024-08-31 22:14:12,900 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12626MB
+2024-08-31 22:14:14,957 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12626MB
+2024-08-31 22:19:48,583 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=25.16 vs. limit=7.5
+2024-08-31 22:19:51,916 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12626MB
+2024-08-31 22:19:54,344 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12626MB
+2024-08-31 22:20:43,746 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3566, simple_loss=0.292, pruned_loss=0.165, ctc_loss=0.2404, over 18513.00 frames. ], tot_loss[loss=0.3566, simple_loss=0.292, pruned_loss=0.165, ctc_loss=0.2404, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:43,747 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-31 22:46:04,596 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3942, simple_loss=0.3187, pruned_loss=0.1927, ctc_loss=0.281, over 1073944.00 frames.
+2024-08-31 22:46:04,729 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-08-31 23:01:30,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-31 23:06:11,554 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 9.685e+02 9.975e+02 1.051e+03 1.091e+03 1.133e+03, threshold=4.203e+03, percent-clipped=0.0
+2024-08-31 23:13:47,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100053.33333333333, ans=0.125
+2024-08-31 23:24:16,381 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=25.24 vs. limit=15.0
+2024-08-31 23:25:12,997 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 8.960e+02 9.836e+02 1.043e+03 1.067e+03 1.144e+03, threshold=4.173e+03, percent-clipped=0.0
+2024-08-31 23:31:26,726 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=32.76 vs. limit=15.0
+2024-08-31 23:43:25,261 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=19.62 vs. limit=15.0
+2024-08-31 23:50:05,829 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.677e+02 8.648e+02 9.697e+02 1.051e+03 1.144e+03, threshold=3.879e+03, percent-clipped=0.0
+2024-08-31 23:53:03,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=100213.33333333333, ans=0.09899494936611666
+2024-08-31 23:53:03,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=18.59 vs. limit=15.0
+2024-08-31 23:55:34,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-31 23:55:38,224 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.4293, simple_loss=0.3426, pruned_loss=0.209, ctc_loss=0.3269, over 18890.00 frames. ], tot_loss[loss=0.4257, simple_loss=0.3414, pruned_loss=0.2117, ctc_loss=0.3147, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:59:17,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-09-01 00:09:28,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100373.33333333333, ans=0.125
+2024-09-01 00:14:15,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-09-01 00:16:22,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=100480.0, ans=0.07
+2024-09-01 00:18:32,454 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.599e+02 6.914e+02 7.776e+02 9.170e+02 1.144e+03, threshold=1.555e+03, percent-clipped=0.0
+2024-09-01 00:18:32,492 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.3986, simple_loss=0.3194, pruned_loss=0.198, ctc_loss=0.2924, over 19293.00 frames. ], tot_loss[loss=0.406, simple_loss=0.326, pruned_loss=0.1985, ctc_loss=0.2989, over 1474004.25 frames. ], batch size: 144, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:20:55,215 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=21.73 vs. limit=15.0
+2024-09-01 00:21:57,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=19.91 vs. limit=15.0
+2024-09-01 00:27:33,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=100640.0, ans=0.2
+2024-09-01 00:28:35,602 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-1.pt
+2024-09-01 00:30:00,886 INFO [dysarthria_finetune.py:1435] (0/4) (1324023808, 34072559616)
+2024-09-01 00:30:00,886 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 00:30:00,913 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 00:30:12,104 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 0, loss[loss=0.3438, simple_loss=0.2817, pruned_loss=0.1436, ctc_loss=0.2423, over 18874.00 frames. ], tot_loss[loss=0.3438, simple_loss=0.2817, pruned_loss=0.1436, ctc_loss=0.2423, over 18874.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:30:12,105 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 00:34:27,528 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 2, validation: loss=0.3547, simple_loss=0.2901, pruned_loss=0.1627, ctc_loss=0.2412, over 1073944.00 frames.
+2024-09-01 00:34:27,529 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 00:39:15,688 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=100736.0, ans=0.0
+2024-09-01 00:39:16,183 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.17 vs. limit=15.0
+2024-09-01 00:39:57,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=100736.0, ans=0.125
+2024-09-01 00:40:02,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=100736.0, ans=0.125
+2024-09-01 00:41:34,528 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=22.90 vs. limit=15.0
+2024-09-01 00:41:38,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-09-01 00:41:38,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:45:14,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=100842.66666666667, ans=0.035
+2024-09-01 00:48:16,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100949.33333333333, ans=0.125
+2024-09-01 00:48:18,401 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 50, loss[loss=0.3832, simple_loss=0.3059, pruned_loss=0.1888, ctc_loss=0.2848, over 18964.00 frames. ], tot_loss[loss=0.3931, simple_loss=0.3168, pruned_loss=0.1856, ctc_loss=0.2849, over 826819.73 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 00:49:20,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-09-01 00:49:32,146 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.01 vs. limit=10.0
+2024-09-01 00:49:32,223 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.82 vs. limit=15.0
+2024-09-01 00:50:03,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=101002.66666666667, ans=0.0
+2024-09-01 00:52:13,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101002.66666666667, ans=0.1
+2024-09-01 00:53:05,014 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.81 vs. limit=15.0
+2024-09-01 00:53:05,343 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.883e+02 4.624e+02 4.997e+02 5.383e+02 6.686e+02, threshold=9.995e+02, percent-clipped=0.0
+2024-09-01 00:54:01,503 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 00:54:20,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=101109.33333333333, ans=0.2
+2024-09-01 00:56:30,068 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 100, loss[loss=0.4099, simple_loss=0.3316, pruned_loss=0.189, ctc_loss=0.2944, over 19229.00 frames. ], tot_loss[loss=0.3792, simple_loss=0.3066, pruned_loss=0.1765, ctc_loss=0.2723, over 1473154.80 frames. ], batch size: 144, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 00:57:11,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=29.46 vs. limit=22.5
+2024-09-01 01:00:20,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=101269.33333333333, ans=0.0
+2024-09-01 01:00:29,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=101269.33333333333, ans=0.125
+2024-09-01 01:01:16,814 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-2.pt
+2024-09-01 01:01:27,525 INFO [dysarthria_finetune.py:1435] (0/4) (1368064000, 34072559616)
+2024-09-01 01:01:27,526 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:01:27,552 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 01:01:35,831 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 0, loss[loss=0.3716, simple_loss=0.3001, pruned_loss=0.1732, ctc_loss=0.2671, over 18603.00 frames. ], tot_loss[loss=0.3716, simple_loss=0.3001, pruned_loss=0.1732, ctc_loss=0.2671, over 18603.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:01:35,832 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:01:59,922 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 3, validation: loss=0.3274, simple_loss=0.2708, pruned_loss=0.1428, ctc_loss=0.2163, over 1073944.00 frames.
+2024-09-01 01:01:59,923 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 01:03:02,874 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.85 vs. limit=5.0
+2024-09-01 01:03:42,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=101477.33333333333, ans=0.07
+2024-09-01 01:04:29,801 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.68 vs. limit=15.0
+2024-09-01 01:04:33,379 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.942e+02 3.454e+02 3.711e+02 3.996e+02 5.509e+02, threshold=7.422e+02, percent-clipped=0.0
+2024-09-01 01:04:56,821 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 50, loss[loss=0.3628, simple_loss=0.2979, pruned_loss=0.1588, ctc_loss=0.2498, over 18964.00 frames. ], tot_loss[loss=0.3617, simple_loss=0.2938, pruned_loss=0.1631, ctc_loss=0.2578, over 827741.27 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:05:07,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=101637.33333333333, ans=0.1
+2024-09-01 01:05:40,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=101690.66666666667, ans=0.125
+2024-09-01 01:06:07,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101744.0, ans=0.1
+2024-09-01 01:06:43,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:06:49,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:07:00,076 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 100, loss[loss=0.3478, simple_loss=0.2835, pruned_loss=0.1557, ctc_loss=0.245, over 19231.00 frames. ], tot_loss[loss=0.3529, simple_loss=0.2873, pruned_loss=0.1579, ctc_loss=0.2503, over 1473938.15 frames. ], batch size: 144, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:07:57,683 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=18.54 vs. limit=15.0
+2024-09-01 01:08:18,703 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.13 vs. limit=6.0
+2024-09-01 01:08:21,702 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-3.pt
+2024-09-01 01:08:26,528 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 01:08:26,529 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:08:26,555 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 01:08:35,077 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 0, loss[loss=0.3452, simple_loss=0.2786, pruned_loss=0.1602, ctc_loss=0.2485, over 18523.00 frames. ], tot_loss[loss=0.3452, simple_loss=0.2786, pruned_loss=0.1602, ctc_loss=0.2485, over 18523.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:08:35,078 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:08:58,403 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 4, validation: loss=0.308, simple_loss=0.2573, pruned_loss=0.1299, ctc_loss=0.2, over 1073944.00 frames.
+2024-09-01 01:08:58,403 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 01:09:33,161 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.484e+02 2.869e+02 3.070e+02 3.452e+02 5.291e+02, threshold=6.140e+02, percent-clipped=0.0
+2024-09-01 01:09:57,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=102160.0, ans=0.2
+2024-09-01 01:10:06,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=102213.33333333333, ans=0.025
+2024-09-01 01:10:47,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=102266.66666666667, ans=0.125
+2024-09-01 01:10:51,030 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 50, loss[loss=0.3704, simple_loss=0.3044, pruned_loss=0.1625, ctc_loss=0.2554, over 18961.00 frames. ], tot_loss[loss=0.3425, simple_loss=0.2794, pruned_loss=0.15, ctc_loss=0.2453, over 828586.64 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:11:03,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102320.0, ans=0.1
+2024-09-01 01:11:07,950 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.22 vs. limit=15.0
+2024-09-01 01:11:25,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=102373.33333333333, ans=0.125
+2024-09-01 01:11:29,832 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.48 vs. limit=15.0
+2024-09-01 01:11:31,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=102373.33333333333, ans=0.0
+2024-09-01 01:11:38,201 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.71 vs. limit=10.0
+2024-09-01 01:11:46,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:12:16,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=102533.33333333333, ans=0.2
+2024-09-01 01:12:25,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=102533.33333333333, ans=0.125
+2024-09-01 01:12:38,908 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 100, loss[loss=0.3573, simple_loss=0.2881, pruned_loss=0.157, ctc_loss=0.2709, over 19286.00 frames. ], tot_loss[loss=0.3347, simple_loss=0.2734, pruned_loss=0.1459, ctc_loss=0.2392, over 1474147.24 frames. ], batch size: 144, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:12:55,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=102586.66666666667, ans=0.125
+2024-09-01 01:12:56,218 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.73 vs. limit=15.0
+2024-09-01 01:13:12,348 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 2.669e+02 2.871e+02 3.122e+02 4.671e+02, threshold=5.742e+02, percent-clipped=0.0
+2024-09-01 01:13:39,282 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:13:40,148 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-4.pt
+2024-09-01 01:13:44,640 INFO [dysarthria_finetune.py:1435] (0/4) (1368064000, 34072559616)
+2024-09-01 01:13:44,640 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:13:44,668 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 01:13:53,058 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 0, loss[loss=0.282, simple_loss=0.231, pruned_loss=0.1172, ctc_loss=0.2094, over 18549.00 frames. ], tot_loss[loss=0.282, simple_loss=0.231, pruned_loss=0.1172, ctc_loss=0.2094, over 18549.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:13:53,059 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:14:16,495 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 5, validation: loss=0.2909, simple_loss=0.2453, pruned_loss=0.1191, ctc_loss=0.1881, over 1073944.00 frames.
+2024-09-01 01:14:16,496 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 01:15:18,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=102741.33333333333, ans=0.025
+2024-09-01 01:15:54,979 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.36 vs. limit=15.0
+2024-09-01 01:16:52,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102848.0, ans=0.0
+2024-09-01 01:17:05,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102848.0, ans=0.1
+2024-09-01 01:17:33,339 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=102901.33333333333, ans=0.0
+2024-09-01 01:19:42,059 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.86 vs. limit=10.0
+2024-09-01 01:20:50,672 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 50, loss[loss=0.3179, simple_loss=0.2635, pruned_loss=0.135, ctc_loss=0.2188, over 19008.00 frames. ], tot_loss[loss=0.3198, simple_loss=0.2635, pruned_loss=0.1343, ctc_loss=0.2292, over 828355.03 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:22:35,136 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=103061.33333333333, ans=0.125
+2024-09-01 01:22:44,294 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-09-01 01:25:33,874 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.253e+02 2.485e+02 2.709e+02 2.997e+02 4.733e+02, threshold=5.419e+02, percent-clipped=0.0
+2024-09-01 01:25:46,312 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.72 vs. limit=15.0
+2024-09-01 01:25:49,667 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.57 vs. limit=22.5
+2024-09-01 01:25:52,259 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.02 vs. limit=22.5
+2024-09-01 01:26:12,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=103221.33333333333, ans=0.025
+2024-09-01 01:26:19,492 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.36 vs. limit=6.0
+2024-09-01 01:26:23,127 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 100, loss[loss=0.3115, simple_loss=0.2548, pruned_loss=0.1334, ctc_loss=0.2282, over 19287.00 frames. ], tot_loss[loss=0.316, simple_loss=0.2604, pruned_loss=0.1331, ctc_loss=0.2261, over 1473652.43 frames. ], batch size: 144, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:26:48,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=103274.66666666667, ans=0.2
+2024-09-01 01:27:24,215 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=103328.0, ans=0.125
+2024-09-01 01:27:28,280 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.96 vs. limit=15.0
+2024-09-01 01:27:48,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103381.33333333333, ans=0.1
+2024-09-01 01:27:55,386 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-5.pt
+2024-09-01 01:28:06,319 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 01:28:06,319 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:28:06,346 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 01:28:14,505 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 0, loss[loss=0.2938, simple_loss=0.2489, pruned_loss=0.113, ctc_loss=0.2044, over 18610.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.2489, pruned_loss=0.113, ctc_loss=0.2044, over 18610.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:28:14,506 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:28:37,895 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 6, validation: loss=0.2789, simple_loss=0.2369, pruned_loss=0.1122, ctc_loss=0.1819, over 1073944.00 frames.
+2024-09-01 01:28:37,896 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 01:28:57,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=103424.0, ans=0.125
+2024-09-01 01:30:32,255 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 50, loss[loss=0.2775, simple_loss=0.2359, pruned_loss=0.1085, ctc_loss=0.1889, over 19047.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.2575, pruned_loss=0.1305, ctc_loss=0.2251, over 829577.21 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:31:04,059 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:31:04,785 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 2.419e+02 2.583e+02 2.819e+02 4.094e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-09-01 01:31:38,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-09-01 01:32:43,095 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 100, loss[loss=0.2663, simple_loss=0.2264, pruned_loss=0.1063, ctc_loss=0.1789, over 19232.00 frames. ], tot_loss[loss=0.2978, simple_loss=0.2479, pruned_loss=0.1225, ctc_loss=0.212, over 1476247.28 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:44,575 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-6.pt
+2024-09-01 01:33:49,546 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 01:33:49,546 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:33:49,572 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 01:33:57,915 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 0, loss[loss=0.2773, simple_loss=0.2403, pruned_loss=0.107, ctc_loss=0.1771, over 18570.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.2403, pruned_loss=0.107, ctc_loss=0.1771, over 18570.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:57,916 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:34:21,896 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 7, validation: loss=0.2604, simple_loss=0.2251, pruned_loss=0.1007, ctc_loss=0.1681, over 1073944.00 frames.
+2024-09-01 01:34:21,897 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 01:34:25,577 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104106.66666666667, ans=0.1
+2024-09-01 01:34:52,442 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=104160.0, ans=0.0
+2024-09-01 01:35:05,677 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.97 vs. limit=15.0
+2024-09-01 01:35:34,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=104213.33333333333, ans=0.0
+2024-09-01 01:35:40,238 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.049e+02 2.272e+02 2.384e+02 2.601e+02 4.291e+02, threshold=4.768e+02, percent-clipped=0.0
+2024-09-01 01:37:02,200 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 50, loss[loss=0.3143, simple_loss=0.2592, pruned_loss=0.1296, ctc_loss=0.2374, over 18968.00 frames. ], tot_loss[loss=0.2908, simple_loss=0.2432, pruned_loss=0.1174, ctc_loss=0.2108, over 827907.61 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:38:09,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=104426.66666666667, ans=0.125
+2024-09-01 01:39:04,052 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.70 vs. limit=15.0
+2024-09-01 01:39:36,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=104533.33333333333, ans=15.0
+2024-09-01 01:39:53,559 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=19.59 vs. limit=22.5
+2024-09-01 01:40:47,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=104586.66666666667, ans=0.1
+2024-09-01 01:40:47,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104586.66666666667, ans=0.1
+2024-09-01 01:40:52,485 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 100, loss[loss=0.286, simple_loss=0.2391, pruned_loss=0.1153, ctc_loss=0.2109, over 19302.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.241, pruned_loss=0.1167, ctc_loss=0.2105, over 1473040.93 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:41:30,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=104640.0, ans=0.125
+2024-09-01 01:41:50,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=104693.33333333333, ans=0.035
+2024-09-01 01:42:07,329 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=9.33 vs. limit=12.0
+2024-09-01 01:42:59,045 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-7.pt
+2024-09-01 01:43:04,118 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 01:43:04,119 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:43:04,145 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 01:43:13,102 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 0, loss[loss=0.2877, simple_loss=0.2454, pruned_loss=0.1148, ctc_loss=0.1975, over 18485.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.2454, pruned_loss=0.1148, ctc_loss=0.1975, over 18485.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:43:13,103 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:44:04,944 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 8, validation: loss=0.2572, simple_loss=0.2228, pruned_loss=0.09973, ctc_loss=0.1708, over 1073944.00 frames.
+2024-09-01 01:44:04,945 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 01:44:08,917 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.92 vs. limit=15.0
+2024-09-01 01:44:16,260 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:44:25,624 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.924e+02 2.205e+02 2.324e+02 2.533e+02 3.850e+02, threshold=4.647e+02, percent-clipped=0.0
+2024-09-01 01:51:43,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=104842.66666666667, ans=0.0
+2024-09-01 01:53:51,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=104949.33333333333, ans=6.0
+2024-09-01 01:54:21,882 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.58 vs. limit=12.0
+2024-09-01 01:54:34,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=104949.33333333333, ans=0.05
+2024-09-01 01:54:53,262 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.77 vs. limit=15.0
+2024-09-01 01:56:19,986 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 50, loss[loss=0.2756, simple_loss=0.2331, pruned_loss=0.1082, ctc_loss=0.204, over 18938.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.2336, pruned_loss=0.109, ctc_loss=0.2025, over 828565.55 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:05,484 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.54 vs. limit=15.0
+2024-09-01 01:58:16,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=105109.33333333333, ans=0.0
+2024-09-01 01:59:06,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=105162.66666666667, ans=0.0
+2024-09-01 01:59:07,119 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.95 vs. limit=15.0
+2024-09-01 01:59:39,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=105216.0, ans=0.025
+2024-09-01 01:59:59,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=105216.0, ans=0.125
+2024-09-01 02:00:29,560 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=12.53 vs. limit=15.0
+2024-09-01 02:00:56,047 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 100, loss[loss=0.2612, simple_loss=0.2296, pruned_loss=0.09556, ctc_loss=0.1819, over 19222.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.2331, pruned_loss=0.109, ctc_loss=0.202, over 1474444.14 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:01:07,951 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.165e+02 2.362e+02 2.610e+02 3.254e+02, threshold=4.723e+02, percent-clipped=0.0
+2024-09-01 02:01:24,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=105322.66666666667, ans=0.0
+2024-09-01 02:01:34,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=105322.66666666667, ans=0.09899494936611666
+2024-09-01 02:01:57,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105376.0, ans=0.125
+2024-09-01 02:02:13,171 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=3.204e-02
+2024-09-01 02:02:56,106 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.18 vs. limit=22.5
+2024-09-01 02:02:57,167 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-8.pt
+2024-09-01 02:03:01,685 INFO [dysarthria_finetune.py:1435] (0/4) (1365966848, 34072559616)
+2024-09-01 02:03:01,685 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:03:01,712 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:03:10,292 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 0, loss[loss=0.2911, simple_loss=0.2426, pruned_loss=0.1212, ctc_loss=0.2131, over 18596.00 frames. ], tot_loss[loss=0.2911, simple_loss=0.2426, pruned_loss=0.1212, ctc_loss=0.2131, over 18596.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:10,293 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:03:33,810 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 9, validation: loss=0.2431, simple_loss=0.2147, pruned_loss=0.0913, ctc_loss=0.1608, over 1073944.00 frames.
+2024-09-01 02:03:33,811 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:04:28,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=105584.0, ans=0.2
+2024-09-01 02:04:49,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:05:08,864 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.40 vs. limit=15.0
+2024-09-01 02:05:31,827 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=105690.66666666667, ans=0.2
+2024-09-01 02:05:38,136 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 50, loss[loss=0.284, simple_loss=0.2405, pruned_loss=0.1115, ctc_loss=0.2153, over 19065.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.2311, pruned_loss=0.1053, ctc_loss=0.2006, over 828972.56 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:48,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=27.06 vs. limit=22.5
+2024-09-01 02:05:58,376 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=105744.0, ans=0.125
+2024-09-01 02:05:58,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105744.0, ans=0.125
+2024-09-01 02:06:37,638 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.168e+02 2.346e+02 2.556e+02 3.441e+02, threshold=4.692e+02, percent-clipped=0.0
+2024-09-01 02:06:49,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=105904.0, ans=0.125
+2024-09-01 02:07:58,205 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=105957.33333333333, ans=15.0
+2024-09-01 02:08:13,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:08:17,420 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 100, loss[loss=0.2064, simple_loss=0.1894, pruned_loss=0.06727, ctc_loss=0.1475, over 19269.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.2268, pruned_loss=0.1013, ctc_loss=0.1939, over 1474236.32 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:48,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=106064.0, ans=0.2
+2024-09-01 02:08:50,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106064.0, ans=0.125
+2024-09-01 02:08:52,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=106064.0, ans=0.2
+2024-09-01 02:09:05,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=106117.33333333333, ans=0.0
+2024-09-01 02:09:08,354 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.68 vs. limit=22.5
+2024-09-01 02:09:14,801 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.74 vs. limit=6.0
+2024-09-01 02:09:19,435 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-9.pt
+2024-09-01 02:09:26,254 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 02:09:26,254 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:09:26,282 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:09:34,441 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 0, loss[loss=0.2289, simple_loss=0.2026, pruned_loss=0.08397, ctc_loss=0.1637, over 18682.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2026, pruned_loss=0.08397, ctc_loss=0.1637, over 18682.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:09:34,441 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:09:58,831 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 10, validation: loss=0.2363, simple_loss=0.211, pruned_loss=0.08786, ctc_loss=0.1591, over 1073944.00 frames.
+2024-09-01 02:09:58,832 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:10:04,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=106165.33333333333, ans=0.0
+2024-09-01 02:10:17,597 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_ff2.min_abs, batch_count=106165.33333333333, ans=0.1
+2024-09-01 02:10:35,288 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.07 vs. limit=15.0
+2024-09-01 02:10:41,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=106272.0, ans=0.125
+2024-09-01 02:11:00,989 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:11:11,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:11:22,762 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.27 vs. limit=6.0
+2024-09-01 02:11:36,258 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.111e+02 2.256e+02 2.412e+02 3.661e+02, threshold=4.511e+02, percent-clipped=0.0
+2024-09-01 02:11:39,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106378.66666666667, ans=0.125
+2024-09-01 02:11:39,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106378.66666666667, ans=0.125
+2024-09-01 02:11:47,312 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 50, loss[loss=0.305, simple_loss=0.255, pruned_loss=0.1214, ctc_loss=0.2443, over 19012.00 frames. ], tot_loss[loss=0.262, simple_loss=0.2252, pruned_loss=0.101, ctc_loss=0.1974, over 829104.52 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:11:50,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:11:50,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:12:18,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=106485.33333333333, ans=0.125
+2024-09-01 02:12:36,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=106538.66666666667, ans=0.025
+2024-09-01 02:12:41,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:13:21,688 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.82 vs. limit=6.0
+2024-09-01 02:13:23,791 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.94 vs. limit=15.0
+2024-09-01 02:13:32,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106645.33333333333, ans=0.125
+2024-09-01 02:13:35,237 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 100, loss[loss=0.2427, simple_loss=0.216, pruned_loss=0.08691, ctc_loss=0.1835, over 19226.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.2232, pruned_loss=0.09907, ctc_loss=0.1934, over 1474931.95 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:13:51,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106698.66666666667, ans=0.125
+2024-09-01 02:14:15,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106752.0, ans=0.125
+2024-09-01 02:14:17,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106805.33333333333, ans=0.1
+2024-09-01 02:14:23,957 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.85 vs. limit=15.0
+2024-09-01 02:14:27,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=106805.33333333333, ans=0.025
+2024-09-01 02:14:34,899 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-10.pt
+2024-09-01 02:14:39,264 INFO [dysarthria_finetune.py:1435] (0/4) (1368064000, 34072559616)
+2024-09-01 02:14:39,264 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:14:39,291 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:14:48,321 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 0, loss[loss=0.2698, simple_loss=0.2322, pruned_loss=0.1077, ctc_loss=0.1957, over 18505.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.2322, pruned_loss=0.1077, ctc_loss=0.1957, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:14:48,322 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:15:11,808 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 11, validation: loss=0.2335, simple_loss=0.2098, pruned_loss=0.0867, ctc_loss=0.1618, over 1073944.00 frames.
+2024-09-01 02:15:11,809 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:15:22,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106853.33333333333, ans=0.1
+2024-09-01 02:16:01,165 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.106e+02 2.175e+02 2.350e+02 3.456e+02, threshold=4.351e+02, percent-clipped=0.0
+2024-09-01 02:16:03,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106906.66666666667, ans=0.1
+2024-09-01 02:16:17,062 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=10.45 vs. limit=15.0
+2024-09-01 02:16:36,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=107013.33333333333, ans=0.0
+2024-09-01 02:16:54,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=107013.33333333333, ans=0.025
+2024-09-01 02:21:55,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107066.66666666667, ans=0.0
+2024-09-01 02:21:58,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=107066.66666666667, ans=0.025
+2024-09-01 02:22:24,190 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 50, loss[loss=0.2693, simple_loss=0.2289, pruned_loss=0.1065, ctc_loss=0.211, over 19023.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.2218, pruned_loss=0.09766, ctc_loss=0.1965, over 827570.26 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:22:31,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=107120.0, ans=0.125
+2024-09-01 02:22:31,947 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.89 vs. limit=22.5
+2024-09-01 02:23:34,131 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:24:12,454 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:24:19,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.92 vs. limit=15.0
+2024-09-01 02:24:26,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107280.0, ans=0.1
+2024-09-01 02:25:09,439 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 100, loss[loss=0.2193, simple_loss=0.1998, pruned_loss=0.07434, ctc_loss=0.1746, over 19237.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.2183, pruned_loss=0.0944, ctc_loss=0.1915, over 1473115.37 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 16.0
+2024-09-01 02:25:36,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107440.0, ans=0.1
+2024-09-01 02:25:41,976 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.31 vs. limit=6.0
+2024-09-01 02:25:52,647 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.027e+02 2.133e+02 2.278e+02 3.178e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-09-01 02:25:54,956 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.56 vs. limit=15.0
+2024-09-01 02:26:15,389 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-11.pt
+2024-09-01 02:26:28,581 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 02:26:28,582 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:26:28,608 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:26:37,015 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 0, loss[loss=0.2513, simple_loss=0.2169, pruned_loss=0.1003, ctc_loss=0.1861, over 18585.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.2169, pruned_loss=0.1003, ctc_loss=0.1861, over 18585.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:37,015 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:27:00,632 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 12, validation: loss=0.2234, simple_loss=0.2042, pruned_loss=0.08189, ctc_loss=0.1554, over 1073944.00 frames.
+2024-09-01 02:27:00,632 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:27:19,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=107541.33333333333, ans=0.2
+2024-09-01 02:27:24,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.58 vs. limit=15.0
+2024-09-01 02:27:38,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=107594.66666666667, ans=10.0
+2024-09-01 02:27:43,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-09-01 02:28:37,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107754.66666666667, ans=0.1
+2024-09-01 02:28:37,653 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.40 vs. limit=15.0
+2024-09-01 02:28:39,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-09-01 02:28:53,634 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 50, loss[loss=0.2094, simple_loss=0.1964, pruned_loss=0.06983, ctc_loss=0.1595, over 18986.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2107, pruned_loss=0.08652, ctc_loss=0.1809, over 829307.75 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:28:55,466 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=10.06 vs. limit=12.0
+2024-09-01 02:29:31,689 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107861.33333333333, ans=0.1
+2024-09-01 02:29:38,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=107914.66666666667, ans=0.125
+2024-09-01 02:29:38,665 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.90 vs. limit=15.0
+2024-09-01 02:29:56,225 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.81 vs. limit=15.0
+2024-09-01 02:30:06,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107968.0, ans=0.125
+2024-09-01 02:30:20,075 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.056e+02 2.167e+02 2.338e+02 2.987e+02, threshold=4.333e+02, percent-clipped=0.0
+2024-09-01 02:30:23,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=108021.33333333333, ans=0.125
+2024-09-01 02:34:06,108 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 100, loss[loss=0.1869, simple_loss=0.1783, pruned_loss=0.06038, ctc_loss=0.1445, over 19194.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.211, pruned_loss=0.08909, ctc_loss=0.1836, over 1473409.16 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:34:14,186 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=10.70 vs. limit=12.0
+2024-09-01 02:35:00,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.59 vs. limit=10.0
+2024-09-01 02:35:45,469 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.98 vs. limit=6.0
+2024-09-01 02:36:42,074 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-12.pt
+2024-09-01 02:36:46,406 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 02:36:46,406 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:36:46,433 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:36:54,943 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 0, loss[loss=0.2847, simple_loss=0.2388, pruned_loss=0.1179, ctc_loss=0.2214, over 18643.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.2388, pruned_loss=0.1179, ctc_loss=0.2214, over 18643.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:54,944 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:37:18,556 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 13, validation: loss=0.2186, simple_loss=0.2014, pruned_loss=0.08061, ctc_loss=0.1543, over 1073944.00 frames.
+2024-09-01 02:37:18,557 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:37:37,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=108229.33333333333, ans=0.2
+2024-09-01 02:37:45,009 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.23 vs. limit=15.0
+2024-09-01 02:37:54,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 02:37:57,604 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.47 vs. limit=12.0
+2024-09-01 02:38:02,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108336.0, ans=0.1
+2024-09-01 02:38:19,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=108336.0, ans=0.035
+2024-09-01 02:39:08,430 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 50, loss[loss=0.19, simple_loss=0.1872, pruned_loss=0.05898, ctc_loss=0.1459, over 19011.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.2092, pruned_loss=0.08686, ctc_loss=0.1819, over 829773.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:14,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=108496.0, ans=0.0
+2024-09-01 02:39:18,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=108496.0, ans=0.2
+2024-09-01 02:39:21,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.22 vs. limit=22.5
+2024-09-01 02:39:30,401 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.005e+02 2.143e+02 2.348e+02 3.224e+02, threshold=4.286e+02, percent-clipped=0.0
+2024-09-01 02:40:02,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=108602.66666666667, ans=0.2
+2024-09-01 02:40:17,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:40:28,577 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.71 vs. limit=22.5
+2024-09-01 02:40:34,794 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.98 vs. limit=15.0
+2024-09-01 02:40:56,423 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 100, loss[loss=0.1837, simple_loss=0.1699, pruned_loss=0.06266, ctc_loss=0.1548, over 19225.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2081, pruned_loss=0.08664, ctc_loss=0.1802, over 1474982.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:40:59,755 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=108762.66666666667, ans=0.2
+2024-09-01 02:41:10,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 02:41:30,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:41:41,597 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.17 vs. limit=15.0
+2024-09-01 02:41:47,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=108869.33333333333, ans=0.025
+2024-09-01 02:41:56,706 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-13.pt
+2024-09-01 02:42:01,259 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 02:42:01,260 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:42:01,288 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:42:09,606 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 0, loss[loss=0.2404, simple_loss=0.2186, pruned_loss=0.09083, ctc_loss=0.1789, over 18695.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.2186, pruned_loss=0.09083, ctc_loss=0.1789, over 18695.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:42:09,607 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:42:31,154 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([4.3587, 4.3646, 4.6946, 4.5945, 4.5833, 4.5968, 4.6705, 4.4017],
+ device='cuda:0')
+2024-09-01 02:42:33,559 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 14, validation: loss=0.209, simple_loss=0.1966, pruned_loss=0.0763, ctc_loss=0.148, over 1073944.00 frames.
+2024-09-01 02:42:33,560 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:42:41,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 02:42:48,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108917.33333333333, ans=0.1
+2024-09-01 02:42:57,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=108970.66666666667, ans=0.2
+2024-09-01 02:43:35,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=109024.0, ans=0.0
+2024-09-01 02:43:42,311 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.90 vs. limit=15.0
+2024-09-01 02:43:42,871 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.996e+02 2.096e+02 2.326e+02 2.912e+02, threshold=4.192e+02, percent-clipped=0.0
+2024-09-01 02:44:24,652 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 50, loss[loss=0.1948, simple_loss=0.1946, pruned_loss=0.06246, ctc_loss=0.147, over 18964.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2036, pruned_loss=0.08346, ctc_loss=0.1768, over 828263.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:44:28,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:45:22,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=109290.66666666667, ans=0.125
+2024-09-01 02:45:35,732 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=18.29 vs. limit=15.0
+2024-09-01 02:46:09,563 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.51 vs. limit=15.0
+2024-09-01 02:46:12,178 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 100, loss[loss=0.196, simple_loss=0.1811, pruned_loss=0.07169, ctc_loss=0.1541, over 19207.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2029, pruned_loss=0.08251, ctc_loss=0.1739, over 1474261.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:46:36,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=109504.0, ans=0.125
+2024-09-01 02:47:12,361 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-14.pt
+2024-09-01 02:47:17,068 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 02:47:17,068 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:47:17,095 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:47:25,787 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 0, loss[loss=0.276, simple_loss=0.2437, pruned_loss=0.1108, ctc_loss=0.2061, over 18509.00 frames. ], tot_loss[loss=0.276, simple_loss=0.2437, pruned_loss=0.1108, ctc_loss=0.2061, over 18509.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:47:25,787 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:48:03,604 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 15, validation: loss=0.2059, simple_loss=0.1951, pruned_loss=0.07588, ctc_loss=0.1481, over 1073944.00 frames.
+2024-09-01 02:48:03,605 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:48:13,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=109605.33333333333, ans=0.125
+2024-09-01 02:48:20,619 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.965e+02 2.102e+02 2.301e+02 3.159e+02, threshold=4.205e+02, percent-clipped=0.0
+2024-09-01 02:48:27,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=109605.33333333333, ans=0.95
+2024-09-01 02:50:38,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=109765.33333333333, ans=0.125
+2024-09-01 02:51:41,397 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 50, loss[loss=0.2064, simple_loss=0.1964, pruned_loss=0.07236, ctc_loss=0.1668, over 19011.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2025, pruned_loss=0.08352, ctc_loss=0.1791, over 827942.50 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:53:10,054 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.78 vs. limit=15.0
+2024-09-01 02:53:57,780 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.77 vs. limit=22.5
+2024-09-01 02:54:10,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110032.0, ans=0.1
+2024-09-01 02:54:19,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=110032.0, ans=0.025
+2024-09-01 02:55:02,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110085.33333333333, ans=0.125
+2024-09-01 02:55:28,635 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 100, loss[loss=0.1792, simple_loss=0.1763, pruned_loss=0.0591, ctc_loss=0.1513, over 19251.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2002, pruned_loss=0.08205, ctc_loss=0.1755, over 1473903.80 frames. ], batch size: 144, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:55:44,087 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 1.961e+02 2.099e+02 2.266e+02 2.969e+02, threshold=4.197e+02, percent-clipped=0.0
+2024-09-01 02:57:18,309 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-15.pt
+2024-09-01 02:57:24,862 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 02:57:24,862 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:57:24,890 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 02:57:33,288 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 0, loss[loss=0.2495, simple_loss=0.2262, pruned_loss=0.09837, ctc_loss=0.1861, over 18729.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.2262, pruned_loss=0.09837, ctc_loss=0.1861, over 18729.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:57:33,289 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:58:10,349 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 16, validation: loss=0.2065, simple_loss=0.1951, pruned_loss=0.07751, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 02:58:10,349 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 02:58:32,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-09-01 02:58:38,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=110346.66666666667, ans=0.0
+2024-09-01 02:58:49,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=110346.66666666667, ans=0.125
+2024-09-01 02:59:01,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=110346.66666666667, ans=0.025
+2024-09-01 02:59:21,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=110400.0, ans=0.025
+2024-09-01 03:00:21,805 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 50, loss[loss=0.2085, simple_loss=0.1941, pruned_loss=0.07676, ctc_loss=0.1723, over 18988.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.1995, pruned_loss=0.08073, ctc_loss=0.1727, over 828175.61 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:00:45,591 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=110560.0, ans=0.125
+2024-09-01 03:01:02,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=110613.33333333333, ans=0.125
+2024-09-01 03:01:12,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=110613.33333333333, ans=0.0
+2024-09-01 03:01:25,489 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 1.971e+02 2.093e+02 2.277e+02 2.936e+02, threshold=4.187e+02, percent-clipped=0.0
+2024-09-01 03:02:04,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 03:02:22,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-09-01 03:02:24,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-09-01 03:02:27,247 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 100, loss[loss=0.2253, simple_loss=0.1982, pruned_loss=0.08526, ctc_loss=0.2047, over 19270.00 frames. ], tot_loss[loss=0.213, simple_loss=0.1975, pruned_loss=0.07974, ctc_loss=0.1709, over 1473314.28 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:02:37,347 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:03:29,007 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-16.pt
+2024-09-01 03:03:32,845 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 03:03:32,846 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:03:32,873 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 03:03:41,377 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 0, loss[loss=0.2455, simple_loss=0.2106, pruned_loss=0.1009, ctc_loss=0.1965, over 18739.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.2106, pruned_loss=0.1009, ctc_loss=0.1965, over 18739.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:03:41,377 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:03:43,427 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.4.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([2.7321, 2.9711, 2.8178, 3.0906], device='cuda:0')
+2024-09-01 03:03:45,988 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.6832, 1.6698, 1.6951, 1.7560, 1.8006, 1.7465, 1.7382, 1.7596],
+ device='cuda:0')
+2024-09-01 03:04:05,400 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 17, validation: loss=0.1943, simple_loss=0.1886, pruned_loss=0.07183, ctc_loss=0.1409, over 1073944.00 frames.
+2024-09-01 03:04:05,401 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 03:04:40,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-09-01 03:04:42,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 03:05:09,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=111088.0, ans=0.0
+2024-09-01 03:05:42,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=111194.66666666667, ans=0.0
+2024-09-01 03:05:45,147 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 1.958e+02 2.075e+02 2.282e+02 2.777e+02, threshold=4.150e+02, percent-clipped=0.0
+2024-09-01 03:05:56,265 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 50, loss[loss=0.2139, simple_loss=0.2033, pruned_loss=0.07789, ctc_loss=0.1716, over 19028.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.1948, pruned_loss=0.07861, ctc_loss=0.1709, over 827378.67 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:06:10,890 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.08 vs. limit=15.0
+2024-09-01 03:07:29,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=111461.33333333333, ans=0.125
+2024-09-01 03:07:40,496 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.88 vs. limit=15.0
+2024-09-01 03:07:45,179 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 100, loss[loss=0.1577, simple_loss=0.1609, pruned_loss=0.05103, ctc_loss=0.1309, over 19218.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.1959, pruned_loss=0.07927, ctc_loss=0.1712, over 1473529.96 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:07:50,985 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.26 vs. limit=6.0
+2024-09-01 03:07:56,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=111514.66666666667, ans=0.1
+2024-09-01 03:08:44,481 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-17.pt
+2024-09-01 03:09:07,772 INFO [dysarthria_finetune.py:1435] (0/4) (1368064000, 34072559616)
+2024-09-01 03:09:07,772 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:09:07,799 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 03:09:15,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 03:09:16,219 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 0, loss[loss=0.2407, simple_loss=0.216, pruned_loss=0.095, ctc_loss=0.1886, over 18538.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.216, pruned_loss=0.095, ctc_loss=0.1886, over 18538.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:09:16,220 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:09:39,625 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 18, validation: loss=0.1961, simple_loss=0.1886, pruned_loss=0.07291, ctc_loss=0.1441, over 1073944.00 frames.
+2024-09-01 03:09:39,625 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 03:09:43,354 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=1.651e-01
+2024-09-01 03:09:45,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=111669.33333333333, ans=0.125
+2024-09-01 03:09:45,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=111669.33333333333, ans=0.125
+2024-09-01 03:10:14,931 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 1.913e+02 2.060e+02 2.285e+02 3.151e+02, threshold=4.120e+02, percent-clipped=0.0
+2024-09-01 03:10:32,023 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=15.78 vs. limit=22.5
+2024-09-01 03:10:55,627 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.55 vs. limit=15.0
+2024-09-01 03:10:57,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=111829.33333333333, ans=0.125
+2024-09-01 03:11:28,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=111936.0, ans=0.125
+2024-09-01 03:11:29,067 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 50, loss[loss=0.1717, simple_loss=0.1789, pruned_loss=0.05455, ctc_loss=0.1384, over 18998.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.1957, pruned_loss=0.07887, ctc_loss=0.1723, over 828205.61 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:11:36,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=111936.0, ans=0.125
+2024-09-01 03:11:45,847 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=28.42 vs. limit=22.5
+2024-09-01 03:12:31,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=112042.66666666667, ans=0.2
+2024-09-01 03:12:44,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=112096.0, ans=0.2
+2024-09-01 03:13:33,983 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.00 vs. limit=22.5
+2024-09-01 03:13:44,692 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 100, loss[loss=0.1647, simple_loss=0.1685, pruned_loss=0.05338, ctc_loss=0.1351, over 19294.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.1936, pruned_loss=0.077, ctc_loss=0.1672, over 1473690.24 frames. ], batch size: 144, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:13:56,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112202.66666666667, ans=0.125
+2024-09-01 03:14:14,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=112256.0, ans=0.0
+2024-09-01 03:14:19,257 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.738e+02 1.898e+02 2.020e+02 2.262e+02 2.800e+02, threshold=4.040e+02, percent-clipped=0.0
+2024-09-01 03:14:37,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=112309.33333333333, ans=0.125
+2024-09-01 03:14:38,016 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.19 vs. limit=22.5
+2024-09-01 03:14:43,282 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-18.pt
+2024-09-01 03:14:46,652 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 03:14:46,652 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:14:46,680 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 03:14:55,813 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 0, loss[loss=0.2336, simple_loss=0.2161, pruned_loss=0.09103, ctc_loss=0.1725, over 18598.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2161, pruned_loss=0.09103, ctc_loss=0.1725, over 18598.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:14:55,814 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:15:42,617 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 19, validation: loss=0.1928, simple_loss=0.1862, pruned_loss=0.07146, ctc_loss=0.1413, over 1073944.00 frames.
+2024-09-01 03:15:42,618 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 03:16:05,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112352.0, ans=0.125
+2024-09-01 03:16:28,513 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.64 vs. limit=15.0
+2024-09-01 03:16:43,241 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=112405.33333333333, ans=0.0
+2024-09-01 03:16:47,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=112405.33333333333, ans=0.5
+2024-09-01 03:17:43,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 03:17:51,721 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=112458.66666666667, ans=0.2
+2024-09-01 03:18:20,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=112512.0, ans=0.0
+2024-09-01 03:19:34,380 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 50, loss[loss=0.1994, simple_loss=0.1861, pruned_loss=0.07271, ctc_loss=0.1683, over 19038.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.1873, pruned_loss=0.07333, ctc_loss=0.1613, over 827203.46 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:20:28,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=112672.0, ans=0.025
+2024-09-01 03:21:16,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112725.33333333333, ans=0.125
+2024-09-01 03:21:58,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=112725.33333333333, ans=0.04949747468305833
+2024-09-01 03:22:34,904 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 1.922e+02 2.090e+02 2.243e+02 2.725e+02, threshold=4.180e+02, percent-clipped=0.0
+2024-09-01 03:22:47,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112832.0, ans=0.0
+2024-09-01 03:23:34,280 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 100, loss[loss=0.1909, simple_loss=0.1792, pruned_loss=0.06871, ctc_loss=0.1631, over 19274.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.1869, pruned_loss=0.07385, ctc_loss=0.1614, over 1472434.33 frames. ], batch size: 144, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:24:30,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=112938.66666666667, ans=0.125
+2024-09-01 03:24:30,719 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=12.47 vs. limit=12.0
+2024-09-01 03:24:44,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=112992.0, ans=0.05
+2024-09-01 03:24:59,892 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=112992.0, ans=0.035
+2024-09-01 03:25:10,294 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.36 vs. limit=15.0
+2024-09-01 03:25:13,449 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-19.pt
+2024-09-01 03:25:17,836 INFO [dysarthria_finetune.py:1435] (0/4) (1328218112, 34072559616)
+2024-09-01 03:25:17,836 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:25:17,864 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 03:25:27,036 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 0, loss[loss=0.1914, simple_loss=0.1827, pruned_loss=0.07141, ctc_loss=0.1432, over 18599.00 frames. ], tot_loss[loss=0.1914, simple_loss=0.1827, pruned_loss=0.07141, ctc_loss=0.1432, over 18599.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:25:27,037 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:26:10,761 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 20, validation: loss=0.19, simple_loss=0.1838, pruned_loss=0.07041, ctc_loss=0.1385, over 1073944.00 frames.
+2024-09-01 03:26:10,762 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26728MB
+2024-09-01 03:26:26,512 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.19 vs. limit=15.0
+2024-09-01 03:26:39,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=113040.0, ans=0.0
+2024-09-01 03:27:12,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=113093.33333333333, ans=0.2
+2024-09-01 03:27:59,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=113146.66666666667, ans=0.125
+2024-09-01 03:28:34,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=113200.0, ans=0.0
+2024-09-01 03:29:04,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=113253.33333333333, ans=0.2
+2024-09-01 03:29:26,707 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 50, loss[loss=0.223, simple_loss=0.202, pruned_loss=0.08467, ctc_loss=0.1864, over 18985.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.1895, pruned_loss=0.07666, ctc_loss=0.1662, over 828130.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:30:05,873 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 1.917e+02 2.046e+02 2.200e+02 2.791e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-09-01 03:30:47,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=113360.0, ans=0.2
+2024-09-01 03:31:57,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=113466.66666666667, ans=0.1
+2024-09-01 03:32:17,088 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.19 vs. limit=10.0
+2024-09-01 03:32:43,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=113520.0, ans=0.0
+2024-09-01 03:33:06,750 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 100, loss[loss=0.1667, simple_loss=0.1658, pruned_loss=0.05756, ctc_loss=0.1309, over 19321.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.1871, pruned_loss=0.0747, ctc_loss=0.1609, over 1472900.97 frames. ], batch size: 144, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:34:16,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=113626.66666666667, ans=0.025
+2024-09-01 03:35:10,299 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune/epoch-20.pt
+2024-09-01 03:35:13,227 INFO [dysarthria_finetune.py:1435] (0/4) (1368064000, 34072559616)
+2024-09-01 03:35:13,227 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:35:13,254 INFO [dysarthria_finetune.py:1440] (0/4) (29818028032, 34072559616)
+2024-09-01 03:35:13,254 INFO [dysarthria_finetune.py:1442] (0/4) Done!
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-1 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-1
new file mode 100644
index 0000000000000000000000000000000000000000..a9f4d26becd84f5feab92e48f5e8795e21f05187
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-1
@@ -0,0 +1,544 @@
+2024-08-31 22:09:00,008 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-31 22:09:15,261 INFO [dysarthria_finetune.py:1214] (1/4) (32783400960, 34072559616)
+2024-08-31 22:09:15,261 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-31 22:09:15,629 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-31 22:09:15,630 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-31 22:09:15,633 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2653.int.cedar.computecanada.ca', 'IP address': '172.16.146.90'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:09:15,633 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-31 22:09:16,872 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66367431
+2024-08-31 22:09:16,872 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 22:10:50,764 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-31 22:11:01,688 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-31 22:11:01,783 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-31 22:11:17,376 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-31 22:11:18,296 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-31 22:11:18,297 INFO [dysarthria_asr_datamodule.py:501] (1/4) About to get dev cuts
+2024-08-31 22:11:18,428 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-31 22:11:18,755 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-31 22:11:18,755 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:12:59,093 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.41 vs. limit=5.0
+2024-08-31 22:12:59,566 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=12.58 vs. limit=7.5
+2024-08-31 22:13:03,035 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12709MB
+2024-08-31 22:13:04,365 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=18.27 vs. limit=7.5
+2024-08-31 22:13:04,879 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12709MB
+2024-08-31 22:14:12,900 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12709MB
+2024-08-31 22:14:14,953 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12709MB
+2024-08-31 22:19:42,273 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=25.56 vs. limit=7.5
+2024-08-31 22:19:51,909 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12709MB
+2024-08-31 22:19:54,347 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12709MB
+2024-08-31 22:20:43,750 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.4218, simple_loss=0.336, pruned_loss=0.2127, ctc_loss=0.3225, over 18549.00 frames. ], tot_loss[loss=0.4218, simple_loss=0.336, pruned_loss=0.2127, ctc_loss=0.3225, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:43,751 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-31 22:46:04,587 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3942, simple_loss=0.3187, pruned_loss=0.1927, ctc_loss=0.281, over 1073944.00 frames.
+2024-08-31 22:46:09,467 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13137MB
+2024-08-31 23:01:27,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-31 23:06:11,558 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 9.685e+02 9.975e+02 1.051e+03 1.091e+03 1.133e+03, threshold=4.203e+03, percent-clipped=0.0
+2024-08-31 23:25:13,000 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 8.960e+02 9.836e+02 1.043e+03 1.067e+03 1.144e+03, threshold=4.173e+03, percent-clipped=0.0
+2024-08-31 23:45:54,002 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=21.65 vs. limit=15.0
+2024-08-31 23:47:54,172 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=100160.0, ans=0.025
+2024-08-31 23:49:19,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=100160.0, ans=0.125
+2024-08-31 23:49:50,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=100160.0, ans=0.125
+2024-08-31 23:50:05,833 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.677e+02 8.648e+02 9.697e+02 1.051e+03 1.144e+03, threshold=3.879e+03, percent-clipped=0.0
+2024-08-31 23:55:15,504 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff2.min_abs, batch_count=100213.33333333333, ans=0.1
+2024-08-31 23:55:38,251 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.4441, simple_loss=0.3529, pruned_loss=0.2241, ctc_loss=0.3407, over 19042.00 frames. ], tot_loss[loss=0.433, simple_loss=0.346, pruned_loss=0.2183, ctc_loss=0.3241, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:56:31,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100266.66666666667, ans=0.1
+2024-08-31 23:59:19,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=100266.66666666667, ans=0.0
+2024-09-01 00:01:26,292 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=22.44 vs. limit=15.0
+2024-09-01 00:02:57,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=100320.0, ans=0.125
+2024-09-01 00:07:56,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=100373.33333333333, ans=0.0
+2024-09-01 00:14:36,710 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.44 vs. limit=22.5
+2024-09-01 00:16:22,837 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.35 vs. limit=15.0
+2024-09-01 00:18:32,452 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.599e+02 6.914e+02 7.776e+02 9.170e+02 1.144e+03, threshold=1.555e+03, percent-clipped=0.0
+2024-09-01 00:18:32,506 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.441, simple_loss=0.349, pruned_loss=0.2281, ctc_loss=0.3385, over 19093.00 frames. ], tot_loss[loss=0.4152, simple_loss=0.3328, pruned_loss=0.2049, ctc_loss=0.3073, over 1470684.91 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:22:05,927 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.56 vs. limit=15.0
+2024-09-01 00:22:28,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100586.66666666667, ans=0.0
+2024-09-01 00:28:33,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=100640.0, ans=0.125
+2024-09-01 00:28:35,595 INFO [dysarthria_finetune.py:1435] (1/4) (14068416512, 34072559616)
+2024-09-01 00:28:35,596 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 00:28:35,632 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 00:30:12,106 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 0, loss[loss=0.3566, simple_loss=0.2869, pruned_loss=0.158, ctc_loss=0.27, over 18746.00 frames. ], tot_loss[loss=0.3566, simple_loss=0.2869, pruned_loss=0.158, ctc_loss=0.27, over 18746.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:30:12,106 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 00:34:27,524 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 2, validation: loss=0.3547, simple_loss=0.2901, pruned_loss=0.1627, ctc_loss=0.2412, over 1073944.00 frames.
+2024-09-01 00:34:27,525 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13656MB
+2024-09-01 00:37:41,578 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.07 vs. limit=15.0
+2024-09-01 00:40:34,560 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=18.56 vs. limit=15.0
+2024-09-01 00:45:12,362 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=22.90 vs. limit=22.5
+2024-09-01 00:45:14,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100842.66666666667, ans=0.125
+2024-09-01 00:45:55,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100896.0, ans=0.125
+2024-09-01 00:46:00,529 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=29.89 vs. limit=22.5
+2024-09-01 00:47:24,008 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=100896.0, ans=0.125
+2024-09-01 00:48:18,417 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 50, loss[loss=0.4438, simple_loss=0.3531, pruned_loss=0.2176, ctc_loss=0.3362, over 19071.00 frames. ], tot_loss[loss=0.3919, simple_loss=0.3156, pruned_loss=0.1853, ctc_loss=0.285, over 827854.65 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 00:52:13,931 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.79 vs. limit=10.0
+2024-09-01 00:53:05,342 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.883e+02 4.624e+02 4.997e+02 5.383e+02 6.686e+02, threshold=9.995e+02, percent-clipped=0.0
+2024-09-01 00:53:41,555 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=101056.0, ans=0.0
+2024-09-01 00:53:44,500 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=22.33 vs. limit=22.5
+2024-09-01 00:54:20,904 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.51 vs. limit=15.0
+2024-09-01 00:55:22,144 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=28.13 vs. limit=22.5
+2024-09-01 00:56:06,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=101162.66666666667, ans=0.125
+2024-09-01 00:56:30,062 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 100, loss[loss=0.3696, simple_loss=0.3008, pruned_loss=0.1697, ctc_loss=0.2581, over 19090.00 frames. ], tot_loss[loss=0.3808, simple_loss=0.3075, pruned_loss=0.1777, ctc_loss=0.2748, over 1472213.55 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 00:57:14,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=101216.0, ans=15.0
+2024-09-01 01:00:20,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=101269.33333333333, ans=0.05
+2024-09-01 01:00:32,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=101322.66666666667, ans=0.2
+2024-09-01 01:01:10,487 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.76 vs. limit=22.5
+2024-09-01 01:01:16,816 INFO [dysarthria_finetune.py:1435] (1/4) (793444352, 34072559616)
+2024-09-01 01:01:16,817 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:01:16,894 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 01:01:35,834 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 0, loss[loss=0.3973, simple_loss=0.3195, pruned_loss=0.1906, ctc_loss=0.2856, over 18511.00 frames. ], tot_loss[loss=0.3973, simple_loss=0.3195, pruned_loss=0.1906, ctc_loss=0.2856, over 18511.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:01:35,835 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:01:59,924 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 3, validation: loss=0.3274, simple_loss=0.2708, pruned_loss=0.1428, ctc_loss=0.2163, over 1073944.00 frames.
+2024-09-01 01:01:59,925 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13713MB
+2024-09-01 01:02:07,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=101370.66666666667, ans=0.2
+2024-09-01 01:03:09,888 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101424.0, ans=0.1
+2024-09-01 01:03:18,406 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.41 vs. limit=15.0
+2024-09-01 01:03:46,764 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=22.31 vs. limit=15.0
+2024-09-01 01:04:33,383 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.942e+02 3.454e+02 3.711e+02 3.996e+02 5.509e+02, threshold=7.422e+02, percent-clipped=0.0
+2024-09-01 01:04:53,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.14 vs. limit=22.5
+2024-09-01 01:04:56,823 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 50, loss[loss=0.3554, simple_loss=0.2909, pruned_loss=0.1567, ctc_loss=0.2475, over 19005.00 frames. ], tot_loss[loss=0.3623, simple_loss=0.2945, pruned_loss=0.1628, ctc_loss=0.2579, over 828905.42 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:06:12,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=101797.33333333333, ans=0.0
+2024-09-01 01:06:42,265 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.22 vs. limit=15.0
+2024-09-01 01:06:46,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=101850.66666666667, ans=0.0
+2024-09-01 01:06:46,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:07:00,095 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 100, loss[loss=0.3174, simple_loss=0.2632, pruned_loss=0.1347, ctc_loss=0.2131, over 19133.00 frames. ], tot_loss[loss=0.3504, simple_loss=0.2856, pruned_loss=0.156, ctc_loss=0.2478, over 1474266.40 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:07:06,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=101904.0, ans=0.125
+2024-09-01 01:07:13,919 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.84 vs. limit=15.0
+2024-09-01 01:07:35,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=101904.0, ans=0.2
+2024-09-01 01:07:40,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101957.33333333333, ans=0.1
+2024-09-01 01:07:45,107 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.46 vs. limit=22.5
+2024-09-01 01:08:21,704 INFO [dysarthria_finetune.py:1435] (1/4) (1508573184, 34072559616)
+2024-09-01 01:08:21,705 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:08:21,776 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 01:08:35,076 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 0, loss[loss=0.313, simple_loss=0.2583, pruned_loss=0.1351, ctc_loss=0.2132, over 18466.00 frames. ], tot_loss[loss=0.313, simple_loss=0.2583, pruned_loss=0.1351, ctc_loss=0.2132, over 18466.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:08:35,077 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:08:58,413 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 4, validation: loss=0.308, simple_loss=0.2573, pruned_loss=0.1299, ctc_loss=0.2, over 1073944.00 frames.
+2024-09-01 01:08:58,414 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13713MB
+2024-09-01 01:09:16,932 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=20.81 vs. limit=15.0
+2024-09-01 01:09:27,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=102106.66666666667, ans=0.07
+2024-09-01 01:09:33,166 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.484e+02 2.869e+02 3.070e+02 3.452e+02 5.291e+02, threshold=6.140e+02, percent-clipped=0.0
+2024-09-01 01:10:51,070 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 50, loss[loss=0.3214, simple_loss=0.2639, pruned_loss=0.1394, ctc_loss=0.2247, over 18961.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.2752, pruned_loss=0.1446, ctc_loss=0.2375, over 827373.05 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:11:07,574 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=102320.0, ans=0.125
+2024-09-01 01:11:48,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:12:12,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=102480.0, ans=0.0
+2024-09-01 01:12:29,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102533.33333333333, ans=0.1
+2024-09-01 01:12:38,912 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 100, loss[loss=0.3201, simple_loss=0.2669, pruned_loss=0.1323, ctc_loss=0.217, over 19038.00 frames. ], tot_loss[loss=0.3311, simple_loss=0.2713, pruned_loss=0.1429, ctc_loss=0.2354, over 1472261.06 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:13:12,352 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 2.669e+02 2.871e+02 3.122e+02 4.671e+02, threshold=5.742e+02, percent-clipped=0.0
+2024-09-01 01:13:40,161 INFO [dysarthria_finetune.py:1435] (1/4) (799735808, 34072559616)
+2024-09-01 01:13:40,162 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:13:40,221 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 01:13:53,068 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 0, loss[loss=0.3073, simple_loss=0.2544, pruned_loss=0.1296, ctc_loss=0.2131, over 18670.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.2544, pruned_loss=0.1296, ctc_loss=0.2131, over 18670.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:13:53,069 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:14:16,501 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 5, validation: loss=0.2909, simple_loss=0.2453, pruned_loss=0.1191, ctc_loss=0.1881, over 1073944.00 frames.
+2024-09-01 01:14:16,501 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13713MB
+2024-09-01 01:14:35,836 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.00 vs. limit=15.0
+2024-09-01 01:15:01,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=102741.33333333333, ans=0.0
+2024-09-01 01:15:18,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=102741.33333333333, ans=0.025
+2024-09-01 01:15:54,697 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.74 vs. limit=22.5
+2024-09-01 01:16:49,754 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=102848.0, ans=0.125
+2024-09-01 01:18:31,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=102901.33333333333, ans=0.125
+2024-09-01 01:20:35,882 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.21 vs. limit=22.5
+2024-09-01 01:20:50,672 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 50, loss[loss=0.3274, simple_loss=0.2667, pruned_loss=0.1438, ctc_loss=0.2369, over 18968.00 frames. ], tot_loss[loss=0.3183, simple_loss=0.2627, pruned_loss=0.1338, ctc_loss=0.2259, over 828630.89 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:21:20,932 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.82 vs. limit=15.0
+2024-09-01 01:24:03,880 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:24:22,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:24:29,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:25:25,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=103168.0, ans=10.0
+2024-09-01 01:25:33,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.253e+02 2.485e+02 2.709e+02 2.997e+02 4.733e+02, threshold=5.419e+02, percent-clipped=0.0
+2024-09-01 01:26:02,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103221.33333333333, ans=0.125
+2024-09-01 01:26:19,127 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-09-01 01:26:23,125 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 100, loss[loss=0.3026, simple_loss=0.2513, pruned_loss=0.1244, ctc_loss=0.2155, over 19157.00 frames. ], tot_loss[loss=0.3131, simple_loss=0.2586, pruned_loss=0.1315, ctc_loss=0.2222, over 1473409.40 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:26:40,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:26:52,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:27:36,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=103381.33333333333, ans=0.0
+2024-09-01 01:27:48,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=103381.33333333333, ans=0.125
+2024-09-01 01:27:54,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=103381.33333333333, ans=0.025
+2024-09-01 01:27:55,391 INFO [dysarthria_finetune.py:1435] (1/4) (468385792, 34072559616)
+2024-09-01 01:27:55,392 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:27:55,477 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 01:28:14,515 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 0, loss[loss=0.3173, simple_loss=0.2612, pruned_loss=0.1317, ctc_loss=0.2336, over 18435.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.2612, pruned_loss=0.1317, ctc_loss=0.2336, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:28:14,516 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:28:37,903 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 6, validation: loss=0.2789, simple_loss=0.2369, pruned_loss=0.1122, ctc_loss=0.1819, over 1073944.00 frames.
+2024-09-01 01:28:37,904 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13962MB
+2024-09-01 01:28:58,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=103424.0, ans=0.125
+2024-09-01 01:29:21,289 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103477.33333333333, ans=0.1
+2024-09-01 01:29:21,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=103477.33333333333, ans=0.125
+2024-09-01 01:29:32,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103530.66666666667, ans=0.1
+2024-09-01 01:29:58,587 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.87 vs. limit=15.0
+2024-09-01 01:30:02,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=103584.0, ans=0.0
+2024-09-01 01:30:11,137 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=13.90 vs. limit=15.0
+2024-09-01 01:30:29,744 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.14 vs. limit=15.0
+2024-09-01 01:30:32,266 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 50, loss[loss=0.2858, simple_loss=0.2375, pruned_loss=0.1173, ctc_loss=0.2057, over 19041.00 frames. ], tot_loss[loss=0.2981, simple_loss=0.2489, pruned_loss=0.1209, ctc_loss=0.2115, over 827399.35 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:30:37,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 01:30:37,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 01:31:04,792 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 2.419e+02 2.583e+02 2.819e+02 4.094e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-09-01 01:31:06,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-09-01 01:31:29,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=103744.0, ans=0.125
+2024-09-01 01:31:40,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-09-01 01:31:45,485 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.55 vs. limit=15.0
+2024-09-01 01:31:51,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=103797.33333333333, ans=0.125
+2024-09-01 01:32:43,094 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 100, loss[loss=0.2708, simple_loss=0.2248, pruned_loss=0.1102, ctc_loss=0.1992, over 19066.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.2461, pruned_loss=0.1199, ctc_loss=0.2093, over 1471849.14 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:15,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 01:33:44,582 INFO [dysarthria_finetune.py:1435] (1/4) (1112211456, 34072559616)
+2024-09-01 01:33:44,582 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:33:44,671 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 01:33:57,916 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 0, loss[loss=0.3301, simple_loss=0.2633, pruned_loss=0.1475, ctc_loss=0.2582, over 18532.00 frames. ], tot_loss[loss=0.3301, simple_loss=0.2633, pruned_loss=0.1475, ctc_loss=0.2582, over 18532.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:57,916 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:34:21,902 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 7, validation: loss=0.2604, simple_loss=0.2251, pruned_loss=0.1007, ctc_loss=0.1681, over 1073944.00 frames.
+2024-09-01 01:34:21,902 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13962MB
+2024-09-01 01:35:11,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104213.33333333333, ans=0.0
+2024-09-01 01:35:39,543 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.94 vs. limit=15.0
+2024-09-01 01:35:40,243 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.049e+02 2.272e+02 2.384e+02 2.601e+02 4.291e+02, threshold=4.768e+02, percent-clipped=0.0
+2024-09-01 01:36:31,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=104320.0, ans=0.125
+2024-09-01 01:36:31,718 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=8.88 vs. limit=12.0
+2024-09-01 01:36:51,662 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=19.61 vs. limit=15.0
+2024-09-01 01:37:01,398 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.16 vs. limit=15.0
+2024-09-01 01:37:02,202 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 50, loss[loss=0.291, simple_loss=0.241, pruned_loss=0.1187, ctc_loss=0.2188, over 19096.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.2425, pruned_loss=0.1165, ctc_loss=0.2101, over 827950.42 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:37:13,597 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.98 vs. limit=15.0
+2024-09-01 01:39:36,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=104533.33333333333, ans=0.125
+2024-09-01 01:40:52,492 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 100, loss[loss=0.2884, simple_loss=0.2429, pruned_loss=0.1151, ctc_loss=0.209, over 19105.00 frames. ], tot_loss[loss=0.2826, simple_loss=0.238, pruned_loss=0.1125, ctc_loss=0.2036, over 1472811.51 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:41:42,009 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.40 vs. limit=15.0
+2024-09-01 01:42:01,332 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.93 vs. limit=15.0
+2024-09-01 01:42:16,919 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=104746.66666666667, ans=0.0
+2024-09-01 01:42:17,198 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.73 vs. limit=15.0
+2024-09-01 01:42:59,066 INFO [dysarthria_finetune.py:1435] (1/4) (14456389632, 34072559616)
+2024-09-01 01:42:59,067 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:42:59,100 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 01:43:13,125 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 0, loss[loss=0.2446, simple_loss=0.2135, pruned_loss=0.09198, ctc_loss=0.1633, over 18679.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.2135, pruned_loss=0.09198, ctc_loss=0.1633, over 18679.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:43:13,125 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:44:04,948 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 8, validation: loss=0.2572, simple_loss=0.2228, pruned_loss=0.09973, ctc_loss=0.1708, over 1073944.00 frames.
+2024-09-01 01:44:04,949 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13962MB
+2024-09-01 01:44:09,716 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.93 vs. limit=12.0
+2024-09-01 01:44:25,626 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.924e+02 2.205e+02 2.324e+02 2.533e+02 3.850e+02, threshold=4.647e+02, percent-clipped=0.0
+2024-09-01 01:44:59,183 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.36 vs. limit=22.5
+2024-09-01 01:51:46,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=104842.66666666667, ans=0.125
+2024-09-01 01:54:05,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-09-01 01:54:30,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=104949.33333333333, ans=0.05
+2024-09-01 01:55:52,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.69 vs. limit=15.0
+2024-09-01 01:56:19,988 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 50, loss[loss=0.255, simple_loss=0.2204, pruned_loss=0.09907, ctc_loss=0.173, over 19009.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.2345, pruned_loss=0.1097, ctc_loss=0.202, over 829068.39 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:59:30,321 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.64 vs. limit=15.0
+2024-09-01 01:59:33,879 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.84 vs. limit=22.5
+2024-09-01 01:59:49,740 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.73 vs. limit=22.5
+2024-09-01 02:00:03,655 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.93 vs. limit=6.0
+2024-09-01 02:00:08,454 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.22 vs. limit=12.0
+2024-09-01 02:00:24,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=105269.33333333333, ans=0.125
+2024-09-01 02:00:56,068 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 100, loss[loss=0.2567, simple_loss=0.223, pruned_loss=0.09536, ctc_loss=0.185, over 19109.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.2327, pruned_loss=0.1081, ctc_loss=0.1993, over 1473116.98 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:00:58,858 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.47 vs. limit=15.0
+2024-09-01 02:01:07,952 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.165e+02 2.362e+02 2.610e+02 3.254e+02, threshold=4.723e+02, percent-clipped=0.0
+2024-09-01 02:01:20,673 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.21 vs. limit=22.5
+2024-09-01 02:02:31,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=105429.33333333333, ans=0.0
+2024-09-01 02:02:57,168 INFO [dysarthria_finetune.py:1435] (1/4) (954925056, 34072559616)
+2024-09-01 02:02:57,169 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:02:57,236 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:03:10,308 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 0, loss[loss=0.2887, simple_loss=0.2399, pruned_loss=0.1205, ctc_loss=0.2133, over 18520.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.2399, pruned_loss=0.1205, ctc_loss=0.2133, over 18520.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:10,309 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:03:33,813 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 9, validation: loss=0.2431, simple_loss=0.2147, pruned_loss=0.0913, ctc_loss=0.1608, over 1073944.00 frames.
+2024-09-01 02:03:33,813 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:03:43,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105477.33333333333, ans=0.125
+2024-09-01 02:03:52,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=105477.33333333333, ans=0.125
+2024-09-01 02:03:57,970 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.40 vs. limit=15.0
+2024-09-01 02:04:03,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=105530.66666666667, ans=0.2
+2024-09-01 02:04:12,985 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.50 vs. limit=22.5
+2024-09-01 02:04:26,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105584.0, ans=0.1
+2024-09-01 02:05:38,157 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 50, loss[loss=0.3039, simple_loss=0.2554, pruned_loss=0.1225, ctc_loss=0.2281, over 19008.00 frames. ], tot_loss[loss=0.268, simple_loss=0.2283, pruned_loss=0.1046, ctc_loss=0.1989, over 827563.28 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:39,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=105744.0, ans=0.025
+2024-09-01 02:05:42,204 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.93 vs. limit=15.0
+2024-09-01 02:05:52,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=105744.0, ans=0.125
+2024-09-01 02:06:32,901 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.05 vs. limit=6.0
+2024-09-01 02:06:37,639 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.168e+02 2.346e+02 2.556e+02 3.441e+02, threshold=4.692e+02, percent-clipped=0.0
+2024-09-01 02:07:33,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=105904.0, ans=0.125
+2024-09-01 02:07:35,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=105904.0, ans=0.2
+2024-09-01 02:08:04,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:08:10,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:08:16,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=106010.66666666667, ans=0.0
+2024-09-01 02:08:17,445 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 100, loss[loss=0.2692, simple_loss=0.2325, pruned_loss=0.1023, ctc_loss=0.2006, over 19113.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.227, pruned_loss=0.1024, ctc_loss=0.1953, over 1473118.75 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:41,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=106064.0, ans=0.2
+2024-09-01 02:09:01,687 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.10 vs. limit=15.0
+2024-09-01 02:09:19,435 INFO [dysarthria_finetune.py:1435] (1/4) (1303052288, 34072559616)
+2024-09-01 02:09:19,436 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:09:19,509 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:09:34,441 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 0, loss[loss=0.2564, simple_loss=0.226, pruned_loss=0.09643, ctc_loss=0.1795, over 18522.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.226, pruned_loss=0.09643, ctc_loss=0.1795, over 18522.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:09:34,441 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:09:58,839 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 10, validation: loss=0.2363, simple_loss=0.211, pruned_loss=0.08786, ctc_loss=0.1591, over 1073944.00 frames.
+2024-09-01 02:09:58,839 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:10:06,684 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=106165.33333333333, ans=0.0
+2024-09-01 02:10:24,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:10:24,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:10:33,209 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.41 vs. limit=15.0
+2024-09-01 02:10:39,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:10:46,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=106272.0, ans=0.0
+2024-09-01 02:11:12,041 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.30 vs. limit=15.0
+2024-09-01 02:11:36,265 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.111e+02 2.256e+02 2.412e+02 3.661e+02, threshold=4.511e+02, percent-clipped=0.0
+2024-09-01 02:11:47,330 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 50, loss[loss=0.2232, simple_loss=0.2074, pruned_loss=0.07342, ctc_loss=0.1559, over 18973.00 frames. ], tot_loss[loss=0.258, simple_loss=0.223, pruned_loss=0.09831, ctc_loss=0.1937, over 826863.11 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:12:21,544 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=20.16 vs. limit=15.0
+2024-09-01 02:12:32,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:12:36,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=106538.66666666667, ans=0.2
+2024-09-01 02:12:38,762 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=106538.66666666667, ans=0.2
+2024-09-01 02:13:04,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=106592.0, ans=0.04949747468305833
+2024-09-01 02:13:35,236 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 100, loss[loss=0.2546, simple_loss=0.2246, pruned_loss=0.09207, ctc_loss=0.1965, over 19188.00 frames. ], tot_loss[loss=0.253, simple_loss=0.22, pruned_loss=0.09552, ctc_loss=0.1901, over 1472464.39 frames. ], batch size: 134, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:13:38,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=106698.66666666667, ans=0.5
+2024-09-01 02:13:45,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106698.66666666667, ans=0.1
+2024-09-01 02:13:53,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-09-01 02:14:34,903 INFO [dysarthria_finetune.py:1435] (1/4) (14122942464, 34072559616)
+2024-09-01 02:14:34,904 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:14:34,957 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:14:48,355 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 0, loss[loss=0.2626, simple_loss=0.2202, pruned_loss=0.106, ctc_loss=0.2062, over 18704.00 frames. ], tot_loss[loss=0.2626, simple_loss=0.2202, pruned_loss=0.106, ctc_loss=0.2062, over 18704.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:14:48,356 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:15:11,815 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 11, validation: loss=0.2335, simple_loss=0.2098, pruned_loss=0.0867, ctc_loss=0.1618, over 1073944.00 frames.
+2024-09-01 02:15:11,815 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:15:37,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=106853.33333333333, ans=0.0
+2024-09-01 02:15:43,438 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.80 vs. limit=15.0
+2024-09-01 02:15:46,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106906.66666666667, ans=0.125
+2024-09-01 02:16:01,168 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.106e+02 2.175e+02 2.350e+02 3.456e+02, threshold=4.351e+02, percent-clipped=0.0
+2024-09-01 02:16:28,286 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106960.0, ans=0.125
+2024-09-01 02:16:31,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=106960.0, ans=0.2
+2024-09-01 02:16:47,207 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.55 vs. limit=22.5
+2024-09-01 02:21:27,199 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107013.33333333333, ans=0.1
+2024-09-01 02:21:37,674 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=107066.66666666667, ans=0.2
+2024-09-01 02:21:58,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff3.min_abs, batch_count=107066.66666666667, ans=0.2
+2024-09-01 02:22:24,203 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 50, loss[loss=0.2509, simple_loss=0.2201, pruned_loss=0.09399, ctc_loss=0.192, over 18947.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.2183, pruned_loss=0.09389, ctc_loss=0.189, over 828704.78 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:22:28,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=107120.0, ans=0.125
+2024-09-01 02:22:33,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107120.0, ans=0.1
+2024-09-01 02:23:31,527 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=107173.33333333333, ans=0.125
+2024-09-01 02:24:10,261 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=13.71 vs. limit=15.0
+2024-09-01 02:24:34,081 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.23 vs. limit=6.0
+2024-09-01 02:24:41,420 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:24:41,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=107280.0, ans=0.0
+2024-09-01 02:24:53,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=107333.33333333333, ans=0.125
+2024-09-01 02:25:09,440 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 100, loss[loss=0.2481, simple_loss=0.214, pruned_loss=0.0948, ctc_loss=0.1981, over 19147.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.2164, pruned_loss=0.09269, ctc_loss=0.1875, over 1473582.76 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 16.0
+2024-09-01 02:25:16,826 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=107386.66666666667, ans=0.125
+2024-09-01 02:25:17,252 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.94 vs. limit=12.0
+2024-09-01 02:25:22,133 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107386.66666666667, ans=0.125
+2024-09-01 02:25:34,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107440.0, ans=0.1
+2024-09-01 02:25:52,642 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.027e+02 2.133e+02 2.278e+02 3.178e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-09-01 02:26:15,389 INFO [dysarthria_finetune.py:1435] (1/4) (594214912, 34072559616)
+2024-09-01 02:26:15,390 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:26:15,476 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:26:37,015 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 0, loss[loss=0.2963, simple_loss=0.2408, pruned_loss=0.1238, ctc_loss=0.2475, over 18735.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.2408, pruned_loss=0.1238, ctc_loss=0.2475, over 18735.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:37,016 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:27:00,638 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 12, validation: loss=0.2234, simple_loss=0.2042, pruned_loss=0.08189, ctc_loss=0.1554, over 1073944.00 frames.
+2024-09-01 02:27:00,638 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:27:24,924 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.67 vs. limit=15.0
+2024-09-01 02:27:29,596 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.09 vs. limit=12.0
+2024-09-01 02:27:50,994 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.88 vs. limit=15.0
+2024-09-01 02:28:04,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.45 vs. limit=15.0
+2024-09-01 02:28:30,529 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:28:42,045 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.26 vs. limit=12.0
+2024-09-01 02:28:53,640 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 50, loss[loss=0.1935, simple_loss=0.1812, pruned_loss=0.06457, ctc_loss=0.1483, over 18974.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2142, pruned_loss=0.0907, ctc_loss=0.1879, over 827168.58 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:29:15,608 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107861.33333333333, ans=0.1
+2024-09-01 02:29:22,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-09-01 02:30:00,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=107968.0, ans=0.0
+2024-09-01 02:30:17,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-09-01 02:30:19,478 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.16 vs. limit=22.5
+2024-09-01 02:30:20,079 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.056e+02 2.167e+02 2.338e+02 2.987e+02, threshold=4.333e+02, percent-clipped=0.0
+2024-09-01 02:30:34,763 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.06 vs. limit=6.0
+2024-09-01 02:34:06,103 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 100, loss[loss=0.2109, simple_loss=0.1987, pruned_loss=0.07313, ctc_loss=0.1513, over 19114.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2133, pruned_loss=0.09117, ctc_loss=0.1866, over 1473649.48 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:35:08,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=108074.66666666667, ans=0.0
+2024-09-01 02:35:45,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=108128.0, ans=0.2
+2024-09-01 02:35:56,698 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=15.84 vs. limit=22.5
+2024-09-01 02:36:36,970 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=31.70 vs. limit=22.5
+2024-09-01 02:36:42,074 INFO [dysarthria_finetune.py:1435] (1/4) (14311686144, 34072559616)
+2024-09-01 02:36:42,074 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:36:42,127 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:36:54,946 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 0, loss[loss=0.2932, simple_loss=0.2442, pruned_loss=0.1225, ctc_loss=0.2299, over 18361.00 frames. ], tot_loss[loss=0.2932, simple_loss=0.2442, pruned_loss=0.1225, ctc_loss=0.2299, over 18361.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:54,946 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:37:18,562 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 13, validation: loss=0.2186, simple_loss=0.2014, pruned_loss=0.08061, ctc_loss=0.1543, over 1073944.00 frames.
+2024-09-01 02:37:18,563 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:37:37,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=108229.33333333333, ans=0.025
+2024-09-01 02:37:46,738 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=108282.66666666667, ans=0.0
+2024-09-01 02:37:47,227 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.90 vs. limit=15.0
+2024-09-01 02:37:55,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 02:38:01,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=108336.0, ans=0.0
+2024-09-01 02:38:04,030 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=108336.0, ans=0.0
+2024-09-01 02:38:12,998 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.76 vs. limit=15.0
+2024-09-01 02:38:28,246 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=26.02 vs. limit=22.5
+2024-09-01 02:38:43,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108389.33333333333, ans=0.0
+2024-09-01 02:38:51,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=108442.66666666667, ans=0.125
+2024-09-01 02:38:56,813 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.78 vs. limit=15.0
+2024-09-01 02:39:08,446 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 50, loss[loss=0.191, simple_loss=0.193, pruned_loss=0.05823, ctc_loss=0.1366, over 19011.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.2069, pruned_loss=0.08682, ctc_loss=0.1828, over 828396.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:14,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108496.0, ans=0.125
+2024-09-01 02:39:16,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=108496.0, ans=0.2
+2024-09-01 02:39:30,398 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.005e+02 2.143e+02 2.348e+02 3.224e+02, threshold=4.286e+02, percent-clipped=0.0
+2024-09-01 02:39:58,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108602.66666666667, ans=0.1
+2024-09-01 02:40:23,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108656.0, ans=0.1
+2024-09-01 02:40:32,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=108656.0, ans=0.0
+2024-09-01 02:40:38,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-09-01 02:40:56,445 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 100, loss[loss=0.2167, simple_loss=0.1985, pruned_loss=0.07983, ctc_loss=0.1637, over 19217.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2059, pruned_loss=0.0858, ctc_loss=0.1799, over 1472353.64 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:40:57,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=108762.66666666667, ans=0.0
+2024-09-01 02:41:06,795 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.51 vs. limit=15.0
+2024-09-01 02:41:08,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=108762.66666666667, ans=0.05
+2024-09-01 02:41:19,490 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=1.132e-02
+2024-09-01 02:41:47,683 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.70 vs. limit=22.5
+2024-09-01 02:41:49,634 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=108869.33333333333, ans=0.07
+2024-09-01 02:41:56,720 INFO [dysarthria_finetune.py:1435] (1/4) (14064222208, 34072559616)
+2024-09-01 02:41:56,721 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:41:56,771 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:42:04,496 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 02:42:09,611 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 0, loss[loss=0.2634, simple_loss=0.2238, pruned_loss=0.1056, ctc_loss=0.2156, over 18619.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.2238, pruned_loss=0.1056, ctc_loss=0.2156, over 18619.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:42:09,611 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:42:14,833 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.2601, 1.2442, 1.5896, 0.6401, 1.6574, 1.7630, 1.6676, 1.6827],
+ device='cuda:1')
+2024-09-01 02:42:33,565 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 14, validation: loss=0.209, simple_loss=0.1966, pruned_loss=0.0763, ctc_loss=0.148, over 1073944.00 frames.
+2024-09-01 02:42:33,566 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:43:06,977 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.73 vs. limit=12.0
+2024-09-01 02:43:37,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=109024.0, ans=0.0
+2024-09-01 02:43:42,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.996e+02 2.096e+02 2.326e+02 2.912e+02, threshold=4.192e+02, percent-clipped=0.0
+2024-09-01 02:43:52,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=109077.33333333333, ans=0.125
+2024-09-01 02:44:24,649 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 50, loss[loss=0.249, simple_loss=0.2262, pruned_loss=0.09241, ctc_loss=0.1968, over 19004.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.2053, pruned_loss=0.08527, ctc_loss=0.1797, over 826629.84 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:44:41,918 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.70 vs. limit=15.0
+2024-09-01 02:44:43,654 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:44:50,599 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.15 vs. limit=10.0
+2024-09-01 02:45:01,162 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109237.33333333333, ans=0.1
+2024-09-01 02:45:16,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.70 vs. limit=15.0
+2024-09-01 02:46:12,202 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 100, loss[loss=0.2042, simple_loss=0.1958, pruned_loss=0.07109, ctc_loss=0.1569, over 19114.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2041, pruned_loss=0.08424, ctc_loss=0.1779, over 1472155.55 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:46:26,422 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.75 vs. limit=6.0
+2024-09-01 02:46:38,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109504.0, ans=0.1
+2024-09-01 02:46:52,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=109504.0, ans=0.0
+2024-09-01 02:47:12,383 INFO [dysarthria_finetune.py:1435] (1/4) (14116651008, 34072559616)
+2024-09-01 02:47:12,384 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:47:12,422 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:47:25,807 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 0, loss[loss=0.2453, simple_loss=0.2093, pruned_loss=0.09896, ctc_loss=0.2011, over 18480.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.2093, pruned_loss=0.09896, ctc_loss=0.2011, over 18480.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:47:25,808 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:48:03,598 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 15, validation: loss=0.2059, simple_loss=0.1951, pruned_loss=0.07588, ctc_loss=0.1481, over 1073944.00 frames.
+2024-09-01 02:48:03,598 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:48:14,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=109605.33333333333, ans=0.125
+2024-09-01 02:48:20,624 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.965e+02 2.102e+02 2.301e+02 3.159e+02, threshold=4.205e+02, percent-clipped=0.0
+2024-09-01 02:48:46,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=109658.66666666667, ans=0.125
+2024-09-01 02:49:18,015 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109658.66666666667, ans=0.1
+2024-09-01 02:49:40,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109712.0, ans=0.125
+2024-09-01 02:50:50,537 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:51:22,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=109818.66666666667, ans=0.0
+2024-09-01 02:51:41,387 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 50, loss[loss=0.2511, simple_loss=0.2222, pruned_loss=0.09594, ctc_loss=0.2113, over 19020.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2037, pruned_loss=0.08513, ctc_loss=0.1815, over 827766.05 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:52:39,277 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=109925.33333333333, ans=0.07
+2024-09-01 02:53:06,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109925.33333333333, ans=0.125
+2024-09-01 02:55:23,016 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=18.70 vs. limit=22.5
+2024-09-01 02:55:28,630 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 100, loss[loss=0.1817, simple_loss=0.1722, pruned_loss=0.06281, ctc_loss=0.1566, over 19074.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2029, pruned_loss=0.08357, ctc_loss=0.1771, over 1471681.17 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:55:44,094 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 1.961e+02 2.099e+02 2.266e+02 2.969e+02, threshold=4.197e+02, percent-clipped=0.0
+2024-09-01 02:56:20,378 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=9.09 vs. limit=15.0
+2024-09-01 02:57:03,848 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.27 vs. limit=22.5
+2024-09-01 02:57:12,072 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.79 vs. limit=15.0
+2024-09-01 02:57:18,308 INFO [dysarthria_finetune.py:1435] (1/4) (2664103936, 34072559616)
+2024-09-01 02:57:18,309 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:57:18,384 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 02:57:33,285 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 0, loss[loss=0.2472, simple_loss=0.2223, pruned_loss=0.09475, ctc_loss=0.2018, over 18847.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.2223, pruned_loss=0.09475, ctc_loss=0.2018, over 18847.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:57:33,285 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:58:10,357 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 16, validation: loss=0.2065, simple_loss=0.1951, pruned_loss=0.07751, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 02:58:10,357 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 02:58:51,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=110346.66666666667, ans=0.0
+2024-09-01 02:59:29,104 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.09 vs. limit=22.5
+2024-09-01 02:59:49,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=110453.33333333333, ans=0.09899494936611666
+2024-09-01 02:59:54,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=110506.66666666667, ans=0.0
+2024-09-01 02:59:58,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=110506.66666666667, ans=0.2
+2024-09-01 03:00:01,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=110506.66666666667, ans=0.0
+2024-09-01 03:00:02,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.15 vs. limit=22.5
+2024-09-01 03:00:21,810 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 50, loss[loss=0.1869, simple_loss=0.1886, pruned_loss=0.06437, ctc_loss=0.1393, over 19018.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.1999, pruned_loss=0.08214, ctc_loss=0.1765, over 827868.27 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:00:43,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110560.0, ans=0.1
+2024-09-01 03:00:49,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=110613.33333333333, ans=0.125
+2024-09-01 03:01:09,984 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.78 vs. limit=10.0
+2024-09-01 03:01:10,112 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.92 vs. limit=15.0
+2024-09-01 03:01:25,487 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 1.971e+02 2.093e+02 2.277e+02 2.936e+02, threshold=4.187e+02, percent-clipped=0.0
+2024-09-01 03:02:27,243 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 100, loss[loss=0.1943, simple_loss=0.1857, pruned_loss=0.06962, ctc_loss=0.1594, over 19118.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.1988, pruned_loss=0.08047, ctc_loss=0.1725, over 1473208.83 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:02:28,764 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.14 vs. limit=22.5
+2024-09-01 03:03:01,498 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.95 vs. limit=22.5
+2024-09-01 03:03:29,025 INFO [dysarthria_finetune.py:1435] (1/4) (168493056, 34072559616)
+2024-09-01 03:03:29,026 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:03:29,085 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 03:03:41,379 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 0, loss[loss=0.2688, simple_loss=0.2431, pruned_loss=0.1072, ctc_loss=0.2005, over 18527.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.2431, pruned_loss=0.1072, ctc_loss=0.2005, over 18527.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:03:41,380 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:03:45,302 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.3.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.6355, 2.0034, 1.7446, 0.6951, 1.7418, 1.8900, 1.8514, 1.3211],
+ device='cuda:1')
+2024-09-01 03:04:05,405 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 17, validation: loss=0.1943, simple_loss=0.1886, pruned_loss=0.07183, ctc_loss=0.1409, over 1073944.00 frames.
+2024-09-01 03:04:05,406 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 03:04:24,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110981.33333333333, ans=0.1
+2024-09-01 03:04:38,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 03:04:40,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111034.66666666667, ans=0.1
+2024-09-01 03:04:53,881 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:05:02,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=111088.0, ans=0.125
+2024-09-01 03:05:09,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=111088.0, ans=0.0
+2024-09-01 03:05:20,835 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=7.75 vs. limit=12.0
+2024-09-01 03:05:45,148 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 1.958e+02 2.075e+02 2.282e+02 2.777e+02, threshold=4.150e+02, percent-clipped=0.0
+2024-09-01 03:05:48,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=111194.66666666667, ans=0.2
+2024-09-01 03:05:56,283 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 50, loss[loss=0.1922, simple_loss=0.19, pruned_loss=0.06448, ctc_loss=0.1634, over 19037.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.1963, pruned_loss=0.07886, ctc_loss=0.1696, over 827680.99 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:06:20,147 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=26.24 vs. limit=22.5
+2024-09-01 03:06:30,826 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=111301.33333333333, ans=0.125
+2024-09-01 03:06:33,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=111301.33333333333, ans=0.95
+2024-09-01 03:06:44,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=111354.66666666667, ans=0.2
+2024-09-01 03:07:10,381 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=111408.0, ans=0.0
+2024-09-01 03:07:14,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=111408.0, ans=0.125
+2024-09-01 03:07:18,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=111408.0, ans=0.125
+2024-09-01 03:07:45,186 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 100, loss[loss=0.1672, simple_loss=0.171, pruned_loss=0.05633, ctc_loss=0.127, over 19067.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.1937, pruned_loss=0.07742, ctc_loss=0.1677, over 1472664.14 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:07:54,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=111514.66666666667, ans=0.95
+2024-09-01 03:08:23,225 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.57 vs. limit=22.5
+2024-09-01 03:08:44,483 INFO [dysarthria_finetune.py:1435] (1/4) (1298857984, 34072559616)
+2024-09-01 03:08:44,484 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:08:44,548 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 03:08:52,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 03:09:16,217 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 0, loss[loss=0.2577, simple_loss=0.2193, pruned_loss=0.104, ctc_loss=0.2202, over 18622.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.2193, pruned_loss=0.104, ctc_loss=0.2202, over 18622.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:09:16,217 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:09:39,632 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 18, validation: loss=0.1961, simple_loss=0.1886, pruned_loss=0.07291, ctc_loss=0.1441, over 1073944.00 frames.
+2024-09-01 03:09:39,632 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 03:10:14,936 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 1.913e+02 2.060e+02 2.285e+02 3.151e+02, threshold=4.120e+02, percent-clipped=0.0
+2024-09-01 03:10:21,024 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.29 vs. limit=10.0
+2024-09-01 03:10:23,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=111776.0, ans=0.025
+2024-09-01 03:10:59,550 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=111829.33333333333, ans=0.125
+2024-09-01 03:11:00,152 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=11.48 vs. limit=12.0
+2024-09-01 03:11:29,090 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 50, loss[loss=0.2202, simple_loss=0.2014, pruned_loss=0.08342, ctc_loss=0.1803, over 19026.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.1936, pruned_loss=0.07855, ctc_loss=0.1693, over 826500.31 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:11:45,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=111936.0, ans=0.2
+2024-09-01 03:12:33,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112096.0, ans=0.1
+2024-09-01 03:12:42,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112096.0, ans=0.0
+2024-09-01 03:12:44,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=112096.0, ans=0.125
+2024-09-01 03:13:05,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112149.33333333333, ans=0.1
+2024-09-01 03:13:44,716 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 100, loss[loss=0.2002, simple_loss=0.1888, pruned_loss=0.07276, ctc_loss=0.1651, over 19036.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.1919, pruned_loss=0.07755, ctc_loss=0.1664, over 1471672.61 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:14:03,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.75 vs. limit=15.0
+2024-09-01 03:14:19,258 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.738e+02 1.898e+02 2.020e+02 2.262e+02 2.800e+02, threshold=4.040e+02, percent-clipped=0.0
+2024-09-01 03:14:22,796 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=112256.0, ans=0.125
+2024-09-01 03:14:43,277 INFO [dysarthria_finetune.py:1435] (1/4) (428539904, 34072559616)
+2024-09-01 03:14:43,279 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:14:43,351 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 03:14:55,814 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 0, loss[loss=0.2309, simple_loss=0.2158, pruned_loss=0.08766, ctc_loss=0.177, over 18691.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.2158, pruned_loss=0.08766, ctc_loss=0.177, over 18691.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:14:55,814 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:15:42,622 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 19, validation: loss=0.1928, simple_loss=0.1862, pruned_loss=0.07146, ctc_loss=0.1413, over 1073944.00 frames.
+2024-09-01 03:15:42,623 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 03:16:00,155 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.51 vs. limit=15.0
+2024-09-01 03:16:31,651 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:16:43,541 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=13.94 vs. limit=15.0
+2024-09-01 03:17:02,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112405.33333333333, ans=0.0
+2024-09-01 03:18:36,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=112512.0, ans=0.125
+2024-09-01 03:18:42,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=112565.33333333333, ans=0.09899494936611666
+2024-09-01 03:19:33,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112618.66666666667, ans=0.125
+2024-09-01 03:19:34,391 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 50, loss[loss=0.2181, simple_loss=0.2028, pruned_loss=0.07977, ctc_loss=0.1846, over 18976.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.1919, pruned_loss=0.07771, ctc_loss=0.1678, over 828010.25 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:22:34,913 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 1.922e+02 2.090e+02 2.243e+02 2.725e+02, threshold=4.180e+02, percent-clipped=0.0
+2024-09-01 03:22:47,886 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.87 vs. limit=22.5
+2024-09-01 03:23:34,303 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 100, loss[loss=0.1669, simple_loss=0.1637, pruned_loss=0.05815, ctc_loss=0.1346, over 19118.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.1916, pruned_loss=0.07696, ctc_loss=0.1665, over 1474453.83 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:23:44,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 03:24:33,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=112938.66666666667, ans=0.125
+2024-09-01 03:24:41,041 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.62 vs. limit=15.0
+2024-09-01 03:25:02,820 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.22 vs. limit=15.0
+2024-09-01 03:25:13,452 INFO [dysarthria_finetune.py:1435] (1/4) (12938051584, 34072559616)
+2024-09-01 03:25:13,454 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:25:13,497 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 03:25:27,039 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 0, loss[loss=0.2548, simple_loss=0.2276, pruned_loss=0.1014, ctc_loss=0.1979, over 18758.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.2276, pruned_loss=0.1014, ctc_loss=0.1979, over 18758.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:25:27,039 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:26:10,759 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 20, validation: loss=0.19, simple_loss=0.1838, pruned_loss=0.07041, ctc_loss=0.1385, over 1073944.00 frames.
+2024-09-01 03:26:10,760 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14024MB
+2024-09-01 03:26:12,982 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.58 vs. limit=15.0
+2024-09-01 03:26:23,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=113040.0, ans=0.125
+2024-09-01 03:27:22,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=113093.33333333333, ans=0.2
+2024-09-01 03:27:59,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113146.66666666667, ans=0.1
+2024-09-01 03:28:35,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=113200.0, ans=0.025
+2024-09-01 03:29:06,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113253.33333333333, ans=0.1
+2024-09-01 03:29:26,740 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 50, loss[loss=0.198, simple_loss=0.1865, pruned_loss=0.07357, ctc_loss=0.1557, over 19069.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.1889, pruned_loss=0.077, ctc_loss=0.1662, over 828644.17 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:29:32,765 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.88 vs. limit=22.5
+2024-09-01 03:30:05,879 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 1.917e+02 2.046e+02 2.200e+02 2.791e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-09-01 03:30:44,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.75 vs. limit=22.5
+2024-09-01 03:31:15,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=113413.33333333333, ans=0.125
+2024-09-01 03:31:23,289 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113413.33333333333, ans=0.125
+2024-09-01 03:32:29,420 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.72 vs. limit=15.0
+2024-09-01 03:33:06,774 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 100, loss[loss=0.1532, simple_loss=0.149, pruned_loss=0.05368, ctc_loss=0.1252, over 19104.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.1865, pruned_loss=0.07485, ctc_loss=0.1617, over 1473557.06 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:33:46,891 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=113573.33333333333, ans=0.125
+2024-09-01 03:34:10,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=113626.66666666667, ans=0.125
+2024-09-01 03:35:10,295 INFO [dysarthria_finetune.py:1435] (1/4) (376111104, 34072559616)
+2024-09-01 03:35:10,296 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:35:10,372 INFO [dysarthria_finetune.py:1440] (1/4) (29688004608, 34072559616)
+2024-09-01 03:35:10,373 INFO [dysarthria_finetune.py:1442] (1/4) Done!
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-2 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-2
new file mode 100644
index 0000000000000000000000000000000000000000..944c6a92f6ad640c9474b75e517774daab084e93
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-2
@@ -0,0 +1,551 @@
+2024-08-31 22:09:00,004 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-31 22:09:00,960 INFO [dysarthria_finetune.py:1214] (2/4) (33106362368, 34072559616)
+2024-08-31 22:09:00,960 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-31 22:09:01,332 INFO [dysarthria_finetune.py:1219] (2/4) (33106362368, 34072559616)
+2024-08-31 22:09:01,332 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-31 22:09:01,335 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2653.int.cedar.computecanada.ca', 'IP address': '172.16.146.90'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:09:01,335 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-31 22:09:16,893 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66367431
+2024-08-31 22:09:16,893 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 22:10:50,739 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-31 22:11:01,693 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-31 22:11:01,783 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-31 22:11:17,376 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-31 22:11:18,285 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-31 22:11:18,291 INFO [dysarthria_asr_datamodule.py:501] (2/4) About to get dev cuts
+2024-08-31 22:11:18,428 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-31 22:11:18,753 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-31 22:11:18,754 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:12:59,095 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.41 vs. limit=5.0
+2024-08-31 22:12:59,567 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=15.16 vs. limit=7.5
+2024-08-31 22:13:03,031 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12713MB
+2024-08-31 22:13:04,361 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=18.51 vs. limit=7.5
+2024-08-31 22:13:04,877 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12713MB
+2024-08-31 22:14:12,892 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12713MB
+2024-08-31 22:14:14,953 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12713MB
+2024-08-31 22:19:47,416 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=27.15 vs. limit=7.5
+2024-08-31 22:19:51,922 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12713MB
+2024-08-31 22:19:54,352 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12713MB
+2024-08-31 22:20:43,766 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.4194, simple_loss=0.3368, pruned_loss=0.2044, ctc_loss=0.3107, over 18533.00 frames. ], tot_loss[loss=0.4194, simple_loss=0.3368, pruned_loss=0.2044, ctc_loss=0.3107, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:43,766 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-31 22:46:04,595 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3942, simple_loss=0.3187, pruned_loss=0.1927, ctc_loss=0.281, over 1073944.00 frames.
+2024-08-31 22:46:14,297 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19801MB
+2024-08-31 23:06:10,406 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=16.18 vs. limit=15.0
+2024-08-31 23:06:10,480 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.51 vs. limit=22.5
+2024-08-31 23:06:11,554 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 9.685e+02 9.975e+02 1.051e+03 1.091e+03 1.133e+03, threshold=4.203e+03, percent-clipped=0.0
+2024-08-31 23:23:02,239 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=100053.33333333333, ans=0.125
+2024-08-31 23:25:12,994 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 8.960e+02 9.836e+02 1.043e+03 1.067e+03 1.144e+03, threshold=4.173e+03, percent-clipped=0.0
+2024-08-31 23:45:44,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=8.30 vs. limit=6.0
+2024-08-31 23:49:49,539 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.16 vs. limit=6.0
+2024-08-31 23:50:05,828 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.677e+02 8.648e+02 9.697e+02 1.051e+03 1.144e+03, threshold=3.879e+03, percent-clipped=0.0
+2024-08-31 23:55:38,225 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.4319, simple_loss=0.3435, pruned_loss=0.2185, ctc_loss=0.3295, over 19018.00 frames. ], tot_loss[loss=0.4291, simple_loss=0.3434, pruned_loss=0.2152, ctc_loss=0.3194, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-09-01 00:03:12,048 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.75 vs. limit=6.0
+2024-09-01 00:03:33,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=100320.0, ans=0.125
+2024-09-01 00:08:31,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=100373.33333333333, ans=0.025
+2024-09-01 00:13:07,229 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.73 vs. limit=15.0
+2024-09-01 00:18:32,448 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.599e+02 6.914e+02 7.776e+02 9.170e+02 1.144e+03, threshold=1.555e+03, percent-clipped=0.0
+2024-09-01 00:18:32,485 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3764, simple_loss=0.3057, pruned_loss=0.1764, ctc_loss=0.2637, over 19117.00 frames. ], tot_loss[loss=0.4128, simple_loss=0.331, pruned_loss=0.2035, ctc_loss=0.3049, over 1475925.13 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:23:33,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=100586.66666666667, ans=0.025
+2024-09-01 00:27:43,313 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.88 vs. limit=15.0
+2024-09-01 00:27:58,750 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=31.79 vs. limit=22.5
+2024-09-01 00:28:35,635 INFO [dysarthria_finetune.py:1435] (2/4) (10100604928, 34072559616)
+2024-09-01 00:28:35,636 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 00:28:35,657 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 00:30:12,128 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 0, loss[loss=0.3746, simple_loss=0.3028, pruned_loss=0.1772, ctc_loss=0.2681, over 18502.00 frames. ], tot_loss[loss=0.3746, simple_loss=0.3028, pruned_loss=0.1772, ctc_loss=0.2681, over 18502.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:30:12,129 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 00:34:27,519 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 2, validation: loss=0.3547, simple_loss=0.2901, pruned_loss=0.1627, ctc_loss=0.2412, over 1073944.00 frames.
+2024-09-01 00:34:27,520 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 00:41:34,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=100789.33333333333, ans=0.125
+2024-09-01 00:41:38,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=100789.33333333333, ans=0.05
+2024-09-01 00:43:08,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:43:14,095 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=20.01 vs. limit=15.0
+2024-09-01 00:45:12,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=100842.66666666667, ans=0.09899494936611666
+2024-09-01 00:48:18,393 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 50, loss[loss=0.3747, simple_loss=0.304, pruned_loss=0.1698, ctc_loss=0.2683, over 18952.00 frames. ], tot_loss[loss=0.3931, simple_loss=0.3162, pruned_loss=0.1878, ctc_loss=0.286, over 829638.79 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 00:48:23,809 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=100949.33333333333, ans=0.125
+2024-09-01 00:50:02,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101002.66666666667, ans=0.1
+2024-09-01 00:51:31,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=101002.66666666667, ans=22.5
+2024-09-01 00:53:05,353 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.883e+02 4.624e+02 4.997e+02 5.383e+02 6.686e+02, threshold=9.995e+02, percent-clipped=0.0
+2024-09-01 00:53:41,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101056.0, ans=0.1
+2024-09-01 00:54:47,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=101109.33333333333, ans=0.125
+2024-09-01 00:55:04,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=101109.33333333333, ans=0.125
+2024-09-01 00:56:26,667 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101216.0, ans=0.1
+2024-09-01 00:56:30,069 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 100, loss[loss=0.3088, simple_loss=0.2566, pruned_loss=0.1307, ctc_loss=0.2019, over 19108.00 frames. ], tot_loss[loss=0.3788, simple_loss=0.3059, pruned_loss=0.1776, ctc_loss=0.2728, over 1476292.15 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 00:56:56,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101216.0, ans=0.1
+2024-09-01 00:57:14,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=101216.0, ans=0.125
+2024-09-01 00:59:53,910 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=20.21 vs. limit=15.0
+2024-09-01 01:00:09,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=101269.33333333333, ans=0.2
+2024-09-01 01:00:28,276 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=101269.33333333333, ans=0.2
+2024-09-01 01:01:04,886 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.72 vs. limit=15.0
+2024-09-01 01:01:15,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=101322.66666666667, ans=0.025
+2024-09-01 01:01:16,824 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 01:01:16,824 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:01:16,866 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 01:01:24,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-09-01 01:01:35,837 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 0, loss[loss=0.3886, simple_loss=0.3111, pruned_loss=0.1873, ctc_loss=0.2854, over 18600.00 frames. ], tot_loss[loss=0.3886, simple_loss=0.3111, pruned_loss=0.1873, ctc_loss=0.2854, over 18600.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:01:35,837 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:01:59,928 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 3, validation: loss=0.3274, simple_loss=0.2708, pruned_loss=0.1428, ctc_loss=0.2163, over 1073944.00 frames.
+2024-09-01 01:01:59,928 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 01:03:09,911 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=101424.0, ans=0.2
+2024-09-01 01:03:53,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=101477.33333333333, ans=0.09899494936611666
+2024-09-01 01:03:59,503 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=26.13 vs. limit=22.5
+2024-09-01 01:04:06,819 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=27.42 vs. limit=22.5
+2024-09-01 01:04:33,381 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.942e+02 3.454e+02 3.711e+02 3.996e+02 5.509e+02, threshold=7.422e+02, percent-clipped=0.0
+2024-09-01 01:04:56,820 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 50, loss[loss=0.3581, simple_loss=0.2895, pruned_loss=0.1633, ctc_loss=0.2597, over 19168.00 frames. ], tot_loss[loss=0.3653, simple_loss=0.2967, pruned_loss=0.1653, ctc_loss=0.26, over 828229.52 frames. ], batch size: 103, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:05:02,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=101637.33333333333, ans=0.125
+2024-09-01 01:05:38,370 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=101690.66666666667, ans=0.125
+2024-09-01 01:05:38,830 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=19.91 vs. limit=15.0
+2024-09-01 01:05:41,749 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=27.19 vs. limit=22.5
+2024-09-01 01:06:07,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101744.0, ans=0.1
+2024-09-01 01:06:09,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=101744.0, ans=0.125
+2024-09-01 01:06:19,422 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.30 vs. limit=15.0
+2024-09-01 01:06:21,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=101797.33333333333, ans=0.125
+2024-09-01 01:06:44,325 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=20.87 vs. limit=15.0
+2024-09-01 01:06:46,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=101850.66666666667, ans=15.0
+2024-09-01 01:07:00,078 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 100, loss[loss=0.3048, simple_loss=0.2513, pruned_loss=0.1295, ctc_loss=0.2104, over 19024.00 frames. ], tot_loss[loss=0.3547, simple_loss=0.2887, pruned_loss=0.1592, ctc_loss=0.2512, over 1476045.82 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:08:05,640 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.50 vs. limit=15.0
+2024-09-01 01:08:07,279 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102010.66666666667, ans=0.125
+2024-09-01 01:08:21,715 INFO [dysarthria_finetune.py:1435] (2/4) (10138353664, 34072559616)
+2024-09-01 01:08:21,715 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:08:21,747 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 01:08:35,097 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 0, loss[loss=0.36, simple_loss=0.292, pruned_loss=0.1595, ctc_loss=0.2627, over 18618.00 frames. ], tot_loss[loss=0.36, simple_loss=0.292, pruned_loss=0.1595, ctc_loss=0.2627, over 18618.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:08:35,097 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:08:58,405 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 4, validation: loss=0.308, simple_loss=0.2573, pruned_loss=0.1299, ctc_loss=0.2, over 1073944.00 frames.
+2024-09-01 01:08:58,406 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 01:09:19,175 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.80 vs. limit=15.0
+2024-09-01 01:09:33,167 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.484e+02 2.869e+02 3.070e+02 3.452e+02 5.291e+02, threshold=6.140e+02, percent-clipped=0.0
+2024-09-01 01:09:38,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=102106.66666666667, ans=0.125
+2024-09-01 01:09:48,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:09:55,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:10:19,511 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=102213.33333333333, ans=0.2
+2024-09-01 01:10:32,581 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:10:47,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102266.66666666667, ans=0.125
+2024-09-01 01:10:51,037 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 50, loss[loss=0.3395, simple_loss=0.276, pruned_loss=0.149, ctc_loss=0.2475, over 18961.00 frames. ], tot_loss[loss=0.3414, simple_loss=0.2789, pruned_loss=0.1484, ctc_loss=0.2439, over 828488.26 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:10:59,363 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=14.85 vs. limit=15.0
+2024-09-01 01:11:05,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=102320.0, ans=0.0
+2024-09-01 01:11:12,206 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.95 vs. limit=15.0
+2024-09-01 01:11:19,063 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.83 vs. limit=10.0
+2024-09-01 01:11:27,783 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.72 vs. limit=22.5
+2024-09-01 01:11:53,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:12:02,015 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.40 vs. limit=10.0
+2024-09-01 01:12:08,734 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.72 vs. limit=6.0
+2024-09-01 01:12:38,912 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 100, loss[loss=0.3042, simple_loss=0.2512, pruned_loss=0.1326, ctc_loss=0.2065, over 19090.00 frames. ], tot_loss[loss=0.3367, simple_loss=0.2753, pruned_loss=0.1466, ctc_loss=0.2401, over 1476821.49 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:12:55,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=102586.66666666667, ans=0.0
+2024-09-01 01:13:01,027 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=19.80 vs. limit=15.0
+2024-09-01 01:13:04,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=102640.0, ans=0.125
+2024-09-01 01:13:09,811 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=17.88 vs. limit=15.0
+2024-09-01 01:13:12,349 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 2.669e+02 2.871e+02 3.122e+02 4.671e+02, threshold=5.742e+02, percent-clipped=0.0
+2024-09-01 01:13:40,143 INFO [dysarthria_finetune.py:1435] (2/4) (10169810944, 34072559616)
+2024-09-01 01:13:40,144 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:13:40,189 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 01:13:53,058 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 0, loss[loss=0.3214, simple_loss=0.2649, pruned_loss=0.136, ctc_loss=0.227, over 18551.00 frames. ], tot_loss[loss=0.3214, simple_loss=0.2649, pruned_loss=0.136, ctc_loss=0.227, over 18551.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:13:53,058 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:14:16,500 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 5, validation: loss=0.2909, simple_loss=0.2453, pruned_loss=0.1191, ctc_loss=0.1881, over 1073944.00 frames.
+2024-09-01 01:14:16,501 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 01:15:29,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 01:15:51,501 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.05 vs. limit=6.0
+2024-09-01 01:15:55,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=102794.66666666667, ans=0.1
+2024-09-01 01:15:55,872 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.96 vs. limit=15.0
+2024-09-01 01:16:06,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=102794.66666666667, ans=0.0
+2024-09-01 01:16:58,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102848.0, ans=0.0
+2024-09-01 01:17:08,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=102848.0, ans=0.125
+2024-09-01 01:17:19,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=102848.0, ans=0.125
+2024-09-01 01:18:35,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=102901.33333333333, ans=0.125
+2024-09-01 01:20:50,700 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 50, loss[loss=0.2865, simple_loss=0.2385, pruned_loss=0.1176, ctc_loss=0.1999, over 19027.00 frames. ], tot_loss[loss=0.3257, simple_loss=0.2679, pruned_loss=0.1381, ctc_loss=0.2331, over 828775.72 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:21:28,674 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.16 vs. limit=15.0
+2024-09-01 01:21:54,518 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.88 vs. limit=6.0
+2024-09-01 01:22:28,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=103061.33333333333, ans=0.2
+2024-09-01 01:24:14,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=103114.66666666667, ans=0.0
+2024-09-01 01:24:46,487 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=22.75 vs. limit=15.0
+2024-09-01 01:25:33,880 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.253e+02 2.485e+02 2.709e+02 2.997e+02 4.733e+02, threshold=5.419e+02, percent-clipped=0.0
+2024-09-01 01:26:15,753 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-09-01 01:26:21,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:26:23,136 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 100, loss[loss=0.3173, simple_loss=0.2618, pruned_loss=0.1357, ctc_loss=0.2234, over 19114.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.2599, pruned_loss=0.1329, ctc_loss=0.2248, over 1478197.42 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:26:40,040 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.14 vs. limit=10.0
+2024-09-01 01:26:43,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103274.66666666667, ans=0.1
+2024-09-01 01:27:05,510 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=8.37 vs. limit=12.0
+2024-09-01 01:27:34,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103381.33333333333, ans=0.1
+2024-09-01 01:27:55,386 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 01:27:55,387 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:27:55,421 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 01:28:14,511 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 0, loss[loss=0.2893, simple_loss=0.2454, pruned_loss=0.1135, ctc_loss=0.1957, over 18783.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.2454, pruned_loss=0.1135, ctc_loss=0.1957, over 18783.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:28:14,512 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:28:37,895 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 6, validation: loss=0.2789, simple_loss=0.2369, pruned_loss=0.1122, ctc_loss=0.1819, over 1073944.00 frames.
+2024-09-01 01:28:37,895 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 01:29:32,699 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.80 vs. limit=15.0
+2024-09-01 01:29:51,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=103584.0, ans=0.025
+2024-09-01 01:29:53,844 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.09 vs. limit=15.0
+2024-09-01 01:30:17,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=103637.33333333333, ans=0.025
+2024-09-01 01:30:32,251 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 50, loss[loss=0.2931, simple_loss=0.2474, pruned_loss=0.1183, ctc_loss=0.1988, over 19006.00 frames. ], tot_loss[loss=0.2996, simple_loss=0.2486, pruned_loss=0.1228, ctc_loss=0.2163, over 828020.78 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:30:35,774 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=12.83 vs. limit=15.0
+2024-09-01 01:31:04,785 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 2.419e+02 2.583e+02 2.819e+02 4.094e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-09-01 01:31:29,862 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.99 vs. limit=15.0
+2024-09-01 01:31:29,910 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.98 vs. limit=6.0
+2024-09-01 01:31:38,448 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=103797.33333333333, ans=0.125
+2024-09-01 01:31:38,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-09-01 01:31:49,378 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-09-01 01:32:43,096 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 100, loss[loss=0.3281, simple_loss=0.2726, pruned_loss=0.1348, ctc_loss=0.2379, over 19060.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.248, pruned_loss=0.1225, ctc_loss=0.2137, over 1475525.13 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:15,651 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 01:33:44,580 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 01:33:44,581 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:33:44,617 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 01:33:57,935 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 0, loss[loss=0.2927, simple_loss=0.2482, pruned_loss=0.1179, ctc_loss=0.1984, over 18435.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.2482, pruned_loss=0.1179, ctc_loss=0.1984, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:57,935 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:34:21,903 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 7, validation: loss=0.2604, simple_loss=0.2251, pruned_loss=0.1007, ctc_loss=0.1681, over 1073944.00 frames.
+2024-09-01 01:34:21,904 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 01:34:50,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=104160.0, ans=0.125
+2024-09-01 01:35:40,238 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.049e+02 2.272e+02 2.384e+02 2.601e+02 4.291e+02, threshold=4.768e+02, percent-clipped=0.0
+2024-09-01 01:37:02,208 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 50, loss[loss=0.2726, simple_loss=0.2309, pruned_loss=0.1049, ctc_loss=0.1991, over 18970.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.2474, pruned_loss=0.121, ctc_loss=0.2161, over 828175.40 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:37:57,613 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104426.66666666667, ans=0.125
+2024-09-01 01:39:43,302 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.25 vs. limit=15.0
+2024-09-01 01:40:04,597 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.92 vs. limit=22.5
+2024-09-01 01:40:52,487 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 100, loss[loss=0.2466, simple_loss=0.2186, pruned_loss=0.08723, ctc_loss=0.1633, over 19065.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.2411, pruned_loss=0.1162, ctc_loss=0.2083, over 1476190.89 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:41:03,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=104640.0, ans=0.125
+2024-09-01 01:41:07,881 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.48 vs. limit=6.0
+2024-09-01 01:41:50,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=104693.33333333333, ans=0.125
+2024-09-01 01:42:54,534 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104746.66666666667, ans=0.0
+2024-09-01 01:42:59,051 INFO [dysarthria_finetune.py:1435] (2/4) (10176102400, 34072559616)
+2024-09-01 01:42:59,052 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:42:59,100 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 01:43:13,095 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 0, loss[loss=0.2607, simple_loss=0.2234, pruned_loss=0.09992, ctc_loss=0.1855, over 18635.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.2234, pruned_loss=0.09992, ctc_loss=0.1855, over 18635.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:43:13,095 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:44:04,949 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 8, validation: loss=0.2572, simple_loss=0.2228, pruned_loss=0.09973, ctc_loss=0.1708, over 1073944.00 frames.
+2024-09-01 01:44:04,950 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 01:44:10,073 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.73 vs. limit=12.0
+2024-09-01 01:44:25,630 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.924e+02 2.205e+02 2.324e+02 2.533e+02 3.850e+02, threshold=4.647e+02, percent-clipped=0.0
+2024-09-01 01:44:41,305 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.98 vs. limit=15.0
+2024-09-01 01:44:58,155 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=16.56 vs. limit=15.0
+2024-09-01 01:52:39,804 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.12 vs. limit=15.0
+2024-09-01 01:52:53,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104896.0, ans=0.0
+2024-09-01 01:54:00,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-09-01 01:54:40,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104949.33333333333, ans=0.1
+2024-09-01 01:55:49,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=105002.66666666667, ans=0.2
+2024-09-01 01:56:19,985 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 50, loss[loss=0.2533, simple_loss=0.2276, pruned_loss=0.08824, ctc_loss=0.1662, over 19000.00 frames. ], tot_loss[loss=0.2852, simple_loss=0.2386, pruned_loss=0.1145, ctc_loss=0.2132, over 827531.12 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:56:45,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=105056.0, ans=0.0
+2024-09-01 01:56:45,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=105056.0, ans=0.07
+2024-09-01 02:00:20,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105269.33333333333, ans=0.125
+2024-09-01 02:00:31,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=105269.33333333333, ans=0.125
+2024-09-01 02:00:56,053 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 100, loss[loss=0.2517, simple_loss=0.2194, pruned_loss=0.09369, ctc_loss=0.1783, over 19093.00 frames. ], tot_loss[loss=0.2757, simple_loss=0.2327, pruned_loss=0.1092, ctc_loss=0.2035, over 1475468.79 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:01:07,948 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.165e+02 2.362e+02 2.610e+02 3.254e+02, threshold=4.723e+02, percent-clipped=0.0
+2024-09-01 02:01:29,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105322.66666666667, ans=0.125
+2024-09-01 02:02:23,360 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=105376.0, ans=0.2
+2024-09-01 02:02:36,588 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=105429.33333333333, ans=0.1
+2024-09-01 02:02:56,068 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.63 vs. limit=22.5
+2024-09-01 02:02:57,170 INFO [dysarthria_finetune.py:1435] (2/4) (10142547968, 34072559616)
+2024-09-01 02:02:57,171 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:02:57,207 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:03:10,289 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 0, loss[loss=0.2894, simple_loss=0.243, pruned_loss=0.1182, ctc_loss=0.212, over 18461.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.243, pruned_loss=0.1182, ctc_loss=0.212, over 18461.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:10,290 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:03:33,813 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 9, validation: loss=0.2431, simple_loss=0.2147, pruned_loss=0.0913, ctc_loss=0.1608, over 1073944.00 frames.
+2024-09-01 02:03:33,813 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:03:38,872 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.30 vs. limit=15.0
+2024-09-01 02:03:52,170 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=105477.33333333333, ans=0.2
+2024-09-01 02:04:08,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=105530.66666666667, ans=0.125
+2024-09-01 02:04:52,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:05:27,315 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105690.66666666667, ans=0.125
+2024-09-01 02:05:38,142 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 50, loss[loss=0.2838, simple_loss=0.2448, pruned_loss=0.1078, ctc_loss=0.2093, over 18943.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.2266, pruned_loss=0.1032, ctc_loss=0.1967, over 826909.81 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:52,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=105744.0, ans=0.125
+2024-09-01 02:06:23,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=105797.33333333333, ans=0.125
+2024-09-01 02:06:37,636 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.168e+02 2.346e+02 2.556e+02 3.441e+02, threshold=4.692e+02, percent-clipped=0.0
+2024-09-01 02:07:40,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=105904.0, ans=0.025
+2024-09-01 02:07:53,865 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.29 vs. limit=15.0
+2024-09-01 02:07:53,880 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.17 vs. limit=10.0
+2024-09-01 02:08:10,820 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:08:17,428 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 100, loss[loss=0.2426, simple_loss=0.2191, pruned_loss=0.08446, ctc_loss=0.1699, over 19136.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.2261, pruned_loss=0.1022, ctc_loss=0.1967, over 1474643.82 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:27,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=106010.66666666667, ans=0.05
+2024-09-01 02:08:30,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:08:30,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:08:43,574 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=106064.0, ans=0.025
+2024-09-01 02:09:19,445 INFO [dysarthria_finetune.py:1435] (2/4) (10142547968, 34072559616)
+2024-09-01 02:09:19,446 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:09:19,481 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:09:34,479 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 0, loss[loss=0.3038, simple_loss=0.2551, pruned_loss=0.1237, ctc_loss=0.2282, over 18505.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.2551, pruned_loss=0.1237, ctc_loss=0.2282, over 18505.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:09:34,480 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:09:54,959 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([2.8844, 4.8770, 3.6462, 2.8434], device='cuda:2')
+2024-09-01 02:09:58,839 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 10, validation: loss=0.2363, simple_loss=0.211, pruned_loss=0.08786, ctc_loss=0.1591, over 1073944.00 frames.
+2024-09-01 02:09:58,839 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:10:06,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106165.33333333333, ans=0.1
+2024-09-01 02:10:29,265 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.74 vs. limit=15.0
+2024-09-01 02:10:35,317 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.93 vs. limit=22.5
+2024-09-01 02:10:37,409 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.34 vs. limit=15.0
+2024-09-01 02:10:43,733 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=106272.0, ans=0.125
+2024-09-01 02:10:58,737 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=106272.0, ans=0.0
+2024-09-01 02:11:09,294 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=106325.33333333333, ans=0.0
+2024-09-01 02:11:09,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:11:28,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106378.66666666667, ans=0.125
+2024-09-01 02:11:36,257 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.111e+02 2.256e+02 2.412e+02 3.661e+02, threshold=4.511e+02, percent-clipped=0.0
+2024-09-01 02:11:47,317 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 50, loss[loss=0.2391, simple_loss=0.2155, pruned_loss=0.08232, ctc_loss=0.1786, over 19019.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.2229, pruned_loss=0.09915, ctc_loss=0.1955, over 827816.98 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:11:59,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=106432.0, ans=15.0
+2024-09-01 02:12:16,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=106485.33333333333, ans=0.0
+2024-09-01 02:12:18,970 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=106485.33333333333, ans=0.025
+2024-09-01 02:12:23,839 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.88 vs. limit=15.0
+2024-09-01 02:12:41,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:12:43,201 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=106538.66666666667, ans=0.95
+2024-09-01 02:13:00,179 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=11.05 vs. limit=12.0
+2024-09-01 02:13:23,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=106645.33333333333, ans=0.0
+2024-09-01 02:13:35,242 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 100, loss[loss=0.2263, simple_loss=0.1993, pruned_loss=0.08021, ctc_loss=0.1811, over 19070.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.2173, pruned_loss=0.09356, ctc_loss=0.1867, over 1475821.74 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:13:45,183 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=106698.66666666667, ans=0.07
+2024-09-01 02:13:53,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-09-01 02:13:58,264 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.83 vs. limit=15.0
+2024-09-01 02:14:31,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106805.33333333333, ans=0.0
+2024-09-01 02:14:34,903 INFO [dysarthria_finetune.py:1435] (2/4) (10169810944, 34072559616)
+2024-09-01 02:14:34,904 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:14:34,956 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:14:48,322 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 0, loss[loss=0.3055, simple_loss=0.2484, pruned_loss=0.1299, ctc_loss=0.2437, over 18525.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.2484, pruned_loss=0.1299, ctc_loss=0.2437, over 18525.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:14:48,322 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:15:11,809 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 11, validation: loss=0.2335, simple_loss=0.2098, pruned_loss=0.0867, ctc_loss=0.1618, over 1073944.00 frames.
+2024-09-01 02:15:11,810 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:16:01,162 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.106e+02 2.175e+02 2.350e+02 3.456e+02, threshold=4.351e+02, percent-clipped=0.0
+2024-09-01 02:16:11,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=106960.0, ans=0.125
+2024-09-01 02:16:13,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-09-01 02:16:23,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-09-01 02:16:49,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107013.33333333333, ans=0.1
+2024-09-01 02:22:24,184 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 50, loss[loss=0.2462, simple_loss=0.2151, pruned_loss=0.09435, ctc_loss=0.1839, over 19068.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.2172, pruned_loss=0.09387, ctc_loss=0.1891, over 827285.47 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:22:28,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=107120.0, ans=0.125
+2024-09-01 02:22:31,848 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.87 vs. limit=15.0
+2024-09-01 02:22:39,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=107120.0, ans=0.2
+2024-09-01 02:23:18,584 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=107173.33333333333, ans=0.125
+2024-09-01 02:23:34,037 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=107173.33333333333, ans=0.125
+2024-09-01 02:24:38,838 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.73 vs. limit=22.5
+2024-09-01 02:24:55,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107333.33333333333, ans=0.1
+2024-09-01 02:25:09,456 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 100, loss[loss=0.2397, simple_loss=0.2067, pruned_loss=0.09236, ctc_loss=0.1889, over 19059.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.2155, pruned_loss=0.09244, ctc_loss=0.1864, over 1474809.38 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 16.0
+2024-09-01 02:25:51,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:25:52,649 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.027e+02 2.133e+02 2.278e+02 3.178e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-09-01 02:26:15,406 INFO [dysarthria_finetune.py:1435] (2/4) (10138353664, 34072559616)
+2024-09-01 02:26:15,407 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:26:15,462 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:26:37,053 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 0, loss[loss=0.2582, simple_loss=0.2227, pruned_loss=0.0996, ctc_loss=0.2043, over 18505.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.2227, pruned_loss=0.0996, ctc_loss=0.2043, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:37,053 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:27:00,641 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 12, validation: loss=0.2234, simple_loss=0.2042, pruned_loss=0.08189, ctc_loss=0.1554, over 1073944.00 frames.
+2024-09-01 02:27:00,642 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:27:06,845 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.14 vs. limit=15.0
+2024-09-01 02:27:24,693 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=107594.66666666667, ans=0.125
+2024-09-01 02:27:24,926 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.09 vs. limit=22.5
+2024-09-01 02:27:53,445 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.94 vs. limit=15.0
+2024-09-01 02:28:15,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=107701.33333333333, ans=0.125
+2024-09-01 02:28:28,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=107701.33333333333, ans=0.125
+2024-09-01 02:28:37,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=107754.66666666667, ans=0.125
+2024-09-01 02:28:41,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=107754.66666666667, ans=0.2
+2024-09-01 02:28:43,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=107754.66666666667, ans=0.95
+2024-09-01 02:28:53,638 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 50, loss[loss=0.2293, simple_loss=0.2119, pruned_loss=0.0785, ctc_loss=0.177, over 18979.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2125, pruned_loss=0.09069, ctc_loss=0.1886, over 828348.40 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:28:55,021 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107808.0, ans=0.1
+2024-09-01 02:28:57,746 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.30 vs. limit=15.0
+2024-09-01 02:29:27,246 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.99 vs. limit=15.0
+2024-09-01 02:29:38,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 02:29:38,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 02:30:00,524 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.72 vs. limit=22.5
+2024-09-01 02:30:17,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=107968.0, ans=15.0
+2024-09-01 02:30:20,080 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.056e+02 2.167e+02 2.338e+02 2.987e+02, threshold=4.333e+02, percent-clipped=0.0
+2024-09-01 02:34:06,134 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 100, loss[loss=0.2075, simple_loss=0.1981, pruned_loss=0.06937, ctc_loss=0.1512, over 19089.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.2128, pruned_loss=0.0902, ctc_loss=0.187, over 1475248.85 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:35:08,767 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108074.66666666667, ans=0.125
+2024-09-01 02:36:26,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=108181.33333333333, ans=0.5
+2024-09-01 02:36:42,077 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 02:36:42,078 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:36:42,128 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:36:54,936 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 0, loss[loss=0.2667, simple_loss=0.2317, pruned_loss=0.1072, ctc_loss=0.1963, over 18540.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.2317, pruned_loss=0.1072, ctc_loss=0.1963, over 18540.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:54,936 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:37:18,563 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 13, validation: loss=0.2186, simple_loss=0.2014, pruned_loss=0.08061, ctc_loss=0.1543, over 1073944.00 frames.
+2024-09-01 02:37:18,563 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:37:37,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=108229.33333333333, ans=0.125
+2024-09-01 02:37:59,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 02:38:01,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=108336.0, ans=0.0
+2024-09-01 02:38:18,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=108336.0, ans=0.1
+2024-09-01 02:38:25,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108389.33333333333, ans=0.1
+2024-09-01 02:38:28,262 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.67 vs. limit=15.0
+2024-09-01 02:38:45,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=108442.66666666667, ans=0.025
+2024-09-01 02:39:03,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=108442.66666666667, ans=0.125
+2024-09-01 02:39:08,426 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 50, loss[loss=0.2401, simple_loss=0.2208, pruned_loss=0.08813, ctc_loss=0.1762, over 18984.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2086, pruned_loss=0.08491, ctc_loss=0.1811, over 829065.08 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:16,489 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:39:18,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=108496.0, ans=0.2
+2024-09-01 02:39:29,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=108549.33333333333, ans=0.025
+2024-09-01 02:39:30,405 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.005e+02 2.143e+02 2.348e+02 3.224e+02, threshold=4.286e+02, percent-clipped=0.0
+2024-09-01 02:40:23,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:40:40,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 02:40:56,424 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 100, loss[loss=0.2213, simple_loss=0.2013, pruned_loss=0.08022, ctc_loss=0.1769, over 19116.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2083, pruned_loss=0.0856, ctc_loss=0.1797, over 1477011.25 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:40:57,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten.whitening_limit, batch_count=108762.66666666667, ans=15.0
+2024-09-01 02:41:26,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108816.0, ans=0.1
+2024-09-01 02:41:41,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108869.33333333333, ans=0.1
+2024-09-01 02:41:49,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=108869.33333333333, ans=0.125
+2024-09-01 02:41:56,705 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 02:41:56,705 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:41:56,770 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:42:04,402 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 02:42:09,602 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 0, loss[loss=0.2447, simple_loss=0.2132, pruned_loss=0.0963, ctc_loss=0.1926, over 18523.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.2132, pruned_loss=0.0963, ctc_loss=0.1926, over 18523.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:42:09,603 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:42:17,646 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.2284, 3.3698, 3.6828, 2.8419, 3.8979, 3.9161, 3.8591, 3.8486],
+       device='cuda:2')
+2024-09-01 02:42:33,562 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 14, validation: loss=0.209, simple_loss=0.1966, pruned_loss=0.0763, ctc_loss=0.148, over 1073944.00 frames.
+2024-09-01 02:42:33,562 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:42:51,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=108917.33333333333, ans=0.07
+2024-09-01 02:43:15,696 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.44 vs. limit=15.0
+2024-09-01 02:43:22,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=109024.0, ans=0.125
+2024-09-01 02:43:32,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=109024.0, ans=0.125
+2024-09-01 02:43:42,871 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.996e+02 2.096e+02 2.326e+02 2.912e+02, threshold=4.192e+02, percent-clipped=0.0
+2024-09-01 02:43:52,722 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:43:52,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=109077.33333333333, ans=0.0
+2024-09-01 02:44:17,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=109130.66666666667, ans=0.0
+2024-09-01 02:44:24,655 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 50, loss[loss=0.239, simple_loss=0.2087, pruned_loss=0.09073, ctc_loss=0.2038, over 18999.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.2068, pruned_loss=0.08665, ctc_loss=0.1851, over 827850.18 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:44:46,219 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.12 vs. limit=15.0
+2024-09-01 02:45:06,012 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=7.38 vs. limit=15.0
+2024-09-01 02:45:09,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109290.66666666667, ans=0.2
+2024-09-01 02:45:11,588 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=109290.66666666667, ans=0.125
+2024-09-01 02:45:22,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=109290.66666666667, ans=0.0
+2024-09-01 02:45:23,013 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.60 vs. limit=22.5
+2024-09-01 02:46:12,178 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 100, loss[loss=0.2044, simple_loss=0.1957, pruned_loss=0.07142, ctc_loss=0.1567, over 19059.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.2072, pruned_loss=0.08708, ctc_loss=0.1831, over 1475617.37 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:46:28,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=109450.66666666667, ans=0.125
+2024-09-01 02:46:52,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=109504.0, ans=0.2
+2024-09-01 02:46:54,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=109557.33333333333, ans=0.0
+2024-09-01 02:47:12,358 INFO [dysarthria_finetune.py:1435] (2/4) (10081730560, 34072559616)
+2024-09-01 02:47:12,359 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:47:12,420 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:47:25,785 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 0, loss[loss=0.2861, simple_loss=0.247, pruned_loss=0.1161, ctc_loss=0.2234, over 18678.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.247, pruned_loss=0.1161, ctc_loss=0.2234, over 18678.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:47:25,786 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:48:03,605 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 15, validation: loss=0.2059, simple_loss=0.1951, pruned_loss=0.07588, ctc_loss=0.1481, over 1073944.00 frames.
+2024-09-01 02:48:03,605 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:48:10,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 02:48:14,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 02:48:20,626 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.965e+02 2.102e+02 2.301e+02 3.159e+02, threshold=4.205e+02, percent-clipped=0.0
+2024-09-01 02:48:25,036 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.02 vs. limit=22.5
+2024-09-01 02:48:27,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 02:48:40,495 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.52 vs. limit=12.0
+2024-09-01 02:50:24,712 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=109765.33333333333, ans=0.0
+2024-09-01 02:50:39,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=109765.33333333333, ans=0.0
+2024-09-01 02:50:53,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=109818.66666666667, ans=0.125
+2024-09-01 02:51:41,401 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 50, loss[loss=0.2163, simple_loss=0.2089, pruned_loss=0.07619, ctc_loss=0.1653, over 18994.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2041, pruned_loss=0.08491, ctc_loss=0.1802, over 827605.34 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:52:40,067 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=109925.33333333333, ans=0.2
+2024-09-01 02:53:16,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.21 vs. limit=15.0
+2024-09-01 02:54:03,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=109978.66666666667, ans=0.125
+2024-09-01 02:54:31,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=110032.0, ans=0.125
+2024-09-01 02:55:28,639 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 100, loss[loss=0.2082, simple_loss=0.1991, pruned_loss=0.07444, ctc_loss=0.1631, over 19062.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2034, pruned_loss=0.0844, ctc_loss=0.1786, over 1475114.53 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:55:32,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=110138.66666666667, ans=0.2
+2024-09-01 02:55:44,090 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 1.961e+02 2.099e+02 2.266e+02 2.969e+02, threshold=4.197e+02, percent-clipped=0.0
+2024-09-01 02:57:18,316 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 02:57:18,317 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:57:18,359 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 02:57:33,304 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 0, loss[loss=0.257, simple_loss=0.2223, pruned_loss=0.1039, ctc_loss=0.2068, over 18504.00 frames. ], tot_loss[loss=0.257, simple_loss=0.2223, pruned_loss=0.1039, ctc_loss=0.2068, over 18504.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:57:33,304 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:58:10,357 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 16, validation: loss=0.2065, simple_loss=0.1951, pruned_loss=0.07751, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 02:58:10,358 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 02:58:32,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-09-01 02:58:35,362 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.84 vs. limit=10.0
+2024-09-01 02:58:39,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=110346.66666666667, ans=0.2
+2024-09-01 02:58:49,898 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:59:54,167 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=110506.66666666667, ans=0.125
+2024-09-01 03:00:21,826 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 50, loss[loss=0.1955, simple_loss=0.1932, pruned_loss=0.06912, ctc_loss=0.1474, over 19044.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.1968, pruned_loss=0.07968, ctc_loss=0.1734, over 828171.03 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:00:45,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=110560.0, ans=0.125
+2024-09-01 03:01:14,484 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=110666.66666666667, ans=0.025
+2024-09-01 03:01:25,494 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 1.971e+02 2.093e+02 2.277e+02 2.936e+02, threshold=4.187e+02, percent-clipped=0.0
+2024-09-01 03:02:01,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=110720.0, ans=0.2
+2024-09-01 03:02:27,261 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 100, loss[loss=0.1912, simple_loss=0.1843, pruned_loss=0.06797, ctc_loss=0.1552, over 19090.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.1967, pruned_loss=0.07934, ctc_loss=0.1707, over 1476933.27 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:02:37,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:02:54,988 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.16 vs. limit=22.5
+2024-09-01 03:03:06,184 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.42 vs. limit=12.0
+2024-09-01 03:03:29,007 INFO [dysarthria_finetune.py:1435] (2/4) (10138353664, 34072559616)
+2024-09-01 03:03:29,008 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:03:29,055 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 03:03:41,380 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 0, loss[loss=0.2406, simple_loss=0.2169, pruned_loss=0.09479, ctc_loss=0.1866, over 18336.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2169, pruned_loss=0.09479, ctc_loss=0.1866, over 18336.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:03:41,381 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:03:42,840 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.4.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([2.7163, 2.9691, 2.6776, 3.0356], device='cuda:2')
+2024-09-01 03:03:45,327 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.6989, 1.5512, 1.5774, 1.7242, 1.8196, 1.7138, 1.7730, 1.7477],
+       device='cuda:2')
+2024-09-01 03:04:05,401 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 17, validation: loss=0.1943, simple_loss=0.1886, pruned_loss=0.07183, ctc_loss=0.1409, over 1073944.00 frames.
+2024-09-01 03:04:05,402 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 03:04:20,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=110981.33333333333, ans=0.125
+2024-09-01 03:04:32,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111034.66666666667, ans=0.1
+2024-09-01 03:04:38,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 03:05:09,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111088.0, ans=0.125
+2024-09-01 03:05:33,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=111194.66666666667, ans=0.2
+2024-09-01 03:05:45,143 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 1.958e+02 2.075e+02 2.282e+02 2.777e+02, threshold=4.150e+02, percent-clipped=0.0
+2024-09-01 03:05:46,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=111194.66666666667, ans=0.125
+2024-09-01 03:05:56,267 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 50, loss[loss=0.1852, simple_loss=0.1923, pruned_loss=0.06054, ctc_loss=0.1424, over 19057.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.1965, pruned_loss=0.07954, ctc_loss=0.1714, over 827125.84 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:07:14,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=111408.0, ans=0.0
+2024-09-01 03:07:14,830 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.06 vs. limit=15.0
+2024-09-01 03:07:33,633 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=111461.33333333333, ans=0.125
+2024-09-01 03:07:45,180 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 100, loss[loss=0.1887, simple_loss=0.1797, pruned_loss=0.06918, ctc_loss=0.1485, over 19126.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.1953, pruned_loss=0.07861, ctc_loss=0.1691, over 1475165.47 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:08:03,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111514.66666666667, ans=0.125
+2024-09-01 03:08:24,996 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=111568.0, ans=0.1
+2024-09-01 03:08:29,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=111621.33333333333, ans=0.0
+2024-09-01 03:08:44,474 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 03:08:44,475 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:08:44,517 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 03:08:52,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 03:09:16,224 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 0, loss[loss=0.2402, simple_loss=0.2173, pruned_loss=0.09277, ctc_loss=0.1937, over 18559.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2173, pruned_loss=0.09277, ctc_loss=0.1937, over 18559.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:09:16,225 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:09:39,624 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 18, validation: loss=0.1961, simple_loss=0.1886, pruned_loss=0.07291, ctc_loss=0.1441, over 1073944.00 frames.
+2024-09-01 03:09:39,625 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 03:09:45,469 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=111669.33333333333, ans=0.0
+2024-09-01 03:10:12,348 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.17 vs. limit=15.0
+2024-09-01 03:10:14,927 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 1.913e+02 2.060e+02 2.285e+02 3.151e+02, threshold=4.120e+02, percent-clipped=0.0
+2024-09-01 03:10:33,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111776.0, ans=0.125
+2024-09-01 03:10:37,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=111776.0, ans=0.0
+2024-09-01 03:11:03,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=111829.33333333333, ans=0.0
+2024-09-01 03:11:10,920 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.30 vs. limit=22.5
+2024-09-01 03:11:29,066 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 50, loss[loss=0.2482, simple_loss=0.2214, pruned_loss=0.09671, ctc_loss=0.2041, over 18975.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.1969, pruned_loss=0.08079, ctc_loss=0.1745, over 827610.12 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:11:38,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=111936.0, ans=0.0
+2024-09-01 03:12:07,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.70 vs. limit=15.0
+2024-09-01 03:12:59,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.50 vs. limit=15.0
+2024-09-01 03:13:10,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=112149.33333333333, ans=0.125
+2024-09-01 03:13:35,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=112202.66666666667, ans=0.0
+2024-09-01 03:13:44,688 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 100, loss[loss=0.1786, simple_loss=0.171, pruned_loss=0.06428, ctc_loss=0.1442, over 19135.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.1936, pruned_loss=0.07824, ctc_loss=0.1695, over 1477220.69 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:13:56,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=112202.66666666667, ans=0.125
+2024-09-01 03:14:19,253 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.738e+02 1.898e+02 2.020e+02 2.262e+02 2.800e+02, threshold=4.040e+02, percent-clipped=0.0
+2024-09-01 03:14:37,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=112309.33333333333, ans=0.125
+2024-09-01 03:14:38,009 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.63 vs. limit=15.0
+2024-09-01 03:14:43,308 INFO [dysarthria_finetune.py:1435] (2/4) (10142547968, 34072559616)
+2024-09-01 03:14:43,308 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:14:43,349 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 03:14:55,811 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 0, loss[loss=0.222, simple_loss=0.2012, pruned_loss=0.08618, ctc_loss=0.176, over 18438.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2012, pruned_loss=0.08618, ctc_loss=0.176, over 18438.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:14:55,811 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:15:42,620 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 19, validation: loss=0.1928, simple_loss=0.1862, pruned_loss=0.07146, ctc_loss=0.1413, over 1073944.00 frames.
+2024-09-01 03:15:42,621 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 03:16:00,856 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.31 vs. limit=15.0
+2024-09-01 03:16:42,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112405.33333333333, ans=0.125
+2024-09-01 03:17:02,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112405.33333333333, ans=0.1
+2024-09-01 03:17:57,753 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.03 vs. limit=15.0
+2024-09-01 03:18:56,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 03:19:02,860 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=112565.33333333333, ans=0.2
+2024-09-01 03:19:10,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 03:19:34,387 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 50, loss[loss=0.2048, simple_loss=0.1981, pruned_loss=0.07234, ctc_loss=0.167, over 19013.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.192, pruned_loss=0.07794, ctc_loss=0.1689, over 827262.88 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:20:07,722 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112618.66666666667, ans=0.0
+2024-09-01 03:20:34,227 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=112672.0, ans=0.04949747468305833
+2024-09-01 03:22:03,093 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=112778.66666666667, ans=0.025
+2024-09-01 03:22:34,902 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 1.922e+02 2.090e+02 2.243e+02 2.725e+02, threshold=4.180e+02, percent-clipped=0.0
+2024-09-01 03:22:36,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=112778.66666666667, ans=0.125
+2024-09-01 03:23:27,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=112832.0, ans=0.5
+2024-09-01 03:23:34,279 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 100, loss[loss=0.1568, simple_loss=0.1539, pruned_loss=0.05365, ctc_loss=0.1308, over 19169.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.1917, pruned_loss=0.07787, ctc_loss=0.167, over 1475351.90 frames. ], batch size: 134, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:23:46,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=112885.33333333333, ans=0.2
+2024-09-01 03:24:40,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112938.66666666667, ans=0.1
+2024-09-01 03:25:05,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=112992.0, ans=0.025
+2024-09-01 03:25:13,468 INFO [dysarthria_finetune.py:1435] (2/4) (10169810944, 34072559616)
+2024-09-01 03:25:13,469 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:25:13,503 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 03:25:27,057 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 0, loss[loss=0.2318, simple_loss=0.2127, pruned_loss=0.08921, ctc_loss=0.1813, over 18527.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2127, pruned_loss=0.08921, ctc_loss=0.1813, over 18527.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:25:27,057 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:26:10,755 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 20, validation: loss=0.19, simple_loss=0.1838, pruned_loss=0.07041, ctc_loss=0.1385, over 1073944.00 frames.
+2024-09-01 03:26:10,756 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19803MB
+2024-09-01 03:26:27,189 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.53 vs. limit=15.0
+2024-09-01 03:26:40,555 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.89 vs. limit=5.0
+2024-09-01 03:27:14,702 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.60 vs. limit=22.5
+2024-09-01 03:27:22,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=113093.33333333333, ans=0.1
+2024-09-01 03:27:59,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113146.66666666667, ans=0.125
+2024-09-01 03:28:13,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=113146.66666666667, ans=0.09899494936611666
+2024-09-01 03:28:33,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=113200.0, ans=0.025
+2024-09-01 03:29:11,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113253.33333333333, ans=0.1
+2024-09-01 03:29:26,704 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 50, loss[loss=0.203, simple_loss=0.192, pruned_loss=0.07429, ctc_loss=0.1635, over 18968.00 frames. ], tot_loss[loss=0.204, simple_loss=0.1893, pruned_loss=0.07603, ctc_loss=0.1667, over 828106.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:30:05,882 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 1.917e+02 2.046e+02 2.200e+02 2.791e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-09-01 03:30:48,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=113360.0, ans=0.125
+2024-09-01 03:31:59,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=113466.66666666667, ans=0.0
+2024-09-01 03:33:06,746 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 100, loss[loss=0.1599, simple_loss=0.1626, pruned_loss=0.0538, ctc_loss=0.1242, over 19074.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.1877, pruned_loss=0.07473, ctc_loss=0.1634, over 1476081.83 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:33:09,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=113573.33333333333, ans=0.2
+2024-09-01 03:33:39,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=113573.33333333333, ans=0.5
+2024-09-01 03:34:13,575 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 03:34:13,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 03:34:43,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=113680.0, ans=0.125
+2024-09-01 03:35:10,305 INFO [dysarthria_finetune.py:1435] (2/4) (10140450816, 34072559616)
+2024-09-01 03:35:10,306 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:35:10,365 INFO [dysarthria_finetune.py:1440] (2/4) (28979167232, 34072559616)
+2024-09-01 03:35:10,365 INFO [dysarthria_finetune.py:1442] (2/4) Done!
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-3 b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-3
new file mode 100644
index 0000000000000000000000000000000000000000..1eacf8b0759ec625dfa7c90a9b028a42b101dd26
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/log/log-train-2024-08-31-22-09-00-3
@@ -0,0 +1,560 @@
+2024-08-31 22:09:00,005 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-31 22:09:00,006 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-31 22:09:00,006 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-31 22:09:00,558 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-31 22:09:00,559 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-31 22:09:01,025 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2653.int.cedar.computecanada.ca', 'IP address': '172.16.146.90'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:09:01,026 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-31 22:09:16,886 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66367431
+2024-08-31 22:09:16,886 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 22:10:50,729 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-31 22:11:01,688 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-31 22:11:01,783 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-31 22:11:02,460 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-31 22:11:02,461 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-31 22:11:17,376 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-31 22:11:18,285 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-31 22:11:18,291 INFO [dysarthria_asr_datamodule.py:501] (3/4) About to get dev cuts
+2024-08-31 22:11:18,428 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-31 22:11:18,751 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-31 22:11:18,751 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:12:59,095 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.63 vs. limit=5.0
+2024-08-31 22:12:59,567 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=13.51 vs. limit=7.5
+2024-08-31 22:13:03,031 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12706MB
+2024-08-31 22:13:04,356 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=17.15 vs. limit=7.5
+2024-08-31 22:13:04,883 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12706MB
+2024-08-31 22:14:12,892 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12706MB
+2024-08-31 22:14:14,958 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12706MB
+2024-08-31 22:19:50,669 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=23.91 vs. limit=7.5
+2024-08-31 22:19:51,915 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12706MB
+2024-08-31 22:19:54,343 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12706MB
+2024-08-31 22:20:43,748 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3536, simple_loss=0.2859, pruned_loss=0.1622, ctc_loss=0.2575, over 18634.00 frames. ], tot_loss[loss=0.3536, simple_loss=0.2859, pruned_loss=0.1622, ctc_loss=0.2575, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:43,749 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-31 22:46:04,597 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3942, simple_loss=0.3187, pruned_loss=0.1927, ctc_loss=0.281, over 1073944.00 frames.
+2024-08-31 22:46:04,729 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14296MB
+2024-08-31 23:01:25,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-31 23:06:11,555 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 9.685e+02 9.975e+02 1.051e+03 1.091e+03 1.133e+03, threshold=4.203e+03, percent-clipped=0.0
+2024-08-31 23:12:07,271 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.80 vs. limit=22.5
+2024-08-31 23:24:50,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100106.66666666667, ans=0.125
+2024-08-31 23:25:12,998 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 8.960e+02 9.836e+02 1.043e+03 1.067e+03 1.144e+03, threshold=4.173e+03, percent-clipped=0.0
+2024-08-31 23:43:54,698 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.61 vs. limit=15.0
+2024-08-31 23:43:54,880 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=38.43 vs. limit=22.5
+2024-08-31 23:48:48,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100160.0, ans=0.125
+2024-08-31 23:50:05,824 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.677e+02 8.648e+02 9.697e+02 1.051e+03 1.144e+03, threshold=3.879e+03, percent-clipped=0.0
+2024-08-31 23:50:46,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=100213.33333333333, ans=10.0
+2024-08-31 23:52:46,838 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=100213.33333333333, ans=0.2
+2024-08-31 23:55:34,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=100266.66666666667, ans=15.0
+2024-08-31 23:55:38,233 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.4536, simple_loss=0.3606, pruned_loss=0.2303, ctc_loss=0.3464, over 19001.00 frames. ], tot_loss[loss=0.4274, simple_loss=0.3426, pruned_loss=0.2127, ctc_loss=0.3165, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:56:18,614 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=20.32 vs. limit=15.0
+2024-08-31 23:59:46,517 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=100320.0, ans=0.0
+2024-09-01 00:01:23,412 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=18.41 vs. limit=15.0
+2024-09-01 00:06:50,103 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=16.09 vs. limit=15.0
+2024-09-01 00:07:16,392 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=3.99 vs. limit=15.0
+2024-09-01 00:08:29,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100373.33333333333, ans=0.1
+2024-09-01 00:09:24,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=100373.33333333333, ans=0.125
+2024-09-01 00:14:30,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-09-01 00:14:31,114 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=31.14 vs. limit=15.0
+2024-09-01 00:16:00,367 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=8.77 vs. limit=12.0
+2024-09-01 00:18:32,448 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.599e+02 6.914e+02 7.776e+02 9.170e+02 1.144e+03, threshold=1.555e+03, percent-clipped=0.0
+2024-09-01 00:18:32,486 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.3936, simple_loss=0.3164, pruned_loss=0.1907, ctc_loss=0.2875, over 19146.00 frames. ], tot_loss[loss=0.4119, simple_loss=0.3302, pruned_loss=0.2029, ctc_loss=0.305, over 1476162.18 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:22:28,980 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=35.40 vs. limit=15.0
+2024-09-01 00:23:02,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=100586.66666666667, ans=0.0
+2024-09-01 00:25:35,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_na.min_abs, batch_count=100586.66666666667, ans=0.02
+2024-09-01 00:27:44,029 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=100640.0, ans=15.0
+2024-09-01 00:28:35,600 INFO [dysarthria_finetune.py:1435] (3/4) (13353287680, 34072559616)
+2024-09-01 00:28:35,601 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 00:28:35,633 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 00:30:12,102 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 0, loss[loss=0.3937, simple_loss=0.3173, pruned_loss=0.182, ctc_loss=0.2893, over 18501.00 frames. ], tot_loss[loss=0.3937, simple_loss=0.3173, pruned_loss=0.182, ctc_loss=0.2893, over 18501.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:30:12,103 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 00:34:27,528 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 2, validation: loss=0.3547, simple_loss=0.2901, pruned_loss=0.1627, ctc_loss=0.2412, over 1073944.00 frames.
+2024-09-01 00:34:27,529 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 00:40:40,159 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:41:39,780 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:41:39,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=100789.33333333333, ans=0.125
+2024-09-01 00:43:46,327 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.27 vs. limit=6.0
+2024-09-01 00:44:20,353 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.67 vs. limit=6.0
+2024-09-01 00:45:11,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=100842.66666666667, ans=0.07
+2024-09-01 00:45:59,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=100896.0, ans=0.04949747468305833
+2024-09-01 00:48:18,400 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 50, loss[loss=0.4155, simple_loss=0.3323, pruned_loss=0.1958, ctc_loss=0.3136, over 18956.00 frames. ], tot_loss[loss=0.3971, simple_loss=0.319, pruned_loss=0.1902, ctc_loss=0.2911, over 828460.00 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 00:49:22,076 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-09-01 00:50:01,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=101002.66666666667, ans=0.0
+2024-09-01 00:50:04,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=101002.66666666667, ans=0.125
+2024-09-01 00:53:05,352 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.883e+02 4.624e+02 4.997e+02 5.383e+02 6.686e+02, threshold=9.995e+02, percent-clipped=0.0
+2024-09-01 00:54:15,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-09-01 00:54:21,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-09-01 00:55:06,860 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=101162.66666666667, ans=0.0
+2024-09-01 00:56:13,740 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=13.36 vs. limit=12.0
+2024-09-01 00:56:30,067 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 100, loss[loss=0.343, simple_loss=0.2774, pruned_loss=0.158, ctc_loss=0.2471, over 19077.00 frames. ], tot_loss[loss=0.3817, simple_loss=0.308, pruned_loss=0.1791, ctc_loss=0.276, over 1476919.42 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 00:59:23,684 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:00:16,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=101269.33333333333, ans=0.125
+2024-09-01 01:01:16,818 INFO [dysarthria_finetune.py:1435] (3/4) (13298761728, 34072559616)
+2024-09-01 01:01:16,818 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:01:16,861 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 01:01:35,833 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 0, loss[loss=0.3399, simple_loss=0.2754, pruned_loss=0.1477, ctc_loss=0.2517, over 18579.00 frames. ], tot_loss[loss=0.3399, simple_loss=0.2754, pruned_loss=0.1477, ctc_loss=0.2517, over 18579.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:01:35,833 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:01:59,926 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 3, validation: loss=0.3274, simple_loss=0.2708, pruned_loss=0.1428, ctc_loss=0.2163, over 1073944.00 frames.
+2024-09-01 01:01:59,926 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 01:02:02,030 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=20.98 vs. limit=15.0
+2024-09-01 01:03:09,847 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=101424.0, ans=0.125
+2024-09-01 01:03:48,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=101477.33333333333, ans=0.125
+2024-09-01 01:04:30,320 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=101584.0, ans=0.125
+2024-09-01 01:04:33,377 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.942e+02 3.454e+02 3.711e+02 3.996e+02 5.509e+02, threshold=7.422e+02, percent-clipped=0.0
+2024-09-01 01:04:56,814 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 50, loss[loss=0.3518, simple_loss=0.2929, pruned_loss=0.148, ctc_loss=0.2311, over 19113.00 frames. ], tot_loss[loss=0.3636, simple_loss=0.2954, pruned_loss=0.1633, ctc_loss=0.2594, over 827781.85 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:05:07,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=101637.33333333333, ans=0.1
+2024-09-01 01:05:18,644 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.04 vs. limit=6.0
+2024-09-01 01:05:41,118 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=16.88 vs. limit=15.0
+2024-09-01 01:05:43,522 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=101690.66666666667, ans=0.125
+2024-09-01 01:06:07,781 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=101744.0, ans=0.2
+2024-09-01 01:06:26,313 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.20 vs. limit=12.0
+2024-09-01 01:06:30,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=101797.33333333333, ans=0.2
+2024-09-01 01:06:32,832 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=101797.33333333333, ans=0.125
+2024-09-01 01:06:39,802 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.13 vs. limit=15.0
+2024-09-01 01:06:43,982 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:06:47,070 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.14 vs. limit=15.0
+2024-09-01 01:06:59,465 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.77 vs. limit=22.5
+2024-09-01 01:07:00,076 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 100, loss[loss=0.3301, simple_loss=0.2706, pruned_loss=0.1429, ctc_loss=0.2322, over 19145.00 frames. ], tot_loss[loss=0.3545, simple_loss=0.2882, pruned_loss=0.1589, ctc_loss=0.2525, over 1476240.06 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:07:06,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101904.0, ans=0.1
+2024-09-01 01:07:28,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=101904.0, ans=0.2
+2024-09-01 01:07:35,117 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=101904.0, ans=0.025
+2024-09-01 01:08:21,702 INFO [dysarthria_finetune.py:1435] (3/4) (13321830400, 34072559616)
+2024-09-01 01:08:21,702 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:08:21,745 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 01:08:35,075 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 0, loss[loss=0.4029, simple_loss=0.3191, pruned_loss=0.1877, ctc_loss=0.3163, over 18645.00 frames. ], tot_loss[loss=0.4029, simple_loss=0.3191, pruned_loss=0.1877, ctc_loss=0.3163, over 18645.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:08:35,075 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:08:58,406 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 4, validation: loss=0.308, simple_loss=0.2573, pruned_loss=0.1299, ctc_loss=0.2, over 1073944.00 frames.
+2024-09-01 01:08:58,406 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 01:09:19,169 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=17.65 vs. limit=15.0
+2024-09-01 01:09:30,348 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.03 vs. limit=22.5
+2024-09-01 01:09:33,164 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.484e+02 2.869e+02 3.070e+02 3.452e+02 5.291e+02, threshold=6.140e+02, percent-clipped=0.0
+2024-09-01 01:09:38,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff3.min_abs, batch_count=102106.66666666667, ans=0.2
+2024-09-01 01:09:55,481 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:09:57,632 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:09:59,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:10:01,868 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:10:24,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=102213.33333333333, ans=0.0
+2024-09-01 01:10:51,038 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 50, loss[loss=0.3285, simple_loss=0.2705, pruned_loss=0.1422, ctc_loss=0.2266, over 18993.00 frames. ], tot_loss[loss=0.3431, simple_loss=0.2801, pruned_loss=0.1499, ctc_loss=0.2455, over 827748.42 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:11:18,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102373.33333333333, ans=0.125
+2024-09-01 01:11:27,395 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=102373.33333333333, ans=0.025
+2024-09-01 01:11:57,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=102480.0, ans=0.125
+2024-09-01 01:12:38,908 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 100, loss[loss=0.3329, simple_loss=0.2716, pruned_loss=0.1491, ctc_loss=0.2342, over 19161.00 frames. ], tot_loss[loss=0.3318, simple_loss=0.2721, pruned_loss=0.1437, ctc_loss=0.2345, over 1475350.41 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:13:12,356 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 2.669e+02 2.871e+02 3.122e+02 4.671e+02, threshold=5.742e+02, percent-clipped=0.0
+2024-09-01 01:13:40,158 INFO [dysarthria_finetune.py:1435] (3/4) (13313441792, 34072559616)
+2024-09-01 01:13:40,159 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:13:40,191 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 01:13:53,060 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 0, loss[loss=0.3017, simple_loss=0.2507, pruned_loss=0.1278, ctc_loss=0.2041, over 18566.00 frames. ], tot_loss[loss=0.3017, simple_loss=0.2507, pruned_loss=0.1278, ctc_loss=0.2041, over 18566.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:13:53,061 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:14:16,499 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 5, validation: loss=0.2909, simple_loss=0.2453, pruned_loss=0.1191, ctc_loss=0.1881, over 1073944.00 frames.
+2024-09-01 01:14:16,500 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 01:15:16,838 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.70 vs. limit=15.0
+2024-09-01 01:15:32,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=102794.66666666667, ans=0.015
+2024-09-01 01:16:01,769 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.26 vs. limit=15.0
+2024-09-01 01:16:49,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=102848.0, ans=0.0
+2024-09-01 01:20:50,680 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 50, loss[loss=0.3449, simple_loss=0.282, pruned_loss=0.152, ctc_loss=0.2447, over 18976.00 frames. ], tot_loss[loss=0.3189, simple_loss=0.2629, pruned_loss=0.1341, ctc_loss=0.2275, over 827749.28 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:22:21,049 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.48 vs. limit=22.5
+2024-09-01 01:24:18,284 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.00 vs. limit=22.5
+2024-09-01 01:24:37,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=103114.66666666667, ans=0.0
+2024-09-01 01:25:19,624 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.73 vs. limit=15.0
+2024-09-01 01:25:29,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=103168.0, ans=0.0
+2024-09-01 01:25:33,874 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.253e+02 2.485e+02 2.709e+02 2.997e+02 4.733e+02, threshold=5.419e+02, percent-clipped=0.0
+2024-09-01 01:26:19,364 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-09-01 01:26:22,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:26:23,132 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 100, loss[loss=0.3073, simple_loss=0.2558, pruned_loss=0.1258, ctc_loss=0.217, over 19091.00 frames. ], tot_loss[loss=0.315, simple_loss=0.2597, pruned_loss=0.1327, ctc_loss=0.2248, over 1475913.16 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:26:36,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:26:40,124 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.50 vs. limit=15.0
+2024-09-01 01:27:10,986 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.75 vs. limit=22.5
+2024-09-01 01:27:54,325 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=19.75 vs. limit=22.5
+2024-09-01 01:27:55,382 INFO [dysarthria_finetune.py:1435] (3/4) (1227554816, 34072559616)
+2024-09-01 01:27:55,382 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:27:55,473 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 01:28:14,510 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 0, loss[loss=0.281, simple_loss=0.2376, pruned_loss=0.113, ctc_loss=0.1882, over 18684.00 frames. ], tot_loss[loss=0.281, simple_loss=0.2376, pruned_loss=0.113, ctc_loss=0.1882, over 18684.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:28:14,510 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:28:37,896 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 6, validation: loss=0.2789, simple_loss=0.2369, pruned_loss=0.1122, ctc_loss=0.1819, over 1073944.00 frames.
+2024-09-01 01:28:37,897 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 01:29:51,212 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=103584.0, ans=0.025
+2024-09-01 01:30:04,885 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.49 vs. limit=15.0
+2024-09-01 01:30:17,357 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103637.33333333333, ans=0.125
+2024-09-01 01:30:32,248 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 50, loss[loss=0.3048, simple_loss=0.2522, pruned_loss=0.1284, ctc_loss=0.2173, over 19058.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.2532, pruned_loss=0.1264, ctc_loss=0.2201, over 828493.81 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:30:44,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-09-01 01:31:04,785 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 2.419e+02 2.583e+02 2.819e+02 4.094e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-09-01 01:31:34,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=103744.0, ans=10.0
+2024-09-01 01:31:38,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103797.33333333333, ans=0.125
+2024-09-01 01:31:43,192 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.81 vs. limit=22.5
+2024-09-01 01:32:27,122 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:32:37,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=103904.0, ans=0.125
+2024-09-01 01:32:43,095 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 100, loss[loss=0.2937, simple_loss=0.2405, pruned_loss=0.1251, ctc_loss=0.2179, over 19113.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.2513, pruned_loss=0.1251, ctc_loss=0.2181, over 1475249.23 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:32:55,755 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=103957.33333333333, ans=0.125
+2024-09-01 01:32:55,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=103957.33333333333, ans=0.5
+2024-09-01 01:33:24,240 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104010.66666666667, ans=0.1
+2024-09-01 01:33:39,682 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.08 vs. limit=15.0
+2024-09-01 01:33:44,580 INFO [dysarthria_finetune.py:1435] (3/4) (717946880, 34072559616)
+2024-09-01 01:33:44,581 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:33:44,671 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 01:33:57,912 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 0, loss[loss=0.3232, simple_loss=0.2643, pruned_loss=0.1388, ctc_loss=0.2391, over 18595.00 frames. ], tot_loss[loss=0.3232, simple_loss=0.2643, pruned_loss=0.1388, ctc_loss=0.2391, over 18595.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:33:57,912 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:34:21,900 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 7, validation: loss=0.2604, simple_loss=0.2251, pruned_loss=0.1007, ctc_loss=0.1681, over 1073944.00 frames.
+2024-09-01 01:34:21,901 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 01:34:52,493 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=104160.0, ans=0.0
+2024-09-01 01:35:40,245 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.049e+02 2.272e+02 2.384e+02 2.601e+02 4.291e+02, threshold=4.768e+02, percent-clipped=0.0
+2024-09-01 01:36:07,060 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=104266.66666666667, ans=0.2
+2024-09-01 01:36:15,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104320.0, ans=0.125
+2024-09-01 01:37:02,201 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 50, loss[loss=0.2985, simple_loss=0.2498, pruned_loss=0.122, ctc_loss=0.2136, over 18963.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.2456, pruned_loss=0.1184, ctc_loss=0.2131, over 827887.87 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:37:35,599 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=104373.33333333333, ans=0.07
+2024-09-01 01:38:41,681 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.31 vs. limit=15.0
+2024-09-01 01:39:56,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104533.33333333333, ans=0.1
+2024-09-01 01:39:59,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=104533.33333333333, ans=0.0
+2024-09-01 01:40:47,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=104586.66666666667, ans=0.125
+2024-09-01 01:40:52,490 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 100, loss[loss=0.2774, simple_loss=0.2327, pruned_loss=0.1126, ctc_loss=0.1999, over 19124.00 frames. ], tot_loss[loss=0.287, simple_loss=0.2407, pruned_loss=0.1155, ctc_loss=0.2078, over 1475075.17 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 16.0
+2024-09-01 01:41:26,351 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.95 vs. limit=10.0
+2024-09-01 01:41:48,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff3.min_abs, batch_count=104693.33333333333, ans=0.2
+2024-09-01 01:42:54,574 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.18 vs. limit=15.0
+2024-09-01 01:42:59,046 INFO [dysarthria_finetune.py:1435] (3/4) (13351190528, 34072559616)
+2024-09-01 01:42:59,047 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:42:59,096 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 01:43:13,098 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 0, loss[loss=0.2737, simple_loss=0.2319, pruned_loss=0.1106, ctc_loss=0.1908, over 18547.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.2319, pruned_loss=0.1106, ctc_loss=0.1908, over 18547.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:43:13,099 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:44:04,944 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 8, validation: loss=0.2572, simple_loss=0.2228, pruned_loss=0.09973, ctc_loss=0.1708, over 1073944.00 frames.
+2024-09-01 01:44:04,944 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 01:44:09,697 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.79 vs. limit=12.0
+2024-09-01 01:44:25,620 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.924e+02 2.205e+02 2.324e+02 2.533e+02 3.850e+02, threshold=4.647e+02, percent-clipped=0.0
+2024-09-01 01:53:56,183 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=104949.33333333333, ans=0.025
+2024-09-01 01:54:31,867 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:54:39,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=104949.33333333333, ans=0.125
+2024-09-01 01:55:10,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=105002.66666666667, ans=0.025
+2024-09-01 01:56:16,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=105056.0, ans=0.125
+2024-09-01 01:56:19,987 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 50, loss[loss=0.2953, simple_loss=0.2463, pruned_loss=0.1227, ctc_loss=0.2139, over 18964.00 frames. ], tot_loss[loss=0.2811, simple_loss=0.237, pruned_loss=0.1118, ctc_loss=0.2062, over 828441.23 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:13,194 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=105056.0, ans=0.125
+2024-09-01 01:57:46,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105109.33333333333, ans=0.125
+2024-09-01 01:58:24,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=105109.33333333333, ans=0.0
+2024-09-01 01:59:29,723 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.62 vs. limit=22.5
+2024-09-01 02:00:56,046 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 100, loss[loss=0.2942, simple_loss=0.248, pruned_loss=0.1172, ctc_loss=0.2189, over 19119.00 frames. ], tot_loss[loss=0.2779, simple_loss=0.2346, pruned_loss=0.1105, ctc_loss=0.2037, over 1475727.62 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:01:07,947 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.165e+02 2.362e+02 2.610e+02 3.254e+02, threshold=4.723e+02, percent-clipped=0.0
+2024-09-01 02:02:50,968 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.73 vs. limit=15.0
+2024-09-01 02:02:57,163 INFO [dysarthria_finetune.py:1435] (3/4) (14437515264, 34072559616)
+2024-09-01 02:02:57,164 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:02:57,205 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:03:10,302 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 0, loss[loss=0.2955, simple_loss=0.25, pruned_loss=0.1182, ctc_loss=0.2164, over 18777.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.25, pruned_loss=0.1182, ctc_loss=0.2164, over 18777.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:10,302 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:03:33,812 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 9, validation: loss=0.2431, simple_loss=0.2147, pruned_loss=0.0913, ctc_loss=0.1608, over 1073944.00 frames.
+2024-09-01 02:03:33,813 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:04:14,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=105530.66666666667, ans=0.125
+2024-09-01 02:04:16,654 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=105530.66666666667, ans=0.035
+2024-09-01 02:04:21,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=105530.66666666667, ans=0.0
+2024-09-01 02:04:25,945 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=105584.0, ans=0.125
+2024-09-01 02:04:28,783 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=105584.0, ans=0.0
+2024-09-01 02:04:42,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=105584.0, ans=0.0
+2024-09-01 02:04:50,634 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.46 vs. limit=15.0
+2024-09-01 02:04:52,503 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:05:04,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=105637.33333333333, ans=0.1
+2024-09-01 02:05:23,369 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.56 vs. limit=10.0
+2024-09-01 02:05:27,363 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=105690.66666666667, ans=0.0
+2024-09-01 02:05:29,756 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.45 vs. limit=15.0
+2024-09-01 02:05:38,136 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 50, loss[loss=0.2586, simple_loss=0.2286, pruned_loss=0.09443, ctc_loss=0.1816, over 18965.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.2283, pruned_loss=0.1035, ctc_loss=0.1986, over 827503.70 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:55,047 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=105744.0, ans=0.125
+2024-09-01 02:06:34,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=105850.66666666667, ans=0.0
+2024-09-01 02:06:34,826 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:06:37,632 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.168e+02 2.346e+02 2.556e+02 3.441e+02, threshold=4.692e+02, percent-clipped=0.0
+2024-09-01 02:06:47,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=105850.66666666667, ans=0.0
+2024-09-01 02:08:17,425 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 100, loss[loss=0.2393, simple_loss=0.2098, pruned_loss=0.0883, ctc_loss=0.1748, over 19159.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.2252, pruned_loss=0.1023, ctc_loss=0.1953, over 1475225.92 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:32,567 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=106010.66666666667, ans=0.025
+2024-09-01 02:08:32,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:08:50,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=106064.0, ans=0.5
+2024-09-01 02:09:18,573 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=106117.33333333333, ans=0.2
+2024-09-01 02:09:19,436 INFO [dysarthria_finetune.py:1435] (3/4) (13315538944, 34072559616)
+2024-09-01 02:09:19,436 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:09:19,480 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:09:34,443 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 0, loss[loss=0.2592, simple_loss=0.2218, pruned_loss=0.1007, ctc_loss=0.1952, over 18587.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.2218, pruned_loss=0.1007, ctc_loss=0.1952, over 18587.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:09:34,443 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:09:49,238 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.7520, 4.0905, 2.7166, 1.9140], device='cuda:3')
+2024-09-01 02:09:58,833 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 10, validation: loss=0.2363, simple_loss=0.211, pruned_loss=0.08786, ctc_loss=0.1591, over 1073944.00 frames.
+2024-09-01 02:09:58,833 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:10:06,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106165.33333333333, ans=0.1
+2024-09-01 02:10:17,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=106165.33333333333, ans=0.0
+2024-09-01 02:10:39,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:10:41,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=106272.0, ans=0.2
+2024-09-01 02:10:43,809 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=106272.0, ans=0.125
+2024-09-01 02:10:46,145 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=106272.0, ans=0.2
+2024-09-01 02:11:05,812 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=6.56 vs. limit=12.0
+2024-09-01 02:11:07,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=106325.33333333333, ans=0.0
+2024-09-01 02:11:29,260 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.02 vs. limit=10.0
+2024-09-01 02:11:35,721 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.98 vs. limit=15.0
+2024-09-01 02:11:36,265 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.111e+02 2.256e+02 2.412e+02 3.661e+02, threshold=4.511e+02, percent-clipped=0.0
+2024-09-01 02:11:47,308 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 50, loss[loss=0.2803, simple_loss=0.2436, pruned_loss=0.1078, ctc_loss=0.2045, over 19101.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.2222, pruned_loss=0.09829, ctc_loss=0.1942, over 827631.91 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:12:02,129 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.92 vs. limit=15.0
+2024-09-01 02:12:21,354 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.61 vs. limit=15.0
+2024-09-01 02:12:25,861 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=14.17 vs. limit=15.0
+2024-09-01 02:12:38,780 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:12:40,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:12:41,366 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=18.95 vs. limit=15.0
+2024-09-01 02:13:08,889 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.42 vs. limit=6.0
+2024-09-01 02:13:32,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=106645.33333333333, ans=0.05
+2024-09-01 02:13:35,243 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 100, loss[loss=0.259, simple_loss=0.2205, pruned_loss=0.1012, ctc_loss=0.2024, over 19051.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.2216, pruned_loss=0.09689, ctc_loss=0.1918, over 1475773.03 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:14:15,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=106752.0, ans=0.125
+2024-09-01 02:14:30,380 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.46 vs. limit=10.0
+2024-09-01 02:14:34,897 INFO [dysarthria_finetune.py:1435] (3/4) (13326024704, 34072559616)
+2024-09-01 02:14:34,898 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:14:34,954 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:14:48,322 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 0, loss[loss=0.286, simple_loss=0.2392, pruned_loss=0.1148, ctc_loss=0.229, over 18604.00 frames. ], tot_loss[loss=0.286, simple_loss=0.2392, pruned_loss=0.1148, ctc_loss=0.229, over 18604.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:14:48,323 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:15:11,810 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 11, validation: loss=0.2335, simple_loss=0.2098, pruned_loss=0.0867, ctc_loss=0.1618, over 1073944.00 frames.
+2024-09-01 02:15:11,811 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:15:57,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=106906.66666666667, ans=0.07
+2024-09-01 02:16:00,426 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106906.66666666667, ans=0.1
+2024-09-01 02:16:01,157 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.106e+02 2.175e+02 2.350e+02 3.456e+02, threshold=4.351e+02, percent-clipped=0.0
+2024-09-01 02:16:19,066 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=29.65 vs. limit=22.5
+2024-09-01 02:16:49,083 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=107013.33333333333, ans=0.125
+2024-09-01 02:22:24,187 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 50, loss[loss=0.2323, simple_loss=0.2107, pruned_loss=0.07996, ctc_loss=0.1795, over 19110.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.2124, pruned_loss=0.08917, ctc_loss=0.1816, over 828132.31 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:22:29,193 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=107120.0, ans=0.0
+2024-09-01 02:22:57,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=107173.33333333333, ans=0.07
+2024-09-01 02:23:33,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=107173.33333333333, ans=0.025
+2024-09-01 02:23:33,840 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107173.33333333333, ans=0.1
+2024-09-01 02:23:38,834 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.97 vs. limit=6.0
+2024-09-01 02:24:56,229 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.69 vs. limit=15.0
+2024-09-01 02:25:09,436 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 100, loss[loss=0.2157, simple_loss=0.1995, pruned_loss=0.07149, ctc_loss=0.1681, over 19127.00 frames. ], tot_loss[loss=0.245, simple_loss=0.2152, pruned_loss=0.09198, ctc_loss=0.1859, over 1475363.18 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 16.0
+2024-09-01 02:25:20,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=107386.66666666667, ans=0.2
+2024-09-01 02:25:36,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:25:52,128 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.68 vs. limit=10.0
+2024-09-01 02:25:52,645 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.027e+02 2.133e+02 2.278e+02 3.178e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-09-01 02:26:15,381 INFO [dysarthria_finetune.py:1435] (3/4) (38469632, 34072559616)
+2024-09-01 02:26:15,382 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:26:15,462 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:26:37,018 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 0, loss[loss=0.258, simple_loss=0.2231, pruned_loss=0.1014, ctc_loss=0.1955, over 18650.00 frames. ], tot_loss[loss=0.258, simple_loss=0.2231, pruned_loss=0.1014, ctc_loss=0.1955, over 18650.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:37,019 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:27:00,633 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 12, validation: loss=0.2234, simple_loss=0.2042, pruned_loss=0.08189, ctc_loss=0.1554, over 1073944.00 frames.
+2024-09-01 02:27:00,633 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:27:06,895 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.36 vs. limit=15.0
+2024-09-01 02:27:09,035 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.02 vs. limit=22.5
+2024-09-01 02:27:32,347 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.33 vs. limit=15.0
+2024-09-01 02:28:26,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=107701.33333333333, ans=0.0
+2024-09-01 02:28:53,637 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 50, loss[loss=0.2197, simple_loss=0.2091, pruned_loss=0.07319, ctc_loss=0.1581, over 19037.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2135, pruned_loss=0.09011, ctc_loss=0.1859, over 828666.57 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:28:57,797 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.14 vs. limit=15.0
+2024-09-01 02:28:59,414 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=107808.0, ans=0.125
+2024-09-01 02:29:04,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=107808.0, ans=10.0
+2024-09-01 02:29:15,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-09-01 02:29:18,112 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.08 vs. limit=22.5
+2024-09-01 02:29:22,437 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=12.43 vs. limit=12.0
+2024-09-01 02:29:42,636 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=107914.66666666667, ans=0.125
+2024-09-01 02:30:04,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=107968.0, ans=6.0
+2024-09-01 02:30:20,075 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.056e+02 2.167e+02 2.338e+02 2.987e+02, threshold=4.333e+02, percent-clipped=0.0
+2024-09-01 02:30:23,745 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.64 vs. limit=22.5
+2024-09-01 02:30:43,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=108074.66666666667, ans=0.0
+2024-09-01 02:34:06,101 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 100, loss[loss=0.1993, simple_loss=0.1849, pruned_loss=0.06802, ctc_loss=0.1561, over 19142.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.212, pruned_loss=0.08878, ctc_loss=0.1828, over 1477170.44 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:36:24,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=108181.33333333333, ans=0.125
+2024-09-01 02:36:28,859 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.44 vs. limit=15.0
+2024-09-01 02:36:42,080 INFO [dysarthria_finetune.py:1435] (3/4) (13317636096, 34072559616)
+2024-09-01 02:36:42,080 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:36:42,127 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:36:54,939 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 0, loss[loss=0.2353, simple_loss=0.2058, pruned_loss=0.09363, ctc_loss=0.1727, over 18629.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2058, pruned_loss=0.09363, ctc_loss=0.1727, over 18629.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:54,940 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:37:18,565 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 13, validation: loss=0.2186, simple_loss=0.2014, pruned_loss=0.08061, ctc_loss=0.1543, over 1073944.00 frames.
+2024-09-01 02:37:18,566 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:37:46,987 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.50 vs. limit=15.0
+2024-09-01 02:37:55,104 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=108282.66666666667, ans=0.0
+2024-09-01 02:38:01,901 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=108336.0, ans=0.025
+2024-09-01 02:38:43,249 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108389.33333333333, ans=0.1
+2024-09-01 02:38:54,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=108442.66666666667, ans=0.2
+2024-09-01 02:39:08,428 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 50, loss[loss=0.2346, simple_loss=0.2087, pruned_loss=0.08958, ctc_loss=0.179, over 19050.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2099, pruned_loss=0.08861, ctc_loss=0.1834, over 828311.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:18,613 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=108496.0, ans=0.2
+2024-09-01 02:39:29,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=108549.33333333333, ans=0.025
+2024-09-01 02:39:30,395 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.005e+02 2.143e+02 2.348e+02 3.224e+02, threshold=4.286e+02, percent-clipped=0.0
+2024-09-01 02:39:53,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=108602.66666666667, ans=0.0
+2024-09-01 02:40:34,519 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.18 vs. limit=6.0
+2024-09-01 02:40:40,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 02:40:42,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 02:40:56,423 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 100, loss[loss=0.2441, simple_loss=0.2142, pruned_loss=0.09483, ctc_loss=0.1914, over 19095.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.2096, pruned_loss=0.08778, ctc_loss=0.1817, over 1474662.24 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:40:57,715 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108762.66666666667, ans=0.1
+2024-09-01 02:41:10,781 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=108762.66666666667, ans=0.0
+2024-09-01 02:41:12,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108762.66666666667, ans=0.1
+2024-09-01 02:41:15,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 02:41:22,140 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.48 vs. limit=15.0
+2024-09-01 02:41:26,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:41:30,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=108816.0, ans=0.0
+2024-09-01 02:41:36,826 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=108816.0, ans=0.2
+2024-09-01 02:41:43,392 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=108869.33333333333, ans=0.125
+2024-09-01 02:41:49,621 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=108869.33333333333, ans=0.5
+2024-09-01 02:41:56,702 INFO [dysarthria_finetune.py:1435] (3/4) (13313441792, 34072559616)
+2024-09-01 02:41:56,703 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:41:56,770 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:42:09,605 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 0, loss[loss=0.2681, simple_loss=0.2332, pruned_loss=0.1092, ctc_loss=0.1965, over 18650.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.2332, pruned_loss=0.1092, ctc_loss=0.1965, over 18650.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:42:09,606 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:42:27,805 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.3.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.3442, 1.2449, 1.9848, 1.7180, 1.7528, 1.8117, 2.0398, 1.3363],
+ device='cuda:3')
+2024-09-01 02:42:33,566 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 14, validation: loss=0.209, simple_loss=0.1966, pruned_loss=0.0763, ctc_loss=0.148, over 1073944.00 frames.
+2024-09-01 02:42:33,567 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:42:41,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 02:42:51,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=108917.33333333333, ans=0.05
+2024-09-01 02:43:10,609 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=108970.66666666667, ans=0.0
+2024-09-01 02:43:19,627 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109024.0, ans=0.125
+2024-09-01 02:43:22,045 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109024.0, ans=0.1
+2024-09-01 02:43:32,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=109024.0, ans=0.125
+2024-09-01 02:43:42,874 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.996e+02 2.096e+02 2.326e+02 2.912e+02, threshold=4.192e+02, percent-clipped=0.0
+2024-09-01 02:43:46,609 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=14.72 vs. limit=15.0
+2024-09-01 02:44:06,363 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.82 vs. limit=22.5
+2024-09-01 02:44:10,778 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.38 vs. limit=15.0
+2024-09-01 02:44:24,656 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 50, loss[loss=0.1907, simple_loss=0.1905, pruned_loss=0.05882, ctc_loss=0.1541, over 19012.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2081, pruned_loss=0.08681, ctc_loss=0.1832, over 829335.16 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:44:43,610 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:45:09,759 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=109290.66666666667, ans=0.2
+2024-09-01 02:45:11,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=109290.66666666667, ans=0.025
+2024-09-01 02:45:11,623 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109290.66666666667, ans=0.1
+2024-09-01 02:45:18,997 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.30 vs. limit=10.0
+2024-09-01 02:45:22,662 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=109290.66666666667, ans=0.125
+2024-09-01 02:46:06,879 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=109397.33333333333, ans=0.1
+2024-09-01 02:46:12,183 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 100, loss[loss=0.1904, simple_loss=0.1892, pruned_loss=0.06296, ctc_loss=0.1426, over 19114.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.2053, pruned_loss=0.08485, ctc_loss=0.1787, over 1476363.01 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:46:45,800 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=109504.0, ans=0.125
+2024-09-01 02:46:56,405 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:47:12,364 INFO [dysarthria_finetune.py:1435] (3/4) (13317636096, 34072559616)
+2024-09-01 02:47:12,365 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:47:12,421 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:47:25,787 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 0, loss[loss=0.2133, simple_loss=0.1969, pruned_loss=0.08074, ctc_loss=0.1578, over 18716.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.1969, pruned_loss=0.08074, ctc_loss=0.1578, over 18716.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:47:25,788 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:48:03,607 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 15, validation: loss=0.2059, simple_loss=0.1951, pruned_loss=0.07588, ctc_loss=0.1481, over 1073944.00 frames.
+2024-09-01 02:48:03,608 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:48:05,129 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-09-01 02:48:20,625 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.965e+02 2.102e+02 2.301e+02 3.159e+02, threshold=4.205e+02, percent-clipped=0.0
+2024-09-01 02:48:45,181 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109658.66666666667, ans=0.125
+2024-09-01 02:51:15,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=109818.66666666667, ans=0.125
+2024-09-01 02:51:41,396 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 50, loss[loss=0.1936, simple_loss=0.1821, pruned_loss=0.06909, ctc_loss=0.1566, over 19179.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2016, pruned_loss=0.0824, ctc_loss=0.1765, over 827713.24 frames. ], batch size: 103, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:51:51,228 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.04 vs. limit=15.0
+2024-09-01 02:53:50,203 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.53 vs. limit=6.0
+2024-09-01 02:54:24,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=110032.0, ans=0.125
+2024-09-01 02:54:35,323 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=110032.0, ans=0.0
+2024-09-01 02:55:28,639 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 100, loss[loss=0.1899, simple_loss=0.1831, pruned_loss=0.06653, ctc_loss=0.1513, over 19073.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2005, pruned_loss=0.08164, ctc_loss=0.1744, over 1475236.97 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:55:41,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=110138.66666666667, ans=0.2
+2024-09-01 02:55:44,088 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 1.961e+02 2.099e+02 2.266e+02 2.969e+02, threshold=4.197e+02, percent-clipped=0.0
+2024-09-01 02:56:07,455 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.10 vs. limit=6.0
+2024-09-01 02:56:59,036 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.63 vs. limit=12.0
+2024-09-01 02:57:16,524 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=110245.33333333333, ans=0.025
+2024-09-01 02:57:18,310 INFO [dysarthria_finetune.py:1435] (3/4) (13317636096, 34072559616)
+2024-09-01 02:57:18,311 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:57:18,356 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 02:57:33,285 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 0, loss[loss=0.2518, simple_loss=0.2202, pruned_loss=0.1011, ctc_loss=0.1999, over 18560.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.2202, pruned_loss=0.1011, ctc_loss=0.1999, over 18560.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:57:33,286 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:58:10,352 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 16, validation: loss=0.2065, simple_loss=0.1951, pruned_loss=0.07751, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 02:58:10,353 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 02:58:27,172 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-09-01 02:58:49,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=110346.66666666667, ans=0.2
+2024-09-01 02:59:29,097 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=110400.0, ans=0.025
+2024-09-01 02:59:40,703 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.11 vs. limit=15.0
+2024-09-01 02:59:44,848 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=110453.33333333333, ans=0.025
+2024-09-01 02:59:49,941 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=110453.33333333333, ans=0.07
+2024-09-01 02:59:52,275 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.85 vs. limit=15.0
+2024-09-01 02:59:54,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=110506.66666666667, ans=0.0
+2024-09-01 03:00:21,807 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 50, loss[loss=0.2227, simple_loss=0.2109, pruned_loss=0.0807, ctc_loss=0.1812, over 19044.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2011, pruned_loss=0.08314, ctc_loss=0.1775, over 827661.95 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:01:16,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=110666.66666666667, ans=0.025
+2024-09-01 03:01:25,494 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 1.971e+02 2.093e+02 2.277e+02 2.936e+02, threshold=4.187e+02, percent-clipped=0.0
+2024-09-01 03:01:40,562 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.75 vs. limit=10.0
+2024-09-01 03:01:47,068 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=9.65 vs. limit=15.0
+2024-09-01 03:02:04,045 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-09-01 03:02:24,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 03:02:24,280 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.24 vs. limit=15.0
+2024-09-01 03:02:27,247 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 100, loss[loss=0.199, simple_loss=0.1997, pruned_loss=0.06824, ctc_loss=0.1546, over 19120.00 frames. ], tot_loss[loss=0.216, simple_loss=0.1993, pruned_loss=0.08135, ctc_loss=0.1734, over 1474935.70 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:02:28,496 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110826.66666666667, ans=0.1
+2024-09-01 03:02:41,453 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110826.66666666667, ans=0.1
+2024-09-01 03:02:41,876 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.83 vs. limit=10.0
+2024-09-01 03:03:29,006 INFO [dysarthria_finetune.py:1435] (3/4) (13321830400, 34072559616)
+2024-09-01 03:03:29,007 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:03:29,049 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 03:03:41,384 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 0, loss[loss=0.2884, simple_loss=0.2418, pruned_loss=0.1234, ctc_loss=0.2209, over 18583.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.2418, pruned_loss=0.1234, ctc_loss=0.2209, over 18583.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:03:41,385 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:03:45,284 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.5985, 3.1697, 2.4082, 1.6290], device='cuda:3')
+2024-09-01 03:03:48,131 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([7.1225, 6.3742, 6.0682, 5.8848], device='cuda:3')
+2024-09-01 03:04:05,406 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 17, validation: loss=0.1943, simple_loss=0.1886, pruned_loss=0.07183, ctc_loss=0.1409, over 1073944.00 frames.
+2024-09-01 03:04:05,407 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 03:04:06,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110981.33333333333, ans=0.1
+2024-09-01 03:04:22,801 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=110981.33333333333, ans=0.0
+2024-09-01 03:05:14,004 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.45 vs. limit=12.0
+2024-09-01 03:05:45,143 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 1.958e+02 2.075e+02 2.282e+02 2.777e+02, threshold=4.150e+02, percent-clipped=0.0
+2024-09-01 03:05:46,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111194.66666666667, ans=0.1
+2024-09-01 03:05:51,072 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=111194.66666666667, ans=0.0
+2024-09-01 03:05:56,269 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 50, loss[loss=0.2272, simple_loss=0.2052, pruned_loss=0.08853, ctc_loss=0.1803, over 18982.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.1959, pruned_loss=0.0787, ctc_loss=0.1702, over 827806.80 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:06:06,493 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111248.0, ans=0.1
+2024-09-01 03:06:08,844 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.12 vs. limit=15.0
+2024-09-01 03:06:13,154 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.73 vs. limit=15.0
+2024-09-01 03:06:15,290 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=111248.0, ans=0.125
+2024-09-01 03:06:33,190 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111301.33333333333, ans=0.125
+2024-09-01 03:07:10,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111408.0, ans=0.1
+2024-09-01 03:07:12,421 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=111408.0, ans=0.125
+2024-09-01 03:07:14,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=111408.0, ans=0.0
+2024-09-01 03:07:19,225 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=11.22 vs. limit=15.0
+2024-09-01 03:07:33,626 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=111461.33333333333, ans=0.025
+2024-09-01 03:07:45,186 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 100, loss[loss=0.1815, simple_loss=0.1828, pruned_loss=0.06018, ctc_loss=0.1497, over 19078.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.1947, pruned_loss=0.0776, ctc_loss=0.1672, over 1476033.73 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 03:08:18,368 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=111568.0, ans=0.0
+2024-09-01 03:08:18,378 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:08:44,490 INFO [dysarthria_finetune.py:1435] (3/4) (13344899072, 34072559616)
+2024-09-01 03:08:44,490 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:08:44,520 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 03:08:52,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 03:09:16,218 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 0, loss[loss=0.2415, simple_loss=0.2135, pruned_loss=0.09544, ctc_loss=0.1967, over 18613.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2135, pruned_loss=0.09544, ctc_loss=0.1967, over 18613.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:09:16,218 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:09:39,625 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 18, validation: loss=0.1961, simple_loss=0.1886, pruned_loss=0.07291, ctc_loss=0.1441, over 1073944.00 frames.
+2024-09-01 03:09:39,626 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 03:09:45,464 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=111669.33333333333, ans=0.0
+2024-09-01 03:10:14,933 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 1.913e+02 2.060e+02 2.285e+02 3.151e+02, threshold=4.120e+02, percent-clipped=0.0
+2024-09-01 03:10:20,607 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=111722.66666666667, ans=0.0
+2024-09-01 03:10:59,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=111829.33333333333, ans=0.2
+2024-09-01 03:11:29,063 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 50, loss[loss=0.2061, simple_loss=0.1941, pruned_loss=0.07372, ctc_loss=0.1768, over 19004.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.19, pruned_loss=0.07554, ctc_loss=0.1647, over 828768.32 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:11:48,162 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.55 vs. limit=15.0
+2024-09-01 03:11:58,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=111989.33333333333, ans=0.95
+2024-09-01 03:13:08,106 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=112149.33333333333, ans=0.125
+2024-09-01 03:13:31,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=112149.33333333333, ans=0.0
+2024-09-01 03:13:44,696 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 100, loss[loss=0.1749, simple_loss=0.1696, pruned_loss=0.06203, ctc_loss=0.1404, over 19084.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.1895, pruned_loss=0.07561, ctc_loss=0.1646, over 1476677.05 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:13:50,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=112202.66666666667, ans=0.0
+2024-09-01 03:14:19,249 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.738e+02 1.898e+02 2.020e+02 2.262e+02 2.800e+02, threshold=4.040e+02, percent-clipped=0.0
+2024-09-01 03:14:27,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=112309.33333333333, ans=0.025
+2024-09-01 03:14:37,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112309.33333333333, ans=0.1
+2024-09-01 03:14:43,277 INFO [dysarthria_finetune.py:1435] (3/4) (12157911040, 34072559616)
+2024-09-01 03:14:43,278 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:14:43,304 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 03:14:55,811 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 0, loss[loss=0.2695, simple_loss=0.2295, pruned_loss=0.1123, ctc_loss=0.2121, over 18562.00 frames. ], tot_loss[loss=0.2695, simple_loss=0.2295, pruned_loss=0.1123, ctc_loss=0.2121, over 18562.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:14:55,812 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:15:42,621 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 19, validation: loss=0.1928, simple_loss=0.1862, pruned_loss=0.07146, ctc_loss=0.1413, over 1073944.00 frames.
+2024-09-01 03:15:42,622 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 03:16:01,149 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.58 vs. limit=15.0
+2024-09-01 03:17:03,008 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=112405.33333333333, ans=0.125
+2024-09-01 03:17:03,181 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=14.64 vs. limit=15.0
+2024-09-01 03:17:08,404 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=112405.33333333333, ans=10.0
+2024-09-01 03:17:19,078 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=14.93 vs. limit=15.0
+2024-09-01 03:18:01,561 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.17 vs. limit=15.0
+2024-09-01 03:18:18,590 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112512.0, ans=0.0
+2024-09-01 03:18:24,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112512.0, ans=0.0
+2024-09-01 03:19:02,197 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-09-01 03:19:34,383 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 50, loss[loss=0.1746, simple_loss=0.1673, pruned_loss=0.06108, ctc_loss=0.1497, over 19015.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.1894, pruned_loss=0.07542, ctc_loss=0.165, over 829365.51 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:20:06,984 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=112618.66666666667, ans=0.0
+2024-09-01 03:20:21,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=112672.0, ans=0.0
+2024-09-01 03:20:32,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=112672.0, ans=0.125
+2024-09-01 03:21:50,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112725.33333333333, ans=0.1
+2024-09-01 03:22:34,905 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 1.922e+02 2.090e+02 2.243e+02 2.725e+02, threshold=4.180e+02, percent-clipped=0.0
+2024-09-01 03:23:02,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112832.0, ans=0.0
+2024-09-01 03:23:34,284 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 100, loss[loss=0.1821, simple_loss=0.1724, pruned_loss=0.06553, ctc_loss=0.1519, over 19083.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.1903, pruned_loss=0.0761, ctc_loss=0.1645, over 1476389.98 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:24:01,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=112885.33333333333, ans=0.125
+2024-09-01 03:24:54,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=112992.0, ans=0.125
+2024-09-01 03:24:54,175 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112992.0, ans=0.1
+2024-09-01 03:25:13,449 INFO [dysarthria_finetune.py:1435] (3/4) (95092736, 34072559616)
+2024-09-01 03:25:13,450 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:25:13,528 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 03:25:27,033 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 0, loss[loss=0.2494, simple_loss=0.2148, pruned_loss=0.102, ctc_loss=0.1999, over 18436.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.2148, pruned_loss=0.102, ctc_loss=0.1999, over 18436.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:25:27,033 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:26:10,756 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 20, validation: loss=0.19, simple_loss=0.1838, pruned_loss=0.07041, ctc_loss=0.1385, over 1073944.00 frames.
+2024-09-01 03:26:10,757 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14327MB
+2024-09-01 03:26:13,575 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.64 vs. limit=15.0
+2024-09-01 03:27:26,702 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.96 vs. limit=22.5
+2024-09-01 03:28:24,369 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113200.0, ans=0.1
+2024-09-01 03:28:30,978 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=113200.0, ans=0.0
+2024-09-01 03:29:26,712 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 50, loss[loss=0.1842, simple_loss=0.1833, pruned_loss=0.06356, ctc_loss=0.1451, over 18942.00 frames. ], tot_loss[loss=0.202, simple_loss=0.1878, pruned_loss=0.0754, ctc_loss=0.1638, over 827999.75 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:30:05,874 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 1.917e+02 2.046e+02 2.200e+02 2.791e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-09-01 03:30:39,709 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=11.74 vs. limit=15.0
+2024-09-01 03:31:20,890 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113413.33333333333, ans=0.125
+2024-09-01 03:31:28,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=113413.33333333333, ans=0.2
+2024-09-01 03:31:57,633 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.571e-03
+2024-09-01 03:33:06,755 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 100, loss[loss=0.1494, simple_loss=0.1504, pruned_loss=0.04917, ctc_loss=0.1252, over 19171.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.1865, pruned_loss=0.0743, ctc_loss=0.1611, over 1475487.52 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:33:50,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=113626.66666666667, ans=0.125
+2024-09-01 03:34:32,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 03:34:32,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=113626.66666666667, ans=0.0
+2024-09-01 03:34:36,116 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.99 vs. limit=15.0
+2024-09-01 03:34:56,924 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.93 vs. limit=15.0
+2024-09-01 03:35:10,301 INFO [dysarthria_finetune.py:1435] (3/4) (13292470272, 34072559616)
+2024-09-01 03:35:10,302 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:35:10,367 INFO [dysarthria_finetune.py:1440] (3/4) (28926738432, 34072559616)
+2024-09-01 03:35:10,367 INFO [dysarthria_finetune.py:1442] (3/4) Done!
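The recurring records in the log above come from icefall's Zipformer training loop: `ScheduledFloat` entries report batch-count-dependent hyperparameters (dropout and skip probabilities, bypass scales), `Whitening` entries report a feature-covariance metric against its limit, and the `optim.py` warnings summarize the gradient-norm distribution per window (quartiles, clipping threshold, percent of batches clipped). The bare tuples bracketing each `Empty cache: before and after` line look like the `(free_bytes, total_bytes)` pair returned by `torch.cuda.mem_get_info()` on a ~32 GB GPU. A minimal sketch of that logging pattern, assuming this is indeed what `dysarthria_finetune.py` does:

```python
import logging
import torch

def log_cuda_memory(device: torch.device) -> None:
    # Hypothetical reconstruction: log (free, total) bytes, empty the
    # CUDA cache, then log again -- matching pairs such as
    # (13317636096, 34072559616) -> (28926738432, 34072559616) above.
    logging.info(torch.cuda.mem_get_info(device))
    logging.info("Empty cache: before and after")
    torch.cuda.empty_cache()
    logging.info(torch.cuda.mem_get_info(device))
```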
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724927868.cdr2655.int.cedar.computecanada.ca.5363.0 b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724927868.cdr2655.int.cedar.computecanada.ca.5363.0
new file mode 100644
index 0000000000000000000000000000000000000000..c097c527475f89cf95cb7a6cf966385edaecd6fe
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724927868.cdr2655.int.cedar.computecanada.ca.5363.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5fe84fca2e197b41eb13f237e284f2d5085db33c780ece42f606319f41c7486
+size 88
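The checkpoint and TensorBoard files below are committed as Git LFS pointer stubs rather than raw binaries: each three-line stub carries the spec version, a `sha256` object id, and the true payload size in bytes, while the content itself lives in LFS storage. A minimal sketch of reading such a stub, using a hypothetical `parse_lfs_pointer` helper:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Hypothetical helper: split the "key value" lines of a Git LFS
    # pointer ("version", "oid sha256:<hex>", "size <bytes>") into a dict.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "oid_algo": algo,
            "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:c5fe84fca2e197b41eb13f237e284f2d5085db33c780ece42f606319f41c7486
size 88"""
print(parse_lfs_pointer(pointer)["size"])  # 88
```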
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724956893.cdr2558.int.cedar.computecanada.ca.4947.0 b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724956893.cdr2558.int.cedar.computecanada.ca.4947.0
new file mode 100644
index 0000000000000000000000000000000000000000..bc46c0d057bc950fc2b659b73d1ad6b76e52e0e7
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724956893.cdr2558.int.cedar.computecanada.ca.4947.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:676eac1dc5de20b5ff933c687fe80ab817bafe411db5fb341d04b8217c826d14
+size 88
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725077494.cdr2647.int.cedar.computecanada.ca.9011.0 b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725077494.cdr2647.int.cedar.computecanada.ca.9011.0
new file mode 100644
index 0000000000000000000000000000000000000000..3795359141aa24d0aa086c6985f044a41ca1562c
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725077494.cdr2647.int.cedar.computecanada.ca.9011.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a718b7150cc52b776e5f804ae62b41cfd8c566dbf9e14176188b11bce7343e72
+size 88
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725155438.cdr2535.int.cedar.computecanada.ca.964213.0 b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725155438.cdr2535.int.cedar.computecanada.ca.964213.0
new file mode 100644
index 0000000000000000000000000000000000000000..f4202b7e9755330774c1aa1723a8827c61ef2d06
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725155438.cdr2535.int.cedar.computecanada.ca.964213.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b899685ad2b429a7e59a6d3c4dae34bfa331c704a8dd948cf778ff4087026522
+size 88
diff --git a/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725167340.cdr2653.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725167340.cdr2653.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..15d02f125722a2d8cfb3909a8a0c896a04a60946
--- /dev/null
+++ b/zipformer/finetuned/ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725167340.cdr2653.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a123bfe2910af523afb2c9b84d456185eef77c2685e8c038094c49d74ae0886
+size 41711
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/best-train-loss.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/best-train-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0177502cd371b3469ae2c12d59dcba09600d6cd8
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/best-train-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b7d7b69128a29cd54032dbe2295fad7a0b21a80a76c9a62afefee023e091049
+size 1053873294
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/best-valid-loss.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/best-valid-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0177502cd371b3469ae2c12d59dcba09600d6cd8
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/best-valid-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b7d7b69128a29cd54032dbe2295fad7a0b21a80a76c9a62afefee023e091049
+size 1053873294
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-1.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-1.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d7fcb15cc12a4cc0361b090f485b4a592246c81a
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-1.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34acbe2c712fbb8bc34842394a734a02020b9b4c67e6991c2af5732aadd9659a
+size 1053870013
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-10.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-10.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b0e2f7b0e14a855868e1b83b8089959d20f8dbb0
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-10.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce0231004222fac39cea9d10effba8d343dab92391554f4cdd7fbf8f1cbddfee
+size 1053872846
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-11.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-11.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c112a1a1bb35e7f724e183bd44d42c74d553aaf5
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-11.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44e33dec6d61d25f8373c34f321f82146f2614edc1747effd1ffaf9c6c7e6066
+size 1053872846
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-12.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-12.pt
new file mode 100644
index 0000000000000000000000000000000000000000..43d0a9600922402067acd59a92ebcfe98a4599a0
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-12.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be18917bdaf5b2e70eee21750e4869203bec58963b2e03d9a44b8003e1161664
+size 1053872910
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-13.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-13.pt
new file mode 100644
index 0000000000000000000000000000000000000000..083471f7566585f171e4e3a1947f0c98d0204b06
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-13.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10dd1991a8636dca053e72899402eaf8e2916ec9628b416644cc813c737710b2
+size 1053872974
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-14.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-14.pt
new file mode 100644
index 0000000000000000000000000000000000000000..54c0fc37f043506a2d5a27731dde8b871d1d63e7
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-14.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc414f73f1dc6275731140be8e9cf16a4e5903c59d8997ccacf56e713d952720
+size 1053873038
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-15.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-15.pt
new file mode 100644
index 0000000000000000000000000000000000000000..84646b52c72cc90101f64c6f13eabfef22ca8055
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-15.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af49a1e204647024034be1b133409fbe678d8c44854259a83fa8f8cf17df5f79
+size 1053873038
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-16.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-16.pt
new file mode 100644
index 0000000000000000000000000000000000000000..da07a366f562e57397925ad5b504af5f97c71a88
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-16.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db9479d208000bf6d7bbc1f9bef59e5876e601da9e3cea133af95c019519aaee
+size 1053873102
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-17.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-17.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d39058d26f4f6ca4bfeab5360599bd27bdbc824b
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-17.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8475f6a8aca8f70c999a9ab322cd79c6eb84b8a81ee29bfa786377952a37695a
+size 1053873166
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-18.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-18.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8ea44cb96cce755b7d887c07b65584c19ad76463
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-18.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dbfa68c582b6a11463e0c31c8adb1bc3a88f5c27e3f72955459dd08ba1d261c
+size 1053873166
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-19.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-19.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0e5f144e4c3ec577e832cd7f67a8f2f5b78675ca
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-19.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59ceffa28268612ec62c94f48d9c3c72793ac115878ffb42c264156fe6a787ec
+size 1053873230
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-2.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-2.pt
new file mode 100644
index 0000000000000000000000000000000000000000..14b773b347eba8bfc244434e7bbf15cc887fa0d9
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-2.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c134b1ee04fd87ec48624142022f1a7df672bf1b09fc3b9864215dcca944949e
+size 1053870141
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-20.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-20.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0177502cd371b3469ae2c12d59dcba09600d6cd8
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-20.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b7d7b69128a29cd54032dbe2295fad7a0b21a80a76c9a62afefee023e091049
+size 1053873294
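Note that `best-train-loss.pt`, `best-valid-loss.pt`, and `epoch-20.pt` share the same oid (`0b7d7b69…`) and size (1053873294 bytes), so all three pointers resolve to byte-identical checkpoints: the final epoch of this run was also the best by both training and validation loss.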
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-3.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-3.pt
new file mode 100644
index 0000000000000000000000000000000000000000..72ce70db1abb5d80700d7db40cebceb183358835
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-3.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32caab33b6111fc798d9442e1d6509c370101e9e2f23745f6ae45328bfdf76a3
+size 1053870205
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-4.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-4.pt
new file mode 100644
index 0000000000000000000000000000000000000000..08a00b4a467243f102c416ded6fc0d4a856ffbdb
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-4.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc4e8461ee21bf819d478d7fe467a62d9c09c386ecc907f2cfa50551e3a497cd
+size 1053870205
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-5.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-5.pt
new file mode 100644
index 0000000000000000000000000000000000000000..09f60562327a4275b250f1ee485c7eb5551417d4
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-5.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6099ebba17037a9b4efb7c84dc2e6093319fbaf7127d482c07fe17752f964823
+size 1053870269
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-6.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-6.pt
new file mode 100644
index 0000000000000000000000000000000000000000..43a830988d4a00b42d0e77c69295fd3551ebdc56
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-6.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd47b496cd4017c9c93f73672307d3dff16c22c48d9a4b6b7870646ce4b5b1b6
+size 1053870333
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-7.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-7.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7d146c96d434e8f81be03c9ed98924a886d37bb7
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-7.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:678691531440493c61429ba770daca9b99b882c6f97389ffeca828e8c644aaa5
+size 1053870397
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-8.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-8.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5ab65448ce9af0ab97c1edec62b55999540e23f4
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-8.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdb949b9b780343945bbddd43a28016b5b30810c7b28c6f7bb267f1a56862cdb
+size 1053870397
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-9.pt b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-9.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0e12f1499eaaae605e27676a2ba01088f682f01b
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/epoch-9.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c91f8829402f0d6e4fa2058ef22ce7a956edd6cc4aecb4f5feaa13bb8306bb7
+size 1053870461
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-15-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-15-0
new file mode 100644
index 0000000000000000000000000000000000000000..c441bdd8368635dfa39084e3eb7362d41468bf66
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-15-0
@@ -0,0 +1,4 @@
+2024-08-25 10:42:15,884 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-25 10:42:16,124 INFO [dysarthria_finetune.py:1214] (0/4) (33735507968, 34072559616)
+2024-08-25 10:42:16,124 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-25 10:42:16,599 INFO [dysarthria_finetune.py:1219] (0/4) (33427226624, 34072559616)
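Each of the four DDP ranks writes its own `log-train-<timestamp>-<rank>` file. This rank-0 file ends after the initial memory probe (and the rank-2 file below is empty), which is consistent with the 2024-08-25 job being interrupted during startup; the 2024-08-29 logs further down show a complete startup sequence. A sketch of the per-rank log layout seen here, with hypothetical names (icefall's actual logger setup may differ):

```python
import logging

def setup_rank_logger(log_dir: str, rank: int, timestamp: str, world_size: int = 4) -> None:
    # Hypothetical sketch: one log file per process, named
    # log-train-<timestamp>-<rank>, with a "(rank/world_size)" tag
    # matching the "(3/4)" markers in the records above.
    logging.basicConfig(
        filename=f"{log_dir}/log-train-{timestamp}-{rank}",
        level=logging.INFO,
        format=(f"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] "
                f"({rank}/{world_size}) %(message)s"),
    )
```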
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-1
new file mode 100644
index 0000000000000000000000000000000000000000..fb800f71a9810d75eae94e6d9605b35edb94e93e
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-1
@@ -0,0 +1,9 @@
+2024-08-25 10:42:16,068 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-25 10:42:26,109 INFO [dysarthria_finetune.py:1214] (1/4) (32783400960, 34072559616)
+2024-08-25 10:42:26,109 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-25 10:42:26,520 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-25 10:42:26,521 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-25 10:42:26,524 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-25 10:42:26,524 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-25 10:42:27,200 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-25 10:42:27,462 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
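The params dump above records the fine-tuning setup: a 65,805,511-parameter non-causal Zipformer (`causal: False`) initialized from the pre-trained `epoch-20.pt` via `finetune_ckpt`, trained with a low base LR of 1e-4 and combined transducer + CTC losses (`ctc_loss_scale: 0.2`). A minimal sketch of the checkpoint-loading step that follows, assuming the usual icefall layout where the weights sit under a `"model"` key:

```python
import torch

def load_finetune_ckpt(model: torch.nn.Module, ckpt_path: str) -> None:
    # Assumption: icefall checkpoints are dicts holding the model
    # state_dict under "model" (alongside optimizer/scheduler state).
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    missing, unexpected = model.load_state_dict(checkpoint["model"], strict=False)
    if missing or unexpected:
        print(f"missing={missing}, unexpected={unexpected}")
```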
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-2
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-3
new file mode 100644
index 0000000000000000000000000000000000000000..3777d15df51cc2deb6a650fe66c0657cfbcfabfd
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-25-10-42-16-3
@@ -0,0 +1,9 @@
+2024-08-25 10:42:16,055 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-25 10:42:16,055 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-25 10:42:16,056 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-25 10:42:16,579 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-25 10:42:16,580 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-25 10:42:24,705 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-25 10:42:24,705 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-25 10:42:25,375 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-25 10:42:25,375 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-0
new file mode 100644
index 0000000000000000000000000000000000000000..991e34ec8a6ea1b26c9c8439c98d85959fda917f
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-0
@@ -0,0 +1,4 @@
+2024-08-27 06:19:57,547 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-27 06:20:01,938 INFO [dysarthria_finetune.py:1214] (0/4) (33414643712, 34072559616)
+2024-08-27 06:20:01,939 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-27 06:20:02,467 INFO [dysarthria_finetune.py:1219] (0/4) (33106362368, 34072559616)
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-1
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-2
new file mode 100644
index 0000000000000000000000000000000000000000..68e084f8cb12fe693cc6dbbcdeda6dbe7f5c1bf4
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-2
@@ -0,0 +1,9 @@
+2024-08-27 06:19:57,489 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-27 06:20:01,008 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-27 06:20:01,008 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-27 06:20:01,365 INFO [dysarthria_finetune.py:1219] (2/4) (33748090880, 34072559616)
+2024-08-27 06:20:01,366 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-27 06:20:05,207 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2652.int.cedar.computecanada.ca', 'IP address': '172.16.146.89'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-27 06:20:05,208 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-27 06:20:05,885 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-27 06:20:05,885 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-3
new file mode 100644
index 0000000000000000000000000000000000000000..f881b4f7d89d1ef93a39af9e0bcfcc498367e2e0
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-27-06-19-53-3
@@ -0,0 +1,9 @@
+2024-08-27 06:19:57,690 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-27 06:20:01,895 INFO [dysarthria_finetune.py:1214] (3/4) (33427226624, 34072559616)
+2024-08-27 06:20:01,896 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-27 06:20:02,467 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-27 06:20:02,562 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-27 06:20:05,763 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2652.int.cedar.computecanada.ca', 'IP address': '172.16.146.89'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-27 06:20:05,763 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-27 06:20:06,437 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-27 06:20:06,437 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-0
new file mode 100644
index 0000000000000000000000000000000000000000..7c01a52d80fa3be6ecb2643db495733184ffbcbb
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-0
@@ -0,0 +1,11 @@
+2024-08-29 08:12:43,667 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 08:12:43,907 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-29 08:12:43,907 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 08:12:44,905 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-29 08:12:54,340 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2538.int.cedar.computecanada.ca', 'IP address': '172.16.145.231'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-29 08:12:55,427 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-29 08:12:55,979 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 08:12:57,637 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-29 08:12:58,871 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-1
new file mode 100644
index 0000000000000000000000000000000000000000..ab7b6a2422a9933fb6dffb6767e7cd4d7cea1dae
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-1
@@ -0,0 +1,11 @@
+2024-08-29 08:12:43,926 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-29 08:12:43,951 INFO [dysarthria_finetune.py:1214] (1/4) (33735507968, 34072559616)
+2024-08-29 08:12:43,951 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-29 08:12:44,907 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-29 08:12:54,336 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2538.int.cedar.computecanada.ca', 'IP address': '172.16.145.231'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-29 08:12:55,415 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-29 08:12:55,415 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 08:12:56,638 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-29 08:13:00,540 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-2
new file mode 100644
index 0000000000000000000000000000000000000000..3c385148017a62f4989f6473772f422c469d48aa
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-2
@@ -0,0 +1,11 @@
+2024-08-29 08:12:43,914 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-29 08:12:43,951 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-29 08:12:43,951 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-29 08:12:44,898 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-29 08:12:54,335 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2538.int.cedar.computecanada.ca', 'IP address': '172.16.145.231'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-29 08:12:55,416 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-29 08:12:55,416 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 08:12:56,648 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-29 08:13:00,542 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-3
new file mode 100644
index 0000000000000000000000000000000000000000..9c4f1f879ae664093bafb0710aa4a48d5d7427b9
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-08-12-43-3
@@ -0,0 +1,11 @@
+2024-08-29 08:12:43,984 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-29 08:12:43,996 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-29 08:12:43,996 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-29 08:12:44,905 INFO [dysarthria_finetune.py:1219] (3/4) (32783400960, 34072559616)
+2024-08-29 08:12:54,336 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-29 08:12:54,721 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2538.int.cedar.computecanada.ca', 'IP address': '172.16.145.231'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 08:12:54,722 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-29 08:12:55,504 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-29 08:12:55,504 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 08:12:56,699 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-29 08:13:00,540 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-0
new file mode 100644
index 0000000000000000000000000000000000000000..4cdbf1ba07d9a1d6dfcbd1b3bc75cea3951d1e3c
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-0
@@ -0,0 +1,11 @@
+2024-08-29 15:49:51,145 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 15:49:51,348 INFO [dysarthria_finetune.py:1214] (0/4) (33735507968, 34072559616)
+2024-08-29 15:49:51,349 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 15:49:51,857 INFO [dysarthria_finetune.py:1219] (0/4) (33427226624, 34072559616)
+2024-08-29 15:49:51,863 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-29 15:49:52,254 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2563.int.cedar.computecanada.ca', 'IP address': '172.16.146.0'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 15:49:52,254 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-29 15:49:52,924 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-29 15:49:53,465 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 15:49:55,209 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-29 15:51:26,934 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-1
new file mode 100644
index 0000000000000000000000000000000000000000..f73ebd5523e5cc97d12d73fc6d548f0d32f59768
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-1
@@ -0,0 +1,11 @@
+2024-08-29 15:49:51,308 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-29 15:49:52,108 INFO [dysarthria_finetune.py:1214] (1/4) (33106362368, 34072559616)
+2024-08-29 15:49:52,109 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-29 15:49:52,476 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-29 15:49:52,476 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-29 15:49:52,479 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2563.int.cedar.computecanada.ca', 'IP address': '172.16.146.0'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 15:49:52,479 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-29 15:49:53,158 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-29 15:49:53,158 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 15:49:54,426 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-29 15:51:27,097 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-2
new file mode 100644
index 0000000000000000000000000000000000000000..abc2cad0b9babdbbec4529fead26301224817d88
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-2
@@ -0,0 +1,11 @@
+2024-08-29 15:49:51,303 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-29 15:49:51,304 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-29 15:49:51,304 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-29 15:49:51,867 INFO [dysarthria_finetune.py:1219] (2/4) (33427226624, 34072559616)
+2024-08-29 15:49:51,868 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-29 15:49:52,253 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2563.int.cedar.computecanada.ca', 'IP address': '172.16.146.0'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 15:49:52,254 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-29 15:49:52,949 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-29 15:49:52,949 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 15:49:54,160 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-29 15:51:27,033 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-3
new file mode 100644
index 0000000000000000000000000000000000000000..78831247904b95aed37044a7d651f9467fc4e786
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-29-15-49-51-3
@@ -0,0 +1,11 @@
+2024-08-29 15:49:51,296 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-29 15:51:23,354 INFO [dysarthria_finetune.py:1214] (3/4) (32783400960, 34072559616)
+2024-08-29 15:51:23,354 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-29 15:51:23,731 INFO [dysarthria_finetune.py:1219] (3/4) (32783400960, 34072559616)
+2024-08-29 15:51:23,732 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-29 15:51:23,735 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2563.int.cedar.computecanada.ca', 'IP address': '172.16.146.0'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 15:51:23,735 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-29 15:51:24,426 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-29 15:51:24,426 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-29 15:51:25,558 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-29 15:51:26,946 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-15-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-15-0
new file mode 100644
index 0000000000000000000000000000000000000000..1d6b3e282bf6e6df314d63b6369ae31e3fb6e79b
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-15-0
@@ -0,0 +1,11 @@
+2024-08-30 12:40:15,977 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-30 12:40:16,169 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-30 12:40:16,169 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-30 12:40:16,959 INFO [dysarthria_finetune.py:1219] (0/4) (33106362368, 34072559616)
+2024-08-30 12:40:22,401 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-30 12:40:22,404 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2547.int.cedar.computecanada.ca', 'IP address': '172.16.145.240'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 12:40:22,404 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-30 12:40:50,753 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-30 12:40:51,300 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 12:49:26,185 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-30 12:49:32,120 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-1
new file mode 100644
index 0000000000000000000000000000000000000000..31cbd4e8532566bb6d6e890fe5388a89abeba1af
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-1
@@ -0,0 +1,11 @@
+2024-08-30 12:40:16,177 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 12:40:18,742 INFO [dysarthria_finetune.py:1214] (1/4) (32783400960, 34072559616)
+2024-08-30 12:40:18,742 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-30 12:40:19,123 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-30 12:40:19,123 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-30 12:40:19,126 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2547.int.cedar.computecanada.ca', 'IP address': '172.16.145.240'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 12:40:19,126 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-30 12:40:50,762 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-30 12:40:50,762 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 12:49:25,910 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-30 12:49:32,120 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-2
new file mode 100644
index 0000000000000000000000000000000000000000..187f880708616ae334368dc3da58e5d25bd38bf9
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-2
@@ -0,0 +1,11 @@
+2024-08-30 12:40:16,171 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 12:40:16,214 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-30 12:40:16,214 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 12:40:16,955 INFO [dysarthria_finetune.py:1219] (2/4) (33106362368, 34072559616)
+2024-08-30 12:40:16,955 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 12:40:17,100 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2547.int.cedar.computecanada.ca', 'IP address': '172.16.145.240'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 12:40:17,100 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 12:40:50,764 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-30 12:40:50,764 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 12:49:25,941 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-30 12:49:32,124 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-3
new file mode 100644
index 0000000000000000000000000000000000000000..5b02a552652f726798143bec6a7136915b5cc7f6
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-12-40-16-3
@@ -0,0 +1,11 @@
+2024-08-30 12:40:16,169 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 12:40:16,214 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 12:40:16,214 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 12:40:16,960 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-30 12:40:16,960 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 12:40:17,099 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2547.int.cedar.computecanada.ca', 'IP address': '172.16.145.240'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 12:40:17,099 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 12:40:50,732 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-30 12:40:50,733 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 12:49:25,933 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-30 12:49:32,120 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-0
new file mode 100644
index 0000000000000000000000000000000000000000..e8e52b502a95eab2e2c29c7ae4cfdf691a2d8299
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-0
@@ -0,0 +1,11 @@
+2024-08-30 21:13:40,655 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-30 21:13:40,845 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-30 21:13:40,845 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-30 21:13:41,615 INFO [dysarthria_finetune.py:1219] (0/4) (33106362368, 34072559616)
+2024-08-30 21:13:41,620 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-30 21:13:41,625 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:13:41,626 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-30 21:13:42,734 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-30 21:13:43,274 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 21:15:04,102 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-30 21:15:06,552 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-1
new file mode 100644
index 0000000000000000000000000000000000000000..4d4c84d62bde40af8d98181372ecbd2243bc6acc
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-1
@@ -0,0 +1,11 @@
+2024-08-30 21:13:40,853 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 21:13:40,889 INFO [dysarthria_finetune.py:1214] (1/4) (33735507968, 34072559616)
+2024-08-30 21:13:40,889 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-30 21:13:41,617 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-30 21:13:41,618 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-30 21:13:41,626 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:13:41,626 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-30 21:13:42,738 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-30 21:13:42,738 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 21:15:03,629 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-30 21:15:06,556 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-2
new file mode 100644
index 0000000000000000000000000000000000000000..073f91970d9bd86e86c3fda43ee9d384afbf4916
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-2
@@ -0,0 +1,11 @@
+2024-08-30 21:13:40,846 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 21:14:32,778 INFO [dysarthria_finetune.py:1214] (2/4) (32783400960, 34072559616)
+2024-08-30 21:14:32,778 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 21:14:33,157 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-30 21:14:33,158 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 21:14:33,160 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:14:33,160 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 21:14:33,856 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-30 21:14:33,856 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 21:15:03,720 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-30 21:15:06,562 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
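The paired `(free_bytes, total_bytes)` tuples that each rank prints around its `Empty cache: before and after` line match the return value of `torch.cuda.mem_get_info()` (the constant 34072559616 is a 32 GB card), so the script appears to snapshot free memory on either side of a cache flush. A sketch under that assumption:

```python
import torch

# Log free/total device memory around an explicit cache flush; that the
# training script does exactly this is inferred from the log layout.
print(torch.cuda.mem_get_info())  # e.g. (33748090880, 34072559616)
torch.cuda.empty_cache()
print(torch.cuda.mem_get_info())
```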
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-3
new file mode 100644
index 0000000000000000000000000000000000000000..20ae2e489ee1b47d483af0bd327e012398e780b5
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-13-40-3
@@ -0,0 +1,11 @@
+2024-08-30 21:13:40,904 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 21:13:40,934 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 21:13:40,934 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 21:13:41,625 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-30 21:13:41,626 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 21:13:41,628 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:13:41,629 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 21:13:42,728 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-30 21:13:42,728 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 21:15:03,576 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-30 21:15:06,551 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
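Once all four ranks have loaded the checkpoint they report `Using DDP`. A minimal sketch of the standard PyTorch wrapping for a run like this one (world_size 4, one GPU per rank, `master_port: 12354` as in the params dict); the process-group setup shown is the common NCCL recipe, not code taken from `dysarthria_finetune.py`:

```python
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def wrap_ddp(model: torch.nn.Module, rank: int, world_size: int) -> DDP:
    # One process per GPU; the port mirrors 'master_port' in the params dict.
    dist.init_process_group(
        "nccl",
        init_method="tcp://127.0.0.1:12354",
        rank=rank,
        world_size=world_size,
    )
    torch.cuda.set_device(rank)
    return DDP(model.to(rank), device_ids=[rank])
```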
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-55-13 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-55-13
new file mode 100644
index 0000000000000000000000000000000000000000..9234e73866a032d3e1609af100d71f6fd835bd24
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-30-21-55-13
@@ -0,0 +1,22 @@
+2024-08-30 21:55:13,527 INFO [dysarthria_finetune.py:1212] Training started
+2024-08-30 21:55:14,228 INFO [dysarthria_finetune.py:1214] (33748090880, 34072559616)
+2024-08-30 21:55:14,228 INFO [dysarthria_finetune.py:1215] Empty cache: before and after
+2024-08-30 21:55:14,228 INFO [dysarthria_finetune.py:1219] (33748090880, 34072559616)
+2024-08-30 21:55:14,235 INFO [dysarthria_finetune.py:1229] Device: cuda:0
+2024-08-30 21:55:15,120 INFO [dysarthria_finetune.py:1241] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2608.int.cedar.computecanada.ca', 'IP address': '172.16.146.45'}, 'world_size': 1, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 21:55:15,121 INFO [dysarthria_finetune.py:1243] About to create model
+2024-08-30 21:55:17,865 INFO [dysarthria_finetune.py:1247] Number of model parameters: 65805511
+2024-08-30 21:55:18,402 INFO [dysarthria_finetune.py:769] Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-30 22:09:46,377 INFO [dysarthria_asr_datamodule.py:494] About to get train cuts
+2024-08-30 22:13:07,937 INFO [dysarthria_finetune.py:1319] CutSet(len=62255) [underlying data type: ]
+2024-08-30 22:13:08,780 INFO [dysarthria_asr_datamodule.py:239] Disable MUSAN
+2024-08-30 22:13:08,780 INFO [dysarthria_asr_datamodule.py:257] Enable SpecAugment
+2024-08-30 22:13:08,781 INFO [dysarthria_asr_datamodule.py:258] Time warp factor: 80
+2024-08-30 22:13:08,781 INFO [dysarthria_asr_datamodule.py:268] Num frame mask: 10
+2024-08-30 22:13:08,781 INFO [dysarthria_asr_datamodule.py:281] About to create train dataset
+2024-08-30 22:13:10,568 INFO [dysarthria_asr_datamodule.py:308] Using DynamicBucketingSampler.
+2024-08-30 22:13:11,483 INFO [dysarthria_asr_datamodule.py:325] About to create train dataloader
+2024-08-30 22:13:11,490 INFO [dysarthria_asr_datamodule.py:500] About to get dev cuts
+2024-08-30 22:13:11,687 INFO [dysarthria_asr_datamodule.py:356] About to create dev dataset
+2024-08-30 22:13:12,243 INFO [dysarthria_asr_datamodule.py:373] About to create dev dataloader
+2024-08-30 22:13:12,244 INFO [dysarthria_finetune.py:1490] Sanity check -- see if any of the batches in epoch 1 would cause OOM.
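This single-process restart walks the whole datamodule in one log: 62,255 training cuts, MUSAN disabled, SpecAugment enabled, then a `DynamicBucketingSampler` and the train/dev dataloaders. A sketch of the lhotse side with the sampler arguments taken from the params dict (`max_duration: 800`, `num_buckets: 20`, `shuffle: True`, `drop_last: True`); the manifest filename is hypothetical:

```python
from lhotse import CutSet
from lhotse.dataset.sampling import DynamicBucketingSampler

# Hypothetical manifest name under the logged manifest_dir.
cuts = CutSet.from_file(
    "/home/liqihan/scratch/data/speech_accessibility/manifests/cuts_train.jsonl.gz"
)
sampler = DynamicBucketingSampler(
    cuts,
    max_duration=800.0,  # seconds of audio per batch, per the params dict
    num_buckets=20,
    shuffle=True,
    drop_last=True,
)
```

The sampler is then handed to a `torch.utils.data.DataLoader` with `batch_size=None`, which is lhotse's convention for samplers that already emit whole batches.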
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-09-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-09-0
new file mode 100644
index 0000000000000000000000000000000000000000..e1876288e6ead2aefebd9a759d80ac10a4b76c22
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-09-0
@@ -0,0 +1,26 @@
+2024-08-31 00:09:09,992 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-31 00:09:11,091 INFO [dysarthria_finetune.py:1214] (0/4) (32783400960, 34072559616)
+2024-08-31 00:09:11,091 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-31 00:09:11,467 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-31 00:09:11,505 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-31 00:21:11,743 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-31 00:21:12,288 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 00:22:26,316 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-31 00:22:31,012 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-31 00:22:31,252 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-31 00:22:33,063 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-31 00:22:33,953 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-31 00:22:33,953 INFO [dysarthria_asr_datamodule.py:501] (0/4) About to get dev cuts
+2024-08-31 00:22:34,027 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-31 00:22:34,393 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-31 00:22:34,393 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 02:23:24,740 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=18.74 vs. limit=7.5
+2024-08-31 02:23:26,354 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.21 vs. limit=7.5
+2024-08-31 02:23:27,402 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
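The datamodule lines `Enable SpecAugment`, `Time warp factor: 80`, and `Num frame mask: 10` map onto lhotse's `SpecAugment` transform. A sketch that pins only those two values and leaves everything else at the library defaults:

```python
from lhotse.dataset import SpecAugment

# Only time_warp_factor and num_frame_masks are fixed by the log;
# the remaining mask sizes and probabilities stay at lhotse defaults.
spec_augment = SpecAugment(time_warp_factor=80, num_frame_masks=10)
```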
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-1
new file mode 100644
index 0000000000000000000000000000000000000000..85d3018d2606fd45c1e07bc20b2a848317b85a5d
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-1
@@ -0,0 +1,26 @@
+2024-08-31 00:09:10,287 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-31 00:09:10,299 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-31 00:09:10,300 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-31 00:09:11,153 INFO [dysarthria_finetune.py:1219] (1/4) (33091682304, 34072559616)
+2024-08-31 00:09:11,154 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-31 00:21:11,837 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-31 00:21:11,837 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 00:22:26,314 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-31 00:22:31,066 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-31 00:22:31,253 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-31 00:22:32,988 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-31 00:22:33,876 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-31 00:22:33,881 INFO [dysarthria_asr_datamodule.py:501] (1/4) About to get dev cuts
+2024-08-31 00:22:34,027 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-31 00:22:34,395 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-31 00:22:34,396 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 02:23:24,734 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=18.54 vs. limit=7.5
+2024-08-31 02:23:26,353 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=17.58 vs. limit=7.5
+2024-08-31 02:23:27,404 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-2
new file mode 100644
index 0000000000000000000000000000000000000000..5c3542fb5b797ab2eb25c687da9b86abcca74a87
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-2
@@ -0,0 +1,26 @@
+2024-08-31 00:09:10,260 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-31 00:09:10,299 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-31 00:09:10,300 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-31 00:09:11,135 INFO [dysarthria_finetune.py:1219] (2/4) (33106362368, 34072559616)
+2024-08-31 00:09:11,136 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-31 00:21:11,784 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-31 00:21:11,784 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 00:22:26,343 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-31 00:22:31,015 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-31 00:22:31,252 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-31 00:22:32,988 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-31 00:22:33,901 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-31 00:22:33,902 INFO [dysarthria_asr_datamodule.py:501] (2/4) About to get dev cuts
+2024-08-31 00:22:34,027 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-31 00:22:34,399 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-31 00:22:34,403 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 02:23:24,735 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=17.72 vs. limit=7.5
+2024-08-31 02:23:26,354 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.83 vs. limit=7.5
+2024-08-31 02:23:27,400 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-3
new file mode 100644
index 0000000000000000000000000000000000000000..60f9e8767c850103ae4a46b8ef9c2b3426eba179
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-00-09-10-3
@@ -0,0 +1,29 @@
+2024-08-31 00:09:10,255 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-31 00:09:10,256 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-31 00:09:10,256 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-31 00:09:11,092 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-31 00:09:11,092 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-31 00:21:09,697 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 00:21:09,718 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-31 00:21:11,782 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-31 00:21:11,782 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 00:22:26,336 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-31 00:22:31,011 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-31 00:22:31,252 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-31 00:22:31,541 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-31 00:22:32,988 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-31 00:22:33,876 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-31 00:22:33,881 INFO [dysarthria_asr_datamodule.py:501] (3/4) About to get dev cuts
+2024-08-31 00:22:34,027 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-31 00:22:34,395 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-31 00:22:34,395 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 02:23:24,734 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=16.32 vs. limit=7.5
+2024-08-31 02:23:26,354 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.06 vs. limit=7.5
+2024-08-31 02:23:27,398 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
+2024-08-31 02:59:28,695 INFO [dysarthria_finetune.py:1468] (3/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/batch-bdd640fb-0667-1ad1-1c80-317fa3b1799d.pt
+2024-08-31 03:12:30,862 INFO [dysarthria_finetune.py:1474] (3/4) features shape: torch.Size([26, 2997, 80])
+2024-08-31 03:12:30,864 INFO [dysarthria_finetune.py:1478] (3/4) num tokens: 2978
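Rank 3 ends this run by dumping the in-flight batch to `batch-bdd640fb-....pt` and reporting `features shape: torch.Size([26, 2997, 80])`: 26 cuts padded to 2,997 frames of 80-dim features, which at a 10 ms frame shift is at most about 780 s of audio, just under the 800 s `max_duration`. Such a dump can be reopened for inspection; the key name below is an assumption, since the log does not show the saved dict's layout:

```python
import torch

# Assuming the dump is a plain dict; inspect keys() before relying on names.
batch = torch.load(
    "batch-bdd640fb-0667-1ad1-1c80-317fa3b1799d.pt", map_location="cpu"
)
print(batch.keys())             # discover the actual layout first
print(batch["features"].shape)  # assumed key; expect [26, 2997, 80]
```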
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-08-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-08-0
new file mode 100644
index 0000000000000000000000000000000000000000..c358989d4887c08a29ef326463750f9ea1d91148
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-08-0
@@ -0,0 +1,34 @@
+2024-08-31 13:20:08,819 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-31 13:20:17,703 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-31 13:20:17,703 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-31 13:20:18,056 INFO [dysarthria_finetune.py:1219] (0/4) (33748090880, 34072559616)
+2024-08-31 13:20:18,064 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-31 13:20:18,101 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2552.int.cedar.computecanada.ca', 'IP address': '172.16.145.245'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:20:18,102 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-31 13:20:24,135 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-31 13:20:25,956 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 13:35:28,411 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-31 13:35:40,931 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-31 13:35:41,025 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-31 13:35:41,431 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-31 13:35:44,713 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-31 13:35:45,618 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-31 13:35:45,619 INFO [dysarthria_asr_datamodule.py:501] (0/4) About to get dev cuts
+2024-08-31 13:35:45,646 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-31 13:35:46,005 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-31 13:35:46,006 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:39:46,774 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=18.74 vs. limit=7.5
+2024-08-31 13:39:47,156 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.21 vs. limit=7.5
+2024-08-31 13:39:48,869 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 13:39:50,790 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 14:32:39,038 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 14:34:09,039 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 14:44:25,352 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=288, metric=7.02 vs. limit=5.0
+2024-08-31 14:44:26,023 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 14:44:28,189 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 14:46:35,797 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3292, simple_loss=0.2739, pruned_loss=0.1428, ctc_loss=0.2051, over 18513.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.2739, pruned_loss=0.1428, ctc_loss=0.2051, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 14:46:35,797 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
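The `Epoch 1, batch 0` lines report four per-frame losses whose relationship can be checked directly. With the warm-up weighting icefall's zipformer recipes apply before `warm_step` (full weight on the simple loss, 0.1 on the pruned loss) plus the logged `ctc_loss_scale: 0.2`, the totals reproduce to the printed precision, e.g. 1.0·0.2739 + 0.1·0.1428 + 0.2·0.2051 = 0.3292 for rank 0; the same check passes for the rank 1 and rank 2 batch-0 lines in the log files that follow. A check under that assumed combination:

```python
# total = 1.0*simple + 0.1*pruned + 0.2*ctc at batch 0 (before warm_step);
# the weighting is an inference that the logged values confirm numerically.
cases = [
    (0.2739, 0.1428, 0.2051, 0.3292),  # rank 0 (this file)
    (0.3185, 0.1863, 0.2785, 0.3929),  # rank 1 (next file)
    (0.3243, 0.1928, 0.2836, 0.4003),  # rank 2 (file after)
]
for simple, pruned, ctc, total in cases:
    combined = 1.0 * simple + 0.1 * pruned + 0.2 * ctc
    assert abs(combined - total) < 5e-4, (combined, total)
```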
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-1
new file mode 100644
index 0000000000000000000000000000000000000000..ffbc6539780fdc04bb246bb28b227ccbecd32744
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-1
@@ -0,0 +1,34 @@
+2024-08-31 13:20:09,080 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-31 13:20:30,461 INFO [dysarthria_finetune.py:1214] (1/4) (33427226624, 34072559616)
+2024-08-31 13:20:30,461 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-31 13:20:30,827 INFO [dysarthria_finetune.py:1219] (1/4) (33427226624, 34072559616)
+2024-08-31 13:20:30,828 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-31 13:20:30,831 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2552.int.cedar.computecanada.ca', 'IP address': '172.16.145.245'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:20:30,831 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-31 13:20:31,521 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-31 13:20:31,521 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 13:35:28,176 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-31 13:35:40,937 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-31 13:35:41,025 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-31 13:35:41,431 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-31 13:35:44,629 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-31 13:35:45,566 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-31 13:35:45,569 INFO [dysarthria_asr_datamodule.py:501] (1/4) About to get dev cuts
+2024-08-31 13:35:45,647 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-31 13:35:46,006 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-31 13:35:46,007 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:39:46,776 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=18.54 vs. limit=7.5
+2024-08-31 13:39:47,155 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=17.58 vs. limit=7.5
+2024-08-31 13:39:48,861 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 13:39:50,793 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 14:32:39,026 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 14:34:09,032 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 14:44:26,024 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 14:44:28,198 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 14:46:35,798 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.3929, simple_loss=0.3185, pruned_loss=0.1863, ctc_loss=0.2785, over 18549.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.3185, pruned_loss=0.1863, ctc_loss=0.2785, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 14:46:35,798 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-31 15:51:58,632 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3058, simple_loss=0.2552, pruned_loss=0.1294, ctc_loss=0.1884, over 276520.00 frames.
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-2
new file mode 100644
index 0000000000000000000000000000000000000000..61112c443750a1227aee69f85da1e73abbce4a6f
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-2
@@ -0,0 +1,35 @@
+2024-08-31 13:20:09,083 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-31 13:20:48,982 INFO [dysarthria_finetune.py:1214] (2/4) (32783400960, 34072559616)
+2024-08-31 13:20:48,982 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-31 13:20:49,354 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-31 13:20:49,354 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-31 13:20:49,357 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2552.int.cedar.computecanada.ca', 'IP address': '172.16.145.245'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:20:49,358 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-31 13:20:50,038 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-31 13:20:50,039 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 13:35:28,190 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-31 13:35:40,931 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-31 13:35:41,025 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-31 13:35:41,431 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-31 13:35:44,629 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-31 13:35:45,576 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-31 13:35:45,577 INFO [dysarthria_asr_datamodule.py:501] (2/4) About to get dev cuts
+2024-08-31 13:35:45,646 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-31 13:35:46,007 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-31 13:35:46,008 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:39:46,776 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=17.72 vs. limit=7.5
+2024-08-31 13:39:47,156 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.83 vs. limit=7.5
+2024-08-31 13:39:48,857 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 13:39:50,794 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 14:32:39,031 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 14:34:09,033 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 14:44:26,026 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 14:44:28,195 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 14:46:35,796 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.4003, simple_loss=0.3243, pruned_loss=0.1928, ctc_loss=0.2836, over 18533.00 frames. ], tot_loss[loss=0.4003, simple_loss=0.3243, pruned_loss=0.1928, ctc_loss=0.2836, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 14:46:35,797 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-31 15:52:10,156 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3058, simple_loss=0.2552, pruned_loss=0.1294, ctc_loss=0.1884, over 276520.00 frames.
+2024-08-31 15:52:11,609 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19755MB
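
> Note: the `loss[...]` fields in these logs are a weighted sum of the three logged components. Below is a minimal sketch of that combination, assuming the usual icefall pruned-transducer + CTC recipe and the scales from the config dump above (`simple_loss_scale: 0.5`, `ctc_loss_scale: 0.2`, `warm_step: 2000`); it reproduces the batch-0 line of this log exactly.

```python
# Hedged sketch of how the logged `loss` is assembled from its parts in an
# icefall-style pruned-transducer + CTC recipe. Early in warmup the cheap
# simple loss carries most of the weight while the pruned loss is faded in.
def combined_loss(simple_loss, pruned_loss, ctc_loss,
                  batch_idx_train, warm_step=2000,
                  simple_loss_scale=0.5, ctc_loss_scale=0.2):
    s = simple_loss_scale
    simple_scale = s if batch_idx_train >= warm_step else (
        1.0 - (batch_idx_train / warm_step) * (1.0 - s))
    pruned_scale = 1.0 if batch_idx_train >= warm_step else (
        0.1 + 0.9 * (batch_idx_train / warm_step))
    return (simple_scale * simple_loss
            + pruned_scale * pruned_loss
            + ctc_loss_scale * ctc_loss)

# Batch 0 above: 0.3243 + 0.1 * 0.1928 + 0.2 * 0.2836 = 0.4003
assert abs(combined_loss(0.3243, 0.1928, 0.2836, 0) - 0.4003) < 5e-4
```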
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-3
new file mode 100644
index 0000000000000000000000000000000000000000..4d0deda8ef3e7f0f4b9bf82f3c04303ea07ff8a4
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-13-20-09-3
@@ -0,0 +1,35 @@
+2024-08-31 13:20:09,100 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-31 13:20:39,984 INFO [dysarthria_finetune.py:1214] (3/4) (33106362368, 34072559616)
+2024-08-31 13:20:39,985 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-31 13:20:40,357 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-31 13:20:40,357 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-31 13:20:40,360 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2552.int.cedar.computecanada.ca', 'IP address': '172.16.145.245'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:20:40,360 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-31 13:20:41,062 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-31 13:20:41,063 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 13:35:28,175 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-31 13:35:40,931 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-31 13:35:41,025 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-31 13:35:41,430 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-31 13:35:41,431 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-31 13:35:41,431 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-31 13:35:44,629 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-31 13:35:45,562 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-31 13:35:45,569 INFO [dysarthria_asr_datamodule.py:501] (3/4) About to get dev cuts
+2024-08-31 13:35:45,646 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-31 13:35:46,008 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-31 13:35:46,008 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:39:46,781 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=16.32 vs. limit=7.5
+2024-08-31 13:39:47,156 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.06 vs. limit=7.5
+2024-08-31 13:39:48,862 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11825MB
+2024-08-31 13:39:50,790 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11825MB
+2024-08-31 14:32:39,026 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11825MB
+2024-08-31 14:34:09,037 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11825MB
+2024-08-31 14:44:26,024 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11825MB
+2024-08-31 14:44:28,198 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11825MB
+2024-08-31 14:46:35,800 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3339, simple_loss=0.2734, pruned_loss=0.1449, ctc_loss=0.2303, over 18634.00 frames. ], tot_loss[loss=0.3339, simple_loss=0.2734, pruned_loss=0.1449, ctc_loss=0.2303, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 14:46:35,800 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-31 15:52:10,160 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3058, simple_loss=0.2552, pruned_loss=0.1294, ctc_loss=0.1884, over 276520.00 frames.
+2024-08-31 15:52:33,807 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14286MB
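
> Note: the `lr:` values across these logs (5.01e-05 at batch 0, rising to 1.00e-04 by epoch 5, then decaying to 9.99e-05 and below) are consistent with icefall's Eden schedule under the logged config (`base_lr: 0.0001`, `lr_batches: 100000.0`, `lr_epochs: 100.0`). A hedged sketch follows, with the 500-batch linear warmup assumed from Eden's default.

```python
# Hedged sketch of the Eden learning-rate rule (icefall optim.py):
# two power-law decay factors in batch and epoch, times a linear warmup.
def eden_lr(base_lr, batch, epoch,
            lr_batches=100_000.0, lr_epochs=100.0, warmup_batches=500.0):
    batch_factor = ((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25
    epoch_factor = ((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25
    warmup_factor = (1.0 if batch >= warmup_batches
                     else 0.5 + 0.5 * batch / warmup_batches)
    return base_lr * batch_factor * epoch_factor * warmup_factor

print(eden_lr(1e-4, batch=0,   epoch=1))  # ~5.0e-05, cf. "lr: 5.01e-05"
print(eden_lr(1e-4, batch=600, epoch=5))  # ~1.0e-04, cf. "lr: 1.00e-04"
```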
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-0
new file mode 100644
index 0000000000000000000000000000000000000000..46414bf69525278fa041192fe10d6859db276261
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-0
@@ -0,0 +1,559 @@
+2024-08-31 22:13:17,694 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-31 22:13:19,468 INFO [dysarthria_finetune.py:1214] (0/4) (32783400960, 34072559616)
+2024-08-31 22:13:19,468 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-31 22:13:19,851 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-31 22:13:19,868 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
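
> Note: the paired `(33106362368, 34072559616)` tuples around "Empty cache: before and after" are (free, total) device memory in bytes, i.e. roughly 30.8 GiB free of 31.7 GiB. A small sketch of that probe using the standard torch CUDA APIs:

```python
import torch

# Probe free/total device memory, release cached allocator blocks, probe again;
# this is the "(free, total) ... before and after" pattern in the log.
free, total = torch.cuda.mem_get_info()   # e.g. (33106362368, 34072559616)
print(free, total)
torch.cuda.empty_cache()                  # return cached blocks to the driver
print(torch.cuda.mem_get_info())
```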
+2024-08-31 22:13:19,872 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:13:19,872 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-31 22:13:21,156 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65805511
+2024-08-31 22:13:21,693 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 22:13:33,264 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
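
> Note on the init order above: restore the 20-epoch pretrained checkpoint, then wrap the model in DDP so the four ranks keep gradients in sync. A hedged sketch of the pattern; the `"model"` key is an assumption based on common icefall checkpoint layouts, and the function expects an already-initialized process group.

```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def load_and_wrap(model: torch.nn.Module, ckpt_path: str, rank: int) -> torch.nn.Module:
    """Restore pretrained weights, then wrap in DDP (one process per GPU)."""
    ckpt = torch.load(ckpt_path, map_location="cpu")
    model.load_state_dict(ckpt["model"])  # key assumed from icefall conventions
    model.to(torch.device("cuda", rank))
    return DDP(model, device_ids=[rank])
```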
+2024-08-31 22:14:37,196 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-31 22:14:37,263 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
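
> Note: the datamodule echoes its augmentation setup here: MUSAN noise mixing disabled, SpecAugment enabled with a time-warp factor of 80 and 10 frame masks. A hedged sketch with lhotse's `SpecAugment` transform; the unlogged arguments are assumed defaults.

```python
from lhotse.dataset import SpecAugment

spec_augment = SpecAugment(
    time_warp_factor=80,    # "Time warp factor: 80" above
    num_frame_masks=10,     # "Num frame mask: 10" above
    features_mask_size=27,  # the rest are assumptions (typical defaults)
    num_feature_masks=2,
    frames_mask_size=100,
)
```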
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-31 22:14:55,449 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-31 22:14:56,378 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-31 22:14:56,378 INFO [dysarthria_asr_datamodule.py:501] (0/4) About to get dev cuts
+2024-08-31 22:14:56,477 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-31 22:14:57,473 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
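
> Note: batches come from lhotse's `DynamicBucketingSampler` (logged above): cuts are grouped into 20 duration buckets and each batch is filled up to 800 seconds of audio, which is why the logged batch size varies with utterance length (65 to 144 utterances). A sketch mirroring the config dump:

```python
from lhotse import CutSet
from lhotse.dataset.sampling import DynamicBucketingSampler

def make_train_sampler(train_cuts: CutSet) -> DynamicBucketingSampler:
    # Parameters mirror the config dump: 'max_duration': 800,
    # 'num_buckets': 20, 'shuffle': True, 'drop_last': True.
    return DynamicBucketingSampler(
        train_cuts,
        max_duration=800.0,  # seconds of audio per batch, not #utterances
        num_buckets=20,      # cuts of similar duration are batched together
        shuffle=True,
        drop_last=True,
    )
```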
+2024-08-31 22:14:57,473 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:16:23,767 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=18.74 vs. limit=7.5
+2024-08-31 22:16:31,018 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.21 vs. limit=7.5
+2024-08-31 22:16:34,163 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 22:16:36,234 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 22:17:53,349 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 22:17:55,324 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 22:19:45,161 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=288, metric=7.02 vs. limit=5.0
+2024-08-31 22:19:46,278 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
+2024-08-31 22:19:48,456 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11759MB
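
> Note: the sanity check before epoch 1 forward/backwards the most pessimistic (longest-duration) batches once, so an OOM surfaces immediately rather than mid-epoch; the "Maximum memory allocated" lines are peak-memory readings from those probes. A rough sketch of the idea, where `batches` and `compute_loss` stand in for icefall's actual plumbing:

```python
import torch

def scan_pessimistic_batches(model, batches, compute_loss):
    """Run the largest batch of each duration bucket once and report the
    CUDA peak-memory high-water mark after every probe."""
    for batch in batches:
        loss = compute_loss(model, batch)
        loss.backward()
        model.zero_grad(set_to_none=True)
        peak_mb = torch.cuda.max_memory_allocated() // 2**20
        print(f"Maximum memory allocated so far is {peak_mb}MB")
```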
+2024-08-31 22:20:26,656 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3292, simple_loss=0.2739, pruned_loss=0.1428, ctc_loss=0.2051, over 18513.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.2739, pruned_loss=0.1428, ctc_loss=0.2051, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:26,657 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-31 22:32:57,022 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.373, simple_loss=0.3046, pruned_loss=0.1755, ctc_loss=0.2544, over 1073944.00 frames.
+2024-08-31 22:32:57,064 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-08-31 22:36:03,016 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=100000.0, ans=0.0
+2024-08-31 22:47:02,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-31 22:51:26,990 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 9.923e+02 1.157e+03 1.203e+03 1.280e+03 1.380e+03, threshold=4.812e+03, percent-clipped=0.0
+2024-08-31 22:57:43,578 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=35.69 vs. limit=15.0
+2024-08-31 23:02:59,151 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.26 vs. limit=15.0
+2024-08-31 23:03:06,383 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 8.113e+02 1.083e+03 1.198e+03 1.280e+03 1.431e+03, threshold=4.794e+03, percent-clipped=0.0
+2024-08-31 23:21:24,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=100160.0, ans=0.125
+2024-08-31 23:29:32,322 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 6.285e+02 9.052e+02 1.061e+03 1.198e+03 1.431e+03, threshold=4.243e+03, percent-clipped=0.0
+2024-08-31 23:49:33,573 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.4115, simple_loss=0.332, pruned_loss=0.1923, ctc_loss=0.3005, over 18890.00 frames. ], tot_loss[loss=0.4074, simple_loss=0.3296, pruned_loss=0.1961, ctc_loss=0.2899, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:56:27,573 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.62 vs. limit=22.5
+2024-09-01 00:02:21,831 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=100320.0, ans=0.125
+2024-09-01 00:07:03,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100373.33333333333, ans=0.1
+2024-09-01 00:07:03,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100373.33333333333, ans=0.1
+2024-09-01 00:18:03,026 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=20.00 vs. limit=15.0
+2024-09-01 00:18:31,030 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.958e+02 6.817e+02 8.321e+02 1.009e+03 1.431e+03, threshold=1.664e+03, percent-clipped=0.0
+2024-09-01 00:18:31,064 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.3787, simple_loss=0.3065, pruned_loss=0.1801, ctc_loss=0.2691, over 19293.00 frames. ], tot_loss[loss=0.388, simple_loss=0.3145, pruned_loss=0.1836, ctc_loss=0.2747, over 1474004.25 frames. ], batch size: 144, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:19:11,004 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.24 vs. limit=6.0
+2024-09-01 00:28:35,452 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-1.pt
+2024-09-01 00:28:50,184 INFO [dysarthria_finetune.py:1435] (0/4) (1179320320, 34072559616)
+2024-09-01 00:28:50,184 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 00:28:50,216 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
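
> Note: each epoch closes with a checkpoint save and another free/total memory probe; free memory reads ~1.2 GB before `empty_cache()` and ~29.6 GB after, i.e. most of the gap was cached allocator blocks rather than live tensors. A hedged sketch of what `epoch-N.pt` typically bundles in icefall recipes (the exact key set is an assumption):

```python
import torch

def save_epoch_checkpoint(path, model, optimizer, scheduler, sampler, params):
    # Bundling optimizer/scheduler/sampler state lets a run resume exactly
    # where it stopped, not just with the same weights.
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "scheduler": scheduler.state_dict(),
            "sampler": sampler.state_dict(),
            "params": params,
        },
        path,  # e.g. ".../exp_finetune/epoch-1.pt" as in the log
    )
```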
+2024-09-01 00:29:13,525 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 0, loss[loss=0.3305, simple_loss=0.2745, pruned_loss=0.1334, ctc_loss=0.219, over 18874.00 frames. ], tot_loss[loss=0.3305, simple_loss=0.2745, pruned_loss=0.1334, ctc_loss=0.219, over 18874.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:29:13,526 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 00:34:07,571 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 2, validation: loss=0.3353, simple_loss=0.2773, pruned_loss=0.1482, ctc_loss=0.2175, over 1073944.00 frames.
+2024-09-01 00:34:07,571 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
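
> Note: the `optim.py` WARNING lines summarize recent gradient norms as five order statistics (min, quartiles, max) and derive the clipping threshold from them. Once the statistics settle, the threshold tracks `Clipping_scale` times the running median (e.g. 5.047e+02 ≈ 2.0 × 2.524e+02 later in this log); the early thresholds are larger while the statistics warm up. A sketch of that rule:

```python
import torch

def clip_multiplier(grad_norm_history: torch.Tensor, grad_norm: float,
                    clipping_scale: float = 2.0) -> float:
    """Quartile-based clipping sketch: threshold = clipping_scale * median of
    recent batch grad norms; batches above it get gradients scaled down."""
    q = torch.quantile(grad_norm_history,
                       torch.tensor([0.00, 0.25, 0.50, 0.75, 1.00]))
    threshold = clipping_scale * float(q[2])
    print(f"grad-norm quartiles {q.tolist()}, threshold={threshold:.3e}")
    return min(1.0, threshold / (grad_norm + 1e-20))  # gradient multiplier
```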
+2024-09-01 00:49:20,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=100736.0, ans=10.0
+2024-09-01 00:55:06,238 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.36 vs. limit=15.0
+2024-09-01 00:56:09,450 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=40.10 vs. limit=22.5
+2024-09-01 00:56:46,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100896.0, ans=0.0
+2024-09-01 00:57:00,575 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 00:59:42,891 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 50, loss[loss=0.3734, simple_loss=0.2993, pruned_loss=0.1828, ctc_loss=0.2721, over 18964.00 frames. ], tot_loss[loss=0.3749, simple_loss=0.3056, pruned_loss=0.1713, ctc_loss=0.2604, over 826819.73 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 01:02:49,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-09-01 01:07:12,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=101002.66666666667, ans=0.07
+2024-09-01 01:08:04,419 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.183e+02 4.403e+02 5.126e+02 5.917e+02 6.888e+02, threshold=1.025e+03, percent-clipped=0.0
+2024-09-01 01:09:08,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101056.0, ans=0.1
+2024-09-01 01:09:23,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=101056.0, ans=0.0
+2024-09-01 01:10:02,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=101109.33333333333, ans=0.125
+2024-09-01 01:10:13,361 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.60 vs. limit=15.0
+2024-09-01 01:10:32,032 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=32.10 vs. limit=22.5
+2024-09-01 01:10:48,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=101109.33333333333, ans=0.125
+2024-09-01 01:14:19,324 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 100, loss[loss=0.4031, simple_loss=0.3274, pruned_loss=0.1838, ctc_loss=0.2858, over 19229.00 frames. ], tot_loss[loss=0.3604, simple_loss=0.295, pruned_loss=0.1616, ctc_loss=0.2476, over 1473154.80 frames. ], batch size: 144, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 01:14:23,202 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=23.98 vs. limit=15.0
+2024-09-01 01:14:58,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=101216.0, ans=0.125
+2024-09-01 01:18:19,541 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.80 vs. limit=15.0
+2024-09-01 01:18:25,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=101269.33333333333, ans=0.125
+2024-09-01 01:18:42,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-09-01 01:20:51,911 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-2.pt
+2024-09-01 01:21:12,424 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 01:21:12,424 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:21:12,451 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 01:21:20,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-09-01 01:21:21,460 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 0, loss[loss=0.3464, simple_loss=0.2848, pruned_loss=0.1562, ctc_loss=0.2308, over 18603.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.2848, pruned_loss=0.1562, ctc_loss=0.2308, over 18603.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:21:21,461 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:21:44,693 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 3, validation: loss=0.309, simple_loss=0.2588, pruned_loss=0.13, ctc_loss=0.1938, over 1073944.00 frames.
+2024-09-01 01:21:44,694 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
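
> Note: the `scaling.py:214` lines trace `ScheduledFloat` hyperparameters (skip rates, balancer probabilities, dropout) that are piecewise-linear functions of `batch_count`. `batch_count` starts near 100000 here, presumably because fine-tuning resumes the schedule position from the pretrained run rather than restarting it, so most knobs already sit at their long-run values (`ans=0.0`, `ans=0.125`, ...). A hedged sketch of such a schedule:

```python
# Hedged sketch of a ScheduledFloat-style value: (batch_count, value)
# breakpoints with linear interpolation between them, clamped outside.
def scheduled_float(batch_count: float, *points: tuple[float, float]) -> float:
    (b0, v0), *rest = points
    if batch_count <= b0:
        return v0
    for b1, v1 in rest:
        if batch_count <= b1:
            t = (batch_count - b0) / (b1 - b0)
            return v0 + t * (v1 - v0)
        b0, v0 = b1, v1
    return v0  # past the last breakpoint

# e.g. a skip rate decaying 0.5 -> 0.0 over the first 20k batches:
print(scheduled_float(100_000.0, (0.0, 0.5), (20_000.0, 0.0)))  # 0.0, cf. ans=0.0
```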
+2024-09-01 01:22:28,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=101370.66666666667, ans=0.04949747468305833
+2024-09-01 01:22:58,942 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.44 vs. limit=15.0
+2024-09-01 01:23:07,689 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=101424.0, ans=0.07
+2024-09-01 01:23:21,591 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:23:46,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101477.33333333333, ans=0.125
+2024-09-01 01:24:27,009 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.669e+02 3.351e+02 3.834e+02 4.204e+02 5.264e+02, threshold=7.667e+02, percent-clipped=0.0
+2024-09-01 01:24:47,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=101637.33333333333, ans=0.125
+2024-09-01 01:24:49,033 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 50, loss[loss=0.3486, simple_loss=0.2882, pruned_loss=0.1492, ctc_loss=0.235, over 18964.00 frames. ], tot_loss[loss=0.3454, simple_loss=0.2836, pruned_loss=0.1512, ctc_loss=0.2379, over 827741.27 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:25:16,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=17.86 vs. limit=15.0
+2024-09-01 01:26:25,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=101797.33333333333, ans=0.5
+2024-09-01 01:26:29,774 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=101797.33333333333, ans=0.2
+2024-09-01 01:26:34,692 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.78 vs. limit=22.5
+2024-09-01 01:27:00,716 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 100, loss[loss=0.3133, simple_loss=0.2615, pruned_loss=0.1308, ctc_loss=0.2054, over 19231.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.2765, pruned_loss=0.1452, ctc_loss=0.229, over 1473938.15 frames. ], batch size: 144, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:27:28,713 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=18.45 vs. limit=15.0
+2024-09-01 01:27:32,912 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=101957.33333333333, ans=0.0
+2024-09-01 01:27:49,783 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=17.92 vs. limit=15.0
+2024-09-01 01:29:32,085 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-3.pt
+2024-09-01 01:29:36,842 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 01:29:36,842 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:29:36,889 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 01:29:45,316 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 0, loss[loss=0.3173, simple_loss=0.2603, pruned_loss=0.1423, ctc_loss=0.2156, over 18523.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.2603, pruned_loss=0.1423, ctc_loss=0.2156, over 18523.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:29:45,317 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:30:08,489 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 4, validation: loss=0.2887, simple_loss=0.2447, pruned_loss=0.1169, ctc_loss=0.1781, over 1073944.00 frames.
+2024-09-01 01:30:08,490 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
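
> Note: the `scaling.py:1024` "Whitening" lines are a diagnostic of how far a layer's feature covariance is from a scaled identity: a metric of 1.0 means perfectly white features, and a corrective gradient only kicks in above the logged limit (e.g. "metric=18.43 vs. limit=15.0" just below). A rough sketch of one way to compute such a metric, the mean squared eigenvalue over the squared mean eigenvalue via traces; this captures the idea rather than icefall's exact code:

```python
import torch

def whitening_metric(x: torch.Tensor) -> torch.Tensor:
    """mean(eig^2) / mean(eig)^2 of the feature covariance, via traces so no
    eigendecomposition is needed; 1.0 iff covariance = c * identity."""
    x = x.reshape(-1, x.shape[-1])                  # (frames, channels)
    cov = (x.t() @ x) / x.shape[0]                  # (channels, channels)
    mean_eig = torch.diagonal(cov).mean()           # tr(C)/n   = mean(eig)
    mean_eig_sq = torch.diagonal(cov @ cov).mean()  # tr(C^2)/n = mean(eig^2)
    return mean_eig_sq / (mean_eig ** 2 + 1e-20)

x = torch.randn(10_000, 64)
print(whitening_metric(x))  # ~1.0 for white noise; the logs show e.g. 18.43
```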
+2024-09-01 01:30:42,262 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.378e+02 2.838e+02 3.147e+02 3.460e+02 5.318e+02, threshold=6.294e+02, percent-clipped=0.0
+2024-09-01 01:30:43,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=102106.66666666667, ans=0.125
+2024-09-01 01:30:54,192 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=18.43 vs. limit=15.0
+2024-09-01 01:31:02,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:31:27,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=102213.33333333333, ans=0.025
+2024-09-01 01:32:01,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=102320.0, ans=0.0
+2024-09-01 01:32:02,189 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 50, loss[loss=0.3468, simple_loss=0.2905, pruned_loss=0.1474, ctc_loss=0.2216, over 18961.00 frames. ], tot_loss[loss=0.3193, simple_loss=0.2647, pruned_loss=0.1338, ctc_loss=0.2182, over 828586.64 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:32:05,685 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.53 vs. limit=15.0
+2024-09-01 01:32:25,399 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.78 vs. limit=15.0
+2024-09-01 01:32:42,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:32:50,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=102426.66666666667, ans=0.2
+2024-09-01 01:32:56,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:32:58,806 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=17.50 vs. limit=15.0
+2024-09-01 01:35:45,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102480.0, ans=0.1
+2024-09-01 01:35:53,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=102480.0, ans=0.0
+2024-09-01 01:37:29,677 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=14.54 vs. limit=15.0
+2024-09-01 01:37:33,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102533.33333333333, ans=0.0
+2024-09-01 01:37:37,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102533.33333333333, ans=0.1
+2024-09-01 01:37:40,023 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 100, loss[loss=0.3266, simple_loss=0.2695, pruned_loss=0.1364, ctc_loss=0.2321, over 19286.00 frames. ], tot_loss[loss=0.3131, simple_loss=0.26, pruned_loss=0.1307, ctc_loss=0.2136, over 1474147.24 frames. ], batch size: 144, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:38:05,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=102640.0, ans=0.0
+2024-09-01 01:38:09,925 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 2.526e+02 2.751e+02 3.040e+02 4.636e+02, threshold=5.501e+02, percent-clipped=0.0
+2024-09-01 01:38:30,912 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=102693.33333333333, ans=0.1
+2024-09-01 01:38:32,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=102693.33333333333, ans=0.2
+2024-09-01 01:38:34,680 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=102693.33333333333, ans=0.5
+2024-09-01 01:38:35,841 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-4.pt
+2024-09-01 01:38:40,667 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 01:38:40,667 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:38:40,695 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 01:38:49,733 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 0, loss[loss=0.27, simple_loss=0.224, pruned_loss=0.1086, ctc_loss=0.1944, over 18549.00 frames. ], tot_loss[loss=0.27, simple_loss=0.224, pruned_loss=0.1086, ctc_loss=0.1944, over 18549.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:38:49,734 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:39:30,974 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 5, validation: loss=0.2717, simple_loss=0.233, pruned_loss=0.1066, ctc_loss=0.1665, over 1073944.00 frames.
+2024-09-01 01:39:30,974 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
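
> Note: `grad_scale` in the loss lines is the AMP loss scale (`use_fp16: True`): it starts at 2.0 and doubles after stretches of overflow-free steps (2 through 32 across epochs 1-5 above), and would halve on an inf/NaN gradient. A hedged sketch with torch's stock `GradScaler`; icefall wraps its own variant, so `growth_interval` here is an assumption.

```python
import torch

scaler = torch.cuda.amp.GradScaler(
    init_scale=2.0,       # matches the first logged grad_scale
    growth_factor=2.0,    # doubles after `growth_interval` clean steps
    backoff_factor=0.5,   # halves immediately on inf/NaN gradients
    growth_interval=100,  # assumption; torch's default is 2000
)

# Inside the train loop (schematic):
# with torch.cuda.amp.autocast():
#     loss = compute_loss(model, batch)
# scaler.scale(loss).backward()
# scaler.step(optimizer)
# scaler.update()
# print(scaler.get_scale())  # the value logged as grad_scale
```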
+2024-09-01 01:40:54,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=102741.33333333333, ans=0.2
+2024-09-01 01:41:16,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 01:41:24,330 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.84 vs. limit=15.0
+2024-09-01 01:41:31,303 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.78 vs. limit=15.0
+2024-09-01 01:41:33,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 01:41:34,103 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=15.83 vs. limit=15.0
+2024-09-01 01:42:07,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=102848.0, ans=0.025
+2024-09-01 01:42:16,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=102848.0, ans=0.025
+2024-09-01 01:43:55,506 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 50, loss[loss=0.2897, simple_loss=0.2467, pruned_loss=0.1121, ctc_loss=0.1907, over 19008.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.2525, pruned_loss=0.122, ctc_loss=0.2082, over 828355.03 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:44:25,052 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.54 vs. limit=10.0
+2024-09-01 01:44:53,395 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=103061.33333333333, ans=0.125
+2024-09-01 01:44:59,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=103061.33333333333, ans=0.2
+2024-09-01 01:45:02,867 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.19 vs. limit=15.0
+2024-09-01 01:45:15,819 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:45:36,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:46:04,692 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.130e+02 2.382e+02 2.524e+02 2.770e+02 4.371e+02, threshold=5.047e+02, percent-clipped=0.0
+2024-09-01 01:46:25,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-09-01 01:46:25,463 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.27 vs. limit=15.0
+2024-09-01 01:46:33,255 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.22 vs. limit=15.0
+2024-09-01 01:46:38,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103221.33333333333, ans=0.1
+2024-09-01 01:46:52,893 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 100, loss[loss=0.2814, simple_loss=0.2368, pruned_loss=0.1128, ctc_loss=0.1924, over 19287.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.2481, pruned_loss=0.1192, ctc_loss=0.2027, over 1473652.43 frames. ], batch size: 144, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:46:59,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=103274.66666666667, ans=0.0
+2024-09-01 01:46:59,694 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.52 vs. limit=15.0
+2024-09-01 01:47:08,848 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103274.66666666667, ans=0.1
+2024-09-01 01:47:42,774 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.85 vs. limit=15.0
+2024-09-01 01:48:11,536 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=103381.33333333333, ans=0.025
+2024-09-01 01:48:20,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=103381.33333333333, ans=0.125
+2024-09-01 01:48:26,052 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-5.pt
+2024-09-01 01:48:33,615 INFO [dysarthria_finetune.py:1435] (0/4) (1124794368, 34072559616)
+2024-09-01 01:48:33,615 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:48:33,644 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 01:48:41,999 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 0, loss[loss=0.2961, simple_loss=0.2496, pruned_loss=0.1166, ctc_loss=0.2059, over 18610.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.2496, pruned_loss=0.1166, ctc_loss=0.2059, over 18610.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:48:42,000 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:49:05,140 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 6, validation: loss=0.2578, simple_loss=0.2238, pruned_loss=0.09861, ctc_loss=0.1582, over 1073944.00 frames.
+2024-09-01 01:49:05,141 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 01:49:19,962 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.18 vs. limit=15.0
+2024-09-01 01:49:45,827 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=103477.33333333333, ans=0.125
+2024-09-01 01:49:45,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=103477.33333333333, ans=0.125
+2024-09-01 01:50:04,464 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.91 vs. limit=15.0
+2024-09-01 01:50:06,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=103530.66666666667, ans=0.0
+2024-09-01 01:50:44,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.04 vs. limit=15.0
+2024-09-01 01:50:46,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=103637.33333333333, ans=0.0
+2024-09-01 01:50:52,014 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 50, loss[loss=0.2805, simple_loss=0.2386, pruned_loss=0.1096, ctc_loss=0.1906, over 19047.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.2459, pruned_loss=0.1176, ctc_loss=0.2034, over 829577.21 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:50:57,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=103690.66666666667, ans=0.09899494936611666
+2024-09-01 01:50:57,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 01:51:08,006 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 2.277e+02 2.375e+02 2.614e+02 3.891e+02, threshold=4.750e+02, percent-clipped=0.0
+2024-09-01 01:51:46,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=103797.33333333333, ans=0.025
+2024-09-01 01:52:13,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=103904.0, ans=0.0
+2024-09-01 01:52:17,142 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.02 vs. limit=6.0
+2024-09-01 01:52:34,156 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 100, loss[loss=0.2687, simple_loss=0.2287, pruned_loss=0.1047, ctc_loss=0.1851, over 19232.00 frames. ], tot_loss[loss=0.2801, simple_loss=0.2375, pruned_loss=0.1104, ctc_loss=0.1921, over 1476247.28 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:52:53,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=103957.33333333333, ans=0.125
+2024-09-01 01:52:55,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=103957.33333333333, ans=0.125
+2024-09-01 01:53:14,927 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.19 vs. limit=15.0
+2024-09-01 01:53:34,207 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-6.pt
+2024-09-01 01:53:38,637 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 01:53:38,637 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:53:38,664 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 01:53:47,074 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 0, loss[loss=0.2723, simple_loss=0.2373, pruned_loss=0.1046, ctc_loss=0.17, over 18570.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.2373, pruned_loss=0.1046, ctc_loss=0.17, over 18570.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:53:47,075 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:54:10,657 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 7, validation: loss=0.2464, simple_loss=0.2165, pruned_loss=0.09214, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 01:54:10,658 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 01:55:12,003 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.149e+02 2.268e+02 2.457e+02 3.821e+02, threshold=4.535e+02, percent-clipped=0.0
+2024-09-01 01:55:53,896 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 50, loss[loss=0.2764, simple_loss=0.2408, pruned_loss=0.09974, ctc_loss=0.191, over 18968.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.2336, pruned_loss=0.1058, ctc_loss=0.1916, over 827907.61 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:56:26,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=104426.66666666667, ans=0.125
+2024-09-01 01:57:12,719 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=104586.66666666667, ans=0.125
+2024-09-01 01:57:30,199 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=104640.0, ans=0.125
+2024-09-01 01:57:30,837 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 100, loss[loss=0.2587, simple_loss=0.2229, pruned_loss=0.09694, ctc_loss=0.1833, over 19302.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.2294, pruned_loss=0.1027, ctc_loss=0.1868, over 1473040.93 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:36,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=104640.0, ans=0.125
+2024-09-01 01:58:00,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=104693.33333333333, ans=0.125
+2024-09-01 01:58:12,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=104746.66666666667, ans=0.0
+2024-09-01 01:58:22,457 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-7.pt
+2024-09-01 01:58:26,695 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 01:58:26,696 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 01:58:26,722 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 01:58:35,282 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 0, loss[loss=0.2824, simple_loss=0.2416, pruned_loss=0.1115, ctc_loss=0.1942, over 18485.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.2416, pruned_loss=0.1115, ctc_loss=0.1942, over 18485.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:58:35,283 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 01:58:58,370 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 8, validation: loss=0.236, simple_loss=0.2103, pruned_loss=0.08624, ctc_loss=0.1474, over 1073944.00 frames.
+2024-09-01 01:58:58,370 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 01:59:02,416 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.080e+02 2.182e+02 2.331e+02 3.634e+02, threshold=4.365e+02, percent-clipped=0.0
+2024-09-01 01:59:33,987 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=15.88 vs. limit=15.0
+2024-09-01 02:00:22,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=105002.66666666667, ans=0.2
+2024-09-01 02:00:31,978 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=10.98 vs. limit=12.0
+2024-09-01 02:00:36,429 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 50, loss[loss=0.2596, simple_loss=0.2259, pruned_loss=0.09894, ctc_loss=0.1757, over 18938.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.2225, pruned_loss=0.0957, ctc_loss=0.1816, over 828565.55 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:02:06,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=105269.33333333333, ans=0.125
+2024-09-01 02:02:13,394 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 100, loss[loss=0.258, simple_loss=0.2276, pruned_loss=0.09404, ctc_loss=0.1778, over 19222.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.2215, pruned_loss=0.09523, ctc_loss=0.179, over 1474444.14 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:02:17,359 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.064e+02 2.191e+02 2.358e+02 3.385e+02, threshold=4.381e+02, percent-clipped=0.0
+2024-09-01 02:02:32,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105376.0, ans=0.125
+2024-09-01 02:02:34,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105376.0, ans=0.1
+2024-09-01 02:02:36,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=105376.0, ans=0.125
+2024-09-01 02:02:38,459 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.62 vs. limit=15.0
+2024-09-01 02:02:41,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=105376.0, ans=0.2
+2024-09-01 02:02:43,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=105376.0, ans=0.125
+2024-09-01 02:02:59,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=105429.33333333333, ans=0.0
+2024-09-01 02:03:07,393 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-8.pt
+2024-09-01 02:03:11,779 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 02:03:11,780 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:03:11,807 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:03:20,910 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 0, loss[loss=0.2555, simple_loss=0.2233, pruned_loss=0.09714, ctc_loss=0.1749, over 18596.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.2233, pruned_loss=0.09714, ctc_loss=0.1749, over 18596.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:20,911 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:03:44,100 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 9, validation: loss=0.2267, simple_loss=0.2052, pruned_loss=0.08107, ctc_loss=0.1434, over 1073944.00 frames.
+2024-09-01 02:03:44,101 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:03:55,238 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.00 vs. limit=15.0
+2024-09-01 02:04:17,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=105530.66666666667, ans=0.05
+2024-09-01 02:04:38,130 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.20 vs. limit=15.0
+2024-09-01 02:05:27,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=105690.66666666667, ans=0.0
+2024-09-01 02:05:30,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=105690.66666666667, ans=0.2
+2024-09-01 02:05:35,852 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.99 vs. limit=15.0
+2024-09-01 02:05:38,303 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 50, loss[loss=0.2616, simple_loss=0.2292, pruned_loss=0.09597, ctc_loss=0.19, over 19065.00 frames. ], tot_loss[loss=0.248, simple_loss=0.2187, pruned_loss=0.09054, ctc_loss=0.1749, over 828972.56 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:44,130 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:06:08,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=105797.33333333333, ans=0.0
+2024-09-01 02:06:22,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105797.33333333333, ans=0.125
+2024-09-01 02:06:29,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:06:31,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=105850.66666666667, ans=0.2
+2024-09-01 02:06:31,756 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.077e+02 2.184e+02 2.316e+02 3.584e+02, threshold=4.367e+02, percent-clipped=0.0
+2024-09-01 02:07:12,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=105957.33333333333, ans=0.0
+2024-09-01 02:07:32,678 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 100, loss[loss=0.1912, simple_loss=0.1796, pruned_loss=0.06002, ctc_loss=0.129, over 19269.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.216, pruned_loss=0.08866, ctc_loss=0.1723, over 1474236.32 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:07:35,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:07:54,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:08:02,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=106064.0, ans=0.0
+2024-09-01 02:08:34,528 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-9.pt
+2024-09-01 02:08:39,100 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 02:08:39,100 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:08:39,127 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:08:48,127 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 0, loss[loss=0.2179, simple_loss=0.197, pruned_loss=0.07655, ctc_loss=0.1518, over 18682.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.197, pruned_loss=0.07655, ctc_loss=0.1518, over 18682.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:48,128 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:09:26,928 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 10, validation: loss=0.2182, simple_loss=0.2007, pruned_loss=0.07671, ctc_loss=0.1399, over 1073944.00 frames.
+2024-09-01 02:09:26,929 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:09:40,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106165.33333333333, ans=0.125
+2024-09-01 02:10:02,437 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.09 vs. limit=22.5
+2024-09-01 02:10:10,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=106218.66666666667, ans=0.2
+2024-09-01 02:10:11,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:10:39,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=106272.0, ans=0.125
+2024-09-01 02:11:16,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:11:54,668 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.023e+02 2.117e+02 2.323e+02 3.505e+02, threshold=4.234e+02, percent-clipped=0.0
+2024-09-01 02:12:00,937 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=106378.66666666667, ans=0.025
+2024-09-01 02:12:16,359 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 50, loss[loss=0.2755, simple_loss=0.2387, pruned_loss=0.1048, ctc_loss=0.2069, over 19012.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2142, pruned_loss=0.0884, ctc_loss=0.1757, over 829104.52 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:12:37,275 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.90 vs. limit=22.5
+2024-09-01 02:12:55,968 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.89 vs. limit=15.0
+2024-09-01 02:13:32,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:13:48,945 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.81 vs. limit=12.0
+2024-09-01 02:14:12,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=106592.0, ans=0.5
+2024-09-01 02:15:12,129 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 100, loss[loss=0.2289, simple_loss=0.208, pruned_loss=0.08011, ctc_loss=0.1646, over 19226.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2129, pruned_loss=0.08675, ctc_loss=0.1726, over 1474931.95 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:15:33,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=106698.66666666667, ans=0.2
+2024-09-01 02:15:39,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-09-01 02:16:08,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=106752.0, ans=0.125
+2024-09-01 02:16:21,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=106805.33333333333, ans=0.025
+2024-09-01 02:16:29,732 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=106805.33333333333, ans=0.125
+2024-09-01 02:16:36,655 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-10.pt
+2024-09-01 02:16:40,877 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 02:16:40,878 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:16:40,905 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:16:49,151 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 0, loss[loss=0.2417, simple_loss=0.2184, pruned_loss=0.0897, ctc_loss=0.1631, over 18505.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2184, pruned_loss=0.0897, ctc_loss=0.1631, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:16:49,152 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:17:12,832 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 11, validation: loss=0.211, simple_loss=0.1968, pruned_loss=0.07375, ctc_loss=0.137, over 1073944.00 frames.
+2024-09-01 02:17:12,833 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:17:43,503 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 1.989e+02 2.082e+02 2.188e+02 3.029e+02, threshold=4.165e+02, percent-clipped=0.0
+2024-09-01 02:17:56,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=106960.0, ans=0.0
+2024-09-01 02:18:38,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 02:18:40,624 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.46 vs. limit=10.0
+2024-09-01 02:18:46,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.38 vs. limit=15.0
+2024-09-01 02:18:53,621 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 50, loss[loss=0.222, simple_loss=0.203, pruned_loss=0.07789, ctc_loss=0.1607, over 19023.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2098, pruned_loss=0.08428, ctc_loss=0.1722, over 827570.26 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:18:57,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=107120.0, ans=0.0
+2024-09-01 02:19:15,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-09-01 02:19:31,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-09-01 02:20:09,994 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.08 vs. limit=22.5
+2024-09-01 02:20:35,688 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 100, loss[loss=0.2039, simple_loss=0.1895, pruned_loss=0.06603, ctc_loss=0.1611, over 19237.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2076, pruned_loss=0.08272, ctc_loss=0.1697, over 1473115.37 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:20:43,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=107386.66666666667, ans=0.0
+2024-09-01 02:20:47,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=107386.66666666667, ans=0.125
+2024-09-01 02:21:05,341 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 1.934e+02 2.032e+02 2.152e+02 3.346e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-09-01 02:21:08,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:21:30,083 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-11.pt
+2024-09-01 02:21:36,882 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 02:21:36,882 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:21:36,911 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:21:45,317 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 0, loss[loss=0.2209, simple_loss=0.1993, pruned_loss=0.08268, ctc_loss=0.1561, over 18585.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.1993, pruned_loss=0.08268, ctc_loss=0.1561, over 18585.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:21:45,318 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:22:12,224 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 12, validation: loss=0.2042, simple_loss=0.1932, pruned_loss=0.07127, ctc_loss=0.1341, over 1073944.00 frames.
+2024-09-01 02:22:12,225 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:22:23,031 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=29.75 vs. limit=22.5
+2024-09-01 02:22:35,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=107541.33333333333, ans=0.125
+2024-09-01 02:22:42,726 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=17.24 vs. limit=15.0
+2024-09-01 02:23:00,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=107594.66666666667, ans=0.2
+2024-09-01 02:23:26,984 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=107648.0, ans=0.125
+2024-09-01 02:24:27,766 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 50, loss[loss=0.209, simple_loss=0.1967, pruned_loss=0.06904, ctc_loss=0.1599, over 18986.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2002, pruned_loss=0.07591, ctc_loss=0.1597, over 829307.75 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:25:31,866 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=107914.66666666667, ans=0.125
+2024-09-01 02:25:39,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=107914.66666666667, ans=0.125
+2024-09-01 02:25:48,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-09-01 02:25:55,564 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 1.958e+02 2.051e+02 2.245e+02 3.047e+02, threshold=4.102e+02, percent-clipped=0.0
+2024-09-01 02:25:59,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107968.0, ans=0.1
+2024-09-01 02:26:05,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=108021.33333333333, ans=0.0
+2024-09-01 02:26:30,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108074.66666666667, ans=0.125
+2024-09-01 02:26:30,856 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 100, loss[loss=0.2047, simple_loss=0.1873, pruned_loss=0.07253, ctc_loss=0.1584, over 19194.00 frames. ], tot_loss[loss=0.217, simple_loss=0.1997, pruned_loss=0.07696, ctc_loss=0.1602, over 1473409.16 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:50,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=108074.66666666667, ans=0.0
+2024-09-01 02:26:57,555 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=13.07 vs. limit=15.0
+2024-09-01 02:27:31,272 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-12.pt
+2024-09-01 02:27:35,879 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 02:27:35,879 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:27:35,906 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:27:44,307 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 0, loss[loss=0.2553, simple_loss=0.2229, pruned_loss=0.1014, ctc_loss=0.1895, over 18643.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.2229, pruned_loss=0.1014, ctc_loss=0.1895, over 18643.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:27:44,308 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:27:51,667 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([7.0065, 6.7918, 6.6442, 6.7461], device='cuda:0')
+2024-09-01 02:28:07,302 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 13, validation: loss=0.1981, simple_loss=0.19, pruned_loss=0.06934, ctc_loss=0.1316, over 1073944.00 frames.
+2024-09-01 02:28:07,303 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:28:33,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 02:28:45,350 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:28:57,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=108336.0, ans=0.0
+2024-09-01 02:29:01,327 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=108336.0, ans=0.09899494936611666
+2024-09-01 02:29:47,493 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 50, loss[loss=0.2022, simple_loss=0.1926, pruned_loss=0.06639, ctc_loss=0.1605, over 19011.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.1995, pruned_loss=0.07699, ctc_loss=0.162, over 829773.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:29:48,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=108496.0, ans=0.09899494936611666
+2024-09-01 02:29:59,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=108496.0, ans=0.125
+2024-09-01 02:30:01,816 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 1.921e+02 2.017e+02 2.151e+02 2.785e+02, threshold=4.034e+02, percent-clipped=0.0
+2024-09-01 02:30:07,314 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.38 vs. limit=22.5
+2024-09-01 02:30:24,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=108549.33333333333, ans=0.025
+2024-09-01 02:30:34,528 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=108602.66666666667, ans=0.125
+2024-09-01 02:30:51,732 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108656.0, ans=0.0
+2024-09-01 02:31:01,452 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.83 vs. limit=15.0
+2024-09-01 02:31:03,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=108656.0, ans=0.0
+2024-09-01 02:31:09,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 02:31:25,778 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 100, loss[loss=0.1709, simple_loss=0.1634, pruned_loss=0.05655, ctc_loss=0.1361, over 19225.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.1975, pruned_loss=0.07526, ctc_loss=0.1582, over 1474982.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:31:26,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108762.66666666667, ans=0.1
+2024-09-01 02:31:34,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=108762.66666666667, ans=0.05
+2024-09-01 02:31:40,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 02:31:48,285 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:31:53,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:32:01,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:32:19,307 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-13.pt
+2024-09-01 02:32:50,905 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 02:32:50,905 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:32:50,932 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:33:00,707 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 0, loss[loss=0.2195, simple_loss=0.21, pruned_loss=0.07698, ctc_loss=0.1585, over 18695.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.21, pruned_loss=0.07698, ctc_loss=0.1585, over 18695.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:33:00,707 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:33:44,936 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 14, validation: loss=0.1924, simple_loss=0.1871, pruned_loss=0.06768, ctc_loss=0.1293, over 1073944.00 frames.
+2024-09-01 02:33:44,937 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:34:45,647 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.06 vs. limit=6.0
+2024-09-01 02:34:50,279 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=109024.0, ans=0.125
+2024-09-01 02:35:04,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=109024.0, ans=0.0
+2024-09-01 02:35:09,747 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 1.893e+02 1.977e+02 2.192e+02 2.916e+02, threshold=3.954e+02, percent-clipped=0.0
+2024-09-01 02:35:46,511 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.97 vs. limit=15.0
+2024-09-01 02:35:57,740 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.65 vs. limit=6.0
+2024-09-01 02:36:00,368 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=109130.66666666667, ans=0.0
+2024-09-01 02:36:12,035 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 50, loss[loss=0.1972, simple_loss=0.1935, pruned_loss=0.06447, ctc_loss=0.1529, over 18964.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.1941, pruned_loss=0.07394, ctc_loss=0.1576, over 828263.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:32,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:36:46,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:37:18,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109237.33333333333, ans=0.1
+2024-09-01 02:37:22,008 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:37:51,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=109344.0, ans=0.125
+2024-09-01 02:38:02,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=109344.0, ans=22.5
+2024-09-01 02:38:12,691 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.05 vs. limit=10.0
+2024-09-01 02:38:41,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=109450.66666666667, ans=0.0
+2024-09-01 02:38:42,947 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 100, loss[loss=0.1876, simple_loss=0.1769, pruned_loss=0.06694, ctc_loss=0.1447, over 19207.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.1935, pruned_loss=0.07294, ctc_loss=0.1541, over 1474261.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:21,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109504.0, ans=0.125
+2024-09-01 02:39:29,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=109557.33333333333, ans=0.125
+2024-09-01 02:39:29,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=109557.33333333333, ans=0.05
+2024-09-01 02:39:50,920 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 1.903e+02 1.972e+02 2.079e+02 2.713e+02, threshold=3.943e+02, percent-clipped=0.0
+2024-09-01 02:39:50,964 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-14.pt
+2024-09-01 02:39:55,362 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 02:39:55,363 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:39:55,390 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:40:03,794 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 0, loss[loss=0.2503, simple_loss=0.2338, pruned_loss=0.09422, ctc_loss=0.1799, over 18509.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.2338, pruned_loss=0.09422, ctc_loss=0.1799, over 18509.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:40:03,795 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:40:34,881 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 15, validation: loss=0.1871, simple_loss=0.1844, pruned_loss=0.06629, ctc_loss=0.1271, over 1073944.00 frames.
+2024-09-01 02:40:34,881 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:40:40,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=109605.33333333333, ans=0.125
+2024-09-01 02:40:51,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=109605.33333333333, ans=10.0
+2024-09-01 02:40:55,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=109605.33333333333, ans=0.125
+2024-09-01 02:41:56,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=109712.0, ans=0.0
+2024-09-01 02:42:09,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=109765.33333333333, ans=0.125
+2024-09-01 02:42:17,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=109765.33333333333, ans=0.0
+2024-09-01 02:42:31,048 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=109818.66666666667, ans=0.0
+2024-09-01 02:42:48,261 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=109818.66666666667, ans=0.2
+2024-09-01 02:43:06,231 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 50, loss[loss=0.1895, simple_loss=0.1866, pruned_loss=0.06509, ctc_loss=0.143, over 19011.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.1898, pruned_loss=0.07055, ctc_loss=0.1526, over 827942.50 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:43:21,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109872.0, ans=0.1
+2024-09-01 02:44:02,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=109925.33333333333, ans=0.125
+2024-09-01 02:44:02,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=109925.33333333333, ans=0.125
+2024-09-01 02:45:16,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110085.33333333333, ans=0.1
+2024-09-01 02:45:26,968 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 1.886e+02 2.042e+02 2.162e+02 2.644e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-09-01 02:45:29,588 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 100, loss[loss=0.1667, simple_loss=0.1683, pruned_loss=0.0525, ctc_loss=0.1414, over 19251.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.1885, pruned_loss=0.07, ctc_loss=0.1499, over 1473903.80 frames. ], batch size: 144, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:46:37,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=110245.33333333333, ans=0.125
+2024-09-01 02:46:46,673 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-15.pt
+2024-09-01 02:46:52,333 INFO [dysarthria_finetune.py:1435] (0/4) (1124794368, 34072559616)
+2024-09-01 02:46:52,333 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:46:52,362 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:47:00,713 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 0, loss[loss=0.2209, simple_loss=0.2124, pruned_loss=0.08209, ctc_loss=0.1577, over 18729.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2124, pruned_loss=0.08209, ctc_loss=0.1577, over 18729.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:47:00,714 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:47:23,694 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 16, validation: loss=0.182, simple_loss=0.1819, pruned_loss=0.06496, ctc_loss=0.1251, over 1073944.00 frames.
+2024-09-01 02:47:23,694 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:47:37,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=17.32 vs. limit=15.0
+2024-09-01 02:47:46,088 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=17.85 vs. limit=15.0
+2024-09-01 02:48:19,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=110400.0, ans=0.125
+2024-09-01 02:48:43,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=110506.66666666667, ans=0.025
+2024-09-01 02:49:04,112 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 50, loss[loss=0.1632, simple_loss=0.1731, pruned_loss=0.05215, ctc_loss=0.1206, over 18988.00 frames. ], tot_loss[loss=0.196, simple_loss=0.1891, pruned_loss=0.07039, ctc_loss=0.1519, over 828175.61 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:49:05,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.21 vs. limit=22.5
+2024-09-01 02:49:43,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=110666.66666666667, ans=0.025
+2024-09-01 02:49:43,768 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.44 vs. limit=15.0
+2024-09-01 02:49:44,011 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 1.879e+02 1.996e+02 2.191e+02 2.692e+02, threshold=3.992e+02, percent-clipped=0.0
+2024-09-01 02:49:45,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110666.66666666667, ans=0.1
+2024-09-01 02:50:29,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=110773.33333333333, ans=0.2
+2024-09-01 02:50:31,911 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.12 vs. limit=15.0
+2024-09-01 02:50:39,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 02:50:42,118 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 100, loss[loss=0.1836, simple_loss=0.1767, pruned_loss=0.06443, ctc_loss=0.154, over 19270.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.1868, pruned_loss=0.06936, ctc_loss=0.1489, over 1473314.28 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:14,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110880.0, ans=0.1
+2024-09-01 02:51:35,763 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-16.pt
+2024-09-01 02:51:40,126 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 02:51:40,126 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:51:40,156 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 02:51:50,596 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 0, loss[loss=0.207, simple_loss=0.1938, pruned_loss=0.07901, ctc_loss=0.1552, over 18739.00 frames. ], tot_loss[loss=0.207, simple_loss=0.1938, pruned_loss=0.07901, ctc_loss=0.1552, over 18739.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:50,597 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:51:59,247 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.0.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.2399, 5.1477, 5.1967, 5.1140], device='cuda:0')
+2024-09-01 02:52:13,655 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 17, validation: loss=0.1784, simple_loss=0.1796, pruned_loss=0.06394, ctc_loss=0.1232, over 1073944.00 frames.
+2024-09-01 02:52:13,655 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 02:52:31,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=110981.33333333333, ans=0.0
+2024-09-01 02:52:38,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=111034.66666666667, ans=0.05
+2024-09-01 02:52:52,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=111034.66666666667, ans=0.2
+2024-09-01 02:52:52,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 02:53:21,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=111088.0, ans=0.0
+2024-09-01 02:53:59,382 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 1.870e+02 1.982e+02 2.091e+02 2.808e+02, threshold=3.964e+02, percent-clipped=0.0
+2024-09-01 02:54:03,905 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.54 vs. limit=10.0
+2024-09-01 02:54:15,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=111194.66666666667, ans=0.2
+2024-09-01 02:54:24,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=111194.66666666667, ans=0.2
+2024-09-01 02:54:35,608 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 50, loss[loss=0.1962, simple_loss=0.1953, pruned_loss=0.06817, ctc_loss=0.152, over 19028.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.1837, pruned_loss=0.06735, ctc_loss=0.1462, over 827378.67 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:54:38,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111248.0, ans=0.1
+2024-09-01 02:56:18,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=111354.66666666667, ans=0.0
+2024-09-01 02:57:05,490 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.91 vs. limit=15.0
+2024-09-01 02:57:16,817 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111408.0, ans=0.1
+2024-09-01 02:57:24,324 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.35 vs. limit=12.0
+2024-09-01 02:58:16,892 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 100, loss[loss=0.1594, simple_loss=0.1594, pruned_loss=0.05394, ctc_loss=0.129, over 19218.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.1848, pruned_loss=0.06842, ctc_loss=0.1472, over 1473529.96 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:58:35,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.97 vs. limit=15.0
+2024-09-01 02:58:59,921 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:59:04,503 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.94 vs. limit=15.0
+2024-09-01 03:00:07,747 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-17.pt
+2024-09-01 03:00:12,514 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 03:00:12,514 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:00:12,540 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 03:00:21,385 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 0, loss[loss=0.204, simple_loss=0.1988, pruned_loss=0.07543, ctc_loss=0.1456, over 18538.00 frames. ], tot_loss[loss=0.204, simple_loss=0.1988, pruned_loss=0.07543, ctc_loss=0.1456, over 18538.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:00:21,386 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:01:08,459 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 18, validation: loss=0.1758, simple_loss=0.1773, pruned_loss=0.06291, ctc_loss=0.1213, over 1073944.00 frames.
+2024-09-01 03:01:08,459 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 03:01:40,353 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.79 vs. limit=6.0
+2024-09-01 03:02:04,459 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 1.863e+02 1.965e+02 2.122e+02 2.833e+02, threshold=3.929e+02, percent-clipped=0.0
+2024-09-01 03:02:06,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.37 vs. limit=10.0
+2024-09-01 03:02:11,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=111722.66666666667, ans=0.125
+2024-09-01 03:02:11,685 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.68 vs. limit=15.0
+2024-09-01 03:04:43,909 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.38 vs. limit=15.0
+2024-09-01 03:04:48,811 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.35 vs. limit=15.0
+2024-09-01 03:05:51,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=111882.66666666667, ans=0.0
+2024-09-01 03:05:55,549 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 50, loss[loss=0.1689, simple_loss=0.1749, pruned_loss=0.05463, ctc_loss=0.1339, over 18998.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.1827, pruned_loss=0.06641, ctc_loss=0.1443, over 828205.61 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:06:04,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=111936.0, ans=0.125
+2024-09-01 03:07:21,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111989.33333333333, ans=0.125
+2024-09-01 03:08:50,294 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=112042.66666666667, ans=0.2
+2024-09-01 03:09:05,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=112096.0, ans=0.2
+2024-09-01 03:09:40,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=112149.33333333333, ans=0.125
+2024-09-01 03:11:05,718 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 100, loss[loss=0.1452, simple_loss=0.1576, pruned_loss=0.04325, ctc_loss=0.1158, over 19294.00 frames. ], tot_loss[loss=0.184, simple_loss=0.1813, pruned_loss=0.06508, ctc_loss=0.1413, over 1473690.24 frames. ], batch size: 144, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:11:50,913 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.87 vs. limit=15.0
+2024-09-01 03:11:51,851 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.826e+02 1.931e+02 2.035e+02 3.279e+02, threshold=3.861e+02, percent-clipped=0.0
+2024-09-01 03:12:03,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112256.0, ans=0.0
+2024-09-01 03:12:22,672 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=112309.33333333333, ans=0.2
+2024-09-01 03:12:26,089 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-18.pt
+2024-09-01 03:12:30,500 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 03:12:30,500 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:12:30,529 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 03:12:38,680 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 0, loss[loss=0.2037, simple_loss=0.2001, pruned_loss=0.07511, ctc_loss=0.1428, over 18598.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2001, pruned_loss=0.07511, ctc_loss=0.1428, over 18598.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:12:38,681 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:13:02,312 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 19, validation: loss=0.1735, simple_loss=0.1751, pruned_loss=0.06201, ctc_loss=0.1194, over 1073944.00 frames.
+2024-09-01 03:13:02,313 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 03:13:13,711 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.30 vs. limit=15.0
+2024-09-01 03:13:25,224 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:13:57,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=112458.66666666667, ans=0.0
+2024-09-01 03:13:59,466 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:14:34,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112565.33333333333, ans=0.1
+2024-09-01 03:14:35,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 03:14:46,403 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.86 vs. limit=22.5
+2024-09-01 03:14:48,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=112618.66666666667, ans=0.0
+2024-09-01 03:14:48,750 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 50, loss[loss=0.1746, simple_loss=0.1724, pruned_loss=0.06082, ctc_loss=0.1376, over 19038.00 frames. ], tot_loss[loss=0.1803, simple_loss=0.1771, pruned_loss=0.06374, ctc_loss=0.1402, over 827203.46 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:15:06,163 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112618.66666666667, ans=0.1
+2024-09-01 03:15:22,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-09-01 03:15:28,690 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.53 vs. limit=15.0
+2024-09-01 03:16:00,811 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 1.859e+02 1.957e+02 2.051e+02 3.574e+02, threshold=3.914e+02, percent-clipped=0.0
+2024-09-01 03:16:12,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.whiten.whitening_limit, batch_count=112832.0, ans=12.0
+2024-09-01 03:16:28,720 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 100, loss[loss=0.1598, simple_loss=0.1632, pruned_loss=0.05302, ctc_loss=0.1259, over 19274.00 frames. ], tot_loss[loss=0.1808, simple_loss=0.1768, pruned_loss=0.0643, ctc_loss=0.1406, over 1472434.33 frames. ], batch size: 144, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:16:30,367 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.43 vs. limit=22.5
+2024-09-01 03:17:01,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112938.66666666667, ans=0.0
+2024-09-01 03:17:22,977 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-19.pt
+2024-09-01 03:17:28,914 INFO [dysarthria_finetune.py:1435] (0/4) (1126891520, 34072559616)
+2024-09-01 03:17:28,914 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:17:28,942 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 03:17:37,119 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 0, loss[loss=0.1694, simple_loss=0.1732, pruned_loss=0.05904, ctc_loss=0.1189, over 18599.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.1732, pruned_loss=0.05904, ctc_loss=0.1189, over 18599.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:17:37,120 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:18:00,782 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 20, validation: loss=0.1713, simple_loss=0.1732, pruned_loss=0.06117, ctc_loss=0.1175, over 1073944.00 frames.
+2024-09-01 03:18:00,783 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26683MB
+2024-09-01 03:19:07,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113200.0, ans=0.1
+2024-09-01 03:19:09,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=113200.0, ans=0.025
+2024-09-01 03:19:11,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=113200.0, ans=0.125
+2024-09-01 03:19:32,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=113253.33333333333, ans=0.2
+2024-09-01 03:19:39,006 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 50, loss[loss=0.1865, simple_loss=0.1815, pruned_loss=0.06666, ctc_loss=0.1454, over 18985.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.1783, pruned_loss=0.06582, ctc_loss=0.1418, over 828130.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:19:42,340 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.82 vs. limit=15.0
+2024-09-01 03:19:50,257 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.25 vs. limit=6.0
+2024-09-01 03:19:52,480 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 1.810e+02 1.894e+02 2.049e+02 3.111e+02, threshold=3.788e+02, percent-clipped=0.0
+2024-09-01 03:20:47,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=113466.66666666667, ans=0.2
+2024-09-01 03:20:56,452 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.04 vs. limit=15.0
+2024-09-01 03:20:58,096 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=113520.0, ans=0.125
+2024-09-01 03:21:15,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-09-01 03:21:15,358 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.17 vs. limit=22.5
+2024-09-01 03:21:15,875 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 100, loss[loss=0.1754, simple_loss=0.1686, pruned_loss=0.0633, ctc_loss=0.1389, over 19321.00 frames. ], tot_loss[loss=0.18, simple_loss=0.1764, pruned_loss=0.06423, ctc_loss=0.1377, over 1472900.97 frames. ], batch size: 144, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:21:22,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-09-01 03:21:28,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=113573.33333333333, ans=0.125
+2024-09-01 03:21:41,713 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.13 vs. limit=15.0
+2024-09-01 03:21:50,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 03:22:08,714 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune/epoch-20.pt
+2024-09-01 03:22:13,061 INFO [dysarthria_finetune.py:1435] (0/4) (1128988672, 34072559616)
+2024-09-01 03:22:13,061 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:22:13,088 INFO [dysarthria_finetune.py:1440] (0/4) (29576855552, 34072559616)
+2024-09-01 03:22:13,089 INFO [dysarthria_finetune.py:1442] (0/4) Done!
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-1 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-1
new file mode 100644
index 0000000000000000000000000000000000000000..baaebd12d9c1c96c0b1a04d245a8b648b78c9576
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-1
@@ -0,0 +1,543 @@
+2024-08-31 22:13:17,922 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-31 22:13:17,923 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-31 22:13:17,923 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-31 22:13:18,699 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-31 22:13:18,699 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-31 22:13:18,725 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:13:18,725 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-31 22:13:21,118 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65805511
+2024-08-31 22:13:23,245 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 22:13:33,286 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-31 22:14:37,197 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-31 22:14:37,263 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-31 22:14:55,323 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-31 22:14:56,287 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-31 22:14:56,288 INFO [dysarthria_asr_datamodule.py:501] (1/4) About to get dev cuts
+2024-08-31 22:14:56,477 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-31 22:14:57,473 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-31 22:14:57,473 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:16:23,768 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=18.54 vs. limit=7.5
+2024-08-31 22:16:31,021 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=17.58 vs. limit=7.5
+2024-08-31 22:16:34,170 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:16:36,238 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:17:53,351 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:17:55,319 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:19:46,278 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:19:48,452 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:20:26,655 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.3929, simple_loss=0.3185, pruned_loss=0.1863, ctc_loss=0.2785, over 18549.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.3185, pruned_loss=0.1863, ctc_loss=0.2785, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:26,655 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-31 22:32:57,020 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.373, simple_loss=0.3046, pruned_loss=0.1755, ctc_loss=0.2544, over 1073944.00 frames.
+2024-08-31 22:32:57,063 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-31 22:34:49,196 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=21.81 vs. limit=22.5
+2024-08-31 22:36:04,116 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=17.03 vs. limit=12.0
+2024-08-31 22:48:03,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-31 22:51:26,991 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 9.923e+02 1.157e+03 1.203e+03 1.280e+03 1.380e+03, threshold=4.812e+03, percent-clipped=0.0
+2024-08-31 22:55:46,444 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=36.70 vs. limit=15.0
+2024-08-31 23:01:51,987 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=25.04 vs. limit=15.0
+2024-08-31 23:03:06,378 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 8.113e+02 1.083e+03 1.198e+03 1.280e+03 1.431e+03, threshold=4.794e+03, percent-clipped=0.0
+2024-08-31 23:16:33,798 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=34.78 vs. limit=22.5
+2024-08-31 23:20:08,819 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.95 vs. limit=15.0
+2024-08-31 23:29:32,325 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 6.285e+02 9.052e+02 1.061e+03 1.198e+03 1.431e+03, threshold=4.243e+03, percent-clipped=0.0
+2024-08-31 23:46:25,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-31 23:47:34,905 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=24.72 vs. limit=15.0
+2024-08-31 23:47:34,929 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=20.39 vs. limit=15.0
+2024-08-31 23:49:30,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=100266.66666666667, ans=0.05
+2024-08-31 23:49:33,571 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.4062, simple_loss=0.327, pruned_loss=0.191, ctc_loss=0.2993, over 19042.00 frames. ], tot_loss[loss=0.4112, simple_loss=0.3319, pruned_loss=0.1999, ctc_loss=0.2955, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:52:55,700 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.86 vs. limit=6.0
+2024-08-31 23:53:24,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-09-01 00:08:29,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=100373.33333333333, ans=0.125
+2024-09-01 00:09:15,716 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=100373.33333333333, ans=0.2
+2024-09-01 00:11:15,340 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.54 vs. limit=6.0
+2024-09-01 00:16:02,257 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.94 vs. limit=15.0
+2024-09-01 00:18:30,687 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.05 vs. limit=6.0
+2024-09-01 00:18:31,037 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.958e+02 6.817e+02 8.321e+02 1.009e+03 1.431e+03, threshold=1.664e+03, percent-clipped=0.0
+2024-09-01 00:18:31,086 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.3915, simple_loss=0.3186, pruned_loss=0.1807, ctc_loss=0.2733, over 19093.00 frames. ], tot_loss[loss=0.3948, simple_loss=0.3199, pruned_loss=0.1879, ctc_loss=0.2794, over 1470684.91 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:19:42,902 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=22.77 vs. limit=15.0
+2024-09-01 00:27:43,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100640.0, ans=0.1
+2024-09-01 00:27:57,066 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.96 vs. limit=22.5
+2024-09-01 00:28:35,455 INFO [dysarthria_finetune.py:1435] (1/4) (13953073152, 34072559616)
+2024-09-01 00:28:35,455 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 00:28:35,511 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 00:29:13,534 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 0, loss[loss=0.3314, simple_loss=0.2697, pruned_loss=0.1393, ctc_loss=0.2423, over 18746.00 frames. ], tot_loss[loss=0.3314, simple_loss=0.2697, pruned_loss=0.1393, ctc_loss=0.2423, over 18746.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:29:13,534 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 00:34:07,572 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 2, validation: loss=0.3353, simple_loss=0.2773, pruned_loss=0.1482, ctc_loss=0.2175, over 1073944.00 frames.
+2024-09-01 00:34:07,573 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 00:35:35,412 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.59 vs. limit=15.0
+2024-09-01 00:51:25,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=100789.33333333333, ans=0.125
+2024-09-01 00:52:52,155 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=12.44 vs. limit=12.0
+2024-09-01 00:55:13,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=100842.66666666667, ans=0.125
+2024-09-01 00:57:01,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=100896.0, ans=0.09899494936611666
+2024-09-01 00:59:42,909 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 50, loss[loss=0.4006, simple_loss=0.3255, pruned_loss=0.1851, ctc_loss=0.2811, over 19071.00 frames. ], tot_loss[loss=0.3719, simple_loss=0.3032, pruned_loss=0.1698, ctc_loss=0.2585, over 827854.65 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 01:00:10,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100949.33333333333, ans=0.1
+2024-09-01 01:03:11,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100949.33333333333, ans=0.1
+2024-09-01 01:08:04,423 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.183e+02 4.403e+02 5.126e+02 5.917e+02 6.888e+02, threshold=1.025e+03, percent-clipped=0.0
+2024-09-01 01:08:14,549 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=17.19 vs. limit=15.0
+2024-09-01 01:09:14,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=101056.0, ans=0.2
+2024-09-01 01:10:03,409 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-09-01 01:10:54,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=101109.33333333333, ans=0.025
+2024-09-01 01:13:05,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=101162.66666666667, ans=0.125
+2024-09-01 01:14:19,328 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 100, loss[loss=0.3548, simple_loss=0.2915, pruned_loss=0.1566, ctc_loss=0.2409, over 19090.00 frames. ], tot_loss[loss=0.362, simple_loss=0.2959, pruned_loss=0.1629, ctc_loss=0.2501, over 1472213.55 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 01:15:26,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101216.0, ans=0.1
+2024-09-01 01:18:34,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=101322.66666666667, ans=0.125
+2024-09-01 01:18:48,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=101322.66666666667, ans=0.125
+2024-09-01 01:19:37,915 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.92 vs. limit=15.0
+2024-09-01 01:20:51,904 INFO [dysarthria_finetune.py:1435] (1/4) (1095434240, 34072559616)
+2024-09-01 01:20:51,905 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:20:51,973 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 01:20:59,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-09-01 01:21:21,466 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 0, loss[loss=0.3737, simple_loss=0.3051, pruned_loss=0.1704, ctc_loss=0.2575, over 18511.00 frames. ], tot_loss[loss=0.3737, simple_loss=0.3051, pruned_loss=0.1704, ctc_loss=0.2575, over 18511.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:21:21,466 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:21:44,699 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 3, validation: loss=0.309, simple_loss=0.2588, pruned_loss=0.13, ctc_loss=0.1938, over 1073944.00 frames.
+2024-09-01 01:21:44,700 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 01:22:18,912 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=101370.66666666667, ans=0.125
+2024-09-01 01:22:28,118 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=101370.66666666667, ans=0.2
+2024-09-01 01:22:38,673 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:22:55,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=101424.0, ans=0.0
+2024-09-01 01:23:20,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-09-01 01:23:38,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=101477.33333333333, ans=0.125
+2024-09-01 01:23:38,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101477.33333333333, ans=0.1
+2024-09-01 01:23:47,000 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-09-01 01:24:26,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=101584.0, ans=0.125
+2024-09-01 01:24:27,010 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.669e+02 3.351e+02 3.834e+02 4.204e+02 5.264e+02, threshold=7.667e+02, percent-clipped=0.0
+2024-09-01 01:24:44,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=101584.0, ans=0.2
+2024-09-01 01:24:49,028 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 50, loss[loss=0.343, simple_loss=0.2836, pruned_loss=0.1457, ctc_loss=0.2326, over 19005.00 frames. ], tot_loss[loss=0.3475, simple_loss=0.2851, pruned_loss=0.1519, ctc_loss=0.2398, over 828905.42 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:24:50,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=101637.33333333333, ans=0.2
+2024-09-01 01:24:53,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=101637.33333333333, ans=0.0
+2024-09-01 01:25:42,162 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.50 vs. limit=15.0
+2024-09-01 01:26:05,555 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.06 vs. limit=6.0
+2024-09-01 01:26:50,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=101850.66666666667, ans=0.0
+2024-09-01 01:27:00,710 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 100, loss[loss=0.2981, simple_loss=0.2518, pruned_loss=0.1191, ctc_loss=0.1887, over 19133.00 frames. ], tot_loss[loss=0.3348, simple_loss=0.2759, pruned_loss=0.1444, ctc_loss=0.2287, over 1474266.40 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:29:20,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102010.66666666667, ans=0.125
+2024-09-01 01:29:32,083 INFO [dysarthria_finetune.py:1435] (1/4) (1168834560, 34072559616)
+2024-09-01 01:29:32,083 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:29:32,147 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 01:29:45,319 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 0, loss[loss=0.2882, simple_loss=0.2425, pruned_loss=0.1176, ctc_loss=0.1844, over 18466.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.2425, pruned_loss=0.1176, ctc_loss=0.1844, over 18466.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:29:45,319 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:30:08,497 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 4, validation: loss=0.2887, simple_loss=0.2447, pruned_loss=0.1169, ctc_loss=0.1781, over 1073944.00 frames.
+2024-09-01 01:30:08,498 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 01:30:42,266 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.378e+02 2.838e+02 3.147e+02 3.460e+02 5.318e+02, threshold=6.294e+02, percent-clipped=0.0
+2024-09-01 01:30:49,770 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=102106.66666666667, ans=0.125
+2024-09-01 01:31:02,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:31:08,919 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:31:27,106 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=102213.33333333333, ans=0.125
+2024-09-01 01:31:40,824 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.82 vs. limit=22.5
+2024-09-01 01:32:01,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=102320.0, ans=0.125
+2024-09-01 01:32:02,221 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 50, loss[loss=0.2984, simple_loss=0.2506, pruned_loss=0.1259, ctc_loss=0.1891, over 18961.00 frames. ], tot_loss[loss=0.3174, simple_loss=0.2639, pruned_loss=0.1315, ctc_loss=0.2153, over 827373.05 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:32:11,562 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=14.39 vs. limit=12.0
+2024-09-01 01:32:19,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=102320.0, ans=0.0
+2024-09-01 01:32:53,047 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.04 vs. limit=15.0
+2024-09-01 01:32:54,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:32:56,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-09-01 01:33:22,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102480.0, ans=0.0
+2024-09-01 01:35:51,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102480.0, ans=0.125
+2024-09-01 01:35:51,609 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.08 vs. limit=15.0
+2024-09-01 01:37:33,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=102533.33333333333, ans=0.025
+2024-09-01 01:37:40,028 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 100, loss[loss=0.3159, simple_loss=0.2645, pruned_loss=0.1296, ctc_loss=0.2111, over 19038.00 frames. ], tot_loss[loss=0.3123, simple_loss=0.2598, pruned_loss=0.1296, ctc_loss=0.2125, over 1472261.06 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:37:59,639 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.78 vs. limit=22.5
+2024-09-01 01:38:03,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=102640.0, ans=0.025
+2024-09-01 01:38:09,924 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 2.526e+02 2.751e+02 3.040e+02 4.636e+02, threshold=5.501e+02, percent-clipped=0.0
+2024-09-01 01:38:25,634 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.79 vs. limit=15.0
+2024-09-01 01:38:35,851 INFO [dysarthria_finetune.py:1435] (1/4) (2173370368, 34072559616)
+2024-09-01 01:38:35,851 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:38:35,919 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 01:38:49,739 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 0, loss[loss=0.2775, simple_loss=0.2361, pruned_loss=0.1081, ctc_loss=0.1796, over 18670.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.2361, pruned_loss=0.1081, ctc_loss=0.1796, over 18670.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:38:49,739 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:39:30,980 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 5, validation: loss=0.2717, simple_loss=0.233, pruned_loss=0.1066, ctc_loss=0.1665, over 1073944.00 frames.
+2024-09-01 01:39:30,981 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 01:40:54,455 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=102741.33333333333, ans=0.125
+2024-09-01 01:41:17,680 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=102794.66666666667, ans=0.025
+2024-09-01 01:42:51,067 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=21.16 vs. limit=15.0
+2024-09-01 01:43:09,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=102901.33333333333, ans=0.0
+2024-09-01 01:43:55,232 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.02 vs. limit=22.5
+2024-09-01 01:43:55,509 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 50, loss[loss=0.2851, simple_loss=0.241, pruned_loss=0.116, ctc_loss=0.185, over 18968.00 frames. ], tot_loss[loss=0.2983, simple_loss=0.2505, pruned_loss=0.1195, ctc_loss=0.2029, over 828630.89 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:44:04,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=103008.0, ans=0.0
+2024-09-01 01:44:45,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103061.33333333333, ans=0.1
+2024-09-01 01:44:49,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103061.33333333333, ans=0.1
+2024-09-01 01:45:12,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:45:56,496 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103168.0, ans=0.125
+2024-09-01 01:46:04,699 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.130e+02 2.382e+02 2.524e+02 2.770e+02 4.371e+02, threshold=5.047e+02, percent-clipped=0.0
+2024-09-01 01:46:25,375 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=24.09 vs. limit=15.0
+2024-09-01 01:46:32,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-09-01 01:46:52,901 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 100, loss[loss=0.2655, simple_loss=0.2286, pruned_loss=0.1001, ctc_loss=0.172, over 19157.00 frames. ], tot_loss[loss=0.2929, simple_loss=0.2463, pruned_loss=0.1174, ctc_loss=0.1992, over 1473409.40 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:47:10,261 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:48:04,626 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.31 vs. limit=6.0
+2024-09-01 01:48:26,057 INFO [dysarthria_finetune.py:1435] (1/4) (434831360, 34072559616)
+2024-09-01 01:48:26,057 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:48:26,120 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 01:48:42,001 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 0, loss[loss=0.3156, simple_loss=0.2616, pruned_loss=0.129, ctc_loss=0.2293, over 18435.00 frames. ], tot_loss[loss=0.3156, simple_loss=0.2616, pruned_loss=0.129, ctc_loss=0.2293, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:48:42,002 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:49:05,146 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 6, validation: loss=0.2578, simple_loss=0.2238, pruned_loss=0.09861, ctc_loss=0.1582, over 1073944.00 frames.
+2024-09-01 01:49:05,146 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 01:50:06,539 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103530.66666666667, ans=0.1
+2024-09-01 01:50:08,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103584.0, ans=0.1
+2024-09-01 01:50:26,700 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103584.0, ans=0.125
+2024-09-01 01:50:44,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=103637.33333333333, ans=0.04949747468305833
+2024-09-01 01:50:52,021 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 50, loss[loss=0.2631, simple_loss=0.2241, pruned_loss=0.1011, ctc_loss=0.1811, over 19041.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.2408, pruned_loss=0.1121, ctc_loss=0.1968, over 827399.35 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:51:08,004 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 2.277e+02 2.375e+02 2.614e+02 3.891e+02, threshold=4.750e+02, percent-clipped=0.0
+2024-09-01 01:51:15,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=103744.0, ans=0.025
+2024-09-01 01:51:30,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103744.0, ans=0.1
+2024-09-01 01:52:10,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=103850.66666666667, ans=0.025
+2024-09-01 01:52:34,157 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 100, loss[loss=0.248, simple_loss=0.2116, pruned_loss=0.09546, ctc_loss=0.1716, over 19066.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.2364, pruned_loss=0.1088, ctc_loss=0.1905, over 1471849.14 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:53:08,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 01:53:08,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 01:53:17,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104010.66666666667, ans=0.1
+2024-09-01 01:53:21,709 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.47 vs. limit=15.0
+2024-09-01 01:53:34,203 INFO [dysarthria_finetune.py:1435] (1/4) (2053832704, 34072559616)
+2024-09-01 01:53:34,204 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:53:34,286 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 01:53:47,074 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 0, loss[loss=0.3082, simple_loss=0.2525, pruned_loss=0.132, ctc_loss=0.2269, over 18532.00 frames. ], tot_loss[loss=0.3082, simple_loss=0.2525, pruned_loss=0.132, ctc_loss=0.2269, over 18532.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:53:47,074 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:54:10,657 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 7, validation: loss=0.2464, simple_loss=0.2165, pruned_loss=0.09214, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 01:54:10,658 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 01:55:12,009 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.149e+02 2.268e+02 2.457e+02 3.821e+02, threshold=4.535e+02, percent-clipped=0.0
+2024-09-01 01:55:29,927 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.83 vs. limit=15.0
+2024-09-01 01:55:33,470 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=104320.0, ans=0.125
+2024-09-01 01:55:53,893 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 50, loss[loss=0.265, simple_loss=0.2272, pruned_loss=0.1011, ctc_loss=0.185, over 19096.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.2309, pruned_loss=0.1032, ctc_loss=0.188, over 827950.42 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:56:10,785 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:56:30,513 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.51 vs. limit=15.0
+2024-09-01 01:56:42,312 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.31 vs. limit=15.0
+2024-09-01 01:56:44,255 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.22 vs. limit=15.0
+2024-09-01 01:57:06,869 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=104533.33333333333, ans=0.125
+2024-09-01 01:57:08,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=104533.33333333333, ans=0.125
+2024-09-01 01:57:30,842 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 100, loss[loss=0.2526, simple_loss=0.2212, pruned_loss=0.09285, ctc_loss=0.1694, over 19105.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.2277, pruned_loss=0.1005, ctc_loss=0.1834, over 1472811.51 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:43,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=104640.0, ans=0.2
+2024-09-01 01:57:51,435 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=13.90 vs. limit=12.0
+2024-09-01 01:58:22,477 INFO [dysarthria_finetune.py:1435] (1/4) (12868845568, 34072559616)
+2024-09-01 01:58:22,477 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 01:58:22,508 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 01:58:35,287 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 0, loss[loss=0.2397, simple_loss=0.2111, pruned_loss=0.08677, ctc_loss=0.1616, over 18679.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.2111, pruned_loss=0.08677, ctc_loss=0.1616, over 18679.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:58:35,288 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 01:58:58,374 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 8, validation: loss=0.236, simple_loss=0.2103, pruned_loss=0.08624, ctc_loss=0.1474, over 1073944.00 frames.
+2024-09-01 01:58:58,374 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 01:59:02,417 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.080e+02 2.182e+02 2.331e+02 3.634e+02, threshold=4.365e+02, percent-clipped=0.0
+2024-09-01 01:59:23,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=104842.66666666667, ans=0.125
+2024-09-01 02:00:36,450 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 50, loss[loss=0.2318, simple_loss=0.2062, pruned_loss=0.08328, ctc_loss=0.1528, over 19009.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.2247, pruned_loss=0.09848, ctc_loss=0.1839, over 829068.39 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:01:05,473 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=12.00 vs. limit=12.0
+2024-09-01 02:01:20,992 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.23 vs. limit=12.0
+2024-09-01 02:01:26,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=105162.66666666667, ans=0.0
+2024-09-01 02:02:01,213 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=105269.33333333333, ans=0.09899494936611666
+2024-09-01 02:02:09,093 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.55 vs. limit=10.0
+2024-09-01 02:02:13,404 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 100, loss[loss=0.2534, simple_loss=0.2206, pruned_loss=0.09383, ctc_loss=0.182, over 19109.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.2224, pruned_loss=0.09558, ctc_loss=0.1793, over 1473116.98 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:02:17,363 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.064e+02 2.191e+02 2.358e+02 3.385e+02, threshold=4.381e+02, percent-clipped=0.0
+2024-09-01 02:02:29,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105322.66666666667, ans=0.125
+2024-09-01 02:02:41,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=105376.0, ans=0.0
+2024-09-01 02:02:57,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=105429.33333333333, ans=0.0
+2024-09-01 02:03:07,409 INFO [dysarthria_finetune.py:1435] (1/4) (774569984, 34072559616)
+2024-09-01 02:03:07,410 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:03:07,476 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:03:20,944 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 0, loss[loss=0.2506, simple_loss=0.2184, pruned_loss=0.09615, ctc_loss=0.1711, over 18520.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.2184, pruned_loss=0.09615, ctc_loss=0.1711, over 18520.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:20,944 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:03:44,108 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 9, validation: loss=0.2267, simple_loss=0.2052, pruned_loss=0.08107, ctc_loss=0.1434, over 1073944.00 frames.
+2024-09-01 02:03:44,109 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 02:03:52,827 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.99 vs. limit=22.5
+2024-09-01 02:04:05,444 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.09 vs. limit=6.0
+2024-09-01 02:04:46,182 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.74 vs. limit=15.0
+2024-09-01 02:04:48,550 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.77 vs. limit=6.0
+2024-09-01 02:04:50,700 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.61 vs. limit=10.0
+2024-09-01 02:04:52,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:05:06,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=105637.33333333333, ans=0.05
+2024-09-01 02:05:12,658 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:05:29,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=105690.66666666667, ans=0.2
+2024-09-01 02:05:38,307 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 50, loss[loss=0.2754, simple_loss=0.2433, pruned_loss=0.1011, ctc_loss=0.1925, over 19008.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.2167, pruned_loss=0.09097, ctc_loss=0.1767, over 827563.28 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:54,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=105744.0, ans=0.025
+2024-09-01 02:06:08,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=105797.33333333333, ans=0.07
+2024-09-01 02:06:22,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105797.33333333333, ans=0.1
+2024-09-01 02:06:31,765 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.077e+02 2.184e+02 2.316e+02 3.584e+02, threshold=4.367e+02, percent-clipped=0.0
+2024-09-01 02:07:32,671 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 100, loss[loss=0.246, simple_loss=0.2213, pruned_loss=0.08692, ctc_loss=0.1715, over 19113.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.2162, pruned_loss=0.08993, ctc_loss=0.1746, over 1473118.75 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:07:47,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:07:54,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:08:09,213 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.39 vs. limit=15.0
+2024-09-01 02:08:34,530 INFO [dysarthria_finetune.py:1435] (1/4) (2156593152, 34072559616)
+2024-09-01 02:08:34,611 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:08:34,680 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:08:44,810 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.05 vs. limit=10.0
+2024-09-01 02:08:48,132 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 0, loss[loss=0.2489, simple_loss=0.2221, pruned_loss=0.09378, ctc_loss=0.1645, over 18522.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.2221, pruned_loss=0.09378, ctc_loss=0.1645, over 18522.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:48,132 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:09:26,929 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 10, validation: loss=0.2182, simple_loss=0.2007, pruned_loss=0.07671, ctc_loss=0.1399, over 1073944.00 frames.
+2024-09-01 02:09:26,929 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 02:09:55,279 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.33 vs. limit=22.5
+2024-09-01 02:10:02,224 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.78 vs. limit=22.5
+2024-09-01 02:11:12,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:11:29,302 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:11:54,676 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.023e+02 2.117e+02 2.323e+02 3.505e+02, threshold=4.234e+02, percent-clipped=0.0
+2024-09-01 02:12:15,828 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:12:15,984 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.21 vs. limit=22.5
+2024-09-01 02:12:16,364 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 50, loss[loss=0.2192, simple_loss=0.2044, pruned_loss=0.07052, ctc_loss=0.1557, over 18973.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2136, pruned_loss=0.08742, ctc_loss=0.1744, over 826863.11 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:12:24,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:13:00,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=106485.33333333333, ans=0.025
+2024-09-01 02:13:07,894 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:13:32,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:14:12,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=106592.0, ans=0.05
+2024-09-01 02:14:19,592 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=106592.0, ans=0.025
+2024-09-01 02:14:19,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.81 vs. limit=22.5
+2024-09-01 02:15:12,144 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 100, loss[loss=0.2319, simple_loss=0.2132, pruned_loss=0.07686, ctc_loss=0.1723, over 19188.00 frames. ], tot_loss[loss=0.235, simple_loss=0.2103, pruned_loss=0.08446, ctc_loss=0.1695, over 1472464.39 frames. ], batch size: 134, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:15:39,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-09-01 02:16:23,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106805.33333333333, ans=0.125
+2024-09-01 02:16:29,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=106805.33333333333, ans=0.125
+2024-09-01 02:16:36,663 INFO [dysarthria_finetune.py:1435] (1/4) (14664007680, 34072559616)
+2024-09-01 02:16:36,664 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:16:36,698 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:16:49,174 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 0, loss[loss=0.2333, simple_loss=0.2045, pruned_loss=0.08879, ctc_loss=0.1714, over 18704.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2045, pruned_loss=0.08879, ctc_loss=0.1714, over 18704.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:16:49,174 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:17:12,841 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 11, validation: loss=0.211, simple_loss=0.1968, pruned_loss=0.07375, ctc_loss=0.137, over 1073944.00 frames.
+2024-09-01 02:17:12,841 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 02:17:43,504 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 1.989e+02 2.082e+02 2.188e+02 3.029e+02, threshold=4.165e+02, percent-clipped=0.0
+2024-09-01 02:17:44,724 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106906.66666666667, ans=0.125
+2024-09-01 02:17:54,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=106960.0, ans=0.025
+2024-09-01 02:17:56,738 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106960.0, ans=0.1
+2024-09-01 02:18:14,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=107013.33333333333, ans=0.2
+2024-09-01 02:18:36,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 02:18:38,606 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.73 vs. limit=12.0
+2024-09-01 02:18:40,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=107066.66666666667, ans=0.0
+2024-09-01 02:18:46,233 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 02:18:53,624 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 50, loss[loss=0.2338, simple_loss=0.2109, pruned_loss=0.08345, ctc_loss=0.174, over 18947.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2077, pruned_loss=0.08247, ctc_loss=0.1691, over 828704.78 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:18:56,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=107120.0, ans=0.125
+2024-09-01 02:19:01,118 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107120.0, ans=0.1
+2024-09-01 02:19:13,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=107173.33333333333, ans=0.125
+2024-09-01 02:20:13,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=107280.0, ans=0.05
+2024-09-01 02:20:17,830 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.05 vs. limit=22.5
+2024-09-01 02:20:21,550 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.38 vs. limit=22.5
+2024-09-01 02:20:35,707 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 100, loss[loss=0.2198, simple_loss=0.1993, pruned_loss=0.07881, ctc_loss=0.1626, over 19147.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.2059, pruned_loss=0.08112, ctc_loss=0.1659, over 1473582.76 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:20:56,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:21:05,348 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 1.934e+02 2.032e+02 2.152e+02 3.346e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-09-01 02:21:16,515 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.51 vs. limit=15.0
+2024-09-01 02:21:30,099 INFO [dysarthria_finetune.py:1435] (1/4) (1246429184, 34072559616)
+2024-09-01 02:21:30,100 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:21:30,165 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:21:45,316 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 0, loss[loss=0.2606, simple_loss=0.224, pruned_loss=0.101, ctc_loss=0.2074, over 18735.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.224, pruned_loss=0.101, ctc_loss=0.2074, over 18735.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:21:45,317 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:22:12,229 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 12, validation: loss=0.2042, simple_loss=0.1932, pruned_loss=0.07127, ctc_loss=0.1341, over 1073944.00 frames.
+2024-09-01 02:22:12,230 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13101MB
+2024-09-01 02:22:35,407 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:22:35,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=107541.33333333333, ans=0.125
+2024-09-01 02:22:58,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=107594.66666666667, ans=0.125
+2024-09-01 02:23:00,612 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107594.66666666667, ans=0.1
+2024-09-01 02:23:27,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=107648.0, ans=0.125
+2024-09-01 02:23:33,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107701.33333333333, ans=0.1
+2024-09-01 02:24:02,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=107754.66666666667, ans=0.125
+2024-09-01 02:24:12,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107754.66666666667, ans=0.125
+2024-09-01 02:24:27,768 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 50, loss[loss=0.1979, simple_loss=0.1832, pruned_loss=0.06733, ctc_loss=0.1532, over 18974.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.204, pruned_loss=0.07999, ctc_loss=0.1667, over 827168.58 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:24:55,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-09-01 02:24:57,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=107861.33333333333, ans=0.0
+2024-09-01 02:25:24,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=107914.66666666667, ans=0.5
+2024-09-01 02:25:47,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-09-01 02:25:55,569 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 1.958e+02 2.051e+02 2.245e+02 3.047e+02, threshold=4.102e+02, percent-clipped=0.0
+2024-09-01 02:26:29,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=108074.66666666667, ans=0.04949747468305833
+2024-09-01 02:26:30,863 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 100, loss[loss=0.1878, simple_loss=0.1852, pruned_loss=0.06015, ctc_loss=0.1288, over 19114.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.202, pruned_loss=0.07903, ctc_loss=0.1632, over 1473649.48 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:52,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=108128.0, ans=0.0
+2024-09-01 02:26:52,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108128.0, ans=0.125
+2024-09-01 02:26:53,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=108128.0, ans=0.95
+2024-09-01 02:27:01,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.55 vs. limit=6.0
+2024-09-01 02:27:06,366 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=8.95 vs. limit=12.0
+2024-09-01 02:27:31,273 INFO [dysarthria_finetune.py:1435] (1/4) (2171273216, 34072559616)
+2024-09-01 02:27:31,273 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:27:31,343 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:27:44,302 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 0, loss[loss=0.2617, simple_loss=0.2277, pruned_loss=0.1046, ctc_loss=0.1939, over 18361.00 frames. ], tot_loss[loss=0.2617, simple_loss=0.2277, pruned_loss=0.1046, ctc_loss=0.1939, over 18361.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:27:44,303 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:27:51,689 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.0.layers.0.self_attn_weights, attn_weights_entropy = tensor([7.8112, 7.5258, 7.6056, 7.5671], device='cuda:1')
+2024-09-01 02:28:07,304 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 13, validation: loss=0.1981, simple_loss=0.19, pruned_loss=0.06934, ctc_loss=0.1316, over 1073944.00 frames.
+2024-09-01 02:28:07,305 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 02:28:21,697 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=14.87 vs. limit=15.0
+2024-09-01 02:28:33,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 02:28:41,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.08 vs. limit=15.0
+2024-09-01 02:28:45,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=108282.66666666667, ans=0.04949747468305833
+2024-09-01 02:29:01,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108336.0, ans=0.1
+2024-09-01 02:29:24,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108389.33333333333, ans=0.1
+2024-09-01 02:29:47,490 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 50, loss[loss=0.1821, simple_loss=0.1872, pruned_loss=0.05393, ctc_loss=0.127, over 19011.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.1961, pruned_loss=0.07581, ctc_loss=0.1608, over 828396.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:29:48,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=108496.0, ans=0.125
+2024-09-01 02:30:01,818 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 1.921e+02 2.017e+02 2.151e+02 2.785e+02, threshold=4.034e+02, percent-clipped=0.0
+2024-09-01 02:30:40,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=108602.66666666667, ans=0.125
+2024-09-01 02:30:44,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108602.66666666667, ans=0.125
+2024-09-01 02:30:46,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:30:46,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:30:55,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:31:01,591 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.91 vs. limit=15.0
+2024-09-01 02:31:07,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 02:31:07,794 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.55 vs. limit=15.0
+2024-09-01 02:31:24,904 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:31:25,776 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 100, loss[loss=0.1943, simple_loss=0.1869, pruned_loss=0.06741, ctc_loss=0.1383, over 19217.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.1958, pruned_loss=0.07534, ctc_loss=0.159, over 1472353.64 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:31:34,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 02:31:34,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=108762.66666666667, ans=0.2
+2024-09-01 02:31:48,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=108816.0, ans=0.025
+2024-09-01 02:31:52,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=108816.0, ans=0.2
+2024-09-01 02:31:53,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=108816.0, ans=0.0
+2024-09-01 02:31:57,754 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:32:07,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=108869.33333333333, ans=0.2
+2024-09-01 02:32:19,306 INFO [dysarthria_finetune.py:1435] (1/4) (11878989824, 34072559616)
+2024-09-01 02:32:19,307 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:32:19,325 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:33:00,704 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 0, loss[loss=0.247, simple_loss=0.2149, pruned_loss=0.09772, ctc_loss=0.1934, over 18619.00 frames. ], tot_loss[loss=0.247, simple_loss=0.2149, pruned_loss=0.09772, ctc_loss=0.1934, over 18619.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:33:00,705 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:33:44,927 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 14, validation: loss=0.1924, simple_loss=0.1871, pruned_loss=0.06768, ctc_loss=0.1293, over 1073944.00 frames.
+2024-09-01 02:33:44,927 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 02:34:07,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108917.33333333333, ans=0.1
+2024-09-01 02:34:20,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108970.66666666667, ans=0.125
+2024-09-01 02:34:28,974 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.41 vs. limit=22.5
+2024-09-01 02:35:04,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109024.0, ans=0.1
+2024-09-01 02:35:09,749 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 1.893e+02 1.977e+02 2.192e+02 2.916e+02, threshold=3.954e+02, percent-clipped=0.0
+2024-09-01 02:35:30,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=109077.33333333333, ans=0.125
+2024-09-01 02:35:39,267 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=28.19 vs. limit=22.5
+2024-09-01 02:36:00,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=109130.66666666667, ans=0.125
+2024-09-01 02:36:11,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:36:12,041 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 50, loss[loss=0.2304, simple_loss=0.2158, pruned_loss=0.08233, ctc_loss=0.1772, over 19004.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.1941, pruned_loss=0.07349, ctc_loss=0.157, over 826629.84 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:13,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109184.0, ans=0.1
+2024-09-01 02:37:17,287 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109237.33333333333, ans=0.1
+2024-09-01 02:38:19,766 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.93 vs. limit=22.5
+2024-09-01 02:38:42,949 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 100, loss[loss=0.1921, simple_loss=0.1883, pruned_loss=0.06618, ctc_loss=0.1392, over 19114.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.1935, pruned_loss=0.07345, ctc_loss=0.1556, over 1472155.55 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:21,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=109504.0, ans=0.0
+2024-09-01 02:39:29,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=109557.33333333333, ans=0.05
+2024-09-01 02:39:50,922 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 1.903e+02 1.972e+02 2.079e+02 2.713e+02, threshold=3.943e+02, percent-clipped=0.0
+2024-09-01 02:39:50,971 INFO [dysarthria_finetune.py:1435] (1/4) (14632550400, 34072559616)
+2024-09-01 02:39:50,972 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:39:51,011 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:40:03,818 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 0, loss[loss=0.2244, simple_loss=0.1982, pruned_loss=0.088, ctc_loss=0.1765, over 18480.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.1982, pruned_loss=0.088, ctc_loss=0.1765, over 18480.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:40:03,818 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:40:34,886 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 15, validation: loss=0.1871, simple_loss=0.1844, pruned_loss=0.06629, ctc_loss=0.1271, over 1073944.00 frames.
+2024-09-01 02:40:34,887 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 02:40:37,294 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=109605.33333333333, ans=0.125
+2024-09-01 02:40:51,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=109605.33333333333, ans=10.0
+2024-09-01 02:40:51,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.37 vs. limit=15.0
+2024-09-01 02:41:26,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=109658.66666666667, ans=0.125
+2024-09-01 02:41:26,246 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=11.33 vs. limit=12.0
+2024-09-01 02:41:56,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=109712.0, ans=0.0
+2024-09-01 02:43:06,229 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 50, loss[loss=0.2169, simple_loss=0.2047, pruned_loss=0.07852, ctc_loss=0.1683, over 19020.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.1915, pruned_loss=0.07276, ctc_loss=0.1557, over 827766.05 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:44:42,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=110032.0, ans=0.2
+2024-09-01 02:44:46,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=110032.0, ans=0.125
+2024-09-01 02:45:16,035 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.73 vs. limit=22.5
+2024-09-01 02:45:26,968 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 1.886e+02 2.042e+02 2.162e+02 2.644e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-09-01 02:45:29,583 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 100, loss[loss=0.1688, simple_loss=0.1642, pruned_loss=0.05712, ctc_loss=0.1404, over 19074.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.1913, pruned_loss=0.07179, ctc_loss=0.1523, over 1471681.17 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:45:34,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=110138.66666666667, ans=0.0
+2024-09-01 02:46:46,684 INFO [dysarthria_finetune.py:1435] (1/4) (1296760832, 34072559616)
+2024-09-01 02:46:46,685 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:46:46,746 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:47:00,733 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 0, loss[loss=0.2129, simple_loss=0.2038, pruned_loss=0.07732, ctc_loss=0.1628, over 18847.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2038, pruned_loss=0.07732, ctc_loss=0.1628, over 18847.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:47:00,733 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:47:23,696 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 16, validation: loss=0.182, simple_loss=0.1819, pruned_loss=0.06496, ctc_loss=0.1251, over 1073944.00 frames.
+2024-09-01 02:47:23,697 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 02:47:35,539 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=110293.33333333333, ans=0.125
+2024-09-01 02:47:39,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-09-01 02:47:41,811 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.59 vs. limit=15.0
+2024-09-01 02:47:55,664 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110346.66666666667, ans=0.1
+2024-09-01 02:48:45,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=110506.66666666667, ans=0.125
+2024-09-01 02:48:59,310 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:49:04,117 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 50, loss[loss=0.1839, simple_loss=0.186, pruned_loss=0.06362, ctc_loss=0.1348, over 19018.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.1877, pruned_loss=0.06917, ctc_loss=0.1501, over 827868.27 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:49:23,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=110613.33333333333, ans=0.0
+2024-09-01 02:49:44,008 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 1.879e+02 1.996e+02 2.191e+02 2.692e+02, threshold=3.992e+02, percent-clipped=0.0
+2024-09-01 02:49:45,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=110666.66666666667, ans=0.125
+2024-09-01 02:50:42,131 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 100, loss[loss=0.185, simple_loss=0.1788, pruned_loss=0.06614, ctc_loss=0.147, over 19118.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.1886, pruned_loss=0.07002, ctc_loss=0.15, over 1473208.83 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:35,752 INFO [dysarthria_finetune.py:1435] (1/4) (348848128, 34072559616)
+2024-09-01 02:51:35,753 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:51:35,836 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 02:51:50,595 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 0, loss[loss=0.2335, simple_loss=0.2254, pruned_loss=0.08817, ctc_loss=0.1633, over 18527.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.2254, pruned_loss=0.08817, ctc_loss=0.1633, over 18527.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:50,595 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:51:54,529 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.4.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.4605, 3.1179, 3.0393, 3.0576], device='cuda:1')
+2024-09-01 02:52:13,658 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 17, validation: loss=0.1784, simple_loss=0.1796, pruned_loss=0.06394, ctc_loss=0.1232, over 1073944.00 frames.
+2024-09-01 02:52:13,658 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 02:52:50,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 02:52:52,793 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-09-01 02:52:54,646 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-09-01 02:53:16,771 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.88 vs. limit=15.0
+2024-09-01 02:53:59,384 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 1.870e+02 1.982e+02 2.091e+02 2.808e+02, threshold=3.964e+02, percent-clipped=0.0
+2024-09-01 02:54:24,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111194.66666666667, ans=0.125
+2024-09-01 02:54:35,607 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 50, loss[loss=0.1864, simple_loss=0.1836, pruned_loss=0.06437, ctc_loss=0.151, over 19037.00 frames. ], tot_loss[loss=0.1915, simple_loss=0.1864, pruned_loss=0.06869, ctc_loss=0.1481, over 827680.99 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:55:16,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=111248.0, ans=0.05
+2024-09-01 02:56:18,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111354.66666666667, ans=0.1
+2024-09-01 02:56:22,819 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.71 vs. limit=15.0
+2024-09-01 02:56:44,129 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.75 vs. limit=22.5
+2024-09-01 02:56:55,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.74 vs. limit=15.0
+2024-09-01 02:56:55,537 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.46 vs. limit=15.0
+2024-09-01 02:57:16,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111408.0, ans=0.125
+2024-09-01 02:57:16,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111408.0, ans=0.1
+2024-09-01 02:57:27,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.94 vs. limit=15.0
+2024-09-01 02:57:34,903 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.63 vs. limit=15.0
+2024-09-01 02:58:16,886 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 100, loss[loss=0.1508, simple_loss=0.1603, pruned_loss=0.04824, ctc_loss=0.1122, over 19067.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.1827, pruned_loss=0.06649, ctc_loss=0.1442, over 1472664.14 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:59:27,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=111568.0, ans=0.015
+2024-09-01 03:00:07,752 INFO [dysarthria_finetune.py:1435] (1/4) (847970304, 34072559616)
+2024-09-01 03:00:07,753 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:00:07,831 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 03:00:21,397 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 0, loss[loss=0.2181, simple_loss=0.1998, pruned_loss=0.08303, ctc_loss=0.1757, over 18622.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.1998, pruned_loss=0.08303, ctc_loss=0.1757, over 18622.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:00:21,397 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:01:08,463 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 18, validation: loss=0.1758, simple_loss=0.1773, pruned_loss=0.06291, ctc_loss=0.1213, over 1073944.00 frames.
+2024-09-01 03:01:08,464 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 03:01:21,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111669.33333333333, ans=0.07
+2024-09-01 03:02:04,462 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 1.863e+02 1.965e+02 2.122e+02 2.833e+02, threshold=3.929e+02, percent-clipped=0.0
+2024-09-01 03:04:29,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111776.0, ans=0.125
+2024-09-01 03:05:06,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=111829.33333333333, ans=0.035
+2024-09-01 03:05:55,207 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.06 vs. limit=15.0
+2024-09-01 03:05:55,550 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 50, loss[loss=0.1765, simple_loss=0.1796, pruned_loss=0.06107, ctc_loss=0.1283, over 19026.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.1826, pruned_loss=0.06837, ctc_loss=0.1464, over 826500.31 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:07:09,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=111936.0, ans=0.2
+2024-09-01 03:07:41,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=111989.33333333333, ans=0.125
+2024-09-01 03:08:08,293 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:08:53,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112042.66666666667, ans=0.125
+2024-09-01 03:09:58,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=112149.33333333333, ans=10.0
+2024-09-01 03:11:05,717 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 100, loss[loss=0.2001, simple_loss=0.1876, pruned_loss=0.07348, ctc_loss=0.1644, over 19036.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.1812, pruned_loss=0.06738, ctc_loss=0.1441, over 1471672.61 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:11:51,849 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.826e+02 1.931e+02 2.035e+02 3.279e+02, threshold=3.861e+02, percent-clipped=0.0
+2024-09-01 03:12:06,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112256.0, ans=0.1
+2024-09-01 03:12:08,591 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=112309.33333333333, ans=0.125
+2024-09-01 03:12:26,102 INFO [dysarthria_finetune.py:1435] (1/4) (669712384, 34072559616)
+2024-09-01 03:12:26,103 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:12:26,165 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 03:12:38,700 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 0, loss[loss=0.2027, simple_loss=0.2018, pruned_loss=0.07314, ctc_loss=0.1432, over 18691.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2018, pruned_loss=0.07314, ctc_loss=0.1432, over 18691.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:12:38,700 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:13:02,318 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 19, validation: loss=0.1735, simple_loss=0.1751, pruned_loss=0.06201, ctc_loss=0.1194, over 1073944.00 frames.
+2024-09-01 03:13:02,319 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 03:13:59,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 03:14:03,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 03:14:20,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=112512.0, ans=0.125
+2024-09-01 03:14:24,811 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112512.0, ans=0.125
+2024-09-01 03:14:34,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 03:14:44,023 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.47 vs. limit=10.0
+2024-09-01 03:14:48,752 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 50, loss[loss=0.1885, simple_loss=0.1874, pruned_loss=0.06613, ctc_loss=0.1432, over 18976.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.1805, pruned_loss=0.0668, ctc_loss=0.1443, over 828010.25 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:15:08,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=112672.0, ans=0.125
+2024-09-01 03:15:10,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=112672.0, ans=0.2
+2024-09-01 03:15:14,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=112672.0, ans=0.0
+2024-09-01 03:15:16,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=112672.0, ans=0.0
+2024-09-01 03:15:16,849 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.21 vs. limit=22.5
+2024-09-01 03:15:40,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=112725.33333333333, ans=0.125
+2024-09-01 03:15:44,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112725.33333333333, ans=0.0
+2024-09-01 03:15:48,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=112778.66666666667, ans=0.125
+2024-09-01 03:15:58,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=112778.66666666667, ans=0.125
+2024-09-01 03:16:00,817 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 1.859e+02 1.957e+02 2.051e+02 3.574e+02, threshold=3.914e+02, percent-clipped=0.0
+2024-09-01 03:16:06,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112778.66666666667, ans=0.1
+2024-09-01 03:16:28,743 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 100, loss[loss=0.1562, simple_loss=0.157, pruned_loss=0.05291, ctc_loss=0.1238, over 19118.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.1795, pruned_loss=0.06506, ctc_loss=0.1402, over 1474453.83 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:16:35,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112885.33333333333, ans=0.1
+2024-09-01 03:16:37,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 03:16:45,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=112885.33333333333, ans=0.2
+2024-09-01 03:17:04,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=112938.66666666667, ans=0.0
+2024-09-01 03:17:18,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112992.0, ans=0.125
+2024-09-01 03:17:20,795 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.06 vs. limit=15.0
+2024-09-01 03:17:22,980 INFO [dysarthria_finetune.py:1435] (1/4) (116064256, 34072559616)
+2024-09-01 03:17:22,980 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:17:23,052 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 03:17:37,120 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 0, loss[loss=0.2349, simple_loss=0.2173, pruned_loss=0.09138, ctc_loss=0.1742, over 18758.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2173, pruned_loss=0.09138, ctc_loss=0.1742, over 18758.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:17:37,120 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:18:00,790 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 20, validation: loss=0.1713, simple_loss=0.1732, pruned_loss=0.06117, ctc_loss=0.1175, over 1073944.00 frames.
+2024-09-01 03:18:00,790 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 19046MB
+2024-09-01 03:18:18,019 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=113040.0, ans=0.125
+2024-09-01 03:18:20,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=113093.33333333333, ans=0.0
+2024-09-01 03:18:34,098 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.20 vs. limit=15.0
+2024-09-01 03:18:49,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=113146.66666666667, ans=0.1
+2024-09-01 03:19:03,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=113200.0, ans=0.04949747468305833
+2024-09-01 03:19:07,111 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113200.0, ans=0.125
+2024-09-01 03:19:34,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=113253.33333333333, ans=0.1
+2024-09-01 03:19:34,534 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.20 vs. limit=15.0
+2024-09-01 03:19:39,001 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 50, loss[loss=0.1893, simple_loss=0.1804, pruned_loss=0.06867, ctc_loss=0.1518, over 19069.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.1771, pruned_loss=0.06551, ctc_loss=0.1411, over 828644.17 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:19:52,481 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 1.810e+02 1.894e+02 2.049e+02 3.111e+02, threshold=3.788e+02, percent-clipped=0.0
+2024-09-01 03:19:57,840 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=113360.0, ans=0.0
+2024-09-01 03:20:17,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113413.33333333333, ans=0.125
+2024-09-01 03:20:32,760 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=113413.33333333333, ans=0.125
+2024-09-01 03:20:59,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=113520.0, ans=0.125
+2024-09-01 03:21:09,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=113520.0, ans=0.125
+2024-09-01 03:21:15,880 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 100, loss[loss=0.1508, simple_loss=0.1471, pruned_loss=0.05311, ctc_loss=0.1208, over 19104.00 frames. ], tot_loss[loss=0.1789, simple_loss=0.1751, pruned_loss=0.06389, ctc_loss=0.1372, over 1473557.06 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:21:17,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=113573.33333333333, ans=0.07
+2024-09-01 03:21:49,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.47 vs. limit=12.0
+2024-09-01 03:22:06,347 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.56 vs. limit=22.5
+2024-09-01 03:22:08,743 INFO [dysarthria_finetune.py:1435] (1/4) (931856384, 34072559616)
+2024-09-01 03:22:08,743 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:22:08,783 INFO [dysarthria_finetune.py:1440] (1/4) (29920788480, 34072559616)
+2024-09-01 03:22:08,783 INFO [dysarthria_finetune.py:1442] (1/4) Done!
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-2 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-2
new file mode 100644
index 0000000000000000000000000000000000000000..432c923e39ce90bc8c00e8c8b8ddeacdb11a32fb
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-2
@@ -0,0 +1,529 @@
+2024-08-31 22:13:17,928 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-31 22:13:17,967 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-31 22:13:17,968 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-31 22:13:18,721 INFO [dysarthria_finetune.py:1219] (2/4) (33106362368, 34072559616)
+2024-08-31 22:13:18,721 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-31 22:13:18,725 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 22:13:18,725 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-31 22:13:21,221 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65805511
+2024-08-31 22:13:21,924 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 22:13:33,287 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-31 22:14:37,196 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-31 22:14:37,263 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-31 22:14:55,323 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-31 22:14:56,266 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-31 22:14:56,272 INFO [dysarthria_asr_datamodule.py:501] (2/4) About to get dev cuts
+2024-08-31 22:14:56,477 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-31 22:14:57,473 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-31 22:14:57,473 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:16:23,769 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=17.72 vs. limit=7.5
+2024-08-31 22:16:31,019 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.83 vs. limit=7.5
+2024-08-31 22:16:34,164 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 22:16:36,228 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 22:17:53,346 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 22:17:55,317 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 22:19:46,278 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 22:19:48,456 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11838MB
+2024-08-31 22:20:26,655 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.4003, simple_loss=0.3243, pruned_loss=0.1928, ctc_loss=0.2836, over 18533.00 frames. ], tot_loss[loss=0.4003, simple_loss=0.3243, pruned_loss=0.1928, ctc_loss=0.2836, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:26,655 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-31 22:32:57,021 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.373, simple_loss=0.3046, pruned_loss=0.1755, ctc_loss=0.2544, over 1073944.00 frames.
+2024-08-31 22:32:57,064 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19759MB
+2024-08-31 22:34:50,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-31 22:36:08,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=100000.0, ans=0.125
+2024-08-31 22:46:56,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-31 22:49:21,745 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.10 vs. limit=15.0
+2024-08-31 22:51:12,341 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.81 vs. limit=6.0
+2024-08-31 22:51:26,994 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 9.923e+02 1.157e+03 1.203e+03 1.280e+03 1.380e+03, threshold=4.812e+03, percent-clipped=0.0
+2024-08-31 23:03:06,380 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 8.113e+02 1.083e+03 1.198e+03 1.280e+03 1.431e+03, threshold=4.794e+03, percent-clipped=0.0
+2024-08-31 23:06:50,697 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.66 vs. limit=6.0
+2024-08-31 23:21:30,089 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100160.0, ans=0.0
+2024-08-31 23:21:30,576 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.03 vs. limit=6.0
+2024-08-31 23:29:30,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-31 23:29:32,326 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 6.285e+02 9.052e+02 1.061e+03 1.198e+03 1.431e+03, threshold=4.243e+03, percent-clipped=0.0
+2024-08-31 23:49:33,570 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.4246, simple_loss=0.3403, pruned_loss=0.2104, ctc_loss=0.3139, over 19018.00 frames. ], tot_loss[loss=0.4107, simple_loss=0.3319, pruned_loss=0.1996, ctc_loss=0.2934, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:52:53,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=100266.66666666667, ans=0.2
+2024-08-31 23:54:11,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=100266.66666666667, ans=0.2
+2024-08-31 23:56:26,528 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=31.02 vs. limit=22.5
+2024-09-01 00:02:23,732 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=100320.0, ans=0.09899494936611666
+2024-09-01 00:02:24,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=100320.0, ans=15.0
+2024-09-01 00:05:55,443 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=21.55 vs. limit=15.0
+2024-09-01 00:07:03,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=100373.33333333333, ans=0.04949747468305833
+2024-09-01 00:15:34,743 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=23.52 vs. limit=15.0
+2024-09-01 00:18:31,035 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.958e+02 6.817e+02 8.321e+02 1.009e+03 1.431e+03, threshold=1.664e+03, percent-clipped=0.0
+2024-09-01 00:18:31,069 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3746, simple_loss=0.3051, pruned_loss=0.1784, ctc_loss=0.2558, over 19117.00 frames. ], tot_loss[loss=0.3952, simple_loss=0.32, pruned_loss=0.1891, ctc_loss=0.2805, over 1475925.13 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:22:55,039 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.59 vs. limit=15.0
+2024-09-01 00:28:35,454 INFO [dysarthria_finetune.py:1435] (2/4) (3714777088, 34072559616)
+2024-09-01 00:28:35,454 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 00:28:35,511 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 00:29:13,526 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 0, loss[loss=0.3335, simple_loss=0.2754, pruned_loss=0.1462, ctc_loss=0.2192, over 18502.00 frames. ], tot_loss[loss=0.3335, simple_loss=0.2754, pruned_loss=0.1462, ctc_loss=0.2192, over 18502.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:29:13,527 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 00:34:07,562 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 2, validation: loss=0.3353, simple_loss=0.2773, pruned_loss=0.1482, ctc_loss=0.2175, over 1073944.00 frames.
+2024-09-01 00:34:07,563 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 00:50:27,298 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.57 vs. limit=15.0
+2024-09-01 00:50:35,790 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:51:26,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:53:03,513 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=100789.33333333333, ans=0.125
+2024-09-01 00:53:03,888 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=10.02 vs. limit=12.0
+2024-09-01 00:53:15,153 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=44.13 vs. limit=22.5
+2024-09-01 00:55:14,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=100842.66666666667, ans=0.07
+2024-09-01 00:57:09,093 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=100896.0, ans=0.125
+2024-09-01 00:59:42,888 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 50, loss[loss=0.3746, simple_loss=0.3053, pruned_loss=0.1691, ctc_loss=0.2621, over 18952.00 frames. ], tot_loss[loss=0.3734, simple_loss=0.3039, pruned_loss=0.1721, ctc_loss=0.2606, over 829638.79 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 01:00:18,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-09-01 01:04:18,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101002.66666666667, ans=0.1
+2024-09-01 01:08:04,417 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.183e+02 4.403e+02 5.126e+02 5.917e+02 6.888e+02, threshold=1.025e+03, percent-clipped=0.0
+2024-09-01 01:10:07,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=101109.33333333333, ans=0.2
+2024-09-01 01:10:54,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=101109.33333333333, ans=0.2
+2024-09-01 01:11:03,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=101162.66666666667, ans=0.0
+2024-09-01 01:11:13,214 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.52 vs. limit=15.0
+2024-09-01 01:14:13,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=101216.0, ans=0.0
+2024-09-01 01:14:19,319 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 100, loss[loss=0.3146, simple_loss=0.2641, pruned_loss=0.1333, ctc_loss=0.1927, over 19108.00 frames. ], tot_loss[loss=0.3602, simple_loss=0.2943, pruned_loss=0.163, ctc_loss=0.2483, over 1476292.15 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 01:19:35,001 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-09-01 01:20:51,904 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 01:20:51,905 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:20:51,946 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 01:21:21,469 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 0, loss[loss=0.3605, simple_loss=0.2929, pruned_loss=0.1647, ctc_loss=0.2546, over 18600.00 frames. ], tot_loss[loss=0.3605, simple_loss=0.2929, pruned_loss=0.1647, ctc_loss=0.2546, over 18600.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:21:21,470 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:21:44,700 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 3, validation: loss=0.309, simple_loss=0.2588, pruned_loss=0.13, ctc_loss=0.1938, over 1073944.00 frames.
+2024-09-01 01:21:44,701 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 01:22:17,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=101370.66666666667, ans=0.2
+2024-09-01 01:22:17,647 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=22.27 vs. limit=15.0
+2024-09-01 01:22:56,426 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101424.0, ans=0.1
+2024-09-01 01:23:07,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=101424.0, ans=0.125
+2024-09-01 01:23:29,757 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.96 vs. limit=15.0
+2024-09-01 01:23:32,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-09-01 01:24:06,866 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.73 vs. limit=22.5
+2024-09-01 01:24:27,005 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.669e+02 3.351e+02 3.834e+02 4.204e+02 5.264e+02, threshold=7.667e+02, percent-clipped=0.0
+2024-09-01 01:24:45,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=101584.0, ans=0.125
+2024-09-01 01:24:47,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=101637.33333333333, ans=0.1
+2024-09-01 01:24:49,026 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 50, loss[loss=0.3317, simple_loss=0.2721, pruned_loss=0.1444, ctc_loss=0.2304, over 19168.00 frames. ], tot_loss[loss=0.3478, simple_loss=0.2855, pruned_loss=0.1526, ctc_loss=0.2393, over 828229.52 frames. ], batch size: 103, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:24:50,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=101637.33333333333, ans=0.2
+2024-09-01 01:26:25,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=101797.33333333333, ans=0.5
+2024-09-01 01:26:34,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101797.33333333333, ans=0.0
+2024-09-01 01:26:45,165 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=19.66 vs. limit=15.0
+2024-09-01 01:26:46,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:27:00,717 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 100, loss[loss=0.2821, simple_loss=0.2376, pruned_loss=0.1111, ctc_loss=0.1838, over 19024.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.2782, pruned_loss=0.1466, ctc_loss=0.231, over 1476045.82 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:27:30,545 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=18.88 vs. limit=15.0
+2024-09-01 01:27:35,427 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=101957.33333333333, ans=0.125
+2024-09-01 01:29:32,085 INFO [dysarthria_finetune.py:1435] (2/4) (10729750528, 34072559616)
+2024-09-01 01:29:32,086 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:29:32,121 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 01:29:45,318 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 0, loss[loss=0.317, simple_loss=0.2654, pruned_loss=0.1272, ctc_loss=0.2122, over 18618.00 frames. ], tot_loss[loss=0.317, simple_loss=0.2654, pruned_loss=0.1272, ctc_loss=0.2122, over 18618.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:29:45,318 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:30:08,496 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 4, validation: loss=0.2887, simple_loss=0.2447, pruned_loss=0.1169, ctc_loss=0.1781, over 1073944.00 frames.
+2024-09-01 01:30:08,496 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 01:30:42,263 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.378e+02 2.838e+02 3.147e+02 3.460e+02 5.318e+02, threshold=6.294e+02, percent-clipped=0.0
+2024-09-01 01:30:43,720 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=9.30 vs. limit=12.0
+2024-09-01 01:30:49,376 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102106.66666666667, ans=0.125
+2024-09-01 01:30:56,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:31:03,036 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:31:12,827 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=32.34 vs. limit=15.0
+2024-09-01 01:31:18,964 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.64 vs. limit=22.5
+2024-09-01 01:32:02,190 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 50, loss[loss=0.31, simple_loss=0.2574, pruned_loss=0.1262, ctc_loss=0.216, over 18961.00 frames. ], tot_loss[loss=0.32, simple_loss=0.2655, pruned_loss=0.1335, ctc_loss=0.2187, over 828488.26 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:32:09,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=102320.0, ans=0.125
+2024-09-01 01:32:19,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=102320.0, ans=0.125
+2024-09-01 01:32:25,366 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=36.92 vs. limit=22.5
+2024-09-01 01:32:33,248 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=13.04 vs. limit=12.0
+2024-09-01 01:32:35,216 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.57 vs. limit=15.0
+2024-09-01 01:32:40,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=102426.66666666667, ans=0.125
+2024-09-01 01:32:56,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-09-01 01:35:50,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102480.0, ans=0.0
+2024-09-01 01:35:57,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=102533.33333333333, ans=0.0
+2024-09-01 01:36:37,368 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.94 vs. limit=6.0
+2024-09-01 01:37:40,020 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 100, loss[loss=0.292, simple_loss=0.2442, pruned_loss=0.1212, ctc_loss=0.1942, over 19090.00 frames. ], tot_loss[loss=0.3158, simple_loss=0.2624, pruned_loss=0.1317, ctc_loss=0.215, over 1476821.49 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:38:09,931 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 2.526e+02 2.751e+02 3.040e+02 4.636e+02, threshold=5.501e+02, percent-clipped=0.0
+2024-09-01 01:38:19,386 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.16 vs. limit=15.0
+2024-09-01 01:38:30,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-09-01 01:38:35,842 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 01:38:35,842 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:38:35,891 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 01:38:49,736 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 0, loss[loss=0.299, simple_loss=0.2515, pruned_loss=0.1206, ctc_loss=0.1996, over 18551.00 frames. ], tot_loss[loss=0.299, simple_loss=0.2515, pruned_loss=0.1206, ctc_loss=0.1996, over 18551.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:38:49,736 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:39:30,977 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 5, validation: loss=0.2717, simple_loss=0.233, pruned_loss=0.1066, ctc_loss=0.1665, over 1073944.00 frames.
+2024-09-01 01:39:30,978 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 01:40:54,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=102741.33333333333, ans=0.2
+2024-09-01 01:41:06,686 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:41:15,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=102794.66666666667, ans=0.0
+2024-09-01 01:41:23,677 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.45 vs. limit=15.0
+2024-09-01 01:42:50,160 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.43 vs. limit=15.0
+2024-09-01 01:43:07,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=102901.33333333333, ans=0.0
+2024-09-01 01:43:55,504 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 50, loss[loss=0.2797, simple_loss=0.236, pruned_loss=0.1101, ctc_loss=0.19, over 19027.00 frames. ], tot_loss[loss=0.305, simple_loss=0.2551, pruned_loss=0.1237, ctc_loss=0.2097, over 828775.72 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:44:01,635 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=103008.0, ans=0.0
+2024-09-01 01:44:21,320 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=6.61 vs. limit=12.0
+2024-09-01 01:44:36,741 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=17.70 vs. limit=15.0
+2024-09-01 01:45:12,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:45:29,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:46:04,690 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.130e+02 2.382e+02 2.524e+02 2.770e+02 4.371e+02, threshold=5.047e+02, percent-clipped=0.0
+2024-09-01 01:46:35,325 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.90 vs. limit=22.5
+2024-09-01 01:46:51,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:46:52,897 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 100, loss[loss=0.2983, simple_loss=0.2483, pruned_loss=0.1244, ctc_loss=0.2067, over 19114.00 frames. ], tot_loss[loss=0.2957, simple_loss=0.248, pruned_loss=0.1196, ctc_loss=0.202, over 1478197.42 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:47:03,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=103274.66666666667, ans=0.0
+2024-09-01 01:48:01,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=103328.0, ans=0.2
+2024-09-01 01:48:07,757 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.59 vs. limit=15.0
+2024-09-01 01:48:10,622 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:48:25,293 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103381.33333333333, ans=0.1
+2024-09-01 01:48:26,051 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 01:48:26,052 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:48:26,090 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 01:48:41,997 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 0, loss[loss=0.283, simple_loss=0.2417, pruned_loss=0.1094, ctc_loss=0.1881, over 18783.00 frames. ], tot_loss[loss=0.283, simple_loss=0.2417, pruned_loss=0.1094, ctc_loss=0.1881, over 18783.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:48:41,998 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:49:05,149 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 6, validation: loss=0.2578, simple_loss=0.2238, pruned_loss=0.09861, ctc_loss=0.1582, over 1073944.00 frames.
+2024-09-01 01:49:05,149 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 01:49:45,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=103477.33333333333, ans=0.125
+2024-09-01 01:50:06,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103530.66666666667, ans=0.1
+2024-09-01 01:50:14,513 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103584.0, ans=0.1
+2024-09-01 01:50:24,933 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=103584.0, ans=0.0
+2024-09-01 01:50:43,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=103637.33333333333, ans=0.0
+2024-09-01 01:50:52,023 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 50, loss[loss=0.287, simple_loss=0.2442, pruned_loss=0.1152, ctc_loss=0.1884, over 19006.00 frames. ], tot_loss[loss=0.2821, simple_loss=0.2381, pruned_loss=0.1113, ctc_loss=0.196, over 828020.78 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:50:55,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 01:50:55,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 01:51:03,920 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=21.79 vs. limit=15.0
+2024-09-01 01:51:07,999 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 2.277e+02 2.375e+02 2.614e+02 3.891e+02, threshold=4.750e+02, percent-clipped=0.0
+2024-09-01 01:51:13,980 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=103744.0, ans=0.09899494936611666
+2024-09-01 01:51:34,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=103797.33333333333, ans=0.125
+2024-09-01 01:52:09,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=103850.66666666667, ans=0.0
+2024-09-01 01:52:34,153 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 100, loss[loss=0.2988, simple_loss=0.2538, pruned_loss=0.1164, ctc_loss=0.208, over 19060.00 frames. ], tot_loss[loss=0.28, simple_loss=0.2372, pruned_loss=0.1103, ctc_loss=0.1926, over 1475525.13 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:52:49,242 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=103957.33333333333, ans=22.5
+2024-09-01 01:53:03,334 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.84 vs. limit=10.0
+2024-09-01 01:53:06,923 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 01:53:07,238 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=104010.66666666667, ans=15.0
+2024-09-01 01:53:25,785 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.09 vs. limit=15.0
+2024-09-01 01:53:34,207 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 01:53:34,208 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:53:34,257 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 01:53:47,071 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 0, loss[loss=0.2767, simple_loss=0.2399, pruned_loss=0.1067, ctc_loss=0.1772, over 18435.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.2399, pruned_loss=0.1067, ctc_loss=0.1772, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:53:47,072 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:54:10,665 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 7, validation: loss=0.2464, simple_loss=0.2165, pruned_loss=0.09214, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 01:54:10,666 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 01:54:30,363 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.87 vs. limit=22.5
+2024-09-01 01:55:12,006 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.149e+02 2.268e+02 2.457e+02 3.821e+02, threshold=4.535e+02, percent-clipped=0.0
+2024-09-01 01:55:33,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=104320.0, ans=0.125
+2024-09-01 01:55:53,623 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.70 vs. limit=22.5
+2024-09-01 01:55:53,889 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 50, loss[loss=0.2528, simple_loss=0.2215, pruned_loss=0.08959, ctc_loss=0.1735, over 18970.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.2352, pruned_loss=0.1067, ctc_loss=0.1922, over 828175.40 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:07,168 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104533.33333333333, ans=0.1
+2024-09-01 01:57:10,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=104586.66666666667, ans=0.2
+2024-09-01 01:57:30,837 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 100, loss[loss=0.2526, simple_loss=0.2209, pruned_loss=0.09424, ctc_loss=0.1673, over 19065.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.2307, pruned_loss=0.1038, ctc_loss=0.1878, over 1476190.89 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:59,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104693.33333333333, ans=0.125
+2024-09-01 01:58:12,657 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.81 vs. limit=15.0
+2024-09-01 01:58:22,454 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 01:58:22,454 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 01:58:22,506 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 01:58:35,293 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 0, loss[loss=0.24, simple_loss=0.2117, pruned_loss=0.08632, ctc_loss=0.1619, over 18635.00 frames. ], tot_loss[loss=0.24, simple_loss=0.2117, pruned_loss=0.08632, ctc_loss=0.1619, over 18635.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:58:35,294 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 01:58:58,374 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 8, validation: loss=0.236, simple_loss=0.2103, pruned_loss=0.08624, ctc_loss=0.1474, over 1073944.00 frames.
+2024-09-01 01:58:58,375 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 01:59:02,421 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.080e+02 2.182e+02 2.331e+02 3.634e+02, threshold=4.365e+02, percent-clipped=0.0
+2024-09-01 01:59:15,888 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.92 vs. limit=15.0
+2024-09-01 02:00:10,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=104949.33333333333, ans=0.2
+2024-09-01 02:00:22,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=105002.66666666667, ans=0.125
+2024-09-01 02:00:24,403 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.59 vs. limit=10.0
+2024-09-01 02:00:36,423 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 50, loss[loss=0.2406, simple_loss=0.2204, pruned_loss=0.0796, ctc_loss=0.153, over 19000.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.2251, pruned_loss=0.09806, ctc_loss=0.1853, over 827531.12 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:01:03,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=105109.33333333333, ans=0.025
+2024-09-01 02:01:07,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105109.33333333333, ans=0.1
+2024-09-01 02:01:27,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=105162.66666666667, ans=15.0
+2024-09-01 02:01:40,404 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=9.90 vs. limit=12.0
+2024-09-01 02:02:13,400 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 100, loss[loss=0.2446, simple_loss=0.2157, pruned_loss=0.08849, ctc_loss=0.1706, over 19093.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.2202, pruned_loss=0.09397, ctc_loss=0.1786, over 1475468.79 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:02:17,363 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.064e+02 2.191e+02 2.358e+02 3.385e+02, threshold=4.381e+02, percent-clipped=0.0
+2024-09-01 02:02:30,139 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=105322.66666666667, ans=0.05
+2024-09-01 02:03:07,391 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 02:03:07,392 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:03:07,422 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:03:20,910 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 0, loss[loss=0.2596, simple_loss=0.2274, pruned_loss=0.09849, ctc_loss=0.1763, over 18461.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.2274, pruned_loss=0.09849, ctc_loss=0.1763, over 18461.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:20,910 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:03:44,108 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 9, validation: loss=0.2267, simple_loss=0.2052, pruned_loss=0.08107, ctc_loss=0.1434, over 1073944.00 frames.
+2024-09-01 02:03:44,109 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:03:55,046 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.21 vs. limit=15.0
+2024-09-01 02:04:35,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=105584.0, ans=0.125
+2024-09-01 02:05:08,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=105637.33333333333, ans=0.05
+2024-09-01 02:05:38,302 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 50, loss[loss=0.2611, simple_loss=0.2336, pruned_loss=0.0895, ctc_loss=0.1905, over 18943.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.2168, pruned_loss=0.09126, ctc_loss=0.1778, over 826909.81 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:54,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=105744.0, ans=0.09899494936611666
+2024-09-01 02:06:03,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=105797.33333333333, ans=0.125
+2024-09-01 02:06:24,400 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:06:31,752 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.077e+02 2.184e+02 2.316e+02 3.584e+02, threshold=4.367e+02, percent-clipped=0.0
+2024-09-01 02:06:54,911 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=105904.0, ans=0.2
+2024-09-01 02:07:29,856 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=105957.33333333333, ans=0.0
+2024-09-01 02:07:31,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=106010.66666666667, ans=0.0
+2024-09-01 02:07:32,676 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 100, loss[loss=0.2387, simple_loss=0.2168, pruned_loss=0.08077, ctc_loss=0.1699, over 19136.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.2154, pruned_loss=0.08912, ctc_loss=0.1746, over 1474643.82 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:07:34,125 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:07:36,221 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.47 vs. limit=15.0
+2024-09-01 02:08:34,533 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 02:08:34,611 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:08:34,655 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:08:48,126 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 0, loss[loss=0.2754, simple_loss=0.24, pruned_loss=0.1071, ctc_loss=0.1914, over 18505.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.24, pruned_loss=0.1071, ctc_loss=0.1914, over 18505.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:48,127 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:09:26,937 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 10, validation: loss=0.2182, simple_loss=0.2007, pruned_loss=0.07671, ctc_loss=0.1399, over 1073944.00 frames.
+2024-09-01 02:09:26,938 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:09:40,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=106165.33333333333, ans=0.2
+2024-09-01 02:09:48,204 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=18.69 vs. limit=15.0
+2024-09-01 02:10:32,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:11:13,381 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=106325.33333333333, ans=0.0
+2024-09-01 02:11:50,818 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.83 vs. limit=15.0
+2024-09-01 02:11:54,673 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.023e+02 2.117e+02 2.323e+02 3.505e+02, threshold=4.234e+02, percent-clipped=0.0
+2024-09-01 02:12:16,362 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 50, loss[loss=0.242, simple_loss=0.2174, pruned_loss=0.08316, ctc_loss=0.1835, over 19019.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.2127, pruned_loss=0.08738, ctc_loss=0.1743, over 827816.98 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:12:19,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:12:19,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=106432.0, ans=0.2
+2024-09-01 02:12:24,530 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:12:33,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106432.0, ans=0.125
+2024-09-01 02:12:59,184 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.47 vs. limit=22.5
+2024-09-01 02:14:00,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106592.0, ans=0.1
+2024-09-01 02:15:12,126 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 100, loss[loss=0.2091, simple_loss=0.1888, pruned_loss=0.0722, ctc_loss=0.1578, over 19070.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.208, pruned_loss=0.08243, ctc_loss=0.1673, over 1475821.74 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:15:33,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-09-01 02:15:39,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=106698.66666666667, ans=0.2
+2024-09-01 02:16:11,335 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=106752.0, ans=0.0
+2024-09-01 02:16:30,144 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.92 vs. limit=22.5
+2024-09-01 02:16:33,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=106805.33333333333, ans=0.2
+2024-09-01 02:16:36,658 INFO [dysarthria_finetune.py:1435] (2/4) (10702487552, 34072559616)
+2024-09-01 02:16:36,658 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:16:36,697 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:16:49,149 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 0, loss[loss=0.2627, simple_loss=0.2275, pruned_loss=0.1052, ctc_loss=0.1849, over 18525.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.2275, pruned_loss=0.1052, ctc_loss=0.1849, over 18525.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:16:49,150 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:17:12,839 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 11, validation: loss=0.211, simple_loss=0.1968, pruned_loss=0.07375, ctc_loss=0.137, over 1073944.00 frames.
+2024-09-01 02:17:12,840 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:17:42,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=106906.66666666667, ans=0.0
+2024-09-01 02:17:43,501 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 1.989e+02 2.082e+02 2.188e+02 3.029e+02, threshold=4.165e+02, percent-clipped=0.0
+2024-09-01 02:17:54,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=106960.0, ans=0.0
+2024-09-01 02:17:59,007 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=11.29 vs. limit=15.0
+2024-09-01 02:18:18,352 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.28 vs. limit=15.0
+2024-09-01 02:18:42,088 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 02:18:46,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 02:18:53,621 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 50, loss[loss=0.2359, simple_loss=0.2111, pruned_loss=0.08585, ctc_loss=0.1757, over 19068.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.2062, pruned_loss=0.08189, ctc_loss=0.168, over 827285.47 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:18:57,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107120.0, ans=0.1
+2024-09-01 02:18:57,327 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:19:15,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-09-01 02:19:35,539 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107226.66666666667, ans=0.1
+2024-09-01 02:20:35,686 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 100, loss[loss=0.2191, simple_loss=0.1975, pruned_loss=0.07976, ctc_loss=0.1617, over 19059.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2044, pruned_loss=0.08028, ctc_loss=0.1641, over 1474809.38 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:20:49,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=107386.66666666667, ans=0.125
+2024-09-01 02:20:51,186 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.93 vs. limit=15.0
+2024-09-01 02:20:58,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:21:05,341 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 1.934e+02 2.032e+02 2.152e+02 3.346e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-09-01 02:21:12,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:21:30,082 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 02:21:30,084 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:21:30,119 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:21:45,323 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 0, loss[loss=0.2246, simple_loss=0.2054, pruned_loss=0.0805, ctc_loss=0.1625, over 18505.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2054, pruned_loss=0.0805, ctc_loss=0.1625, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:21:45,324 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:22:12,230 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 12, validation: loss=0.2042, simple_loss=0.1932, pruned_loss=0.07127, ctc_loss=0.1341, over 1073944.00 frames.
+2024-09-01 02:22:12,230 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:22:22,895 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.94 vs. limit=6.0
+2024-09-01 02:22:39,110 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=107594.66666666667, ans=0.125
+2024-09-01 02:23:24,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=107648.0, ans=0.0
+2024-09-01 02:23:26,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=107648.0, ans=0.09899494936611666
+2024-09-01 02:23:33,564 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=107701.33333333333, ans=0.125
+2024-09-01 02:23:38,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.51 vs. limit=15.0
+2024-09-01 02:23:59,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=107754.66666666667, ans=0.125
+2024-09-01 02:24:21,546 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107754.66666666667, ans=0.1
+2024-09-01 02:24:27,762 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 50, loss[loss=0.2083, simple_loss=0.2007, pruned_loss=0.06736, ctc_loss=0.15, over 18979.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2005, pruned_loss=0.07786, ctc_loss=0.1632, over 828348.40 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:25:55,561 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 1.958e+02 2.051e+02 2.245e+02 3.047e+02, threshold=4.102e+02, percent-clipped=0.0
+2024-09-01 02:26:01,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=107968.0, ans=0.125
+2024-09-01 02:26:07,511 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=108021.33333333333, ans=0.2
+2024-09-01 02:26:30,862 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 100, loss[loss=0.1846, simple_loss=0.1856, pruned_loss=0.05625, ctc_loss=0.1266, over 19089.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2006, pruned_loss=0.07702, ctc_loss=0.1615, over 1475248.85 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:32,384 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.53 vs. limit=22.5
+2024-09-01 02:26:54,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=108128.0, ans=0.0
+2024-09-01 02:26:54,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=108128.0, ans=0.125
+2024-09-01 02:27:31,273 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 02:27:31,273 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:27:31,316 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:27:44,308 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 0, loss[loss=0.2366, simple_loss=0.2158, pruned_loss=0.089, ctc_loss=0.1669, over 18540.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2158, pruned_loss=0.089, ctc_loss=0.1669, over 18540.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:27:44,308 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:27:51,981 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([7.1314, 6.7609, 6.6446, 6.7421], device='cuda:2')
+2024-09-01 02:28:07,302 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 13, validation: loss=0.1981, simple_loss=0.19, pruned_loss=0.06934, ctc_loss=0.1316, over 1073944.00 frames.
+2024-09-01 02:28:07,303 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:28:13,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=108229.33333333333, ans=0.125
+2024-09-01 02:28:33,793 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=108282.66666666667, ans=0.0
+2024-09-01 02:28:57,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=108336.0, ans=0.2
+2024-09-01 02:29:01,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=108336.0, ans=0.125
+2024-09-01 02:29:11,563 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.86 vs. limit=8.0
+2024-09-01 02:29:25,032 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.32 vs. limit=15.0
+2024-09-01 02:29:47,490 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 50, loss[loss=0.2234, simple_loss=0.2105, pruned_loss=0.07913, ctc_loss=0.1604, over 18984.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.1985, pruned_loss=0.07439, ctc_loss=0.1587, over 829065.08 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:29:52,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=108496.0, ans=0.0
+2024-09-01 02:29:59,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=108496.0, ans=0.125
+2024-09-01 02:30:01,821 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 1.921e+02 2.017e+02 2.151e+02 2.785e+02, threshold=4.034e+02, percent-clipped=0.0
+2024-09-01 02:30:38,382 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108602.66666666667, ans=0.125
+2024-09-01 02:30:47,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=108656.0, ans=10.0
+2024-09-01 02:30:49,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:31:11,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-09-01 02:31:25,776 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 100, loss[loss=0.2028, simple_loss=0.1903, pruned_loss=0.07005, ctc_loss=0.1594, over 19116.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.198, pruned_loss=0.07471, ctc_loss=0.158, over 1477011.25 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:31:38,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 02:31:44,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108816.0, ans=0.125
+2024-09-01 02:31:46,622 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.97 vs. limit=22.5
+2024-09-01 02:31:53,848 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=108816.0, ans=0.025
+2024-09-01 02:31:57,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=108816.0, ans=0.2
+2024-09-01 02:31:59,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=108816.0, ans=0.09899494936611666
+2024-09-01 02:32:12,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108869.33333333333, ans=0.1
+2024-09-01 02:32:16,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108869.33333333333, ans=0.1
+2024-09-01 02:32:19,312 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 02:32:27,538 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:32:27,576 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:33:00,702 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 0, loss[loss=0.2161, simple_loss=0.1988, pruned_loss=0.07934, ctc_loss=0.1641, over 18523.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.1988, pruned_loss=0.07934, ctc_loss=0.1641, over 18523.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:33:00,702 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:33:44,931 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 14, validation: loss=0.1924, simple_loss=0.1871, pruned_loss=0.06768, ctc_loss=0.1293, over 1073944.00 frames.
+2024-09-01 02:33:44,931 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:34:07,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 02:34:38,237 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.82 vs. limit=15.0
+2024-09-01 02:35:09,744 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 1.893e+02 1.977e+02 2.192e+02 2.916e+02, threshold=3.954e+02, percent-clipped=0.0
+2024-09-01 02:35:45,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=109130.66666666667, ans=0.0
+2024-09-01 02:35:53,450 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:36:12,038 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 50, loss[loss=0.1972, simple_loss=0.191, pruned_loss=0.06519, ctc_loss=0.1571, over 18999.00 frames. ], tot_loss[loss=0.206, simple_loss=0.1938, pruned_loss=0.07297, ctc_loss=0.1572, over 827850.18 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:46,486 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:37:16,201 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=109237.33333333333, ans=0.2
+2024-09-01 02:38:17,059 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=109344.0, ans=0.125
+2024-09-01 02:38:39,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=109397.33333333333, ans=0.0
+2024-09-01 02:38:42,949 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 100, loss[loss=0.1956, simple_loss=0.1924, pruned_loss=0.06485, ctc_loss=0.1513, over 19059.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.1944, pruned_loss=0.07358, ctc_loss=0.1557, over 1475617.37 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:39:10,355 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=109504.0, ans=0.0
+2024-09-01 02:39:27,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=109504.0, ans=0.0
+2024-09-01 02:39:50,923 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 1.903e+02 1.972e+02 2.079e+02 2.713e+02, threshold=3.943e+02, percent-clipped=0.0
+2024-09-01 02:39:50,971 INFO [dysarthria_finetune.py:1435] (2/4) (10702487552, 34072559616)
+2024-09-01 02:39:50,972 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:39:51,011 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:40:03,803 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 0, loss[loss=0.273, simple_loss=0.2393, pruned_loss=0.1097, ctc_loss=0.2079, over 18678.00 frames. ], tot_loss[loss=0.273, simple_loss=0.2393, pruned_loss=0.1097, ctc_loss=0.2079, over 18678.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:40:03,804 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:40:34,881 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 15, validation: loss=0.1871, simple_loss=0.1844, pruned_loss=0.06629, ctc_loss=0.1271, over 1073944.00 frames.
+2024-09-01 02:40:34,882 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:40:36,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-09-01 02:40:40,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 02:42:07,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=109765.33333333333, ans=0.0
+2024-09-01 02:42:18,416 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:43:06,231 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 50, loss[loss=0.1988, simple_loss=0.2007, pruned_loss=0.06618, ctc_loss=0.1465, over 18994.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.192, pruned_loss=0.07302, ctc_loss=0.1541, over 827605.34 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:44:02,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=109925.33333333333, ans=0.2
+2024-09-01 02:44:10,190 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=13.16 vs. limit=15.0
+2024-09-01 02:44:28,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=109978.66666666667, ans=0.0
+2024-09-01 02:44:42,835 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=110032.0, ans=0.125
+2024-09-01 02:45:26,975 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 1.886e+02 2.042e+02 2.162e+02 2.644e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-09-01 02:45:29,587 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 100, loss[loss=0.2103, simple_loss=0.1993, pruned_loss=0.07644, ctc_loss=0.1632, over 19062.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.1909, pruned_loss=0.07155, ctc_loss=0.1507, over 1475114.53 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:45:33,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=110138.66666666667, ans=0.2
+2024-09-01 02:46:46,674 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 02:46:46,674 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:46:46,717 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:47:00,714 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 0, loss[loss=0.2247, simple_loss=0.2055, pruned_loss=0.08675, ctc_loss=0.1717, over 18504.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2055, pruned_loss=0.08675, ctc_loss=0.1717, over 18504.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:47:00,714 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:47:23,703 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 16, validation: loss=0.182, simple_loss=0.1819, pruned_loss=0.06496, ctc_loss=0.1251, over 1073944.00 frames.
+2024-09-01 02:47:23,704 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 02:47:31,664 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.66 vs. limit=15.0
+2024-09-01 02:47:39,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110293.33333333333, ans=0.1
+2024-09-01 02:48:19,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=110400.0, ans=0.0
+2024-09-01 02:48:22,299 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.95 vs. limit=15.0
+2024-09-01 02:48:35,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110453.33333333333, ans=0.1
+2024-09-01 02:49:04,110 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 50, loss[loss=0.207, simple_loss=0.1973, pruned_loss=0.07561, ctc_loss=0.1623, over 19044.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.1857, pruned_loss=0.06909, ctc_loss=0.1503, over 828171.03 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:49:44,008 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 1.879e+02 1.996e+02 2.191e+02 2.692e+02, threshold=3.992e+02, percent-clipped=0.0
+2024-09-01 02:49:45,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=110666.66666666667, ans=0.0
+2024-09-01 02:50:05,001 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=19.00 vs. limit=22.5
+2024-09-01 02:50:25,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 02:50:29,735 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-09-01 02:50:42,119 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 100, loss[loss=0.148, simple_loss=0.1613, pruned_loss=0.04574, ctc_loss=0.108, over 19090.00 frames. ], tot_loss[loss=0.1907, simple_loss=0.1854, pruned_loss=0.06829, ctc_loss=0.1472, over 1476933.27 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:35,759 INFO [dysarthria_finetune.py:1435] (2/4) (10729750528, 34072559616)
+2024-09-01 02:51:35,759 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:51:35,813 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 02:51:50,595 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 0, loss[loss=0.1922, simple_loss=0.1926, pruned_loss=0.06908, ctc_loss=0.1343, over 18336.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.1926, pruned_loss=0.06908, ctc_loss=0.1343, over 18336.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:50,595 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:51:55,656 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([4.5937, 4.0910, 4.4951, 4.2329], device='cuda:2')
+2024-09-01 02:52:13,657 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 17, validation: loss=0.1784, simple_loss=0.1796, pruned_loss=0.06394, ctc_loss=0.1232, over 1073944.00 frames.
+2024-09-01 02:52:13,657 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
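+The zipformer.py:1858 diagnostic a few lines above prints one attn_weights_entropy value per attention head. A minimal sketch of that quantity, assuming it is the entropy of each head's attention distribution averaged over query positions:
+```python
+import torch
+
+def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
+    # attn: (num_heads, num_queries, num_keys); rows sum to 1 after softmax.
+    ent = -(attn * (attn + 1e-20).log()).sum(dim=-1)  # (heads, queries)
+    return ent.mean(dim=-1)                           # one value per head
+```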
+2024-09-01 02:52:38,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=111034.66666666667, ans=0.025
+2024-09-01 02:52:52,737 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=111034.66666666667, ans=0.5
+2024-09-01 02:52:54,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-09-01 02:53:56,152 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=111194.66666666667, ans=0.125
+2024-09-01 02:53:59,380 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 1.870e+02 1.982e+02 2.091e+02 2.808e+02, threshold=3.964e+02, percent-clipped=0.0
+2024-09-01 02:54:35,614 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 50, loss[loss=0.1758, simple_loss=0.1878, pruned_loss=0.05534, ctc_loss=0.1329, over 19057.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.1853, pruned_loss=0.06877, ctc_loss=0.1476, over 827125.84 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:54:58,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111248.0, ans=0.125
+2024-09-01 02:55:15,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=111248.0, ans=0.0
+2024-09-01 02:55:26,382 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=22.38 vs. limit=22.5
+2024-09-01 02:55:56,447 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.42 vs. limit=15.0
+2024-09-01 02:56:13,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=111354.66666666667, ans=0.0
+2024-09-01 02:58:16,890 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 100, loss[loss=0.1727, simple_loss=0.1708, pruned_loss=0.06052, ctc_loss=0.1337, over 19126.00 frames. ], tot_loss[loss=0.188, simple_loss=0.1837, pruned_loss=0.06731, ctc_loss=0.1442, over 1475165.47 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:59:24,068 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=111568.0, ans=0.2
+2024-09-01 03:00:07,747 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 03:00:07,747 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:00:07,779 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 03:00:21,392 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 0, loss[loss=0.207, simple_loss=0.2003, pruned_loss=0.07552, ctc_loss=0.1567, over 18559.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2003, pruned_loss=0.07552, ctc_loss=0.1567, over 18559.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:00:21,393 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:01:08,467 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 18, validation: loss=0.1758, simple_loss=0.1773, pruned_loss=0.06291, ctc_loss=0.1213, over 1073944.00 frames.
+2024-09-01 03:01:08,468 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 03:01:20,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111669.33333333333, ans=0.07
+2024-09-01 03:02:04,451 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 1.863e+02 1.965e+02 2.122e+02 2.833e+02, threshold=3.929e+02, percent-clipped=0.0
+2024-09-01 03:02:31,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=111776.0, ans=0.125
+2024-09-01 03:04:29,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=111776.0, ans=0.125
+2024-09-01 03:04:54,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=111829.33333333333, ans=0.0
+2024-09-01 03:05:04,729 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=111829.33333333333, ans=0.125
+2024-09-01 03:05:55,541 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 50, loss[loss=0.2046, simple_loss=0.1999, pruned_loss=0.07307, ctc_loss=0.1577, over 18975.00 frames. ], tot_loss[loss=0.1865, simple_loss=0.1831, pruned_loss=0.06615, ctc_loss=0.1438, over 827610.12 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:06:11,234 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=111936.0, ans=0.125
+2024-09-01 03:06:16,506 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.71 vs. limit=22.5
+2024-09-01 03:07:44,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=111989.33333333333, ans=0.5
+2024-09-01 03:08:53,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=112042.66666666667, ans=0.5
+2024-09-01 03:09:40,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=112149.33333333333, ans=0.125
+2024-09-01 03:09:43,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112149.33333333333, ans=0.1
+2024-09-01 03:10:04,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=112202.66666666667, ans=0.035
+2024-09-01 03:11:02,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=112202.66666666667, ans=0.0
+2024-09-01 03:11:05,713 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 100, loss[loss=0.1682, simple_loss=0.1659, pruned_loss=0.05914, ctc_loss=0.1308, over 19135.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.1807, pruned_loss=0.06495, ctc_loss=0.1408, over 1477220.69 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:11:51,852 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.826e+02 1.931e+02 2.035e+02 3.279e+02, threshold=3.861e+02, percent-clipped=0.0
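+The optim.py:487 warnings report five quantiles of recent gradient norms, a clipping threshold, and the share of batches clipped. A hedged sketch of such a report, assuming the five numbers are the 0/25/50/75/100th percentiles of a window of recent norms; icefall's ScaledAdam derives its threshold differently:
+```python
+import torch
+
+def clipping_report(recent_norms: torch.Tensor, threshold: float) -> str:
+    qs = torch.quantile(recent_norms,
+                        torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
+    pct = 100.0 * (recent_norms > threshold).float().mean().item()
+    quartiles = " ".join(f"{q:.3e}" for q in qs.tolist())
+    return (f"Clipping_scale=2.0, grad-norm quartiles {quartiles}, "
+            f"threshold={threshold:.3e}, percent-clipped={pct}")
+```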
+2024-09-01 03:11:59,958 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.34 vs. limit=8.0
+2024-09-01 03:12:26,078 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 03:12:26,079 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:12:26,110 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 03:12:34,295 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.42 vs. limit=15.0
+2024-09-01 03:12:38,678 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 0, loss[loss=0.1939, simple_loss=0.1869, pruned_loss=0.0716, ctc_loss=0.1444, over 18438.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.1869, pruned_loss=0.0716, ctc_loss=0.1444, over 18438.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:12:38,678 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:13:02,321 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 19, validation: loss=0.1735, simple_loss=0.1751, pruned_loss=0.06201, ctc_loss=0.1194, over 1073944.00 frames.
+2024-09-01 03:13:02,321 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 03:13:19,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112352.0, ans=0.125
+2024-09-01 03:13:29,377 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:13:31,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=112405.33333333333, ans=0.0
+2024-09-01 03:13:52,101 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.77 vs. limit=10.0
+2024-09-01 03:13:57,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 03:14:18,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=112512.0, ans=0.0
+2024-09-01 03:14:22,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=112512.0, ans=0.125
+2024-09-01 03:14:33,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-09-01 03:14:36,793 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=112565.33333333333, ans=0.025
+2024-09-01 03:14:48,752 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 50, loss[loss=0.1785, simple_loss=0.1834, pruned_loss=0.05929, ctc_loss=0.1374, over 19013.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.181, pruned_loss=0.06746, ctc_loss=0.1465, over 827262.88 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:14:56,037 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.05 vs. limit=15.0
+2024-09-01 03:15:00,796 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.37 vs. limit=10.0
+2024-09-01 03:15:10,704 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=10.70 vs. limit=12.0
+2024-09-01 03:15:12,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=112672.0, ans=0.125
+2024-09-01 03:15:14,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112672.0, ans=0.0
+2024-09-01 03:15:24,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-09-01 03:15:42,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112725.33333333333, ans=0.125
+2024-09-01 03:15:46,878 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.64 vs. limit=22.5
+2024-09-01 03:15:46,934 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=13.36 vs. limit=12.0
+2024-09-01 03:15:48,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=112778.66666666667, ans=0.2
+2024-09-01 03:15:52,329 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:15:56,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=112778.66666666667, ans=0.2
+2024-09-01 03:16:00,377 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.65 vs. limit=10.0
+2024-09-01 03:16:00,811 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 1.859e+02 1.957e+02 2.051e+02 3.574e+02, threshold=3.914e+02, percent-clipped=0.0
+2024-09-01 03:16:13,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112832.0, ans=0.1
+2024-09-01 03:16:28,716 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 100, loss[loss=0.1433, simple_loss=0.1459, pruned_loss=0.04748, ctc_loss=0.1146, over 19169.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.1807, pruned_loss=0.06717, ctc_loss=0.1437, over 1475351.90 frames. ], batch size: 134, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:16:31,919 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=112885.33333333333, ans=0.125
+2024-09-01 03:16:47,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112938.66666666667, ans=0.1
+2024-09-01 03:17:16,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=112992.0, ans=0.2
+2024-09-01 03:17:22,980 INFO [dysarthria_finetune.py:1435] (2/4) (10731847680, 34072559616)
+2024-09-01 03:17:22,981 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:17:23,024 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 03:17:37,125 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 0, loss[loss=0.2007, simple_loss=0.1982, pruned_loss=0.07284, ctc_loss=0.144, over 18527.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.1982, pruned_loss=0.07284, ctc_loss=0.144, over 18527.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:17:37,125 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:18:00,787 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 20, validation: loss=0.1713, simple_loss=0.1732, pruned_loss=0.06117, ctc_loss=0.1175, over 1073944.00 frames.
+2024-09-01 03:18:00,788 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19762MB
+2024-09-01 03:18:10,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=113040.0, ans=0.125
+2024-09-01 03:18:44,181 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=25.37 vs. limit=15.0
+2024-09-01 03:18:57,794 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.17 vs. limit=15.0
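+The scaling.py:1024 lines fire when a module's whitening metric approaches or exceeds its limit (e.g. metric=25.37 vs. limit=15.0 just above). A hedged sketch of one plausible such metric: the ratio mean(eig^2)/mean(eig)^2 over the channel-covariance eigenvalues, which equals 1.0 for perfectly white features and grows as the spectrum becomes lopsided; the exact formula in this codebase may differ:
+```python
+import torch
+
+def whitening_metric(x: torch.Tensor) -> float:
+    # x: (num_frames, num_channels) activations for one whitening group.
+    x = x - x.mean(dim=0, keepdim=True)
+    cov = (x.T @ x) / x.shape[0]
+    eigs = torch.linalg.eigvalsh(cov)        # real eigenvalues, ascending
+    return float((eigs ** 2).mean() / eigs.mean().clamp(min=1e-20) ** 2)
+```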
+2024-09-01 03:19:07,181 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:19:07,347 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.93 vs. limit=15.0
+2024-09-01 03:19:32,252 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=113253.33333333333, ans=0.1
+2024-09-01 03:19:32,408 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.15 vs. limit=15.0
+2024-09-01 03:19:39,005 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 50, loss[loss=0.1959, simple_loss=0.1899, pruned_loss=0.06938, ctc_loss=0.1579, over 18968.00 frames. ], tot_loss[loss=0.1802, simple_loss=0.1767, pruned_loss=0.06392, ctc_loss=0.1395, over 828106.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:19:42,275 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=13.65 vs. limit=15.0
+2024-09-01 03:19:52,484 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 1.810e+02 1.894e+02 2.049e+02 3.111e+02, threshold=3.788e+02, percent-clipped=0.0
+2024-09-01 03:19:55,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=113306.66666666667, ans=0.125
+2024-09-01 03:21:01,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=113520.0, ans=0.09899494936611666
+2024-09-01 03:21:15,374 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.47 vs. limit=22.5
+2024-09-01 03:21:15,878 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 100, loss[loss=0.1521, simple_loss=0.1573, pruned_loss=0.04963, ctc_loss=0.1189, over 19074.00 frames. ], tot_loss[loss=0.1787, simple_loss=0.1758, pruned_loss=0.06325, ctc_loss=0.1377, over 1476081.83 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:21:28,356 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-09-01 03:21:35,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=113626.66666666667, ans=0.025
+2024-09-01 03:21:43,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=113626.66666666667, ans=0.025
+2024-09-01 03:21:50,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 03:22:02,509 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.56 vs. limit=22.5
+2024-09-01 03:22:08,709 INFO [dysarthria_finetune.py:1435] (2/4) (10733944832, 34072559616)
+2024-09-01 03:22:08,710 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:22:08,730 INFO [dysarthria_finetune.py:1440] (2/4) (29526523904, 34072559616)
+2024-09-01 03:22:08,730 INFO [dysarthria_finetune.py:1442] (2/4) Done!
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-3 b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-3
new file mode 100644
index 0000000000000000000000000000000000000000..7a3412c47a45bca124d8223196fd9f1c07ceb8f2
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/log/log-train-2024-08-31-22-13-17-3
@@ -0,0 +1,546 @@
+2024-08-31 22:13:17,927 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-31 22:13:17,967 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-31 22:13:17,968 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-31 22:13:18,721 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-31 22:13:18,721 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-31 22:13:18,725 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
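+The config above sets base_lr=0.0001 with lr_batches=100000 and lr_epochs=100, and the per-batch lines that follow show lr warming up from about 0.5*base_lr (5.01e-05) toward base_lr (1.00e-04) and then decaying very slowly (9.99e-05, 9.94e-05, ...). A hedged sketch of an Eden-style schedule consistent with that trajectory; the warm-up length here is inferred from the logged values and the formula is illustrative, not icefall's verbatim code:
+```python
+def eden_lr(base_lr: float, batch: int, epoch: int,
+            lr_batches: float = 100000.0, lr_epochs: float = 100.0,
+            warmup_batches: float = 500.0) -> float:
+    decay = (((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25 *
+             ((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25)
+    warmup = 0.5 * (1.0 + min(batch / warmup_batches, 1.0))  # 0.5 -> 1.0
+    return base_lr * decay * warmup
+```
+With these defaults, batch 0 of epoch 1 gives ~5.0e-05 and batch 50 gives ~5.5e-05, matching the lr values logged below.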
+2024-08-31 22:13:18,725 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-31 22:13:21,126 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65805511
+2024-08-31 22:14:30,940 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-20.pt
+2024-08-31 22:14:32,110 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-31 22:14:37,200 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-31 22:14:37,263 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-31 22:14:37,991 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-31 22:14:37,992 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-31 22:14:55,323 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-31 22:14:56,267 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-31 22:14:56,272 INFO [dysarthria_asr_datamodule.py:501] (3/4) About to get dev cuts
+2024-08-31 22:14:56,477 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-31 22:14:57,476 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-31 22:14:57,476 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 22:16:23,767 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=16.32 vs. limit=7.5
+2024-08-31 22:16:31,024 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=21.06 vs. limit=7.5
+2024-08-31 22:16:34,157 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:16:36,229 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:17:53,348 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:17:55,325 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:19:46,278 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
+2024-08-31 22:19:48,462 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11828MB
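+The "Sanity check" pass above pushes the most memory-hungry batches through the model once before training, so any OOM surfaces immediately rather than mid-epoch, and logs peak memory after each. A sketch of that idea; the function and batch selection are illustrative stand-ins for icefall's scan_pessimistic_batches_for_oom helper:
+```python
+import torch
+
+def sanity_check_oom(model, compute_loss, pessimistic_batches, device="cuda"):
+    for batch in pessimistic_batches:          # e.g. largest batch per duration bucket
+        loss = compute_loss(model, batch)      # forward pass on a worst-case batch
+        loss.backward()                        # backward is where OOM usually strikes
+        model.zero_grad(set_to_none=True)
+        mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
+        print(f"Maximum memory allocated so far is {mb}MB")
+```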
+2024-08-31 22:20:26,675 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3339, simple_loss=0.2734, pruned_loss=0.1449, ctc_loss=0.2303, over 18634.00 frames. ], tot_loss[loss=0.3339, simple_loss=0.2734, pruned_loss=0.1449, ctc_loss=0.2303, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 22:20:26,675 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-31 22:32:57,030 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.373, simple_loss=0.3046, pruned_loss=0.1755, ctc_loss=0.2544, over 1073944.00 frames.
+2024-08-31 22:32:57,064 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14282MB
+2024-08-31 22:34:42,881 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.50 vs. limit=10.0
+2024-08-31 22:48:08,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-31 22:48:40,818 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=29.16 vs. limit=22.5
+2024-08-31 22:51:26,991 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 9.923e+02 1.157e+03 1.203e+03 1.280e+03 1.380e+03, threshold=4.812e+03, percent-clipped=0.0
+2024-08-31 23:03:06,387 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 8.113e+02 1.083e+03 1.198e+03 1.280e+03 1.431e+03, threshold=4.794e+03, percent-clipped=0.0
+2024-08-31 23:06:52,166 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.65 vs. limit=15.0
+2024-08-31 23:29:32,327 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 6.285e+02 9.052e+02 1.061e+03 1.198e+03 1.431e+03, threshold=4.243e+03, percent-clipped=0.0
+2024-08-31 23:31:39,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=100213.33333333333, ans=0.09899494936611666
+2024-08-31 23:49:33,585 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.4371, simple_loss=0.3501, pruned_loss=0.2139, ctc_loss=0.3259, over 19001.00 frames. ], tot_loss[loss=0.4103, simple_loss=0.3317, pruned_loss=0.1979, ctc_loss=0.2931, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 23:53:22,761 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=100266.66666666667, ans=0.125
+2024-09-01 00:02:21,761 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100320.0, ans=0.125
+2024-09-01 00:05:08,425 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=20.15 vs. limit=15.0
+2024-09-01 00:11:34,795 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.27 vs. limit=15.0
+2024-09-01 00:18:31,026 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.958e+02 6.817e+02 8.321e+02 1.009e+03 1.431e+03, threshold=1.664e+03, percent-clipped=0.0
+2024-09-01 00:18:31,060 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.3806, simple_loss=0.3074, pruned_loss=0.1808, ctc_loss=0.2732, over 19146.00 frames. ], tot_loss[loss=0.3942, simple_loss=0.3192, pruned_loss=0.1878, ctc_loss=0.2804, over 1476162.18 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-09-01 00:28:33,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100640.0, ans=0.125
+2024-09-01 00:28:35,485 INFO [dysarthria_finetune.py:1435] (3/4) (13281984512, 34072559616)
+2024-09-01 00:28:35,486 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 00:28:35,512 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 00:29:13,550 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 0, loss[loss=0.3599, simple_loss=0.2961, pruned_loss=0.1555, ctc_loss=0.2437, over 18501.00 frames. ], tot_loss[loss=0.3599, simple_loss=0.2961, pruned_loss=0.1555, ctc_loss=0.2437, over 18501.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-09-01 00:29:13,551 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 00:34:07,569 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 2, validation: loss=0.3353, simple_loss=0.2773, pruned_loss=0.1482, ctc_loss=0.2175, over 1073944.00 frames.
+2024-09-01 00:34:07,570 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 00:35:35,051 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.09 vs. limit=15.0
+2024-09-01 00:50:34,921 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-09-01 00:51:25,963 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=100789.33333333333, ans=0.05
+2024-09-01 00:51:25,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=100789.33333333333, ans=0.125
+2024-09-01 00:55:05,704 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.69 vs. limit=15.0
+2024-09-01 00:56:09,333 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.56 vs. limit=15.0
+2024-09-01 00:56:43,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=100896.0, ans=0.2
+2024-09-01 00:57:03,205 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 00:58:34,955 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.40 vs. limit=15.0
+2024-09-01 00:59:42,896 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 50, loss[loss=0.3911, simple_loss=0.3179, pruned_loss=0.1808, ctc_loss=0.2741, over 18956.00 frames. ], tot_loss[loss=0.3739, simple_loss=0.3043, pruned_loss=0.1721, ctc_loss=0.2609, over 828460.00 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-09-01 01:02:49,612 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-09-01 01:08:04,415 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.183e+02 4.403e+02 5.126e+02 5.917e+02 6.888e+02, threshold=1.025e+03, percent-clipped=0.0
+2024-09-01 01:09:21,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101056.0, ans=0.125
+2024-09-01 01:10:44,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-09-01 01:10:53,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-09-01 01:14:19,318 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 100, loss[loss=0.3113, simple_loss=0.258, pruned_loss=0.131, ctc_loss=0.2073, over 19077.00 frames. ], tot_loss[loss=0.361, simple_loss=0.2949, pruned_loss=0.1631, ctc_loss=0.2494, over 1476919.42 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-09-01 01:14:24,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=101216.0, ans=0.025
+2024-09-01 01:15:08,067 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101216.0, ans=0.1
+2024-09-01 01:15:23,711 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=18.27 vs. limit=15.0
+2024-09-01 01:17:26,523 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=10.45 vs. limit=15.0
+2024-09-01 01:18:50,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-09-01 01:19:12,409 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-09-01 01:20:51,921 INFO [dysarthria_finetune.py:1435] (3/4) (13305053184, 34072559616)
+2024-09-01 01:20:51,922 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:20:51,954 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 01:21:02,713 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-09-01 01:21:21,463 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 0, loss[loss=0.3055, simple_loss=0.2542, pruned_loss=0.1262, ctc_loss=0.2024, over 18579.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.2542, pruned_loss=0.1262, ctc_loss=0.2024, over 18579.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-09-01 01:21:21,463 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:21:44,700 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 3, validation: loss=0.309, simple_loss=0.2588, pruned_loss=0.13, ctc_loss=0.1938, over 1073944.00 frames.
+2024-09-01 01:21:44,700 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 01:23:14,942 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.97 vs. limit=15.0
+2024-09-01 01:23:34,345 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.23 vs. limit=6.0
+2024-09-01 01:23:47,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101477.33333333333, ans=0.125
+2024-09-01 01:24:27,008 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.669e+02 3.351e+02 3.834e+02 4.204e+02 5.264e+02, threshold=7.667e+02, percent-clipped=0.0
+2024-09-01 01:24:48,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=101637.33333333333, ans=0.125
+2024-09-01 01:24:48,478 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=101637.33333333333, ans=0.5
+2024-09-01 01:24:49,051 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 50, loss[loss=0.3404, simple_loss=0.2856, pruned_loss=0.1373, ctc_loss=0.2202, over 19113.00 frames. ], tot_loss[loss=0.345, simple_loss=0.2839, pruned_loss=0.1496, ctc_loss=0.2359, over 827781.85 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-09-01 01:24:50,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=101637.33333333333, ans=0.2
+2024-09-01 01:24:51,535 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=17.30 vs. limit=15.0
+2024-09-01 01:25:13,295 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.61 vs. limit=6.0
+2024-09-01 01:25:39,906 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101690.66666666667, ans=0.0
+2024-09-01 01:26:11,086 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.72 vs. limit=15.0
+2024-09-01 01:26:33,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=101797.33333333333, ans=0.125
+2024-09-01 01:26:44,822 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:26:48,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 01:27:00,716 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 100, loss[loss=0.306, simple_loss=0.2566, pruned_loss=0.1242, ctc_loss=0.1998, over 19145.00 frames. ], tot_loss[loss=0.3332, simple_loss=0.2751, pruned_loss=0.1431, ctc_loss=0.226, over 1476240.06 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-09-01 01:28:06,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=102010.66666666667, ans=0.125
+2024-09-01 01:29:32,077 INFO [dysarthria_finetune.py:1435] (3/4) (14445903872, 34072559616)
+2024-09-01 01:29:32,078 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:29:32,119 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 01:29:45,321 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 0, loss[loss=0.3649, simple_loss=0.2971, pruned_loss=0.1592, ctc_loss=0.2645, over 18645.00 frames. ], tot_loss[loss=0.3649, simple_loss=0.2971, pruned_loss=0.1592, ctc_loss=0.2645, over 18645.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-09-01 01:29:45,321 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:30:08,494 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 4, validation: loss=0.2887, simple_loss=0.2447, pruned_loss=0.1169, ctc_loss=0.1781, over 1073944.00 frames.
+2024-09-01 01:30:08,495 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 01:30:42,268 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.378e+02 2.838e+02 3.147e+02 3.460e+02 5.318e+02, threshold=6.294e+02, percent-clipped=0.0
+2024-09-01 01:30:47,470 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102106.66666666667, ans=0.125
+2024-09-01 01:31:06,832 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=102160.0, ans=0.5
+2024-09-01 01:31:19,264 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102213.33333333333, ans=0.0
+2024-09-01 01:32:02,185 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 50, loss[loss=0.3091, simple_loss=0.2589, pruned_loss=0.1289, ctc_loss=0.2013, over 18993.00 frames. ], tot_loss[loss=0.3279, simple_loss=0.2707, pruned_loss=0.1388, ctc_loss=0.2273, over 827748.42 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-09-01 01:32:15,757 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=16.55 vs. limit=15.0
+2024-09-01 01:32:50,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=102426.66666666667, ans=0.2
+2024-09-01 01:32:55,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=102426.66666666667, ans=22.5
+2024-09-01 01:32:56,686 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-09-01 01:33:00,547 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=102480.0, ans=0.0
+2024-09-01 01:33:22,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=102480.0, ans=0.0
+2024-09-01 01:35:45,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=102480.0, ans=0.5
+2024-09-01 01:35:51,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=102480.0, ans=0.125
+2024-09-01 01:37:40,025 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 100, loss[loss=0.3241, simple_loss=0.2658, pruned_loss=0.141, ctc_loss=0.2283, over 19161.00 frames. ], tot_loss[loss=0.317, simple_loss=0.2629, pruned_loss=0.1328, ctc_loss=0.2172, over 1475350.41 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-09-01 01:37:59,825 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.24 vs. limit=15.0
+2024-09-01 01:38:09,931 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 2.526e+02 2.751e+02 3.040e+02 4.636e+02, threshold=5.501e+02, percent-clipped=0.0
+2024-09-01 01:38:21,151 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.64 vs. limit=22.5
+2024-09-01 01:38:32,884 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-09-01 01:38:35,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=102693.33333333333, ans=0.125
+2024-09-01 01:38:35,392 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=102693.33333333333, ans=15.0
+2024-09-01 01:38:35,835 INFO [dysarthria_finetune.py:1435] (3/4) (545980416, 34072559616)
+2024-09-01 01:38:35,835 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:38:35,904 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 01:38:49,735 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 0, loss[loss=0.2823, simple_loss=0.2383, pruned_loss=0.1151, ctc_loss=0.1824, over 18566.00 frames. ], tot_loss[loss=0.2823, simple_loss=0.2383, pruned_loss=0.1151, ctc_loss=0.1824, over 18566.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:38:49,736 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:39:30,980 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 5, validation: loss=0.2717, simple_loss=0.233, pruned_loss=0.1066, ctc_loss=0.1665, over 1073944.00 frames.
+2024-09-01 01:39:30,980 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 01:41:16,718 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=102794.66666666667, ans=0.0
+2024-09-01 01:41:29,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 01:43:25,224 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=13.47 vs. limit=15.0
+2024-09-01 01:43:55,517 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 50, loss[loss=0.3143, simple_loss=0.2635, pruned_loss=0.1299, ctc_loss=0.2099, over 18976.00 frames. ], tot_loss[loss=0.2996, simple_loss=0.251, pruned_loss=0.1211, ctc_loss=0.2051, over 827749.28 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:44:01,620 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.82 vs. limit=15.0
+2024-09-01 01:44:04,816 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=103008.0, ans=0.0
+2024-09-01 01:44:50,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=103061.33333333333, ans=0.125
+2024-09-01 01:45:15,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:45:35,682 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 01:46:04,697 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.130e+02 2.382e+02 2.524e+02 2.770e+02 4.371e+02, threshold=5.047e+02, percent-clipped=0.0
+2024-09-01 01:46:13,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=103168.0, ans=0.2
+2024-09-01 01:46:25,005 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-09-01 01:46:52,899 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 100, loss[loss=0.2783, simple_loss=0.2383, pruned_loss=0.1066, ctc_loss=0.1825, over 19091.00 frames. ], tot_loss[loss=0.2953, simple_loss=0.2477, pruned_loss=0.1192, ctc_loss=0.2019, over 1475913.16 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-09-01 01:47:10,406 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=103274.66666666667, ans=0.125
+2024-09-01 01:47:12,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=103274.66666666667, ans=0.0
+2024-09-01 01:48:10,901 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.05 vs. limit=6.0
+2024-09-01 01:48:20,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=103381.33333333333, ans=0.0
+2024-09-01 01:48:26,073 INFO [dysarthria_finetune.py:1435] (3/4) (14450098176, 34072559616)
+2024-09-01 01:48:26,074 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:48:26,115 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 01:48:41,998 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 0, loss[loss=0.2784, simple_loss=0.235, pruned_loss=0.1134, ctc_loss=0.1853, over 18684.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.235, pruned_loss=0.1134, ctc_loss=0.1853, over 18684.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:48:41,999 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:49:05,144 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 6, validation: loss=0.2578, simple_loss=0.2238, pruned_loss=0.09861, ctc_loss=0.1582, over 1073944.00 frames.
+2024-09-01 01:49:05,145 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 01:49:20,017 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.60 vs. limit=15.0
+2024-09-01 01:49:23,970 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=103424.0, ans=22.5
+2024-09-01 01:49:38,361 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=14.17 vs. limit=15.0
+2024-09-01 01:49:45,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=103477.33333333333, ans=0.125
+2024-09-01 01:49:46,109 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=28.45 vs. limit=22.5
+2024-09-01 01:49:56,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=103530.66666666667, ans=0.0
+2024-09-01 01:50:08,295 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103584.0, ans=0.1
+2024-09-01 01:50:14,540 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=103584.0, ans=0.025
+2024-09-01 01:50:52,019 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 50, loss[loss=0.2816, simple_loss=0.2394, pruned_loss=0.1123, ctc_loss=0.1872, over 19058.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.241, pruned_loss=0.113, ctc_loss=0.1976, over 828493.81 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:50:57,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-09-01 01:50:57,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 01:51:08,007 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 2.277e+02 2.375e+02 2.614e+02 3.891e+02, threshold=4.750e+02, percent-clipped=0.0
+2024-09-01 01:51:26,873 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103744.0, ans=0.125
+2024-09-01 01:51:32,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=103797.33333333333, ans=0.2
+2024-09-01 01:51:59,301 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.90 vs. limit=6.0
+2024-09-01 01:52:34,172 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 100, loss[loss=0.262, simple_loss=0.223, pruned_loss=0.1007, ctc_loss=0.1837, over 19113.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.2395, pruned_loss=0.1117, ctc_loss=0.195, over 1475249.23 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:52:53,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=103957.33333333333, ans=0.0
+2024-09-01 01:52:53,115 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103957.33333333333, ans=0.125
+2024-09-01 01:53:10,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=104010.66666666667, ans=0.1
+2024-09-01 01:53:18,944 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=104064.0, ans=0.125
+2024-09-01 01:53:20,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=104064.0, ans=0.125
+2024-09-01 01:53:34,223 INFO [dysarthria_finetune.py:1435] (3/4) (411762688, 34072559616)
+2024-09-01 01:53:34,224 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:53:34,291 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 01:53:47,072 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 0, loss[loss=0.3217, simple_loss=0.2649, pruned_loss=0.136, ctc_loss=0.2354, over 18595.00 frames. ], tot_loss[loss=0.3217, simple_loss=0.2649, pruned_loss=0.136, ctc_loss=0.2354, over 18595.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:53:47,072 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:54:10,658 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 7, validation: loss=0.2464, simple_loss=0.2165, pruned_loss=0.09214, ctc_loss=0.1523, over 1073944.00 frames.
+2024-09-01 01:54:10,658 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 01:55:12,004 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.149e+02 2.268e+02 2.457e+02 3.821e+02, threshold=4.535e+02, percent-clipped=0.0
+2024-09-01 01:55:53,894 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 50, loss[loss=0.2874, simple_loss=0.2453, pruned_loss=0.1128, ctc_loss=0.1976, over 18963.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.2344, pruned_loss=0.1057, ctc_loss=0.1905, over 827887.87 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:56:05,032 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:56:12,819 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:56:18,627 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=104426.66666666667, ans=0.0
+2024-09-01 01:56:59,225 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:57:08,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=104533.33333333333, ans=0.125
+2024-09-01 01:57:11,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=104586.66666666667, ans=0.125
+2024-09-01 01:57:18,642 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=15.29 vs. limit=15.0
+2024-09-01 01:57:30,855 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 100, loss[loss=0.2523, simple_loss=0.22, pruned_loss=0.0936, ctc_loss=0.1706, over 19124.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.2293, pruned_loss=0.1022, ctc_loss=0.1851, over 1475075.17 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:57:32,370 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=10.99 vs. limit=12.0
+2024-09-01 01:57:36,139 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=104640.0, ans=0.125
+2024-09-01 01:57:36,321 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.89 vs. limit=15.0
+2024-09-01 01:57:45,843 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.88 vs. limit=15.0
+2024-09-01 01:57:55,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten.whitening_limit, batch_count=104693.33333333333, ans=15.0
+2024-09-01 01:58:12,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=104746.66666666667, ans=0.0
+2024-09-01 01:58:21,720 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104746.66666666667, ans=0.1
+2024-09-01 01:58:22,459 INFO [dysarthria_finetune.py:1435] (3/4) (13317636096, 34072559616)
+2024-09-01 01:58:22,460 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 01:58:22,507 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 01:58:35,306 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 0, loss[loss=0.257, simple_loss=0.2233, pruned_loss=0.09832, ctc_loss=0.1715, over 18547.00 frames. ], tot_loss[loss=0.257, simple_loss=0.2233, pruned_loss=0.09832, ctc_loss=0.1715, over 18547.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 01:58:35,307 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 01:58:58,371 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 8, validation: loss=0.236, simple_loss=0.2103, pruned_loss=0.08624, ctc_loss=0.1474, over 1073944.00 frames.
+2024-09-01 01:58:58,371 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 01:59:02,414 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.080e+02 2.182e+02 2.331e+02 3.634e+02, threshold=4.365e+02, percent-clipped=0.0
+2024-09-01 01:59:58,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=104949.33333333333, ans=0.125
+2024-09-01 02:00:10,654 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:00:25,808 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:00:34,014 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.35 vs. limit=15.0
+2024-09-01 02:00:36,425 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 50, loss[loss=0.2715, simple_loss=0.2348, pruned_loss=0.103, ctc_loss=0.1903, over 18964.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.2253, pruned_loss=0.09813, ctc_loss=0.1831, over 828441.23 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:01:40,288 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=105216.0, ans=0.0
+2024-09-01 02:02:02,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=105269.33333333333, ans=0.125
+2024-09-01 02:02:13,396 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 100, loss[loss=0.2455, simple_loss=0.2194, pruned_loss=0.08673, ctc_loss=0.1662, over 19119.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.2226, pruned_loss=0.09605, ctc_loss=0.1797, over 1475727.62 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-09-01 02:02:17,358 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.064e+02 2.191e+02 2.358e+02 3.385e+02, threshold=4.381e+02, percent-clipped=0.0
+2024-09-01 02:02:41,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105376.0, ans=0.1
+2024-09-01 02:02:41,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=105376.0, ans=0.2
+2024-09-01 02:02:57,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=105429.33333333333, ans=0.125
+2024-09-01 02:03:07,396 INFO [dysarthria_finetune.py:1435] (3/4) (428539904, 34072559616)
+2024-09-01 02:03:07,397 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:03:07,474 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:03:20,916 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 0, loss[loss=0.2566, simple_loss=0.2288, pruned_loss=0.09494, ctc_loss=0.1661, over 18777.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.2288, pruned_loss=0.09494, ctc_loss=0.1661, over 18777.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:03:20,916 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:03:44,108 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 9, validation: loss=0.2267, simple_loss=0.2052, pruned_loss=0.08107, ctc_loss=0.1434, over 1073944.00 frames.
+2024-09-01 02:03:44,109 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 02:04:33,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=105584.0, ans=0.0
+2024-09-01 02:05:01,314 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=18.78 vs. limit=15.0
+2024-09-01 02:05:08,280 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff2.min_abs, batch_count=105637.33333333333, ans=0.1
+2024-09-01 02:05:14,824 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:05:31,428 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=26.00 vs. limit=22.5
+2024-09-01 02:05:38,307 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 50, loss[loss=0.2413, simple_loss=0.2176, pruned_loss=0.08417, ctc_loss=0.1654, over 18965.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.2186, pruned_loss=0.09307, ctc_loss=0.1797, over 827503.70 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:05:52,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=105744.0, ans=0.0
+2024-09-01 02:06:11,283 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.54 vs. limit=15.0
+2024-09-01 02:06:24,252 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105850.66666666667, ans=0.1
+2024-09-01 02:06:31,109 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:06:31,751 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.077e+02 2.184e+02 2.316e+02 3.584e+02, threshold=4.367e+02, percent-clipped=0.0
+2024-09-01 02:06:46,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:07:32,678 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 100, loss[loss=0.2162, simple_loss=0.198, pruned_loss=0.0729, ctc_loss=0.1484, over 19159.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.2153, pruned_loss=0.09052, ctc_loss=0.175, over 1475225.92 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:07:35,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=106010.66666666667, ans=0.0
+2024-09-01 02:07:45,412 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.58 vs. limit=22.5
+2024-09-01 02:08:04,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=106064.0, ans=0.0
+2024-09-01 02:08:33,761 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=106117.33333333333, ans=0.05
+2024-09-01 02:08:34,530 INFO [dysarthria_finetune.py:1435] (3/4) (13296664576, 34072559616)
+2024-09-01 02:08:34,611 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:08:34,655 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:08:48,148 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 0, loss[loss=0.2323, simple_loss=0.2072, pruned_loss=0.08459, ctc_loss=0.1627, over 18587.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.2072, pruned_loss=0.08459, ctc_loss=0.1627, over 18587.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:08:48,149 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:09:26,929 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 10, validation: loss=0.2182, simple_loss=0.2007, pruned_loss=0.07671, ctc_loss=0.1399, over 1073944.00 frames.
+2024-09-01 02:09:26,929 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 02:09:40,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106165.33333333333, ans=0.1
+2024-09-01 02:10:57,738 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.66 vs. limit=6.0
+2024-09-01 02:11:54,661 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.023e+02 2.117e+02 2.323e+02 3.505e+02, threshold=4.234e+02, percent-clipped=0.0
+2024-09-01 02:12:15,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:12:15,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:12:16,363 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 50, loss[loss=0.2345, simple_loss=0.2211, pruned_loss=0.07679, ctc_loss=0.1539, over 19101.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.2118, pruned_loss=0.08627, ctc_loss=0.1741, over 827631.91 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:12:32,925 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:13:07,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=106485.33333333333, ans=0.09899494936611666
+2024-09-01 02:13:33,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=106538.66666666667, ans=0.2
+2024-09-01 02:13:33,804 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=12.73 vs. limit=15.0
+2024-09-01 02:14:04,316 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.41 vs. limit=22.5
+2024-09-01 02:14:14,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=106592.0, ans=0.0
+2024-09-01 02:14:19,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106592.0, ans=0.125
+2024-09-01 02:15:12,133 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 100, loss[loss=0.2145, simple_loss=0.1977, pruned_loss=0.07197, ctc_loss=0.1551, over 19051.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2113, pruned_loss=0.08504, ctc_loss=0.1712, over 1475773.03 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-09-01 02:15:32,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=106698.66666666667, ans=0.5
+2024-09-01 02:15:56,554 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.82 vs. limit=15.0
+2024-09-01 02:16:29,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=106805.33333333333, ans=0.0
+2024-09-01 02:16:31,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=106805.33333333333, ans=0.2
+2024-09-01 02:16:32,150 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.69 vs. limit=12.0
+2024-09-01 02:16:36,645 INFO [dysarthria_finetune.py:1435] (3/4) (13290373120, 34072559616)
+2024-09-01 02:16:36,645 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:16:36,696 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:16:49,155 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 0, loss[loss=0.2535, simple_loss=0.2222, pruned_loss=0.09401, ctc_loss=0.1949, over 18604.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.2222, pruned_loss=0.09401, ctc_loss=0.1949, over 18604.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:16:49,155 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:17:12,841 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 11, validation: loss=0.211, simple_loss=0.1968, pruned_loss=0.07375, ctc_loss=0.137, over 1073944.00 frames.
+2024-09-01 02:17:12,841 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 02:17:43,500 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 1.989e+02 2.082e+02 2.188e+02 3.029e+02, threshold=4.165e+02, percent-clipped=0.0
+2024-09-01 02:17:44,677 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=106906.66666666667, ans=0.0
+2024-09-01 02:17:56,779 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=106960.0, ans=10.0
+2024-09-01 02:18:04,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=106960.0, ans=0.125
+2024-09-01 02:18:06,472 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=106960.0, ans=0.125
+2024-09-01 02:18:44,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=107066.66666666667, ans=0.0
+2024-09-01 02:18:53,615 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 50, loss[loss=0.2405, simple_loss=0.2142, pruned_loss=0.08627, ctc_loss=0.1867, over 19110.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2014, pruned_loss=0.0772, ctc_loss=0.1612, over 828132.31 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:18:55,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107120.0, ans=0.125
+2024-09-01 02:19:29,293 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=107173.33333333333, ans=0.025
+2024-09-01 02:20:13,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107280.0, ans=0.1
+2024-09-01 02:20:35,684 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 100, loss[loss=0.2002, simple_loss=0.1919, pruned_loss=0.0629, ctc_loss=0.1462, over 19127.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2039, pruned_loss=0.07926, ctc_loss=0.1635, over 1475363.18 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:20:43,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=107386.66666666667, ans=0.04949747468305833
+2024-09-01 02:20:43,405 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=10.30 vs. limit=12.0
+2024-09-01 02:20:47,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=107386.66666666667, ans=0.0
+2024-09-01 02:21:05,346 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 1.934e+02 2.032e+02 2.152e+02 3.346e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-09-01 02:21:08,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=107440.0, ans=0.125
+2024-09-01 02:21:25,882 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.94 vs. limit=15.0
+2024-09-01 02:21:30,080 INFO [dysarthria_finetune.py:1435] (3/4) (688586752, 34072559616)
+2024-09-01 02:21:30,081 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:21:30,160 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:21:45,324 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 0, loss[loss=0.2243, simple_loss=0.2054, pruned_loss=0.08135, ctc_loss=0.158, over 18650.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2054, pruned_loss=0.08135, ctc_loss=0.158, over 18650.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:21:45,324 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:22:12,234 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 12, validation: loss=0.2042, simple_loss=0.1932, pruned_loss=0.07127, ctc_loss=0.1341, over 1073944.00 frames.
+2024-09-01 02:22:12,234 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14285MB
+2024-09-01 02:22:35,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-09-01 02:22:58,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-09-01 02:23:03,013 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-09-01 02:23:24,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=107648.0, ans=0.125
+2024-09-01 02:24:02,409 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-09-01 02:24:27,774 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 50, loss[loss=0.2162, simple_loss=0.2053, pruned_loss=0.0726, ctc_loss=0.1549, over 19037.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2026, pruned_loss=0.07876, ctc_loss=0.1634, over 828666.57 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:24:53,596 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107861.33333333333, ans=0.1
+2024-09-01 02:25:24,210 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 02:25:31,211 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 02:25:37,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=107914.66666666667, ans=0.125
+2024-09-01 02:25:48,173 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107968.0, ans=0.1
+2024-09-01 02:25:55,568 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 1.958e+02 2.051e+02 2.245e+02 3.047e+02, threshold=4.102e+02, percent-clipped=0.0
+2024-09-01 02:25:58,921 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=107968.0, ans=0.0
+2024-09-01 02:26:05,749 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=108021.33333333333, ans=0.0
+2024-09-01 02:26:30,879 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 100, loss[loss=0.1882, simple_loss=0.1771, pruned_loss=0.06143, ctc_loss=0.1507, over 19142.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2012, pruned_loss=0.077, ctc_loss=0.1599, over 1477170.44 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-09-01 02:26:53,036 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=108128.0, ans=0.2
+2024-09-01 02:27:31,271 INFO [dysarthria_finetune.py:1435] (3/4) (14458486784, 34072559616)
+2024-09-01 02:27:31,272 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:27:31,315 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:27:44,329 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 0, loss[loss=0.2076, simple_loss=0.1909, pruned_loss=0.07786, ctc_loss=0.1427, over 18629.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.1909, pruned_loss=0.07786, ctc_loss=0.1427, over 18629.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:27:44,329 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:27:51,180 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.0.layers.0.self_attn_weights, attn_weights_entropy = tensor([7.8226, 7.8059, 7.8470, 7.7607], device='cuda:3')
+2024-09-01 02:28:07,310 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 13, validation: loss=0.1981, simple_loss=0.19, pruned_loss=0.06934, ctc_loss=0.1316, over 1073944.00 frames.
+2024-09-01 02:28:07,310 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 02:28:25,360 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108229.33333333333, ans=0.0
+2024-09-01 02:28:33,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 02:28:47,283 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=108336.0, ans=0.95
+2024-09-01 02:28:57,366 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=108336.0, ans=0.125
+2024-09-01 02:29:01,445 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:29:26,924 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=108442.66666666667, ans=0.2
+2024-09-01 02:29:47,492 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 50, loss[loss=0.2123, simple_loss=0.1976, pruned_loss=0.07518, ctc_loss=0.16, over 19050.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.199, pruned_loss=0.077, ctc_loss=0.1615, over 828311.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:29:57,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=108496.0, ans=0.025
+2024-09-01 02:30:01,822 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 1.921e+02 2.017e+02 2.151e+02 2.785e+02, threshold=4.034e+02, percent-clipped=0.0
+2024-09-01 02:30:13,104 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108549.33333333333, ans=0.1
+2024-09-01 02:30:15,249 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.08 vs. limit=15.0
+2024-09-01 02:30:28,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=108602.66666666667, ans=0.0
+2024-09-01 02:30:34,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=108602.66666666667, ans=0.125
+2024-09-01 02:30:44,281 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=108602.66666666667, ans=0.1
+2024-09-01 02:30:49,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108656.0, ans=0.1
+2024-09-01 02:30:49,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 02:31:13,042 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 02:31:25,779 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 100, loss[loss=0.2117, simple_loss=0.1989, pruned_loss=0.07476, ctc_loss=0.1586, over 19095.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.1982, pruned_loss=0.07565, ctc_loss=0.158, over 1474662.24 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:31:28,824 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:31:40,561 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=108762.66666666667, ans=0.025
+2024-09-01 02:31:59,634 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=108816.0, ans=0.2
+2024-09-01 02:32:12,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=108869.33333333333, ans=0.2
+2024-09-01 02:32:14,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=108869.33333333333, ans=0.025
+2024-09-01 02:32:19,307 INFO [dysarthria_finetune.py:1435] (3/4) (10194976768, 34072559616)
+2024-09-01 02:32:27,537 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:32:27,576 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:33:00,722 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 0, loss[loss=0.2422, simple_loss=0.2202, pruned_loss=0.09329, ctc_loss=0.1727, over 18650.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2202, pruned_loss=0.09329, ctc_loss=0.1727, over 18650.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:33:00,722 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:33:40,869 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.5.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.1891, 2.1878, 2.8789, 3.6593], device='cuda:3')
+2024-09-01 02:33:44,931 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 14, validation: loss=0.1924, simple_loss=0.1871, pruned_loss=0.06768, ctc_loss=0.1293, over 1073944.00 frames.
+2024-09-01 02:33:44,932 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 02:34:19,716 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=108970.66666666667, ans=0.125
+2024-09-01 02:35:04,341 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:35:09,744 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 1.893e+02 1.977e+02 2.192e+02 2.916e+02, threshold=3.954e+02, percent-clipped=0.0
+2024-09-01 02:35:46,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=109130.66666666667, ans=0.125
+2024-09-01 02:36:00,023 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=109130.66666666667, ans=0.0
+2024-09-01 02:36:12,048 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 50, loss[loss=0.1756, simple_loss=0.1826, pruned_loss=0.05069, ctc_loss=0.1366, over 19012.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.197, pruned_loss=0.07558, ctc_loss=0.1602, over 829335.16 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:36:45,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=109184.0, ans=0.125
+2024-09-01 02:37:10,503 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.49 vs. limit=15.0
+2024-09-01 02:37:17,737 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109237.33333333333, ans=0.1
+2024-09-01 02:37:32,857 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:38:17,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=109344.0, ans=0.025
+2024-09-01 02:38:42,958 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 100, loss[loss=0.1924, simple_loss=0.1896, pruned_loss=0.06381, ctc_loss=0.1479, over 19114.00 frames. ], tot_loss[loss=0.206, simple_loss=0.1942, pruned_loss=0.07354, ctc_loss=0.1556, over 1476363.01 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-09-01 02:38:55,320 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=16.21 vs. limit=15.0
+2024-09-01 02:39:00,940 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=109450.66666666667, ans=0.0
+2024-09-01 02:39:11,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=109504.0, ans=0.2
+2024-09-01 02:39:19,696 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=109504.0, ans=0.125
+2024-09-01 02:39:26,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109504.0, ans=0.125
+2024-09-01 02:39:27,083 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=109504.0, ans=0.125
+2024-09-01 02:39:27,274 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.19 vs. limit=15.0
+2024-09-01 02:39:47,672 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=29.49 vs. limit=22.5
+2024-09-01 02:39:50,915 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 1.903e+02 1.972e+02 2.079e+02 2.713e+02, threshold=3.943e+02, percent-clipped=0.0
+2024-09-01 02:39:50,959 INFO [dysarthria_finetune.py:1435] (3/4) (13302956032, 34072559616)
+2024-09-01 02:39:50,959 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:39:51,008 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:40:03,799 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 0, loss[loss=0.1968, simple_loss=0.1884, pruned_loss=0.07154, ctc_loss=0.1406, over 18716.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.1884, pruned_loss=0.07154, ctc_loss=0.1406, over 18716.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:40:03,799 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:40:34,885 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 15, validation: loss=0.1871, simple_loss=0.1844, pruned_loss=0.06629, ctc_loss=0.1271, over 1073944.00 frames.
+2024-09-01 02:40:34,886 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 02:40:36,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-09-01 02:40:40,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 02:40:51,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=109605.33333333333, ans=0.125
+2024-09-01 02:41:54,499 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=109712.0, ans=0.0
+2024-09-01 02:42:06,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=109765.33333333333, ans=0.125
+2024-09-01 02:42:36,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=109818.66666666667, ans=0.125
+2024-09-01 02:43:06,252 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 50, loss[loss=0.167, simple_loss=0.1685, pruned_loss=0.05464, ctc_loss=0.1276, over 19179.00 frames. ], tot_loss[loss=0.198, simple_loss=0.1898, pruned_loss=0.07017, ctc_loss=0.1513, over 827713.24 frames. ], batch size: 103, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:44:00,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=109925.33333333333, ans=0.125
+2024-09-01 02:44:24,137 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.92 vs. limit=15.0
+2024-09-01 02:44:42,718 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110032.0, ans=0.125
+2024-09-01 02:44:56,247 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.83 vs. limit=6.0
+2024-09-01 02:45:26,974 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 1.886e+02 2.042e+02 2.162e+02 2.644e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-09-01 02:45:29,599 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 100, loss[loss=0.182, simple_loss=0.1765, pruned_loss=0.06338, ctc_loss=0.1442, over 19073.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.1893, pruned_loss=0.07005, ctc_loss=0.1509, over 1475236.97 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-09-01 02:46:23,849 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:46:46,674 INFO [dysarthria_finetune.py:1435] (3/4) (13296664576, 34072559616)
+2024-09-01 02:46:46,675 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:46:46,717 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:47:00,713 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 0, loss[loss=0.2143, simple_loss=0.2025, pruned_loss=0.08034, ctc_loss=0.1586, over 18560.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2025, pruned_loss=0.08034, ctc_loss=0.1586, over 18560.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:47:00,713 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:47:23,703 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 16, validation: loss=0.182, simple_loss=0.1819, pruned_loss=0.06496, ctc_loss=0.1251, over 1073944.00 frames.
+2024-09-01 02:47:23,704 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 02:47:35,508 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=110293.33333333333, ans=0.125
+2024-09-01 02:47:46,143 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=18.39 vs. limit=15.0
+2024-09-01 02:47:57,763 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.17 vs. limit=15.0
+2024-09-01 02:48:35,610 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=110453.33333333333, ans=0.125
+2024-09-01 02:49:04,116 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 50, loss[loss=0.1771, simple_loss=0.1879, pruned_loss=0.05662, ctc_loss=0.1308, over 19044.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.1892, pruned_loss=0.07087, ctc_loss=0.1514, over 827661.95 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:49:21,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=110560.0, ans=0.2
+2024-09-01 02:49:33,702 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.68 vs. limit=15.0
+2024-09-01 02:49:43,043 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=110666.66666666667, ans=0.125
+2024-09-01 02:49:43,219 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110666.66666666667, ans=0.0
+2024-09-01 02:49:44,017 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 1.879e+02 1.996e+02 2.191e+02 2.692e+02, threshold=3.992e+02, percent-clipped=0.0
+2024-09-01 02:50:27,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=110773.33333333333, ans=0.04949747468305833
+2024-09-01 02:50:37,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 02:50:42,114 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 100, loss[loss=0.1919, simple_loss=0.1923, pruned_loss=0.06697, ctc_loss=0.1436, over 19120.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.1877, pruned_loss=0.06982, ctc_loss=0.1482, over 1474935.70 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:06,869 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.72 vs. limit=15.0
+2024-09-01 02:51:33,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=110933.33333333333, ans=0.025
+2024-09-01 02:51:35,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=110933.33333333333, ans=0.0
+2024-09-01 02:51:35,751 INFO [dysarthria_finetune.py:1435] (3/4) (235601920, 34072559616)
+2024-09-01 02:51:35,751 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:51:35,835 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 02:51:50,606 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 0, loss[loss=0.25, simple_loss=0.2231, pruned_loss=0.1017, ctc_loss=0.1841, over 18583.00 frames. ], tot_loss[loss=0.25, simple_loss=0.2231, pruned_loss=0.1017, ctc_loss=0.1841, over 18583.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:51:50,607 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:51:54,665 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.5.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.2776, 2.0626, 2.8034, 3.5089], device='cuda:3')
+2024-09-01 02:52:13,661 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 17, validation: loss=0.1784, simple_loss=0.1796, pruned_loss=0.06394, ctc_loss=0.1232, over 1073944.00 frames.
+2024-09-01 02:52:13,662 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 02:52:38,426 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=111034.66666666667, ans=0.025
+2024-09-01 02:52:52,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 02:52:52,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=111034.66666666667, ans=0.5
+2024-09-01 02:53:21,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=111088.0, ans=0.125
+2024-09-01 02:53:27,569 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=111088.0, ans=0.0
+2024-09-01 02:53:46,312 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.48 vs. limit=15.0
+2024-09-01 02:53:59,378 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 1.870e+02 1.982e+02 2.091e+02 2.808e+02, threshold=3.964e+02, percent-clipped=0.0
+2024-09-01 02:54:19,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=111194.66666666667, ans=0.2
+2024-09-01 02:54:35,620 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 50, loss[loss=0.1912, simple_loss=0.1886, pruned_loss=0.06888, ctc_loss=0.1401, over 18982.00 frames. ], tot_loss[loss=0.1899, simple_loss=0.1849, pruned_loss=0.06812, ctc_loss=0.1466, over 827806.80 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:55:15,247 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=111248.0, ans=0.07
+2024-09-01 02:55:25,168 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:55:46,464 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.59 vs. limit=15.0
+2024-09-01 02:55:52,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111301.33333333333, ans=0.07
+2024-09-01 02:56:22,590 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.62 vs. limit=15.0
+2024-09-01 02:57:32,015 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.41 vs. limit=6.0
+2024-09-01 02:58:16,894 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 100, loss[loss=0.1836, simple_loss=0.1811, pruned_loss=0.06227, ctc_loss=0.1539, over 19078.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.1835, pruned_loss=0.06674, ctc_loss=0.1438, over 1476033.73 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-09-01 02:59:13,963 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=7.27 vs. limit=12.0
+2024-09-01 02:59:18,027 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.26 vs. limit=6.0
+2024-09-01 03:00:07,763 INFO [dysarthria_finetune.py:1435] (3/4) (627769344, 34072559616)
+2024-09-01 03:00:07,763 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:00:07,832 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 03:00:21,387 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 0, loss[loss=0.2033, simple_loss=0.1934, pruned_loss=0.0746, ctc_loss=0.1601, over 18613.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.1934, pruned_loss=0.0746, ctc_loss=0.1601, over 18613.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-09-01 03:00:21,387 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:01:08,462 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 18, validation: loss=0.1758, simple_loss=0.1773, pruned_loss=0.06291, ctc_loss=0.1213, over 1073944.00 frames.
+2024-09-01 03:01:08,463 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 03:01:20,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=111669.33333333333, ans=0.125
+2024-09-01 03:01:20,098 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=111669.33333333333, ans=0.125
+2024-09-01 03:02:04,459 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 1.863e+02 1.965e+02 2.122e+02 2.833e+02, threshold=3.929e+02, percent-clipped=0.0
+2024-09-01 03:02:31,192 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=111776.0, ans=0.2
+2024-09-01 03:04:37,317 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.95 vs. limit=6.0
+2024-09-01 03:05:54,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=111936.0, ans=0.125
+2024-09-01 03:05:55,554 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 50, loss[loss=0.1719, simple_loss=0.1772, pruned_loss=0.05631, ctc_loss=0.1349, over 19004.00 frames. ], tot_loss[loss=0.1815, simple_loss=0.1786, pruned_loss=0.064, ctc_loss=0.1409, over 828768.32 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:06:04,696 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=111936.0, ans=0.2
+2024-09-01 03:07:05,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111936.0, ans=0.125
+2024-09-01 03:07:21,448 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=111989.33333333333, ans=0.0
+2024-09-01 03:07:44,404 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=111989.33333333333, ans=0.125
+2024-09-01 03:08:50,355 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=112042.66666666667, ans=0.125
+2024-09-01 03:09:23,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=112149.33333333333, ans=10.0
+2024-09-01 03:09:44,080 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=112149.33333333333, ans=10.0
+2024-09-01 03:09:56,959 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.76 vs. limit=15.0
+2024-09-01 03:10:04,466 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=112202.66666666667, ans=0.0
+2024-09-01 03:11:05,728 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 100, loss[loss=0.157, simple_loss=0.1568, pruned_loss=0.05465, ctc_loss=0.1196, over 19084.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.1785, pruned_loss=0.06487, ctc_loss=0.1409, over 1476677.05 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 16.0
+2024-09-01 03:11:34,558 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.78 vs. limit=22.5
+2024-09-01 03:11:50,987 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.89 vs. limit=15.0
+2024-09-01 03:11:51,861 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.826e+02 1.931e+02 2.035e+02 3.279e+02, threshold=3.861e+02, percent-clipped=0.0
+2024-09-01 03:12:04,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=112256.0, ans=0.0
+2024-09-01 03:12:08,618 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.33 vs. limit=15.0
+2024-09-01 03:12:26,079 INFO [dysarthria_finetune.py:1435] (3/4) (164298752, 34072559616)
+2024-09-01 03:12:26,080 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:12:26,160 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 03:12:38,680 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 0, loss[loss=0.2327, simple_loss=0.2108, pruned_loss=0.09255, ctc_loss=0.1741, over 18562.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2108, pruned_loss=0.09255, ctc_loss=0.1741, over 18562.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:12:38,680 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:13:02,315 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 19, validation: loss=0.1735, simple_loss=0.1751, pruned_loss=0.06201, ctc_loss=0.1194, over 1073944.00 frames.
+2024-09-01 03:13:02,315 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 03:13:19,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112352.0, ans=0.125
+2024-09-01 03:13:31,712 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=112405.33333333333, ans=0.0
+2024-09-01 03:14:18,650 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112512.0, ans=0.1
+2024-09-01 03:14:22,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=112512.0, ans=0.2
+2024-09-01 03:14:33,029 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 03:14:37,213 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=112565.33333333333, ans=0.025
+2024-09-01 03:14:37,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 03:14:48,395 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.07 vs. limit=15.0
+2024-09-01 03:14:48,747 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 50, loss[loss=0.1669, simple_loss=0.1656, pruned_loss=0.05631, ctc_loss=0.139, over 19015.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.1797, pruned_loss=0.06587, ctc_loss=0.1435, over 829365.51 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:15:08,640 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=112672.0, ans=0.125
+2024-09-01 03:15:08,818 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=112672.0, ans=0.125
+2024-09-01 03:15:12,726 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=112672.0, ans=0.0
+2024-09-01 03:15:14,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112672.0, ans=0.0
+2024-09-01 03:15:24,480 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-09-01 03:15:48,499 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=112778.66666666667, ans=0.125
+2024-09-01 03:16:00,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112778.66666666667, ans=0.1
+2024-09-01 03:16:00,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=112778.66666666667, ans=0.05
+2024-09-01 03:16:00,813 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 1.859e+02 1.957e+02 2.051e+02 3.574e+02, threshold=3.914e+02, percent-clipped=0.0
+2024-09-01 03:16:15,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112832.0, ans=0.1
+2024-09-01 03:16:28,718 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 100, loss[loss=0.1609, simple_loss=0.1599, pruned_loss=0.05447, ctc_loss=0.1323, over 19083.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.1794, pruned_loss=0.06534, ctc_loss=0.1406, over 1476389.98 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 32.0
+2024-09-01 03:16:33,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 03:16:35,849 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=112885.33333333333, ans=0.125
+2024-09-01 03:16:43,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 03:17:04,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=112938.66666666667, ans=0.125
+2024-09-01 03:17:05,227 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.45 vs. limit=15.0
+2024-09-01 03:17:18,509 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=112992.0, ans=22.5
+2024-09-01 03:17:22,996 INFO [dysarthria_finetune.py:1435] (3/4) (13292470272, 34072559616)
+2024-09-01 03:17:22,997 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:17:23,027 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 03:17:37,149 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 0, loss[loss=0.2298, simple_loss=0.206, pruned_loss=0.09203, ctc_loss=0.1737, over 18436.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.206, pruned_loss=0.09203, ctc_loss=0.1737, over 18436.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:17:37,150 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:18:00,790 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 20, validation: loss=0.1713, simple_loss=0.1732, pruned_loss=0.06117, ctc_loss=0.1175, over 1073944.00 frames.
+2024-09-01 03:18:00,790 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 20821MB
+2024-09-01 03:18:10,175 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=113040.0, ans=0.125
+2024-09-01 03:18:18,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=113040.0, ans=0.025
+2024-09-01 03:18:49,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=113146.66666666667, ans=0.2
+2024-09-01 03:18:57,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=113146.66666666667, ans=0.5
+2024-09-01 03:19:07,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=113200.0, ans=0.0
+2024-09-01 03:19:09,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=113200.0, ans=0.035
+2024-09-01 03:19:39,005 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 50, loss[loss=0.1828, simple_loss=0.1836, pruned_loss=0.06274, ctc_loss=0.1412, over 18942.00 frames. ], tot_loss[loss=0.1798, simple_loss=0.1764, pruned_loss=0.06407, ctc_loss=0.1379, over 827999.75 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:19:52,487 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 1.810e+02 1.894e+02 2.049e+02 3.111e+02, threshold=3.788e+02, percent-clipped=0.0
+2024-09-01 03:20:23,209 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=11.57 vs. limit=12.0
+2024-09-01 03:20:40,498 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.28 vs. limit=15.0
+2024-09-01 03:20:51,453 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113466.66666666667, ans=0.1
+2024-09-01 03:21:09,447 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=113520.0, ans=0.125
+2024-09-01 03:21:15,874 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 100, loss[loss=0.1365, simple_loss=0.1436, pruned_loss=0.04304, ctc_loss=0.1084, over 19171.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.1753, pruned_loss=0.06338, ctc_loss=0.1362, over 1475487.52 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-09-01 03:21:24,861 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.33 vs. limit=15.0
+2024-09-01 03:21:32,224 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:21:45,465 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.94 vs. limit=15.0
+2024-09-01 03:21:47,196 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=113626.66666666667, ans=0.025
+2024-09-01 03:21:56,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=113680.0, ans=0.125
+2024-09-01 03:22:08,728 INFO [dysarthria_finetune.py:1435] (3/4) (14441709568, 34072559616)
+2024-09-01 03:22:08,729 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:22:08,752 INFO [dysarthria_finetune.py:1440] (3/4) (30023548928, 34072559616)
+2024-09-01 03:22:08,752 INFO [dysarthria_finetune.py:1442] (3/4) Done!
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724607736.cdr2649.int.cedar.computecanada.ca.964583.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724607736.cdr2649.int.cedar.computecanada.ca.964583.0
new file mode 100644
index 0000000000000000000000000000000000000000..6643547f93411f6eed3ab08c017c2f2344079e07
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724607736.cdr2649.int.cedar.computecanada.ca.964583.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf1c9f21b4b7ac22e576a7faab605fb5a738a9d3485331668fd5d8b4559cd484
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724764802.cdr2652.int.cedar.computecanada.ca.875662.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724764802.cdr2652.int.cedar.computecanada.ca.875662.0
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724944374.cdr2538.int.cedar.computecanada.ca.643204.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724944374.cdr2538.int.cedar.computecanada.ca.643204.0
new file mode 100644
index 0000000000000000000000000000000000000000..8832d7d6f39ce5479a5030f7844cf8f6899c9d76
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724944374.cdr2538.int.cedar.computecanada.ca.643204.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b463c51e3579b50cea73f34be8a33b133c5a77e5dc7f8c6dfd8279c9e5aeacd6
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724971791.cdr2563.int.cedar.computecanada.ca.643204.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724971791.cdr2563.int.cedar.computecanada.ca.643204.0
new file mode 100644
index 0000000000000000000000000000000000000000..40ca887e6780470c9c0c42deae5310b3509a066f
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1724971791.cdr2563.int.cedar.computecanada.ca.643204.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06bd2f735f45c44148a25231373bcdfad98cd8b83c28460cc3249a499b011c5c
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725046816.cdr2547.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725046816.cdr2547.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..a64a74b588330e0f3b6d353985bdea2b40a5f883
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725046816.cdr2547.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa645d3d4a34b3ddb50310e7b5debb054d71d1e718ad9247df940b5e7760d7b
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725077621.cdr2549.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725077621.cdr2549.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..9071201c0b955122247b83a631d9fce800e6f35a
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725077621.cdr2549.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03d1fc80071133e3229cbe1e41928f813babf238a3c4d22b212b27f183c063da
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725080114.cdr2608.int.cedar.computecanada.ca.2191949.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725080114.cdr2608.int.cedar.computecanada.ca.2191949.0
new file mode 100644
index 0000000000000000000000000000000000000000..0ea19f87ece56aa537a6bb9e58592714eff54a16
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725080114.cdr2608.int.cedar.computecanada.ca.2191949.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2804ae12525233bdf0a7ebde73f18d367304cfa25b3b6280c2627459389ad24
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725088151.cdr2654.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725088151.cdr2654.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..984e31baebf37309ac3cf3b6eeb0e7aa610a4a42
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725088151.cdr2654.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:282d36dd02b36719e394d5230a6476e3894e7d685f30d76a73d56ab290641cd5
+size 88
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725135618.cdr2552.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725135618.cdr2552.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..fe0cb02e2a20a659995f42aa33cf970e8811acee
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725135618.cdr2552.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68a06904a6431e75ace39f0cf2d6279ebd9b8bd74e1f7db3c925039c880cc713
+size 714
diff --git a/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725167599.cdr2549.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725167599.cdr2549.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..0973f42138cf3e8030d6dc61cf43d98b1afa6cf3
--- /dev/null
+++ b/zipformer/finetuned/ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1725167599.cdr2549.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:284a6506c4c4fc05501d607de67fbd8ea8382660345d8ee4e52f891c6d4b9ae6
+size 41711
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/best-train-loss.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/best-train-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ffa4bb6c37319990ba2f91b71fea9edd3c9eb939
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/best-train-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6939f0b14acb06ac04e9495d8309f13547d51492ee28dff423a3b34ebca4a63a
+size 1058858830
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/best-valid-loss.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/best-valid-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ffa4bb6c37319990ba2f91b71fea9edd3c9eb939
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/best-valid-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6939f0b14acb06ac04e9495d8309f13547d51492ee28dff423a3b34ebca4a63a
+size 1058858830
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-1.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-1.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f5216827d83b3d76045254a263cccb716553b75e
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-1.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9ee575e714a6ad8f8b7e62b3ac0fd2d04874c7df51293a477d62f0b503a6c33
+size 1058855257
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-10.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-10.pt
new file mode 100644
index 0000000000000000000000000000000000000000..76c5ba656b6d02345b6a5fd87e0ef1792ef8f397
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-10.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:762f579b13a9bb27e684d2e0d602a2d7e76e56016bc27101ac01e0142057c403
+size 1058858382
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-11.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-11.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ca1a9613fe19e0deabfe6bf1d8fb2f601f66c52e
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-11.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78f654453c9f79819582d54e116438eaf86de93766f83edaaf3e36f85d6aa5e6
+size 1058858446
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-12.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-12.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e583e3a7355d153cdfb6944d7361d27d008e0630
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-12.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0c53f1d3de28c9b3ddcabc6095ab398a65e0981fb3611be25df1d48089b74e2
+size 1058858446
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-13.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-13.pt
new file mode 100644
index 0000000000000000000000000000000000000000..db226ff77456b79847ee66e8931642e0a2d531aa
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-13.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bf8b5e3160d1a817e2377133621d50302d46775b8ec49e0860f8ca87f2d919a
+size 1058858510
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-14.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-14.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6da548d113e0a3d883623af278d730db44297132
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-14.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a230fe1082ce1f6bd23825822a2627e633245cbfb28b3ec5c4c58debfce0ab6
+size 1058858574
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-15.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-15.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e8a98c9de43bb498a26d4c21eec25b4bedc7a444
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-15.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:261f4b766a8f546beff0f27620484c8caa92c532a020af14d5a9ca56ab5a2854
+size 1058858638
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-16.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-16.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fd58ccfda7b717eb1a5239d25ad3eb4e3f375a78
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-16.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bab97df0e02ae023b9e4f330239a298fc35cfe6609709e8ea480bf727f98298c
+size 1058858638
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-17.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-17.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5e81f0d6e38488820d464c4545cbc862a5aaf431
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-17.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15cb14a99cd13a336ecae106653d535d2a18f83fd5d08d3f987bd8fccf50b4e8
+size 1058858702
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-18.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-18.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f02ef7f6c827991aa7eb554bc4cdda02d6070eff
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-18.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b8552178f9dfa56dd642b1f5c880207970e6386e9cd12b28403fb6079553f8
+size 1058858766
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-19.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-19.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8c09eb22addab9b9e904f35659d3a369d62d7828
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-19.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b4e08058c6417de4cc2a258e46217113b15d488b15615826128d1115e0c1385
+size 1058858766
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-2.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-2.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f862aa33d9279f56dd9b3bf1edb0db5a1d27c6ca
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-2.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1f7d608d7c7b9a6c7e80edcbb07c304368e95dbb4a10d9f1fd617528c9c7764
+size 1058855321
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-20.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-20.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ffa4bb6c37319990ba2f91b71fea9edd3c9eb939
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-20.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6939f0b14acb06ac04e9495d8309f13547d51492ee28dff423a3b34ebca4a63a
+size 1058858830
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-3.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-3.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4d3dd60c233dedb343f1923c71f8443d0ac4ccaf
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-3.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:051b813ec720021c4f5a674d7858147d9323ae1f97addf97c5f667c81fec05ef
+size 1058855449
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-4.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-4.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c8aed26a7df280c71791e387715258a67b03768c
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-4.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41beb58939343f2562cbb949102c0ffe508a2ace8041c98041ec306afe4b961f
+size 1058855513
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-5.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-5.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3f8e7610a733beae1883cf4e8c2fb404cc5da97b
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-5.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:482578baea01f8869eeef49a3c00e22d8a207e0ea8157122e078249c7d1553d4
+size 1058855513
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-6.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-6.pt
new file mode 100644
index 0000000000000000000000000000000000000000..862dad5682b326a4a9039c2009434fd1dff59bfc
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-6.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0754ac745c5cc155f74af3190a6fef50daee30045eebdec241ed9d8be9787059
+size 1058855577
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-7.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-7.pt
new file mode 100644
index 0000000000000000000000000000000000000000..70a6f70890bada8d552a54a71b50773afe14211e
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-7.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebaebb8cb52deabcf8a0dc5cdff24482607ea58a9e9c2428a9df7f92bfc70f5e
+size 1058855641
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-8.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-8.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f18a6a8b7a5cf3d406c2ca5ef5c81768f6c8cc24
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-8.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b7285e4963e76f8337ca5b494877cbefb1d6100312a206fb19cab53f733bb4b
+size 1058855705
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-9.pt b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-9.pt
new file mode 100644
index 0000000000000000000000000000000000000000..78c24289c47a4a008efcd502bc3405f59f95a27c
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/epoch-9.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d9d3bef1f15b8c04562b4f67218fc3d5173ccf83de845d3a3ff53d137537626
+size 1058855705
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-0
new file mode 100644
index 0000000000000000000000000000000000000000..8274b45c10f78c36f7715e7c8c132747021824c3
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-0
@@ -0,0 +1,38 @@
+2024-08-29 02:02:55,177 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 02:02:55,415 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-29 02:02:55,415 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 02:02:56,206 INFO [dysarthria_finetune.py:1219] (0/4) (33106362368, 34072559616)
+2024-08-29 02:02:56,211 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-29 02:02:56,313 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2545.int.cedar.computecanada.ca', 'IP address': '172.16.145.238'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 02:02:56,313 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-29 02:03:22,041 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66110931
+2024-08-29 02:03:22,599 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 02:03:49,472 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-29 02:05:03,022 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-29 02:05:03,303 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-29 02:05:03,648 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-29 02:05:05,175 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-29 02:05:06,111 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-29 02:05:06,117 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-29 02:05:06,259 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-29 02:05:06,633 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-29 02:05:06,633 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:08:02,279 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.90 vs. limit=5.0
+2024-08-29 02:08:02,600 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=10.49 vs. limit=7.5
+2024-08-29 02:08:11,678 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 02:08:13,090 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.39 vs. limit=7.5
+2024-08-29 02:08:13,758 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 02:17:32,002 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 02:17:34,329 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 02:29:44,993 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-29 02:29:50,600 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 02:30:10,801 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 02:33:10,176 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], tot_loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 02:33:10,177 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-29 04:58:19,211 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 04:58:19,549 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-29 05:17:50,818 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.68 vs. limit=15.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-1 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-1
new file mode 100644
index 0000000000000000000000000000000000000000..b76afbd5848e3e8865e4f551c2379f91560a8476
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-1
@@ -0,0 +1,39 @@
+2024-08-29 02:02:55,410 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-29 02:04:21,190 INFO [dysarthria_finetune.py:1214] (1/4) (32783400960, 34072559616)
+2024-08-29 02:04:21,190 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-29 02:04:21,590 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-29 02:04:21,590 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-29 02:04:21,593 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2545.int.cedar.computecanada.ca', 'IP address': '172.16.145.238'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 02:04:21,593 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-29 02:04:22,288 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66110931
+2024-08-29 02:04:22,926 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 02:04:24,057 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-29 02:05:03,019 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-29 02:05:03,303 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-29 02:05:03,648 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-29 02:05:05,175 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-29 02:05:06,112 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-29 02:05:06,117 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-29 02:05:06,259 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-29 02:05:06,633 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-29 02:05:06,633 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:08:02,279 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.15 vs. limit=5.0
+2024-08-29 02:08:02,597 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=8.80 vs. limit=7.5
+2024-08-29 02:08:11,678 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-29 02:08:12,984 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=15.02 vs. limit=7.5
+2024-08-29 02:08:13,759 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-29 02:17:31,998 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-29 02:17:34,336 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-29 02:29:13,735 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-29 02:29:50,598 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-29 02:30:10,792 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-29 02:33:10,200 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], tot_loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 02:33:10,200 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-29 04:58:19,216 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 04:58:20,509 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-29 05:08:46,374 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.75 vs. limit=15.0
+2024-08-29 05:18:40,692 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-2 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-2
new file mode 100644
index 0000000000000000000000000000000000000000..7d2e67fd3fb95cc7ec17bff8524508113017bb6d
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-2
@@ -0,0 +1,41 @@
+2024-08-29 02:02:55,411 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-29 02:02:55,415 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-29 02:02:55,415 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-29 02:02:56,223 INFO [dysarthria_finetune.py:1219] (2/4) (33106362368, 34072559616)
+2024-08-29 02:02:56,223 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-29 02:02:56,313 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2545.int.cedar.computecanada.ca', 'IP address': '172.16.145.238'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 02:02:56,313 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-29 02:03:22,026 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66110931
+2024-08-29 02:03:22,026 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 02:03:49,473 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-29 02:05:03,014 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-29 02:05:03,302 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-29 02:05:03,648 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-29 02:05:03,648 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-29 02:05:03,648 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-29 02:05:05,175 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-29 02:05:06,111 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-29 02:05:06,938 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-29 02:05:06,939 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-29 02:05:07,259 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-29 02:05:07,259 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:08:02,281 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.96 vs. limit=5.0
+2024-08-29 02:08:02,601 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.49 vs. limit=7.5
+2024-08-29 02:08:11,680 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 02:08:13,047 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.53 vs. limit=7.5
+2024-08-29 02:08:13,755 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 02:17:31,999 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 02:17:34,338 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 02:28:46,234 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.99 vs. limit=3.0
+2024-08-29 02:29:50,598 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 02:30:10,797 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 02:33:10,176 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], tot_loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 02:33:10,177 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-29 04:58:19,206 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 04:58:19,738 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19764MB
+2024-08-29 05:37:40,941 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/bad-model-2.pt
+2024-08-29 05:37:42,975 INFO [dysarthria_finetune.py:1468] (2/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/batch-bdd640fb-0667-1ad1-1c80-317fa3b1799d.pt
+2024-08-29 05:38:18,044 INFO [dysarthria_finetune.py:1474] (2/4) features shape: torch.Size([31, 2509, 80])
+2024-08-29 05:38:18,046 INFO [dysarthria_finetune.py:1478] (2/4) num tokens: 2568
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-3 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-3
new file mode 100644
index 0000000000000000000000000000000000000000..b0818a7b6869040279f8ddb30f8d171d6c75151d
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-02-02-55-3
@@ -0,0 +1,37 @@
+2024-08-29 02:02:55,422 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-29 02:02:55,461 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-29 02:02:55,461 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-29 02:02:56,207 INFO [dysarthria_finetune.py:1219] (3/4) (33106362368, 34072559616)
+2024-08-29 02:02:56,208 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-29 02:02:56,313 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2545.int.cedar.computecanada.ca', 'IP address': '172.16.145.238'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 02:02:56,314 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-29 02:03:22,059 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66110931
+2024-08-29 02:03:22,060 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 02:03:49,471 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-29 02:05:03,013 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-29 02:05:03,302 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-29 02:05:03,647 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-29 02:05:05,175 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-29 02:05:06,117 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-29 02:05:06,962 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-29 02:05:06,963 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-29 02:05:07,279 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-29 02:05:07,279 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:08:02,289 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.07 vs. limit=5.0
+2024-08-29 02:08:02,609 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.46 vs. limit=7.5
+2024-08-29 02:08:11,685 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 02:08:13,183 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.37 vs. limit=7.5
+2024-08-29 02:08:13,756 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 02:17:32,005 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 02:17:34,335 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 02:27:28,508 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.87 vs. limit=3.0
+2024-08-29 02:29:50,603 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 02:30:10,797 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 02:33:10,177 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], tot_loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 02:33:10,178 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-29 04:58:19,215 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 04:58:19,723 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-0
new file mode 100644
index 0000000000000000000000000000000000000000..bfcd2b8d7b749c782a73a16015694bf08be4f188
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-0
@@ -0,0 +1,84 @@
+2024-08-29 10:53:05,329 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-29 10:54:08,831 INFO [dysarthria_finetune.py:1214] (0/4) (32783400960, 34072559616)
+2024-08-29 10:54:08,832 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-29 10:54:09,214 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-29 10:54:09,220 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-29 10:54:09,223 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2500.int.cedar.computecanada.ca', 'IP address': '172.16.145.194'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 10:54:09,224 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-29 10:54:09,916 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66110931
+2024-08-29 10:54:10,456 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 10:56:59,533 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-29 10:57:05,797 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-29 10:57:06,462 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 10:57:07,140 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-29 10:57:10,482 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-29 10:57:11,440 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-29 10:57:11,441 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-29 10:57:11,672 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-29 10:57:12,059 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-29 10:57:12,060 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 11:02:23,370 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.90 vs. limit=5.0
+2024-08-29 11:02:23,804 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=10.49 vs. limit=7.5
+2024-08-29 11:02:25,089 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 11:02:26,511 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.39 vs. limit=7.5
+2024-08-29 11:02:34,143 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 11:10:11,722 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 11:10:14,039 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 11:32:20,120 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-29 11:32:32,333 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 11:32:34,529 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-29 11:37:09,584 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], tot_loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 11:37:09,585 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-29 12:10:26,833 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 12:11:03,378 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-29 12:17:21,558 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.68 vs. limit=15.0
+2024-08-29 12:23:07,996 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.970e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-29 12:29:12,532 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.22 vs. limit=15.0
+2024-08-29 12:36:03,988 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.160e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-29 12:40:09,429 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=31.60 vs. limit=22.5
+2024-08-29 12:50:17,089 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.00 vs. limit=6.0
+2024-08-29 12:57:57,260 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 6.191e+02 7.816e+02 8.684e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-29 13:14:58,208 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.4119, simple_loss=0.3886, pruned_loss=0.2299, over 18890.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.3707, pruned_loss=0.2207, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-29 13:32:59,561 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=30.13 vs. limit=15.0
+2024-08-29 13:52:25,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100373.33333333333, ans=0.1
+2024-08-29 13:59:24,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=100426.66666666667, ans=0.0
+2024-08-29 14:09:35,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=100480.0, ans=0.2
+2024-08-29 14:16:08,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-08-29 14:20:31,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100480.0, ans=0.1
+2024-08-29 14:22:13,043 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.79 vs. limit=15.0
+2024-08-29 14:22:13,145 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=24.57 vs. limit=15.0
+2024-08-29 14:23:38,344 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.935e+02 7.716e+02 8.607e+02 1.055e+03, threshold=1.543e+03, percent-clipped=0.0
+2024-08-29 14:23:38,382 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.3861, simple_loss=0.3668, pruned_loss=0.1961, over 19293.00 frames. ], tot_loss[loss=0.3756, simple_loss=0.3548, pruned_loss=0.2066, over 1474004.25 frames. ], batch size: 144, lr: 6.01e-05, grad_scale: 4.0
+2024-08-29 14:42:49,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=100586.66666666667, ans=0.2
+2024-08-29 14:46:18,610 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=18.55 vs. limit=15.0
+2024-08-29 14:48:45,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100640.0, ans=0.1
+2024-08-29 14:52:44,408 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-1.pt
+2024-08-29 14:52:50,249 INFO [dysarthria_finetune.py:1435] (0/4) (1470824448, 34072559616)
+2024-08-29 14:52:50,249 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-29 14:52:50,276 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-08-29 14:53:05,588 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 0, loss[loss=0.3086, simple_loss=0.2942, pruned_loss=0.1514, over 18874.00 frames. ], tot_loss[loss=0.3086, simple_loss=0.2942, pruned_loss=0.1514, over 18874.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-29 14:53:05,588 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-29 15:16:53,502 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 2, validation: loss=0.3287, simple_loss=0.3125, pruned_loss=0.1663, over 1073944.00 frames.
+2024-08-29 15:16:53,949 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-29 15:22:37,357 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-29 15:33:50,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=100736.0, ans=0.2
+2024-08-29 15:38:20,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-29 15:41:15,867 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-29 15:41:15,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-29 15:42:02,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=100789.33333333333, ans=0.025
+2024-08-29 15:42:02,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-29 15:45:25,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100789.33333333333, ans=0.1
+2024-08-29 15:50:26,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.80 vs. limit=12.0
+2024-08-29 16:09:52,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-29 16:33:57,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-08-29 16:34:48,761 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 50, loss[loss=0.3584, simple_loss=0.3373, pruned_loss=0.2001, over 18964.00 frames. ], tot_loss[loss=0.3554, simple_loss=0.3363, pruned_loss=0.1893, over 826819.73 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-29 16:59:00,169 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:55:58,098 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.24 vs. limit=15.0
+2024-08-29 19:26:12,742 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/bad-model-0.pt
+2024-08-29 19:26:20,383 INFO [dysarthria_finetune.py:1468] (0/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/batch-c33f4584-b23b-c1d8-493c-d01609de8895.pt
+2024-08-29 19:26:58,809 INFO [dysarthria_finetune.py:1474] (0/4) features shape: torch.Size([122, 652, 80])
+2024-08-29 19:26:58,811 INFO [dysarthria_finetune.py:1478] (0/4) num tokens: 2359
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-1 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-1
new file mode 100644
index 0000000000000000000000000000000000000000..0d623beb0d5556777892ca03dc9d760ea660ca52
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-1
@@ -0,0 +1,71 @@
+2024-08-29 10:53:05,605 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-29 10:53:27,400 INFO [dysarthria_finetune.py:1214] (1/4) (33106362368, 34072559616)
+2024-08-29 10:53:27,400 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-29 10:53:27,779 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-29 10:53:27,779 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-29 10:53:27,782 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2500.int.cedar.computecanada.ca', 'IP address': '172.16.145.194'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 10:53:27,782 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-29 10:53:28,463 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66110931
+2024-08-29 10:53:28,463 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 10:56:59,563 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-29 10:57:05,790 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-29 10:58:07,498 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 10:58:07,657 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-29 10:58:07,657 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-29 10:58:07,657 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-29 10:58:07,657 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-29 10:58:07,658 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-29 10:58:07,739 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-29 10:58:08,682 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-29 10:58:08,683 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-29 10:58:08,684 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-29 10:58:09,004 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-29 10:58:09,005 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 11:02:23,374 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.15 vs. limit=5.0
+2024-08-29 11:02:23,804 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=8.80 vs. limit=7.5
+2024-08-29 11:02:25,093 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-29 11:02:26,396 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=15.02 vs. limit=7.5
+2024-08-29 11:02:34,145 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-29 11:10:11,728 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-29 11:10:14,045 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-29 11:32:23,892 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-29 11:32:32,344 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-29 11:32:34,534 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-29 11:37:09,587 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], tot_loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 11:37:09,588 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-29 12:10:26,831 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 12:10:51,379 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-29 12:12:38,617 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.75 vs. limit=15.0
+2024-08-29 12:17:22,845 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
+2024-08-29 12:23:07,995 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.970e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-29 12:32:06,836 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=8.34 vs. limit=12.0
+2024-08-29 12:36:03,996 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.160e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-29 12:47:28,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=100160.0, ans=0.0
+2024-08-29 12:48:51,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=100160.0, ans=0.125
+2024-08-29 12:57:56,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=100213.33333333333, ans=0.0
+2024-08-29 12:57:57,263 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 6.191e+02 7.816e+02 8.684e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-29 12:58:19,498 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.05 vs. limit=15.0
+2024-08-29 13:14:56,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-29 13:14:58,208 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.4065, simple_loss=0.3849, pruned_loss=0.2161, over 19042.00 frames. ], tot_loss[loss=0.3907, simple_loss=0.3686, pruned_loss=0.2194, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-29 13:25:27,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=100266.66666666667, ans=0.025
+2024-08-29 13:52:22,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100373.33333333333, ans=0.125
+2024-08-29 13:55:56,076 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.24 vs. limit=15.0
+2024-08-29 14:23:38,351 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.935e+02 7.716e+02 8.607e+02 1.055e+03, threshold=1.543e+03, percent-clipped=0.0
+2024-08-29 14:23:38,389 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.3902, simple_loss=0.3697, pruned_loss=0.2049, over 19093.00 frames. ], tot_loss[loss=0.3768, simple_loss=0.3559, pruned_loss=0.2076, over 1470684.91 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-29 14:41:42,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=100586.66666666667, ans=0.2
+2024-08-29 14:44:05,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100586.66666666667, ans=0.125
+2024-08-29 14:52:44,380 INFO [dysarthria_finetune.py:1435] (1/4) (4260036608, 34072559616)
+2024-08-29 14:52:44,381 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-29 14:52:44,438 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-08-29 14:53:05,596 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 0, loss[loss=0.3255, simple_loss=0.3086, pruned_loss=0.1697, over 18746.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3086, pruned_loss=0.1697, over 18746.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-29 14:53:05,597 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-29 15:16:53,492 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 2, validation: loss=0.3287, simple_loss=0.3125, pruned_loss=0.1663, over 1073944.00 frames.
+2024-08-29 15:17:43,739 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13360MB
+2024-08-29 15:45:44,403 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=18.53 vs. limit=15.0
+2024-08-29 16:08:14,796 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.42 vs. limit=22.5
+2024-08-29 16:08:39,668 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:08:51,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=100896.0, ans=0.2
+2024-08-29 16:34:48,768 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 50, loss[loss=0.4057, simple_loss=0.382, pruned_loss=0.2256, over 19071.00 frames. ], tot_loss[loss=0.3517, simple_loss=0.3329, pruned_loss=0.1866, over 827854.65 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-29 16:49:18,460 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.92 vs. limit=15.0
+2024-08-29 17:55:00,839 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.85 vs. limit=15.0
+2024-08-29 18:41:08,262 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.11 vs. limit=12.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-2 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-2
new file mode 100644
index 0000000000000000000000000000000000000000..6c7b58d89b6187df04e9d7410db4b72a6d82af24
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-2
@@ -0,0 +1,75 @@
+2024-08-29 10:53:05,604 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-29 10:53:05,605 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-29 10:53:05,605 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-29 10:53:06,155 INFO [dysarthria_finetune.py:1219] (2/4) (33427226624, 34072559616)
+2024-08-29 10:53:06,155 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-29 10:53:07,982 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2500.int.cedar.computecanada.ca', 'IP address': '172.16.145.194'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 10:53:07,983 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-29 10:53:10,064 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66110931
+2024-08-29 10:54:16,144 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 10:56:59,533 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-29 10:57:05,799 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-29 10:57:06,461 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-29 10:57:10,481 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-29 10:57:11,431 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-29 10:57:11,432 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-29 10:57:11,672 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-29 10:57:12,058 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-29 10:57:12,059 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 11:02:23,366 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.96 vs. limit=5.0
+2024-08-29 11:02:23,805 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.49 vs. limit=7.5
+2024-08-29 11:02:25,089 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 11:02:26,427 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.53 vs. limit=7.5
+2024-08-29 11:02:34,140 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 11:10:11,722 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 11:10:14,044 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 11:30:33,237 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.99 vs. limit=3.0
+2024-08-29 11:32:32,344 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 11:32:34,527 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-29 11:37:09,582 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], tot_loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 11:37:09,583 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-29 12:10:26,825 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 12:10:26,826 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19764MB
+2024-08-29 12:20:52,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-29 12:21:51,526 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=38.73 vs. limit=15.0
+2024-08-29 12:22:57,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=100053.33333333333, ans=0.0
+2024-08-29 12:23:07,996 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.970e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-29 12:36:03,002 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.96 vs. limit=15.0
+2024-08-29 12:36:03,990 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.160e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-29 12:49:19,235 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.29 vs. limit=15.0
+2024-08-29 12:50:18,687 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=22.00 vs. limit=15.0
+2024-08-29 12:57:57,260 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 6.191e+02 7.816e+02 8.684e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-29 13:00:17,521 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.01 vs. limit=15.0
+2024-08-29 13:13:18,769 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.24 vs. limit=22.5
+2024-08-29 13:14:58,206 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.4013, simple_loss=0.3798, pruned_loss=0.2136, over 19018.00 frames. ], tot_loss[loss=0.3889, simple_loss=0.367, pruned_loss=0.2174, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-29 13:15:23,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=100266.66666666667, ans=0.04949747468305833
+2024-08-29 13:17:25,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-29 14:14:24,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-08-29 14:23:38,348 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.935e+02 7.716e+02 8.607e+02 1.055e+03, threshold=1.543e+03, percent-clipped=0.0
+2024-08-29 14:23:38,387 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3574, simple_loss=0.3383, pruned_loss=0.1901, over 19117.00 frames. ], tot_loss[loss=0.3756, simple_loss=0.3548, pruned_loss=0.2063, over 1475925.13 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-29 14:42:22,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=100586.66666666667, ans=0.1
+2024-08-29 14:51:39,391 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100640.0, ans=0.125
+2024-08-29 14:51:53,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=100640.0, ans=0.0
+2024-08-29 14:52:44,378 INFO [dysarthria_finetune.py:1435] (2/4) (10291445760, 34072559616)
+2024-08-29 14:52:44,379 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-29 14:52:44,419 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-08-29 14:53:05,587 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 0, loss[loss=0.3342, simple_loss=0.3157, pruned_loss=0.1813, over 18502.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3157, pruned_loss=0.1813, over 18502.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-29 14:53:05,587 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-29 15:16:53,495 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 2, validation: loss=0.3287, simple_loss=0.3125, pruned_loss=0.1663, over 1073944.00 frames.
+2024-08-29 15:16:53,495 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-08-29 15:30:17,200 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.74 vs. limit=15.0
+2024-08-29 15:33:50,071 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=100736.0, ans=0.2
+2024-08-29 15:38:22,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-29 15:42:00,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100789.33333333333, ans=0.1
+2024-08-29 15:45:43,441 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=37.32 vs. limit=22.5
+2024-08-29 15:57:23,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-29 16:03:43,631 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=19.37 vs. limit=15.0
+2024-08-29 16:12:45,777 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=13.29 vs. limit=12.0
+2024-08-29 16:34:48,761 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 50, loss[loss=0.3586, simple_loss=0.3384, pruned_loss=0.1958, over 18952.00 frames. ], tot_loss[loss=0.3548, simple_loss=0.3354, pruned_loss=0.1905, over 829638.79 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-29 16:54:35,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100949.33333333333, ans=0.125
+2024-08-29 18:29:01,150 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101002.66666666667, ans=0.1
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-3 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-3
new file mode 100644
index 0000000000000000000000000000000000000000..616ac1d0d471297c86b49083741d0598f27eb3f0
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-29-10-53-05-3
@@ -0,0 +1,72 @@
+2024-08-29 10:53:05,640 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-29 10:53:05,651 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-29 10:53:05,651 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-29 10:53:06,159 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-29 10:53:06,159 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-29 10:53:07,983 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2500.int.cedar.computecanada.ca', 'IP address': '172.16.145.194'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-29 10:53:07,983 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-29 10:53:10,084 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66110931
+2024-08-29 10:54:43,568 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-29 10:56:59,512 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-29 10:57:05,792 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-29 10:57:06,461 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-29 10:57:07,140 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-29 10:57:07,140 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-29 10:57:07,141 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-29 10:57:10,482 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-29 10:57:11,425 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-29 10:57:11,431 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-29 10:57:11,672 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-29 10:57:12,056 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-29 10:57:12,057 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 11:02:23,367 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.07 vs. limit=5.0
+2024-08-29 11:02:23,805 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.46 vs. limit=7.5
+2024-08-29 11:02:25,088 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 11:02:26,397 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.37 vs. limit=7.5
+2024-08-29 11:02:34,139 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 11:10:11,722 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 11:10:14,049 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 11:31:33,889 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.87 vs. limit=3.0
+2024-08-29 11:32:32,344 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 11:32:34,529 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12651MB
+2024-08-29 11:37:09,584 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], tot_loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-29 11:37:09,584 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-29 12:10:26,833 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-29 12:10:26,834 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-08-29 12:23:07,999 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.970e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-29 12:34:01,942 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=100053.33333333333, ans=0.2
+2024-08-29 12:36:03,984 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.160e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-29 12:43:16,280 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.01 vs. limit=15.0
+2024-08-29 12:47:57,015 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=100160.0, ans=0.2
+2024-08-29 12:48:52,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100160.0, ans=0.125
+2024-08-29 12:49:20,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=100160.0, ans=0.125
+2024-08-29 12:57:57,256 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 6.191e+02 7.816e+02 8.684e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-29 12:58:19,650 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-29 13:14:57,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-29 13:14:58,206 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.4536, simple_loss=0.4286, pruned_loss=0.248, over 19001.00 frames. ], tot_loss[loss=0.3945, simple_loss=0.3721, pruned_loss=0.2223, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-29 13:15:27,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-29 13:26:40,682 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=100266.66666666667, ans=0.025
+2024-08-29 14:08:15,668 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.25 vs. limit=6.0
+2024-08-29 14:09:34,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=100480.0, ans=0.0
+2024-08-29 14:16:08,616 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=17.63 vs. limit=15.0
+2024-08-29 14:23:38,344 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.935e+02 7.716e+02 8.607e+02 1.055e+03, threshold=1.543e+03, percent-clipped=0.0
+2024-08-29 14:23:38,382 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.3629, simple_loss=0.3427, pruned_loss=0.1984, over 19146.00 frames. ], tot_loss[loss=0.3806, simple_loss=0.3592, pruned_loss=0.2113, over 1476162.18 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-29 14:24:07,843 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=100533.33333333333, ans=0.95
+2024-08-29 14:32:16,903 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=100533.33333333333, ans=0.1
+2024-08-29 14:52:44,373 INFO [dysarthria_finetune.py:1435] (3/4) (13187612672, 34072559616)
+2024-08-29 14:52:44,374 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-29 14:52:44,420 INFO [dysarthria_finetune.py:1440] (3/4) (29811736576, 34072559616)
+2024-08-29 14:53:05,584 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 0, loss[loss=0.353, simple_loss=0.3342, pruned_loss=0.1871, over 18501.00 frames. ], tot_loss[loss=0.353, simple_loss=0.3342, pruned_loss=0.1871, over 18501.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-29 14:53:05,585 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-29 15:16:53,501 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 2, validation: loss=0.3287, simple_loss=0.3125, pruned_loss=0.1663, over 1073944.00 frames.
+2024-08-29 15:16:53,502 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14322MB
+2024-08-29 15:25:17,491 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.54 vs. limit=15.0
+2024-08-29 16:09:55,642 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-29 16:30:55,917 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=100949.33333333333, ans=0.125
+2024-08-29 16:34:48,762 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 50, loss[loss=0.3756, simple_loss=0.3534, pruned_loss=0.2106, over 18956.00 frames. ], tot_loss[loss=0.3543, simple_loss=0.335, pruned_loss=0.1903, over 828460.00 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-29 17:32:43,654 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.08 vs. limit=10.0
+2024-08-29 17:38:07,499 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=101002.66666666667, ans=0.09899494936611666
+2024-08-29 18:02:02,762 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.13 vs. limit=15.0
+2024-08-29 18:55:51,343 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=8.02 vs. limit=12.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-0
new file mode 100644
index 0000000000000000000000000000000000000000..b9885eb5725bc8c88aeaab2875dfd09dcc4180fb
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-0
@@ -0,0 +1,79 @@
+2024-08-30 13:13:09,521 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-30 13:15:42,213 INFO [dysarthria_finetune.py:1214] (0/4) (32783400960, 34072559616)
+2024-08-30 13:15:42,214 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-30 13:15:42,591 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-30 13:15:42,596 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-30 13:15:42,599 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 13:15:42,599 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-30 13:15:43,269 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66110931
+2024-08-30 13:15:43,814 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 13:17:10,957 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-30 13:17:18,119 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-30 13:18:24,886 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-30 13:18:28,518 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-30 13:18:42,137 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-30 13:19:40,938 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-30 13:20:07,514 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-30 13:20:35,460 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-30 13:20:35,633 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 13:21:50,039 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.90 vs. limit=5.0
+2024-08-30 13:21:50,381 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=10.49 vs. limit=7.5
+2024-08-30 13:21:52,106 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 13:21:53,491 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.39 vs. limit=7.5
+2024-08-30 13:21:53,954 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 13:25:17,407 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 13:25:19,694 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 13:30:44,199 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-30 13:30:59,590 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 13:31:01,821 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 13:32:06,010 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], tot_loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-30 13:32:06,010 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-30 13:58:08,878 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-30 13:58:35,643 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-30 14:00:53,813 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.68 vs. limit=15.0
+2024-08-30 14:38:36,222 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-30 14:54:22,605 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.22 vs. limit=15.0
+2024-08-30 15:20:58,865 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-30 15:25:11,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=31.60 vs. limit=22.5
+2024-08-30 16:18:31,468 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.00 vs. limit=6.0
+2024-08-30 16:41:34,164 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 6.238e+02 7.870e+02 8.666e+02 9.467e+02 1.055e+03, threshold=3.466e+03, percent-clipped=0.0
+2024-08-30 17:29:25,327 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.4119, simple_loss=0.3886, pruned_loss=0.2299, over 18890.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.3707, pruned_loss=0.2207, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-30 18:46:52,770 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=29.98 vs. limit=15.0
+2024-08-30 19:11:00,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100373.33333333333, ans=0.1
+2024-08-30 19:34:38,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=100426.66666666667, ans=0.0
+2024-08-30 19:48:06,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=100480.0, ans=0.2
+2024-08-30 19:50:32,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-08-30 19:55:14,999 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100480.0, ans=0.1
+2024-08-30 19:56:56,423 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.62 vs. limit=15.0
+2024-08-30 19:56:56,525 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=24.11 vs. limit=15.0
+2024-08-30 19:57:50,791 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.781e+02 6.912e+02 7.699e+02 8.540e+02 1.055e+03, threshold=1.540e+03, percent-clipped=0.0
+2024-08-30 19:57:50,830 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.3858, simple_loss=0.3667, pruned_loss=0.195, over 19293.00 frames. ], tot_loss[loss=0.3756, simple_loss=0.3548, pruned_loss=0.2065, over 1474004.25 frames. ], batch size: 144, lr: 6.01e-05, grad_scale: 4.0
+2024-08-30 20:23:06,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=100586.66666666667, ans=0.2
+2024-08-30 20:40:34,809 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=18.52 vs. limit=15.0
+2024-08-30 20:45:20,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100640.0, ans=0.1
+2024-08-30 20:52:05,739 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-1.pt
+2024-08-30 20:52:12,862 INFO [dysarthria_finetune.py:1435] (0/4) (1470824448, 34072559616)
+2024-08-30 20:52:12,862 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-30 20:52:12,892 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-08-30 20:53:26,607 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 0, loss[loss=0.3087, simple_loss=0.2943, pruned_loss=0.1512, over 18874.00 frames. ], tot_loss[loss=0.3087, simple_loss=0.2943, pruned_loss=0.1512, over 18874.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-30 20:54:00,490 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-30 21:18:09,287 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 2, validation: loss=0.3282, simple_loss=0.3121, pruned_loss=0.1657, over 1073944.00 frames.
+2024-08-30 21:19:02,934 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-30 21:29:16,995 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-30 21:47:04,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=100736.0, ans=0.2
+2024-08-30 21:51:22,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-30 22:00:23,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-30 22:00:23,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-30 22:10:06,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=100789.33333333333, ans=0.025
+2024-08-30 22:10:06,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-30 22:17:30,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100789.33333333333, ans=0.1
+2024-08-30 22:29:09,391 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.74 vs. limit=12.0
+2024-08-30 23:20:41,282 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-30 23:31:41,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-08-30 23:31:43,019 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 50, loss[loss=0.359, simple_loss=0.3379, pruned_loss=0.2005, over 18964.00 frames. ], tot_loss[loss=0.3556, simple_loss=0.3364, pruned_loss=0.1894, over 826819.73 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-30 23:43:12,193 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-1 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-1
new file mode 100644
index 0000000000000000000000000000000000000000..de3c87ba4ca7c1571018e7881bf2542c5da87cad
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-1
@@ -0,0 +1,82 @@
+2024-08-30 13:13:09,783 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 13:14:09,497 INFO [dysarthria_finetune.py:1214] (1/4) (33106362368, 34072559616)
+2024-08-30 13:14:09,497 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-30 13:14:09,864 INFO [dysarthria_finetune.py:1219] (1/4) (33106362368, 34072559616)
+2024-08-30 13:14:09,864 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-30 13:14:09,867 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 13:14:09,867 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-30 13:14:10,537 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66110931
+2024-08-30 13:14:10,537 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 13:17:10,445 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-30 13:17:18,119 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-30 13:18:24,886 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-30 13:18:25,731 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-30 13:18:25,731 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-30 13:18:25,731 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-30 13:18:28,518 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-30 13:18:42,141 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-30 13:19:40,938 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-30 13:20:07,513 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-30 13:20:35,461 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-30 13:20:35,633 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 13:21:50,023 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.15 vs. limit=5.0
+2024-08-30 13:21:50,380 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=8.80 vs. limit=7.5
+2024-08-30 13:21:52,106 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 13:21:53,429 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=15.02 vs. limit=7.5
+2024-08-30 13:21:53,958 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 13:25:17,403 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 13:25:19,701 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 13:30:58,758 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-30 13:30:59,585 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 13:31:01,823 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 13:32:05,994 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], tot_loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-30 13:32:05,995 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-30 13:58:08,867 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-30 13:58:09,774 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-30 13:59:17,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.75 vs. limit=15.0
+2024-08-30 14:00:54,466 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
+2024-08-30 14:38:36,224 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-30 15:06:06,002 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=8.34 vs. limit=12.0
+2024-08-30 15:20:58,857 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-30 15:55:09,621 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.31 vs. limit=15.0
+2024-08-30 16:33:11,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100160.0, ans=0.125
+2024-08-30 16:41:34,171 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 6.238e+02 7.870e+02 8.666e+02 9.467e+02 1.055e+03, threshold=3.466e+03, percent-clipped=0.0
+2024-08-30 17:07:43,704 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=22.16 vs. limit=15.0
+2024-08-30 17:28:29,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=100266.66666666667, ans=0.0
+2024-08-30 17:29:25,318 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.401, simple_loss=0.3803, pruned_loss=0.2082, over 19042.00 frames. ], tot_loss[loss=0.3933, simple_loss=0.3709, pruned_loss=0.2224, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-30 18:25:42,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-30 18:25:47,797 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.18 vs. limit=15.0
+2024-08-30 18:33:31,591 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=26.78 vs. limit=15.0
+2024-08-30 19:00:47,014 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=7.18 vs. limit=6.0
+2024-08-30 19:46:54,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=100480.0, ans=0.125
+2024-08-30 19:47:12,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-08-30 19:54:23,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=100480.0, ans=0.125
+2024-08-30 19:57:50,786 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.781e+02 6.912e+02 7.699e+02 8.540e+02 1.055e+03, threshold=1.540e+03, percent-clipped=0.0
+2024-08-30 19:57:50,825 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.3791, simple_loss=0.3607, pruned_loss=0.1891, over 19093.00 frames. ], tot_loss[loss=0.3772, simple_loss=0.3563, pruned_loss=0.2074, over 1470684.91 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-30 20:20:34,111 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=38.65 vs. limit=22.5
+2024-08-30 20:41:47,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100586.66666666667, ans=0.1
+2024-08-30 20:52:05,701 INFO [dysarthria_finetune.py:1435] (1/4) (4245356544, 34072559616)
+2024-08-30 20:52:05,805 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-30 20:52:05,867 INFO [dysarthria_finetune.py:1440] (1/4) (29490872320, 34072559616)
+2024-08-30 20:53:26,609 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 0, loss[loss=0.3255, simple_loss=0.3086, pruned_loss=0.1698, over 18746.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3086, pruned_loss=0.1698, over 18746.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-30 20:53:26,609 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-30 21:18:09,290 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 2, validation: loss=0.3282, simple_loss=0.3121, pruned_loss=0.1657, over 1073944.00 frames.
+2024-08-30 21:18:09,895 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13448MB
+2024-08-30 21:39:09,657 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=20.96 vs. limit=15.0
+2024-08-30 21:46:45,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=100736.0, ans=0.125
+2024-08-30 21:49:58,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100736.0, ans=0.1
+2024-08-30 22:01:40,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-30 22:45:23,642 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=20.59 vs. limit=15.0
+2024-08-30 23:07:14,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100896.0, ans=0.125
+2024-08-30 23:07:18,824 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=24.13 vs. limit=22.5
+2024-08-30 23:28:09,007 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=21.91 vs. limit=15.0
+2024-08-30 23:31:43,016 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 50, loss[loss=0.4026, simple_loss=0.3807, pruned_loss=0.2158, over 19071.00 frames. ], tot_loss[loss=0.3529, simple_loss=0.334, pruned_loss=0.1871, over 827854.65 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-30 23:32:43,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100949.33333333333, ans=0.125
+2024-08-30 23:34:12,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-08-30 23:37:14,353 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.98 vs. limit=15.0
+2024-08-30 23:59:48,263 INFO [checkpoint.py:75] (1/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/bad-model-1.pt
+2024-08-31 00:00:05,102 INFO [dysarthria_finetune.py:1468] (1/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/batch-c33f4584-b23b-c1d8-493c-d01609de8895.pt
+2024-08-31 00:01:41,345 INFO [dysarthria_finetune.py:1474] (1/4) features shape: torch.Size([154, 516, 80])
+2024-08-31 00:01:41,348 INFO [dysarthria_finetune.py:1478] (1/4) num tokens: 2318
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-2 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-2
new file mode 100644
index 0000000000000000000000000000000000000000..786a94e02651ebb270e41b0d0bbae051b5f8819f
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-2
@@ -0,0 +1,76 @@
+2024-08-30 13:13:09,783 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 13:13:09,785 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-30 13:13:09,785 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 13:13:10,354 INFO [dysarthria_finetune.py:1219] (2/4) (33427226624, 34072559616)
+2024-08-30 13:13:10,354 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 13:13:12,308 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 13:13:12,308 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 13:13:13,679 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66110931
+2024-08-30 13:13:13,679 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 13:17:10,420 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-30 13:17:18,118 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-30 13:18:24,886 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-30 13:18:25,731 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-30 13:18:28,518 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-30 13:18:42,127 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-30 13:19:40,938 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-30 13:20:07,513 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-30 13:20:35,459 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-30 13:20:35,633 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 13:21:50,022 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.96 vs. limit=5.0
+2024-08-30 13:21:50,381 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.49 vs. limit=7.5
+2024-08-30 13:21:52,106 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 13:21:53,429 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.53 vs. limit=7.5
+2024-08-30 13:21:53,957 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 13:25:17,404 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 13:25:19,692 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 13:30:44,740 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.99 vs. limit=3.0
+2024-08-30 13:30:59,593 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 13:31:01,822 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 13:32:05,988 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], tot_loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-30 13:32:05,989 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-30 13:58:08,871 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-30 13:58:08,872 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19764MB
+2024-08-30 14:13:09,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-30 14:14:58,999 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=38.73 vs. limit=15.0
+2024-08-30 14:38:33,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=100053.33333333333, ans=0.0
+2024-08-30 14:38:36,226 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-30 15:20:56,402 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.96 vs. limit=15.0
+2024-08-30 15:20:58,858 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-30 16:08:23,135 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.29 vs. limit=15.0
+2024-08-30 16:18:39,252 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=22.00 vs. limit=15.0
+2024-08-30 16:41:34,168 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 6.238e+02 7.870e+02 8.666e+02 9.467e+02 1.055e+03, threshold=3.466e+03, percent-clipped=0.0
+2024-08-30 16:59:41,319 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.99 vs. limit=15.0
+2024-08-30 17:29:25,313 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.4073, simple_loss=0.3851, pruned_loss=0.2204, over 19018.00 frames. ], tot_loss[loss=0.389, simple_loss=0.3671, pruned_loss=0.2175, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-30 18:28:02,899 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=13.78 vs. limit=12.0
+2024-08-30 18:46:59,455 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=49.03 vs. limit=22.5
+2024-08-30 18:53:57,898 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=100320.0, ans=0.0
+2024-08-30 18:53:57,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=100320.0, ans=0.0
+2024-08-30 19:26:03,898 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=17.17 vs. limit=15.0
+2024-08-30 19:51:04,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=100480.0, ans=0.0
+2024-08-30 19:57:50,787 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.781e+02 6.912e+02 7.699e+02 8.540e+02 1.055e+03, threshold=1.540e+03, percent-clipped=0.0
+2024-08-30 19:57:50,826 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3714, simple_loss=0.35, pruned_loss=0.2081, over 19117.00 frames. ], tot_loss[loss=0.3755, simple_loss=0.3547, pruned_loss=0.2066, over 1475925.13 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-30 20:10:17,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100533.33333333333, ans=0.1
+2024-08-30 20:45:08,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100640.0, ans=0.125
+2024-08-30 20:47:09,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=100640.0, ans=0.025
+2024-08-30 20:49:40,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100640.0, ans=0.1
+2024-08-30 20:52:05,699 INFO [dysarthria_finetune.py:1435] (2/4) (10291445760, 34072559616)
+2024-08-30 20:52:05,805 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-30 20:52:05,862 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-08-30 20:53:26,608 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 0, loss[loss=0.3258, simple_loss=0.3083, pruned_loss=0.1731, over 18502.00 frames. ], tot_loss[loss=0.3258, simple_loss=0.3083, pruned_loss=0.1731, over 18502.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-30 20:53:26,609 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-30 21:18:09,292 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 2, validation: loss=0.3282, simple_loss=0.3121, pruned_loss=0.1657, over 1073944.00 frames.
+2024-08-30 21:18:09,895 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-08-30 21:39:08,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100736.0, ans=0.1
+2024-08-30 21:50:30,051 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.25 vs. limit=15.0
+2024-08-30 22:17:30,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-30 22:17:56,646 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=12.00 vs. limit=15.0
+2024-08-30 22:41:36,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100842.66666666667, ans=0.0
+2024-08-30 23:00:16,371 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.88 vs. limit=22.5
+2024-08-30 23:20:03,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100896.0, ans=0.125
+2024-08-30 23:31:43,022 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 50, loss[loss=0.3654, simple_loss=0.3449, pruned_loss=0.199, over 18952.00 frames. ], tot_loss[loss=0.3543, simple_loss=0.3351, pruned_loss=0.1895, over 829638.79 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-30 23:39:32,070 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.74 vs. limit=15.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-3 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-3
new file mode 100644
index 0000000000000000000000000000000000000000..2b3995bfd6efcd3757d066f20d773983c6dbf3e5
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-13-13-09-3
@@ -0,0 +1,75 @@
+2024-08-30 13:13:09,784 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 13:13:09,785 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 13:13:09,785 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 13:13:10,359 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-30 13:13:10,360 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 13:13:12,308 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 13:13:12,309 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 13:13:13,656 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66110931
+2024-08-30 13:13:13,656 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 13:17:10,444 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-30 13:17:18,122 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-30 13:18:24,886 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-30 13:18:25,730 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-30 13:18:25,731 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-30 13:18:28,518 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-30 13:18:42,111 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-30 13:19:40,938 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-30 13:20:07,513 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-30 13:20:35,454 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-30 13:20:35,633 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 13:21:50,022 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.07 vs. limit=5.0
+2024-08-30 13:21:50,381 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.46 vs. limit=7.5
+2024-08-30 13:21:52,111 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 13:21:53,457 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.37 vs. limit=7.5
+2024-08-30 13:21:53,964 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 13:25:17,400 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 13:25:19,700 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 13:30:50,514 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.87 vs. limit=3.0
+2024-08-30 13:30:59,587 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 13:31:01,825 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 13:32:05,988 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], tot_loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-30 13:32:05,989 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-30 13:58:08,875 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-30 13:58:09,627 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14320MB
+2024-08-30 14:38:36,222 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-30 15:14:24,762 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=100053.33333333333, ans=0.2
+2024-08-30 15:20:58,864 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.687e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-30 15:26:30,735 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.01 vs. limit=15.0
+2024-08-30 15:54:40,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=100160.0, ans=0.2
+2024-08-30 15:58:04,290 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100160.0, ans=0.125
+2024-08-30 16:08:16,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=100160.0, ans=0.2
+2024-08-30 16:39:39,967 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:41:34,177 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 6.238e+02 7.870e+02 8.666e+02 9.467e+02 1.055e+03, threshold=3.466e+03, percent-clipped=0.0
+2024-08-30 17:14:51,941 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=100213.33333333333, ans=0.0
+2024-08-30 17:29:25,318 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.4508, simple_loss=0.4255, pruned_loss=0.2501, over 19001.00 frames. ], tot_loss[loss=0.3942, simple_loss=0.3718, pruned_loss=0.2222, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-30 17:41:50,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=43.94 vs. limit=22.5
+2024-08-30 18:12:29,174 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.04 vs. limit=22.5
+2024-08-30 18:48:12,943 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=17.58 vs. limit=15.0
+2024-08-30 18:51:50,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100320.0, ans=0.125
+2024-08-30 19:23:10,334 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=100373.33333333333, ans=0.125
+2024-08-30 19:34:03,273 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=21.31 vs. limit=15.0
+2024-08-30 19:35:03,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=100426.66666666667, ans=0.025
+2024-08-30 19:48:08,292 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=47.84 vs. limit=22.5
+2024-08-30 19:57:50,790 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.781e+02 6.912e+02 7.699e+02 8.540e+02 1.055e+03, threshold=1.540e+03, percent-clipped=0.0
+2024-08-30 19:57:50,828 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.358, simple_loss=0.3396, pruned_loss=0.1854, over 19146.00 frames. ], tot_loss[loss=0.3783, simple_loss=0.3572, pruned_loss=0.2086, over 1476162.18 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-30 20:20:28,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=100586.66666666667, ans=0.0
+2024-08-30 20:52:05,697 INFO [dysarthria_finetune.py:1435] (3/4) (13370064896, 34072559616)
+2024-08-30 20:52:05,805 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-30 20:52:05,864 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-08-30 20:53:26,625 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 0, loss[loss=0.3531, simple_loss=0.3343, pruned_loss=0.1872, over 18501.00 frames. ], tot_loss[loss=0.3531, simple_loss=0.3343, pruned_loss=0.1872, over 18501.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-30 20:53:26,626 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-30 21:18:09,293 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 2, validation: loss=0.3282, simple_loss=0.3121, pruned_loss=0.1657, over 1073944.00 frames.
+2024-08-30 21:18:09,895 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-08-30 21:51:22,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-30 22:01:03,726 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-30 22:17:56,580 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.34 vs. limit=22.5
+2024-08-30 22:41:35,275 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-30 22:56:01,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=100896.0, ans=0.125
+2024-08-30 23:20:48,930 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100896.0, ans=0.125
+2024-08-30 23:20:49,665 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-30 23:31:43,028 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 50, loss[loss=0.3498, simple_loss=0.333, pruned_loss=0.176, over 18956.00 frames. ], tot_loss[loss=0.3546, simple_loss=0.3354, pruned_loss=0.1896, over 828460.00 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-30 23:37:14,980 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.70 vs. limit=15.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-0
new file mode 100644
index 0000000000000000000000000000000000000000..e96ff57f22a9a63682d391246514cb39faf688b9
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-0
@@ -0,0 +1,31 @@
+2024-08-30 20:41:22,568 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-30 20:41:22,905 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-30 20:41:22,905 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-30 20:41:23,898 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-30 20:41:26,110 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-30 20:43:36,544 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 20:43:36,544 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-30 20:43:37,243 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66110931
+2024-08-30 20:43:37,791 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 20:44:19,305 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-30 20:44:25,336 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-30 20:44:30,345 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-30 20:44:37,518 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-30 20:45:04,799 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-30 20:45:04,800 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-30 20:45:04,942 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-30 20:45:05,361 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-30 20:45:05,361 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:46:10,595 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.90 vs. limit=5.0
+2024-08-30 20:46:10,917 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=10.49 vs. limit=7.5
+2024-08-30 20:46:12,726 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 20:46:14,117 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.39 vs. limit=7.5
+2024-08-30 20:46:14,580 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 20:51:59,472 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 20:52:02,100 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-30 21:01:55,509 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-1 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-1
new file mode 100644
index 0000000000000000000000000000000000000000..36a86041de5ef1d251a077b8a745cf200092ae3c
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-1
@@ -0,0 +1,30 @@
+2024-08-30 20:41:22,903 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-30 20:41:22,904 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-30 20:41:22,904 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-30 20:41:23,898 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-30 20:41:23,899 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-30 20:41:24,835 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 20:41:24,835 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-30 20:41:26,828 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66110931
+2024-08-30 20:43:36,541 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 20:44:19,304 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-30 20:44:25,334 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-30 20:44:30,345 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-30 20:44:37,518 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-30 20:45:04,805 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-30 20:45:04,807 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-30 20:45:04,942 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-30 20:45:05,355 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-30 20:45:05,356 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:46:10,596 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.15 vs. limit=5.0
+2024-08-30 20:46:10,917 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=8.80 vs. limit=7.5
+2024-08-30 20:46:12,726 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 20:46:14,087 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=15.02 vs. limit=7.5
+2024-08-30 20:46:14,578 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 20:51:59,477 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
+2024-08-30 20:52:02,100 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12657MB
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-2 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-2
new file mode 100644
index 0000000000000000000000000000000000000000..5c0037bcea1e568e2775b8a2c55c17ff9f75279a
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-2
@@ -0,0 +1,30 @@
+2024-08-30 20:41:22,909 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-30 20:41:22,949 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-30 20:41:22,949 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-30 20:41:23,918 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-30 20:41:23,918 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-30 20:41:24,834 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 20:41:24,835 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-30 20:41:26,847 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66110931
+2024-08-30 20:43:36,541 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 20:44:19,313 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-30 20:44:25,337 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-30 20:44:30,345 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-30 20:44:37,518 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-30 20:45:04,794 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-30 20:45:04,798 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-30 20:45:04,942 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-30 20:45:05,355 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-30 20:45:05,355 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:46:10,596 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.96 vs. limit=5.0
+2024-08-30 20:46:10,918 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.49 vs. limit=7.5
+2024-08-30 20:46:12,731 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 20:46:14,069 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.53 vs. limit=7.5
+2024-08-30 20:46:14,578 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 20:51:59,475 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-30 20:52:02,095 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-3 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-3
new file mode 100644
index 0000000000000000000000000000000000000000..bf29db92debd79f2aaca2006070a854fb9d5e3cf
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-30-20-41-22-3
@@ -0,0 +1,31 @@
+2024-08-30 20:41:22,908 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-30 20:41:22,949 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-30 20:41:22,949 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-30 20:41:23,917 INFO [dysarthria_finetune.py:1219] (3/4) (32783400960, 34072559616)
+2024-08-30 20:41:23,917 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-30 20:41:24,834 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-30 20:41:24,835 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-30 20:41:26,826 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66110931
+2024-08-30 20:43:35,413 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-30 20:44:19,309 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-30 20:44:25,343 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-30 20:44:30,345 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-30 20:44:35,319 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-30 20:44:35,320 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-30 20:44:37,518 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-30 20:45:04,790 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-30 20:45:04,799 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-30 20:45:04,942 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-30 20:45:05,353 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-30 20:45:05,353 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:46:10,602 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.07 vs. limit=5.0
+2024-08-30 20:46:10,918 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.46 vs. limit=7.5
+2024-08-30 20:46:12,724 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 20:46:14,053 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.37 vs. limit=7.5
+2024-08-30 20:46:14,577 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 20:51:59,470 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 20:52:02,094 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-30 21:00:00,981 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.87 vs. limit=3.0
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-0
new file mode 100644
index 0000000000000000000000000000000000000000..498915f6530f7372a7b3cd09ba910a23fcada922
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-0
@@ -0,0 +1,545 @@
+2024-08-31 13:16:10,659 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-31 13:16:10,940 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-31 13:16:10,941 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-31 13:16:11,940 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-31 13:16:11,946 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-31 13:16:14,925 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 66110931
+2024-08-31 13:16:16,057 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-31 13:18:23,840 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-31 13:20:29,536 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-31 13:20:29,666 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-31 13:20:31,921 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-31 13:20:32,863 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-31 13:20:32,866 INFO [dysarthria_asr_datamodule.py:501] (0/4) About to get dev cuts
+2024-08-31 13:20:33,113 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-31 13:20:33,461 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-31 13:20:33,461 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:44:09,215 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.90 vs. limit=5.0
+2024-08-31 13:44:10,210 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=10.49 vs. limit=7.5
+2024-08-31 13:44:14,874 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-31 13:45:00,457 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.39 vs. limit=7.5
+2024-08-31 13:45:00,956 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-31 13:47:50,046 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-31 13:47:52,428 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-31 13:50:17,751 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-31 13:50:20,330 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-31 13:50:22,585 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 12578MB
+2024-08-31 13:51:23,114 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], tot_loss[loss=0.3242, simple_loss=0.3071, pruned_loss=0.1717, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 13:51:23,115 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-31 14:29:03,496 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-31 14:29:03,497 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-31 14:41:38,264 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.68 vs. limit=15.0
+2024-08-31 15:24:16,876 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-31 15:41:04,047 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.22 vs. limit=15.0
+2024-08-31 15:52:42,133 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.685e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-31 15:57:12,063 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=31.60 vs. limit=22.5
+2024-08-31 16:29:06,735 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.00 vs. limit=6.0
+2024-08-31 16:32:13,896 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 6.192e+02 7.846e+02 8.685e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-31 17:02:30,134 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.4119, simple_loss=0.3886, pruned_loss=0.2299, over 18890.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.3707, pruned_loss=0.2207, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 17:22:55,942 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=30.11 vs. limit=15.0
+2024-08-31 17:37:25,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100373.33333333333, ans=0.1
+2024-08-31 18:12:52,329 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.982e+02 7.682e+02 8.607e+02 1.055e+03, threshold=1.536e+03, percent-clipped=0.0
+2024-08-31 18:12:52,368 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.4038, simple_loss=0.3814, pruned_loss=0.2206, over 19293.00 frames. ], tot_loss[loss=0.3752, simple_loss=0.3544, pruned_loss=0.2062, over 1474004.25 frames. ], batch size: 144, lr: 6.01e-05, grad_scale: 4.0
+2024-08-31 18:36:35,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=100586.66666666667, ans=0.0
+2024-08-31 18:40:57,979 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.82 vs. limit=6.0
+2024-08-31 18:42:23,195 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=100640.0, ans=0.2
+2024-08-31 18:43:29,199 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=10.02 vs. limit=15.0
+2024-08-31 18:44:19,310 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-1.pt
+2024-08-31 18:44:43,509 INFO [dysarthria_finetune.py:1435] (0/4) (1470824448, 34072559616)
+2024-08-31 18:44:43,510 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-31 18:44:43,539 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-08-31 18:46:01,836 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 0, loss[loss=0.3207, simple_loss=0.3046, pruned_loss=0.1641, over 18874.00 frames. ], tot_loss[loss=0.3207, simple_loss=0.3046, pruned_loss=0.1641, over 18874.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-31 18:46:01,836 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-31 19:10:08,822 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 2, validation: loss=0.3307, simple_loss=0.3141, pruned_loss=0.1687, over 1073944.00 frames.
+2024-08-31 19:10:08,823 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-31 19:22:26,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=100682.66666666667, ans=0.025
+2024-08-31 19:46:09,022 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=100736.0, ans=0.125
+2024-08-31 19:57:17,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-31 20:05:00,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-31 20:21:09,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100896.0, ans=0.1
+2024-08-31 20:29:28,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=100896.0, ans=0.125
+2024-08-31 20:31:44,806 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 50, loss[loss=0.3587, simple_loss=0.3374, pruned_loss=0.2017, over 18964.00 frames. ], tot_loss[loss=0.3547, simple_loss=0.3357, pruned_loss=0.1885, over 826819.73 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-31 20:34:07,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=100949.33333333333, ans=0.125
+2024-08-31 20:40:53,474 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=18.40 vs. limit=15.0
+2024-08-31 20:43:40,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=101002.66666666667, ans=0.125
+2024-08-31 21:03:09,444 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.367e+02 4.995e+02 5.661e+02 6.268e+02 7.321e+02, threshold=1.132e+03, percent-clipped=0.0
+2024-08-31 21:09:37,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101056.0, ans=0.125
+2024-08-31 21:15:13,524 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.56 vs. limit=15.0
+2024-08-31 21:20:55,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-31 21:42:13,849 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 100, loss[loss=0.3782, simple_loss=0.3604, pruned_loss=0.1893, over 19229.00 frames. ], tot_loss[loss=0.3412, simple_loss=0.3236, pruned_loss=0.1779, over 1473154.80 frames. ], batch size: 144, lr: 7.29e-05, grad_scale: 4.0
+2024-08-31 21:47:36,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=101216.0, ans=6.0
+2024-08-31 21:48:42,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101216.0, ans=0.0
+2024-08-31 22:10:06,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101269.33333333333, ans=0.1
+2024-08-31 22:21:43,457 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=18.46 vs. limit=15.0
+2024-08-31 22:21:43,991 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-2.pt
+2024-08-31 22:21:48,544 INFO [dysarthria_finetune.py:1435] (0/4) (1412104192, 34072559616)
+2024-08-31 22:21:48,544 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-31 22:21:48,574 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-08-31 22:22:39,012 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 0, loss[loss=0.3229, simple_loss=0.3053, pruned_loss=0.1723, over 18603.00 frames. ], tot_loss[loss=0.3229, simple_loss=0.3053, pruned_loss=0.1723, over 18603.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 2.0
+2024-08-31 22:22:39,013 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-31 22:31:34,594 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 3, validation: loss=0.2979, simple_loss=0.2853, pruned_loss=0.1432, over 1073944.00 frames.
+2024-08-31 22:31:34,954 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-08-31 22:50:38,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=101424.0, ans=0.0
+2024-08-31 23:06:08,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=101477.33333333333, ans=0.125
+2024-08-31 23:08:20,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-08-31 23:19:22,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=101530.66666666667, ans=0.1
+2024-08-31 23:30:40,082 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.802e+02 3.787e+02 4.308e+02 4.929e+02 6.122e+02, threshold=8.616e+02, percent-clipped=0.0
+2024-08-31 23:32:40,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=101637.33333333333, ans=0.015
+2024-08-31 23:32:40,875 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 23:32:42,312 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 50, loss[loss=0.3352, simple_loss=0.3187, pruned_loss=0.1717, over 18964.00 frames. ], tot_loss[loss=0.3276, simple_loss=0.3113, pruned_loss=0.1684, over 827741.27 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 1.0
+2024-08-31 23:54:22,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101690.66666666667, ans=0.125
+2024-08-31 23:55:08,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=101690.66666666667, ans=0.125
+2024-09-01 00:02:13,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=101744.0, ans=0.0
+2024-09-01 00:04:29,068 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.90 vs. limit=15.0
+2024-09-01 00:13:14,908 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 00:17:08,369 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 100, loss[loss=0.3094, simple_loss=0.2961, pruned_loss=0.1523, over 19231.00 frames. ], tot_loss[loss=0.3186, simple_loss=0.3033, pruned_loss=0.1621, over 1473938.15 frames. ], batch size: 144, lr: 8.58e-05, grad_scale: 1.0
+2024-09-01 00:22:57,983 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.64 vs. limit=10.0
+2024-09-01 00:25:34,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=102010.66666666667, ans=0.07
+2024-09-01 00:26:08,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=102010.66666666667, ans=0.0
+2024-09-01 00:26:09,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=102010.66666666667, ans=0.09899494936611666
+2024-09-01 00:27:26,415 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-3.pt
+2024-09-01 00:27:30,507 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 00:27:30,507 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 00:27:30,537 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 00:27:42,730 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 0, loss[loss=0.3028, simple_loss=0.2854, pruned_loss=0.1641, over 18523.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.2854, pruned_loss=0.1641, over 18523.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 2.0
+2024-09-01 00:27:42,731 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 00:46:27,440 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 4, validation: loss=0.279, simple_loss=0.2687, pruned_loss=0.1325, over 1073944.00 frames.
+2024-09-01 00:46:27,441 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 01:16:52,153 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.614e+02 3.221e+02 3.659e+02 4.077e+02 5.349e+02, threshold=7.318e+02, percent-clipped=0.0
+2024-09-01 01:19:06,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:29:12,906 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:39:48,723 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 50, loss[loss=0.3261, simple_loss=0.3126, pruned_loss=0.1607, over 18961.00 frames. ], tot_loss[loss=0.2993, simple_loss=0.2866, pruned_loss=0.1482, over 828586.64 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 0.5
+2024-09-01 01:43:16,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=102320.0, ans=0.2
+2024-09-01 01:44:42,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=102373.33333333333, ans=0.1
+2024-09-01 01:47:41,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=102373.33333333333, ans=0.05
+2024-09-01 01:48:30,680 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-09-01 01:53:53,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=102480.0, ans=0.025
+2024-09-01 01:54:46,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_ff2.min_abs, batch_count=102480.0, ans=0.1
+2024-09-01 01:55:59,735 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=10.08 vs. limit=15.0
+2024-09-01 01:58:53,814 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.62 vs. limit=6.0
+2024-09-01 02:01:07,722 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 100, loss[loss=0.3101, simple_loss=0.2959, pruned_loss=0.1573, over 19286.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.2822, pruned_loss=0.1462, over 1474147.24 frames. ], batch size: 144, lr: 9.86e-05, grad_scale: 1.0
+2024-09-01 02:09:39,909 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 2.859e+02 3.213e+02 3.589e+02 4.738e+02, threshold=6.426e+02, percent-clipped=0.0
+2024-09-01 02:10:13,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102693.33333333333, ans=0.1
+2024-09-01 02:10:16,937 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-4.pt
+2024-09-01 02:10:26,429 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 02:10:26,429 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:10:26,462 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 02:10:37,117 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 0, loss[loss=0.2472, simple_loss=0.2384, pruned_loss=0.119, over 18549.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.2384, pruned_loss=0.119, over 18549.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 1.0
+2024-09-01 02:10:37,118 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:15:37,614 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 5, validation: loss=0.2588, simple_loss=0.2515, pruned_loss=0.1195, over 1073944.00 frames.
+2024-09-01 02:15:37,615 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 02:17:13,048 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=102741.33333333333, ans=0.025
+2024-09-01 02:18:42,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 02:23:25,257 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 50, loss[loss=0.2825, simple_loss=0.2735, pruned_loss=0.1344, over 19008.00 frames. ], tot_loss[loss=0.2803, simple_loss=0.2701, pruned_loss=0.1361, over 828355.03 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 0.25
+2024-09-01 02:24:35,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=103061.33333333333, ans=0.025
+2024-09-01 02:25:48,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=103114.66666666667, ans=0.2
+2024-09-01 02:26:53,322 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.73 vs. limit=15.0
+2024-09-01 02:26:55,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103168.0, ans=0.125
+2024-09-01 02:27:11,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103168.0, ans=0.1
+2024-09-01 02:27:48,818 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 2.619e+02 2.908e+02 3.410e+02 5.061e+02, threshold=5.817e+02, percent-clipped=0.0
+2024-09-01 02:28:05,165 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 100, loss[loss=0.258, simple_loss=0.2514, pruned_loss=0.1201, over 19287.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.2659, pruned_loss=0.1335, over 1473652.43 frames. ], batch size: 144, lr: 1.00e-04, grad_scale: 0.5
+2024-09-01 02:28:34,860 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.42 vs. limit=15.0
+2024-09-01 02:29:16,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103328.0, ans=0.1
+2024-09-01 02:29:56,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=103381.33333333333, ans=0.025
+2024-09-01 02:30:03,404 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-5.pt
+2024-09-01 02:30:10,482 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 02:30:10,482 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:30:10,512 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 02:30:19,013 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 0, loss[loss=0.2578, simple_loss=0.2521, pruned_loss=0.1188, over 18610.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.2521, pruned_loss=0.1188, over 18610.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:30:19,014 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:30:42,393 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 6, validation: loss=0.247, simple_loss=0.2415, pruned_loss=0.1137, over 1073944.00 frames.
+2024-09-01 02:30:51,758 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 02:33:06,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=103584.0, ans=0.125
+2024-09-01 02:33:15,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=103584.0, ans=0.125
+2024-09-01 02:33:49,358 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 50, loss[loss=0.2701, simple_loss=0.2628, pruned_loss=0.1283, over 19047.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.2606, pruned_loss=0.1292, over 829577.21 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 0.5
+2024-09-01 02:34:09,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-09-01 02:34:09,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-09-01 02:34:12,830 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=20.23 vs. limit=15.0
+2024-09-01 02:34:20,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=103744.0, ans=0.125
+2024-09-01 02:34:34,965 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=10.53 vs. limit=15.0
+2024-09-01 02:34:35,011 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.02 vs. limit=6.0
+2024-09-01 02:34:39,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-09-01 02:34:40,107 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.401e+02 2.633e+02 2.975e+02 4.049e+02, threshold=5.266e+02, percent-clipped=0.0
+2024-09-01 02:34:44,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103797.33333333333, ans=0.1
+2024-09-01 02:34:52,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=103797.33333333333, ans=0.125
+2024-09-01 02:35:51,952 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:35:55,609 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 100, loss[loss=0.2561, simple_loss=0.2488, pruned_loss=0.1233, over 19232.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.2528, pruned_loss=0.1237, over 1476247.28 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:36:48,673 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:36:58,487 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-6.pt
+2024-09-01 02:37:28,669 INFO [dysarthria_finetune.py:1435] (0/4) (1454047232, 34072559616)
+2024-09-01 02:37:28,669 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:37:28,698 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 02:37:37,090 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 0, loss[loss=0.2413, simple_loss=0.2389, pruned_loss=0.1085, over 18570.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2389, pruned_loss=0.1085, over 18570.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:37:37,090 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:38:00,927 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 7, validation: loss=0.2303, simple_loss=0.2284, pruned_loss=0.1027, over 1073944.00 frames.
+2024-09-01 02:38:00,928 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 02:38:02,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104106.66666666667, ans=0.1
+2024-09-01 02:38:03,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104106.66666666667, ans=0.1
+2024-09-01 02:38:12,411 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.83 vs. limit=6.0
+2024-09-01 02:38:48,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104213.33333333333, ans=0.125
+2024-09-01 02:39:39,126 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.248e+02 2.388e+02 2.643e+02 3.863e+02, threshold=4.776e+02, percent-clipped=0.0
+2024-09-01 02:39:40,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=104320.0, ans=0.125
+2024-09-01 02:39:54,677 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 50, loss[loss=0.2572, simple_loss=0.255, pruned_loss=0.1164, over 18968.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.246, pruned_loss=0.1185, over 827907.61 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:40:19,469 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=104426.66666666667, ans=0.125
+2024-09-01 02:41:17,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=104533.33333333333, ans=0.035
+2024-09-01 02:41:32,446 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.30 vs. limit=10.0
+2024-09-01 02:41:41,952 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 100, loss[loss=0.2236, simple_loss=0.2256, pruned_loss=0.09572, over 19302.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.2418, pruned_loss=0.1155, over 1473040.93 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:42:08,796 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=104693.33333333333, ans=0.125
+2024-09-01 02:42:39,964 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-7.pt
+2024-09-01 02:42:44,190 INFO [dysarthria_finetune.py:1435] (0/4) (1412104192, 34072559616)
+2024-09-01 02:42:44,190 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:42:44,219 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 02:42:52,904 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 0, loss[loss=0.2588, simple_loss=0.2521, pruned_loss=0.1256, over 18485.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.2521, pruned_loss=0.1256, over 18485.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 4.0
+2024-09-01 02:42:52,904 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:43:16,304 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 8, validation: loss=0.2224, simple_loss=0.2225, pruned_loss=0.09892, over 1073944.00 frames.
+2024-09-01 02:43:16,305 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 02:43:18,014 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.58 vs. limit=12.0
+2024-09-01 02:43:28,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=104789.33333333333, ans=0.125
+2024-09-01 02:43:29,242 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.05 vs. limit=22.5
+2024-09-01 02:43:51,414 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.041e+02 2.195e+02 2.485e+02 3.530e+02, threshold=4.390e+02, percent-clipped=0.0
+2024-09-01 02:44:21,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-09-01 02:44:37,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=104949.33333333333, ans=0.09899494936611666
+2024-09-01 02:44:47,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=105002.66666666667, ans=0.025
+2024-09-01 02:45:06,407 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 50, loss[loss=0.2511, simple_loss=0.2451, pruned_loss=0.1218, over 18938.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2369, pruned_loss=0.1124, over 828565.55 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:46:41,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=105109.33333333333, ans=0.0
+2024-09-01 02:47:14,237 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.17 vs. limit=22.5
+2024-09-01 02:47:40,120 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=11.51 vs. limit=12.0
+2024-09-01 02:47:48,593 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.17 vs. limit=10.0
+2024-09-01 02:47:50,408 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:47:55,798 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 100, loss[loss=0.2388, simple_loss=0.2411, pruned_loss=0.1055, over 19222.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.2356, pruned_loss=0.1113, over 1474444.14 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:48:07,931 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=105322.66666666667, ans=0.0
+2024-09-01 02:48:24,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105376.0, ans=0.1
+2024-09-01 02:48:34,267 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 1.930e+02 2.062e+02 2.246e+02 3.148e+02, threshold=4.124e+02, percent-clipped=0.0
+2024-09-01 02:48:37,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=105429.33333333333, ans=0.125
+2024-09-01 02:48:42,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=105429.33333333333, ans=0.125
+2024-09-01 02:48:55,520 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-8.pt
+2024-09-01 02:49:00,083 INFO [dysarthria_finetune.py:1435] (0/4) (1412104192, 34072559616)
+2024-09-01 02:49:00,084 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:49:00,113 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 02:49:09,692 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 0, loss[loss=0.2515, simple_loss=0.2454, pruned_loss=0.1229, over 18596.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.2454, pruned_loss=0.1229, over 18596.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:49:09,693 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:49:40,375 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 9, validation: loss=0.211, simple_loss=0.2147, pruned_loss=0.09159, over 1073944.00 frames.
+2024-09-01 02:49:40,376 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 02:49:47,832 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.16 vs. limit=22.5
+2024-09-01 02:49:53,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=105477.33333333333, ans=0.0
+2024-09-01 02:50:05,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=105477.33333333333, ans=0.2
+2024-09-01 02:50:12,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=105477.33333333333, ans=10.0
+2024-09-01 02:50:21,123 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=17.18 vs. limit=15.0
+2024-09-01 02:50:36,408 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.34 vs. limit=15.0
+2024-09-01 02:51:30,736 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.79 vs. limit=10.0
+2024-09-01 02:52:12,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=105690.66666666667, ans=0.0
+2024-09-01 02:52:18,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=105690.66666666667, ans=0.2
+2024-09-01 02:52:30,683 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 50, loss[loss=0.2332, simple_loss=0.2358, pruned_loss=0.1042, over 19065.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2305, pruned_loss=0.1044, over 828972.56 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:52:35,445 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:53:01,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=105744.0, ans=0.0
+2024-09-01 02:53:01,723 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=105744.0, ans=0.025
+2024-09-01 02:53:08,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=105797.33333333333, ans=0.2
+2024-09-01 02:53:13,554 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.39 vs. limit=15.0
+2024-09-01 02:53:37,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=105850.66666666667, ans=0.2
+2024-09-01 02:53:46,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=105850.66666666667, ans=0.07
+2024-09-01 02:54:05,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=105850.66666666667, ans=0.0
+2024-09-01 02:54:08,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=105904.0, ans=0.0
+2024-09-01 02:54:17,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105904.0, ans=0.125
+2024-09-01 02:54:35,247 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 1.850e+02 1.979e+02 2.143e+02 2.885e+02, threshold=3.959e+02, percent-clipped=0.0
+2024-09-01 02:54:39,867 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105957.33333333333, ans=0.1
+2024-09-01 02:54:55,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:54:58,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:55:01,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:55:06,103 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 100, loss[loss=0.184, simple_loss=0.1937, pruned_loss=0.07395, over 19269.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.2285, pruned_loss=0.1031, over 1474236.32 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:55:15,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=106010.66666666667, ans=0.0
+2024-09-01 02:55:42,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=106064.0, ans=0.2
+2024-09-01 02:56:27,104 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-9.pt
+2024-09-01 02:56:31,510 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 02:56:31,511 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 02:56:31,543 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 02:56:40,078 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 0, loss[loss=0.1934, simple_loss=0.1995, pruned_loss=0.0832, over 18682.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.1995, pruned_loss=0.0832, over 18682.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:56:40,079 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 02:56:52,578 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.5255, 1.8610, 4.1780, 3.9660], device='cuda:0')
+2024-09-01 02:57:03,504 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 10, validation: loss=0.2075, simple_loss=0.2129, pruned_loss=0.09054, over 1073944.00 frames.
+2024-09-01 02:57:03,505 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 02:57:22,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=106165.33333333333, ans=0.125
+2024-09-01 02:57:38,114 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:57:40,469 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.38 vs. limit=22.5
+2024-09-01 02:58:14,399 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:58:14,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=106325.33333333333, ans=0.2
+2024-09-01 02:58:20,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:58:36,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106378.66666666667, ans=0.1
+2024-09-01 02:58:55,063 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 50, loss[loss=0.2409, simple_loss=0.2443, pruned_loss=0.1094, over 19012.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2261, pruned_loss=0.102, over 829104.52 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:58:56,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=106432.0, ans=0.07
+2024-09-01 02:58:56,490 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106432.0, ans=0.125
+2024-09-01 02:59:02,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=106432.0, ans=0.2
+2024-09-01 02:59:07,324 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:59:13,815 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.30 vs. limit=15.0
+2024-09-01 02:59:20,726 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.769e+02 1.897e+02 2.105e+02 2.891e+02, threshold=3.793e+02, percent-clipped=0.0
+2024-09-01 02:59:28,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=106485.33333333333, ans=0.2
+2024-09-01 02:59:45,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 03:00:26,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=106645.33333333333, ans=0.07
+2024-09-01 03:00:29,049 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.69 vs. limit=15.0
+2024-09-01 03:00:42,966 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 100, loss[loss=0.1987, simple_loss=0.2117, pruned_loss=0.0803, over 19226.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.223, pruned_loss=0.09873, over 1474931.95 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 03:00:46,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106698.66666666667, ans=0.125
+2024-09-01 03:00:57,338 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=106698.66666666667, ans=0.2
+2024-09-01 03:01:27,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=106752.0, ans=0.0
+2024-09-01 03:01:46,995 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-10.pt
+2024-09-01 03:07:52,130 INFO [dysarthria_finetune.py:1435] (0/4) (1416298496, 34072559616)
+2024-09-01 03:07:52,130 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:07:52,159 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:08:00,728 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 0, loss[loss=0.2256, simple_loss=0.23, pruned_loss=0.1025, over 18505.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.23, pruned_loss=0.1025, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:08:00,729 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:08:32,542 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 11, validation: loss=0.2002, simple_loss=0.2088, pruned_loss=0.08618, over 1073944.00 frames.
+2024-09-01 03:08:32,542 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:09:44,121 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=106906.66666666667, ans=0.2
+2024-09-01 03:09:56,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.21 vs. limit=15.0
+2024-09-01 03:10:00,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-09-01 03:10:19,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=106960.0, ans=0.0
+2024-09-01 03:10:28,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=107013.33333333333, ans=0.0
+2024-09-01 03:10:32,792 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.50 vs. limit=22.5
+2024-09-01 03:10:42,593 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.721e+02 1.824e+02 2.016e+02 2.682e+02, threshold=3.648e+02, percent-clipped=0.0
+2024-09-01 03:12:03,941 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 50, loss[loss=0.2143, simple_loss=0.2209, pruned_loss=0.09575, over 19023.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2221, pruned_loss=0.09671, over 827570.26 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:12:48,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-09-01 03:13:56,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=107226.66666666667, ans=0.2
+2024-09-01 03:15:13,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=107333.33333333333, ans=0.125
+2024-09-01 03:15:56,720 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 100, loss[loss=0.2003, simple_loss=0.2132, pruned_loss=0.08428, over 19237.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2204, pruned_loss=0.09574, over 1473115.37 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:16:20,510 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.35 vs. limit=10.0
+2024-09-01 03:17:12,316 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=107440.0, ans=0.125
+2024-09-01 03:17:56,546 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-11.pt
+2024-09-01 03:18:00,890 INFO [dysarthria_finetune.py:1435] (0/4) (1412104192, 34072559616)
+2024-09-01 03:18:00,890 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:18:00,920 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:18:09,586 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 0, loss[loss=0.2107, simple_loss=0.2148, pruned_loss=0.09758, over 18585.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2148, pruned_loss=0.09758, over 18585.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:18:09,587 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:18:33,048 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 12, validation: loss=0.1929, simple_loss=0.2049, pruned_loss=0.0821, over 1073944.00 frames.
+2024-09-01 03:18:33,049 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:18:41,827 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.14 vs. limit=15.0
+2024-09-01 03:18:54,487 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 1.683e+02 1.764e+02 1.920e+02 2.754e+02, threshold=3.529e+02, percent-clipped=0.0
+2024-09-01 03:20:46,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=107754.66666666667, ans=0.025
+2024-09-01 03:20:49,960 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.09 vs. limit=12.0
+2024-09-01 03:20:56,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=107754.66666666667, ans=0.125
+2024-09-01 03:21:03,703 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 50, loss[loss=0.197, simple_loss=0.2118, pruned_loss=0.08272, over 18986.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2128, pruned_loss=0.08763, over 829307.75 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:21:26,179 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=12.18 vs. limit=15.0
+2024-09-01 03:22:34,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 03:23:14,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=107968.0, ans=0.2
+2024-09-01 03:24:01,752 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 100, loss[loss=0.1852, simple_loss=0.1986, pruned_loss=0.07908, over 19194.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2124, pruned_loss=0.08917, over 1473409.16 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:24:21,998 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.650e+02 1.753e+02 1.928e+02 2.697e+02, threshold=3.507e+02, percent-clipped=0.0
+2024-09-01 03:24:23,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108074.66666666667, ans=0.1
+2024-09-01 03:24:48,727 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.06 vs. limit=15.0
+2024-09-01 03:24:50,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108181.33333333333, ans=0.125
+2024-09-01 03:25:11,683 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-12.pt
+2024-09-01 03:25:16,171 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 03:25:16,171 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:25:16,201 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:25:24,769 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 0, loss[loss=0.2429, simple_loss=0.2404, pruned_loss=0.1199, over 18643.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2404, pruned_loss=0.1199, over 18643.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:25:24,770 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:25:48,259 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 13, validation: loss=0.1886, simple_loss=0.2026, pruned_loss=0.08078, over 1073944.00 frames.
+2024-09-01 03:25:48,259 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:26:07,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=108229.33333333333, ans=0.125
+2024-09-01 03:26:17,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108282.66666666667, ans=0.1
+2024-09-01 03:27:10,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=108389.33333333333, ans=0.0
+2024-09-01 03:27:12,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=108389.33333333333, ans=0.025
+2024-09-01 03:27:30,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=108442.66666666667, ans=0.125
+2024-09-01 03:27:54,668 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 50, loss[loss=0.1725, simple_loss=0.1958, pruned_loss=0.06699, over 19011.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2112, pruned_loss=0.08759, over 829773.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 1.0
+2024-09-01 03:27:58,718 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.26 vs. limit=15.0
+2024-09-01 03:28:05,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108496.0, ans=0.1
+2024-09-01 03:28:42,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=108602.66666666667, ans=0.0
+2024-09-01 03:29:07,380 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.617e+02 1.723e+02 2.007e+02 2.594e+02, threshold=3.446e+02, percent-clipped=0.0
+2024-09-01 03:29:08,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 03:29:25,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-09-01 03:29:32,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 03:29:45,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 03:29:45,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=108762.66666666667, ans=0.0
+2024-09-01 03:29:46,147 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 100, loss[loss=0.1379, simple_loss=0.1628, pruned_loss=0.05001, over 19225.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.21, pruned_loss=0.08675, over 1474982.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 2.0
+2024-09-01 03:30:00,768 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.34 vs. limit=10.0
+2024-09-01 03:30:13,647 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.44 vs. limit=15.0
+2024-09-01 03:30:41,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=108869.33333333333, ans=0.025
+2024-09-01 03:30:46,117 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-13.pt
+2024-09-01 03:30:50,601 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 03:30:50,601 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:30:50,631 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:30:58,993 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 0, loss[loss=0.2071, simple_loss=0.2241, pruned_loss=0.09005, over 18695.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2241, pruned_loss=0.09005, over 18695.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:30:58,994 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:31:07,320 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.4236, 5.1880, 4.8787, 4.2392], device='cuda:0')
+2024-09-01 03:31:23,131 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.6024, 1.4805, 1.0069, 1.4129, 1.6895, 1.5457, 1.5948, 1.6163], device='cuda:0')
+2024-09-01 03:31:23,178 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 14, validation: loss=0.1833, simple_loss=0.2, pruned_loss=0.07856, over 1073944.00 frames.
+2024-09-01 03:31:23,178 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:31:26,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 03:31:40,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108917.33333333333, ans=0.1
+2024-09-01 03:31:49,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.04 vs. limit=10.0
+2024-09-01 03:32:00,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108970.66666666667, ans=0.1
+2024-09-01 03:32:08,924 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff3.min_abs, batch_count=109024.0, ans=0.2
+2024-09-01 03:32:11,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=109024.0, ans=0.125
+2024-09-01 03:32:21,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=109024.0, ans=0.125
+2024-09-01 03:33:13,481 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 50, loss[loss=0.1743, simple_loss=0.2043, pruned_loss=0.06634, over 18964.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2072, pruned_loss=0.08496, over 828263.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:33:19,742 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.619e+02 1.722e+02 1.984e+02 2.668e+02, threshold=3.445e+02, percent-clipped=0.0
+2024-09-01 03:33:31,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=109184.0, ans=0.125
+2024-09-01 03:33:31,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=109184.0, ans=0.0
+2024-09-01 03:33:57,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=109290.66666666667, ans=0.0
+2024-09-01 03:34:17,462 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.77 vs. limit=6.0
+2024-09-01 03:35:00,699 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 100, loss[loss=0.1676, simple_loss=0.1841, pruned_loss=0.07243, over 19207.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2073, pruned_loss=0.08486, over 1474261.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 8.0
+2024-09-01 03:35:14,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=109450.66666666667, ans=0.0
+2024-09-01 03:35:41,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=109557.33333333333, ans=0.0
+2024-09-01 03:36:00,182 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-14.pt
+2024-09-01 03:36:05,441 INFO [dysarthria_finetune.py:1435] (0/4) (1454047232, 34072559616)
+2024-09-01 03:36:05,441 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:36:05,470 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:36:14,232 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 0, loss[loss=0.2357, simple_loss=0.2492, pruned_loss=0.1084, over 18509.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2492, pruned_loss=0.1084, over 18509.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:36:14,233 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:36:45,416 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 15, validation: loss=0.1765, simple_loss=0.1963, pruned_loss=0.07531, over 1073944.00 frames.
+2024-09-01 03:36:45,417 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:36:47,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 03:37:11,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109605.33333333333, ans=0.1
+2024-09-01 03:37:18,331 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=12.91 vs. limit=12.0
+2024-09-01 03:37:18,353 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=22.26 vs. limit=15.0
+2024-09-01 03:37:31,972 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.49 vs. limit=15.0
+2024-09-01 03:38:03,356 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.579e+02 1.672e+02 1.908e+02 2.431e+02, threshold=3.343e+02, percent-clipped=0.0
+2024-09-01 03:39:13,577 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109872.0, ans=0.1
+2024-09-01 03:39:14,758 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 50, loss[loss=0.1856, simple_loss=0.2042, pruned_loss=0.08139, over 19011.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2009, pruned_loss=0.07931, over 827942.50 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:40:36,591 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.32 vs. limit=10.0
+2024-09-01 03:40:44,730 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.30 vs. limit=6.0
+2024-09-01 03:40:49,913 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.21 vs. limit=15.0
+2024-09-01 03:41:54,222 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 100, loss[loss=0.1683, simple_loss=0.1907, pruned_loss=0.07146, over 19251.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2008, pruned_loss=0.07977, over 1473903.80 frames. ], batch size: 144, lr: 9.95e-05, grad_scale: 8.0
+2024-09-01 03:42:24,137 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.35 vs. limit=15.0
+2024-09-01 03:43:12,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=110245.33333333333, ans=0.1
+2024-09-01 03:43:21,363 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.548e+02 1.650e+02 1.862e+02 2.617e+02, threshold=3.300e+02, percent-clipped=0.0
+2024-09-01 03:43:24,585 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-15.pt
+2024-09-01 03:43:30,754 INFO [dysarthria_finetune.py:1435] (0/4) (1412104192, 34072559616)
+2024-09-01 03:43:30,754 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:43:30,783 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:43:40,069 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 0, loss[loss=0.212, simple_loss=0.2271, pruned_loss=0.09751, over 18729.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2271, pruned_loss=0.09751, over 18729.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:43:40,069 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:44:25,988 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 16, validation: loss=0.1763, simple_loss=0.1967, pruned_loss=0.07691, over 1073944.00 frames.
+2024-09-01 03:44:25,989 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:45:04,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-09-01 03:45:54,236 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.09 vs. limit=6.0
+2024-09-01 03:46:03,009 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:46:25,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=110400.0, ans=0.0
+2024-09-01 03:48:00,195 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=110506.66666666667, ans=0.1
+2024-09-01 03:48:37,719 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 50, loss[loss=0.1597, simple_loss=0.1896, pruned_loss=0.06457, over 18988.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.2023, pruned_loss=0.08062, over 828175.61 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 03:49:07,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110560.0, ans=0.1
+2024-09-01 03:51:34,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110720.0, ans=0.1
+2024-09-01 03:51:52,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=110773.33333333333, ans=0.025
+2024-09-01 03:52:02,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 03:52:26,721 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.555e+02 1.657e+02 1.896e+02 2.445e+02, threshold=3.314e+02, percent-clipped=0.0
+2024-09-01 03:52:31,263 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 100, loss[loss=0.1603, simple_loss=0.1863, pruned_loss=0.06715, over 19270.00 frames. ], tot_loss[loss=0.1801, simple_loss=0.2004, pruned_loss=0.07963, over 1473314.28 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:54:05,648 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.95 vs. limit=6.0
+2024-09-01 03:54:10,906 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-16.pt
+2024-09-01 03:54:30,050 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 03:54:30,050 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 03:54:30,080 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 03:54:38,976 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 0, loss[loss=0.2101, simple_loss=0.2144, pruned_loss=0.1029, over 18739.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2144, pruned_loss=0.1029, over 18739.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:54:38,977 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 03:54:45,295 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.3006, 1.6254, 3.2721, 3.1045], device='cuda:0')
+2024-09-01 03:54:58,430 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([6.9866, 6.0336, 6.2285, 6.1565], device='cuda:0')
+2024-09-01 03:55:19,872 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 17, validation: loss=0.1671, simple_loss=0.1912, pruned_loss=0.07151, over 1073944.00 frames.
+2024-09-01 03:55:19,873 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 03:55:22,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110981.33333333333, ans=0.1
+2024-09-01 03:56:16,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 03:56:27,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=111034.66666666667, ans=0.1
+2024-09-01 03:56:56,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=111088.0, ans=0.125
+2024-09-01 03:57:45,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=111141.33333333333, ans=0.125
+2024-09-01 03:58:57,661 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 50, loss[loss=0.1722, simple_loss=0.2039, pruned_loss=0.0702, over 19028.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.1963, pruned_loss=0.07632, over 827378.67 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 03:59:47,869 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 04:01:02,159 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.555e+02 1.659e+02 1.888e+02 2.626e+02, threshold=3.319e+02, percent-clipped=0.0
+2024-09-01 04:01:09,209 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111354.66666666667, ans=0.125
+2024-09-01 04:01:45,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=111408.0, ans=0.2
+2024-09-01 04:01:58,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=111408.0, ans=0.125
+2024-09-01 04:02:21,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111461.33333333333, ans=0.125
+2024-09-01 04:02:48,360 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 100, loss[loss=0.1444, simple_loss=0.1741, pruned_loss=0.05738, over 19218.00 frames. ], tot_loss[loss=0.178, simple_loss=0.199, pruned_loss=0.07852, over 1473529.96 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 04:02:59,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=111514.66666666667, ans=0.1
+2024-09-01 04:03:17,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=111514.66666666667, ans=0.125
+2024-09-01 04:04:03,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111568.0, ans=0.125
+2024-09-01 04:04:09,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=111621.33333333333, ans=0.125
+2024-09-01 04:05:21,573 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-17.pt
+2024-09-01 04:05:29,262 INFO [dysarthria_finetune.py:1435] (0/4) (1454047232, 34072559616)
+2024-09-01 04:05:29,262 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 04:05:29,291 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 04:05:38,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 04:05:38,881 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 0, loss[loss=0.2063, simple_loss=0.2202, pruned_loss=0.09617, over 18538.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2202, pruned_loss=0.09617, over 18538.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:05:38,882 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 04:06:14,846 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 18, validation: loss=0.1676, simple_loss=0.191, pruned_loss=0.07213, over 1073944.00 frames.
+2024-09-01 04:06:14,847 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 04:08:04,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111776.0, ans=0.125
+2024-09-01 04:08:44,231 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.19 vs. limit=22.5
+2024-09-01 04:09:08,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.22 vs. limit=15.0
+2024-09-01 04:09:19,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111829.33333333333, ans=0.1
+2024-09-01 04:09:42,029 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 1.516e+02 1.635e+02 1.895e+02 3.024e+02, threshold=3.269e+02, percent-clipped=0.0
+2024-09-01 04:09:43,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=111882.66666666667, ans=0.125
+2024-09-01 04:10:05,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=111882.66666666667, ans=0.0
+2024-09-01 04:10:10,252 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 50, loss[loss=0.1676, simple_loss=0.1984, pruned_loss=0.0684, over 18998.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.1981, pruned_loss=0.0775, over 828205.61 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:10:57,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=111989.33333333333, ans=0.125
+2024-09-01 04:12:38,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=112042.66666666667, ans=0.0
+2024-09-01 04:12:52,376 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=112042.66666666667, ans=0.0
+2024-09-01 04:13:26,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=112096.0, ans=0.2
+2024-09-01 04:13:30,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=112096.0, ans=0.125
+2024-09-01 04:14:13,993 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 100, loss[loss=0.1437, simple_loss=0.1802, pruned_loss=0.05357, over 19294.00 frames. ], tot_loss[loss=0.1739, simple_loss=0.1965, pruned_loss=0.07568, over 1473690.24 frames. ], batch size: 144, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:14:27,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112202.66666666667, ans=0.1
+2024-09-01 04:14:52,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=112256.0, ans=0.0
+2024-09-01 04:15:53,971 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=112309.33333333333, ans=0.2
+2024-09-01 04:15:56,121 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-18.pt
+2024-09-01 04:16:07,172 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 04:16:07,172 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 04:16:07,203 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 04:16:15,956 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 0, loss[loss=0.1948, simple_loss=0.2148, pruned_loss=0.08744, over 18598.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2148, pruned_loss=0.08744, over 18598.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:16:15,957 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 04:16:39,398 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 19, validation: loss=0.1638, simple_loss=0.1883, pruned_loss=0.06968, over 1073944.00 frames.
+2024-09-01 04:16:39,398 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 04:16:45,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=112352.0, ans=0.0
+2024-09-01 04:16:50,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112352.0, ans=0.125
+2024-09-01 04:16:57,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=112352.0, ans=0.09899494936611666
+2024-09-01 04:16:59,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=112352.0, ans=0.0
+2024-09-01 04:17:15,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=112405.33333333333, ans=0.025
+2024-09-01 04:17:22,801 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.531e+02 1.615e+02 1.818e+02 2.373e+02, threshold=3.231e+02, percent-clipped=0.0
+2024-09-01 04:17:38,216 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 04:17:38,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 04:18:14,103 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.50 vs. limit=15.0
+2024-09-01 04:18:37,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-09-01 04:19:10,160 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 50, loss[loss=0.1625, simple_loss=0.1897, pruned_loss=0.06767, over 19038.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.1935, pruned_loss=0.075, over 827203.46 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 4.0
+2024-09-01 04:19:16,118 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.25 vs. limit=15.0
+2024-09-01 04:19:32,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=112672.0, ans=0.125
+2024-09-01 04:20:27,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.51 vs. limit=10.0
+2024-09-01 04:20:38,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112778.66666666667, ans=0.1
+2024-09-01 04:21:10,130 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 100, loss[loss=0.1603, simple_loss=0.1872, pruned_loss=0.06673, over 19274.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.1912, pruned_loss=0.07308, over 1472434.33 frames. ], batch size: 144, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:21:22,819 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 04:21:45,541 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.33 vs. limit=15.0
+2024-09-01 04:21:47,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112938.66666666667, ans=0.1
+2024-09-01 04:21:54,611 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.501e+02 1.584e+02 1.820e+02 2.268e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-09-01 04:22:11,533 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-19.pt
+2024-09-01 04:22:18,153 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 04:22:18,154 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 04:22:18,185 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 04:22:26,680 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 0, loss[loss=0.1587, simple_loss=0.1814, pruned_loss=0.06804, over 18599.00 frames. ], tot_loss[loss=0.1587, simple_loss=0.1814, pruned_loss=0.06804, over 18599.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:22:26,680 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-09-01 04:22:50,260 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 20, validation: loss=0.1638, simple_loss=0.1875, pruned_loss=0.07, over 1073944.00 frames.
+2024-09-01 04:22:50,261 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26725MB
+2024-09-01 04:22:51,932 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.92 vs. limit=15.0
+2024-09-01 04:23:00,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=113040.0, ans=0.125
+2024-09-01 04:23:09,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=113040.0, ans=0.0
+2024-09-01 04:23:20,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=113093.33333333333, ans=0.025
+2024-09-01 04:23:23,296 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.19 vs. limit=6.0
+2024-09-01 04:23:55,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=113146.66666666667, ans=0.125
+2024-09-01 04:24:18,926 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=113200.0, ans=0.025
+2024-09-01 04:24:43,339 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.62 vs. limit=22.5
+2024-09-01 04:24:55,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=113253.33333333333, ans=0.025
+2024-09-01 04:25:07,018 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 50, loss[loss=0.168, simple_loss=0.1934, pruned_loss=0.07124, over 18985.00 frames. ], tot_loss[loss=0.171, simple_loss=0.193, pruned_loss=0.07451, over 828130.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 4.0
+2024-09-01 04:26:06,481 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.99 vs. limit=15.0
+2024-09-01 04:26:32,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=113413.33333333333, ans=0.125
+2024-09-01 04:26:35,450 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.13 vs. limit=15.0
+2024-09-01 04:27:05,373 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.522e+02 1.605e+02 1.869e+02 2.652e+02, threshold=3.210e+02, percent-clipped=0.0
+2024-09-01 04:27:15,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=113520.0, ans=0.2
+2024-09-01 04:27:26,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=113573.33333333333, ans=0.125
+2024-09-01 04:27:27,162 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 100, loss[loss=0.1512, simple_loss=0.1762, pruned_loss=0.06313, over 19321.00 frames. ], tot_loss[loss=0.1686, simple_loss=0.1915, pruned_loss=0.07286, over 1472900.97 frames. ], batch size: 144, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:28:21,567 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=113626.66666666667, ans=0.125
+2024-09-01 04:28:24,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=113680.0, ans=0.0
+2024-09-01 04:28:26,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=113680.0, ans=0.2
+2024-09-01 04:28:38,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=113680.0, ans=0.0
+2024-09-01 04:28:43,106 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune/epoch-20.pt
+2024-09-01 04:28:49,631 INFO [dysarthria_finetune.py:1435] (0/4) (1414201344, 34072559616)
+2024-09-01 04:28:49,631 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-09-01 04:28:49,661 INFO [dysarthria_finetune.py:1440] (0/4) (29908205568, 34072559616)
+2024-09-01 04:28:49,661 INFO [dysarthria_finetune.py:1442] (0/4) Done!
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-1 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-1
new file mode 100644
index 0000000000000000000000000000000000000000..bbdeada70f7077e1130313078061fc641e97b81c
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-1
@@ -0,0 +1,547 @@
+2024-08-31 13:16:10,953 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-31 13:16:10,986 INFO [dysarthria_finetune.py:1214] (1/4) (33735507968, 34072559616)
+2024-08-31 13:16:10,986 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-31 13:16:11,957 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-31 13:16:11,957 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-31 13:16:14,953 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 66110931
+2024-08-31 13:16:16,265 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-31 13:18:23,850 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-31 13:20:29,533 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-31 13:20:29,665 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:20:29,919 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-31 13:20:29,919 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-31 13:20:29,919 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-31 13:20:29,919 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-31 13:20:29,919 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-31 13:20:31,921 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-31 13:20:32,860 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-31 13:20:32,865 INFO [dysarthria_asr_datamodule.py:501] (1/4) About to get dev cuts
+2024-08-31 13:20:33,113 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-31 13:20:33,461 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-31 13:20:33,461 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:44:09,211 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.15 vs. limit=5.0
+2024-08-31 13:44:10,208 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=8.80 vs. limit=7.5
+2024-08-31 13:44:14,874 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-31 13:45:00,384 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=15.02 vs. limit=7.5
+2024-08-31 13:45:00,949 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-31 13:47:50,045 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-31 13:47:52,427 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-31 13:50:07,939 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.92 vs. limit=3.0
+2024-08-31 13:50:20,329 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-31 13:50:22,578 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 12658MB
+2024-08-31 13:51:23,105 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], tot_loss[loss=0.385, simple_loss=0.3627, pruned_loss=0.2224, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 13:51:23,105 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-31 14:29:03,504 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-31 14:29:03,505 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-31 14:32:02,017 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.75 vs. limit=15.0
+2024-08-31 14:42:00,278 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
+2024-08-31 15:24:16,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-31 15:47:57,557 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=8.34 vs. limit=12.0
+2024-08-31 15:52:42,138 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.685e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-31 16:18:01,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=100160.0, ans=0.0
+2024-08-31 16:22:18,542 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=100160.0, ans=0.125
+2024-08-31 16:32:12,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=100213.33333333333, ans=0.0
+2024-08-31 16:32:13,898 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 6.192e+02 7.846e+02 8.685e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-31 16:33:28,400 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.05 vs. limit=15.0
+2024-08-31 17:02:17,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-31 17:02:30,144 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.4065, simple_loss=0.3849, pruned_loss=0.216, over 19042.00 frames. ], tot_loss[loss=0.3907, simple_loss=0.3686, pruned_loss=0.2194, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 17:15:25,325 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=14.92 vs. limit=15.0
+2024-08-31 17:24:15,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100320.0, ans=0.1
+2024-08-31 17:26:36,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=100320.0, ans=0.0
+2024-08-31 17:38:48,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100373.33333333333, ans=0.125
+2024-08-31 17:57:49,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=100426.66666666667, ans=0.2
+2024-08-31 18:02:28,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=100426.66666666667, ans=0.2
+2024-08-31 18:02:28,600 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.83 vs. limit=15.0
+2024-08-31 18:03:25,561 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=100480.0, ans=0.0
+2024-08-31 18:03:26,030 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=21.70 vs. limit=15.0
+2024-08-31 18:03:46,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=100480.0, ans=0.125
+2024-08-31 18:06:32,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100480.0, ans=0.0
+2024-08-31 18:08:20,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=100480.0, ans=0.125
+2024-08-31 18:12:52,331 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.982e+02 7.682e+02 8.607e+02 1.055e+03, threshold=1.536e+03, percent-clipped=0.0
+2024-08-31 18:12:52,368 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.389, simple_loss=0.3686, pruned_loss=0.2046, over 19093.00 frames. ], tot_loss[loss=0.3765, simple_loss=0.3557, pruned_loss=0.2068, over 1470684.91 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-31 18:14:55,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=8.09 vs. limit=6.0
+2024-08-31 18:40:56,939 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.92 vs. limit=15.0
+2024-08-31 18:42:00,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=100640.0, ans=0.2
+2024-08-31 18:44:19,282 INFO [dysarthria_finetune.py:1435] (1/4) (4260036608, 34072559616)
+2024-08-31 18:44:19,283 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-31 18:44:19,342 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-08-31 18:46:01,811 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 0, loss[loss=0.3255, simple_loss=0.3086, pruned_loss=0.1697, over 18746.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3086, pruned_loss=0.1697, over 18746.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-31 18:46:01,812 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-31 19:10:08,816 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 2, validation: loss=0.3307, simple_loss=0.3141, pruned_loss=0.1687, over 1073944.00 frames.
+2024-08-31 19:10:08,816 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13360MB
+2024-08-31 19:26:44,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.90 vs. limit=15.0
+2024-08-31 19:29:07,880 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.91 vs. limit=22.5
+2024-08-31 19:48:00,190 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=14.77 vs. limit=12.0
+2024-08-31 19:51:01,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-31 20:05:00,931 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.38 vs. limit=15.0
+2024-08-31 20:15:21,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-31 20:15:22,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-31 20:31:44,803 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 50, loss[loss=0.402, simple_loss=0.3797, pruned_loss=0.2175, over 19071.00 frames. ], tot_loss[loss=0.3532, simple_loss=0.3343, pruned_loss=0.1876, over 827854.65 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-31 20:33:01,095 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.96 vs. limit=22.5
+2024-08-31 20:51:20,585 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.69 vs. limit=15.0
+2024-08-31 20:55:24,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101002.66666666667, ans=0.1
+2024-08-31 20:58:18,279 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.84 vs. limit=15.0
+2024-08-31 21:01:41,643 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.35 vs. limit=6.0
+2024-08-31 21:03:09,445 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.367e+02 4.995e+02 5.661e+02 6.268e+02 7.321e+02, threshold=1.132e+03, percent-clipped=0.0
+2024-08-31 21:18:50,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-31 21:18:51,000 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-31 21:20:54,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-31 21:20:54,857 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.51 vs. limit=6.0
+2024-08-31 21:26:13,034 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.57 vs. limit=15.0
+2024-08-31 21:40:46,797 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101162.66666666667, ans=0.125
+2024-08-31 21:42:13,844 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 100, loss[loss=0.3321, simple_loss=0.3165, pruned_loss=0.1663, over 19090.00 frames. ], tot_loss[loss=0.3448, simple_loss=0.3267, pruned_loss=0.1813, over 1472213.55 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 4.0
+2024-08-31 22:21:43,987 INFO [dysarthria_finetune.py:1435] (1/4) (30081024, 34072559616)
+2024-08-31 22:21:43,988 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-31 22:21:44,061 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-08-31 22:22:38,984 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 0, loss[loss=0.352, simple_loss=0.3312, pruned_loss=0.1954, over 18511.00 frames. ], tot_loss[loss=0.352, simple_loss=0.3312, pruned_loss=0.1954, over 18511.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 2.0
+2024-08-31 22:22:38,985 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-31 22:31:34,595 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 3, validation: loss=0.2979, simple_loss=0.2853, pruned_loss=0.1432, over 1073944.00 frames.
+2024-08-31 22:31:34,955 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13741MB
+2024-08-31 22:44:42,479 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 22:50:32,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=101424.0, ans=0.0
+2024-08-31 23:03:41,630 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=13.14 vs. limit=15.0
+2024-08-31 23:06:44,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-08-31 23:17:17,664 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.64 vs. limit=15.0
+2024-08-31 23:30:40,080 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.802e+02 3.787e+02 4.308e+02 4.929e+02 6.122e+02, threshold=8.616e+02, percent-clipped=0.0
+2024-08-31 23:32:38,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=101637.33333333333, ans=0.125
+2024-08-31 23:32:38,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=101637.33333333333, ans=0.07
+2024-08-31 23:32:42,302 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 50, loss[loss=0.3363, simple_loss=0.3191, pruned_loss=0.175, over 19005.00 frames. ], tot_loss[loss=0.3273, simple_loss=0.3109, pruned_loss=0.1692, over 828905.42 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 1.0
+2024-09-01 00:13:44,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=101850.66666666667, ans=0.05
+2024-09-01 00:13:44,418 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.59 vs. limit=12.0
+2024-09-01 00:17:08,365 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 100, loss[loss=0.2752, simple_loss=0.2658, pruned_loss=0.1272, over 19133.00 frames. ], tot_loss[loss=0.3162, simple_loss=0.3011, pruned_loss=0.1606, over 1474266.40 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 1.0
+2024-09-01 00:27:26,415 INFO [dysarthria_finetune.py:1435] (1/4) (751501312, 34072559616)
+2024-09-01 00:27:26,416 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 00:27:26,488 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 00:27:42,721 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 0, loss[loss=0.2836, simple_loss=0.2705, pruned_loss=0.1431, over 18466.00 frames. ], tot_loss[loss=0.2836, simple_loss=0.2705, pruned_loss=0.1431, over 18466.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 2.0
+2024-09-01 00:27:42,721 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 00:46:27,445 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 4, validation: loss=0.279, simple_loss=0.2687, pruned_loss=0.1325, over 1073944.00 frames.
+2024-09-01 00:46:27,445 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13741MB
+2024-09-01 01:00:08,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102053.33333333333, ans=0.125
+2024-09-01 01:00:08,498 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.43 vs. limit=15.0
+2024-09-01 01:16:25,440 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.50 vs. limit=10.0
+2024-09-01 01:16:52,147 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.614e+02 3.221e+02 3.659e+02 4.077e+02 5.349e+02, threshold=7.318e+02, percent-clipped=0.0
+2024-09-01 01:18:56,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:21:13,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=102213.33333333333, ans=0.0
+2024-09-01 01:25:29,400 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.11 vs. limit=10.0
+2024-09-01 01:27:58,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=102266.66666666667, ans=0.1
+2024-09-01 01:31:59,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=102266.66666666667, ans=0.125
+2024-09-01 01:39:48,718 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 50, loss[loss=0.2835, simple_loss=0.2737, pruned_loss=0.1338, over 18961.00 frames. ], tot_loss[loss=0.3005, simple_loss=0.2876, pruned_loss=0.1492, over 827373.05 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 0.5
+2024-09-01 01:41:25,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=102320.0, ans=0.0
+2024-09-01 01:42:52,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=102320.0, ans=0.125
+2024-09-01 01:44:49,010 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.69 vs. limit=6.0
+2024-09-01 01:47:07,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102373.33333333333, ans=0.125
+2024-09-01 01:47:40,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=12.81 vs. limit=12.0
+2024-09-01 01:50:19,740 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.59 vs. limit=6.0
+2024-09-01 01:59:50,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=102533.33333333333, ans=0.025
+2024-09-01 02:01:07,716 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 100, loss[loss=0.292, simple_loss=0.2822, pruned_loss=0.1383, over 19038.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.2821, pruned_loss=0.1463, over 1472261.06 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 1.0
+2024-09-01 02:05:31,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=102640.0, ans=0.0
+2024-09-01 02:07:59,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=102640.0, ans=0.5
+2024-09-01 02:09:39,912 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 2.859e+02 3.213e+02 3.589e+02 4.738e+02, threshold=6.426e+02, percent-clipped=0.0
+2024-09-01 02:09:57,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102693.33333333333, ans=0.1
+2024-09-01 02:10:16,926 INFO [dysarthria_finetune.py:1435] (1/4) (986382336, 34072559616)
+2024-09-01 02:10:16,927 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:10:16,997 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 02:10:37,119 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 0, loss[loss=0.2672, simple_loss=0.2581, pruned_loss=0.1276, over 18670.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.2581, pruned_loss=0.1276, over 18670.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 1.0
+2024-09-01 02:10:37,120 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:15:37,616 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 5, validation: loss=0.2588, simple_loss=0.2515, pruned_loss=0.1195, over 1073944.00 frames.
+2024-09-01 02:15:37,616 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13741MB
+2024-09-01 02:17:13,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=102741.33333333333, ans=0.025
+2024-09-01 02:18:24,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=102794.66666666667, ans=0.0
+2024-09-01 02:18:38,280 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.15 vs. limit=15.0
+2024-09-01 02:18:41,911 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.54 vs. limit=22.5
+2024-09-01 02:19:48,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=102848.0, ans=0.125
+2024-09-01 02:20:16,807 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=102848.0, ans=0.025
+2024-09-01 02:20:17,157 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.34 vs. limit=6.0
+2024-09-01 02:23:25,250 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 50, loss[loss=0.2869, simple_loss=0.2761, pruned_loss=0.1406, over 18968.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.2688, pruned_loss=0.135, over 828630.89 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 0.25
+2024-09-01 02:24:38,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=103061.33333333333, ans=0.125
+2024-09-01 02:25:34,005 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103061.33333333333, ans=0.125
+2024-09-01 02:25:52,919 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 02:26:35,043 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.28 vs. limit=15.0
+2024-09-01 02:27:25,351 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.97 vs. limit=6.0
+2024-09-01 02:27:28,390 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.75 vs. limit=15.0
+2024-09-01 02:27:48,820 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 2.619e+02 2.908e+02 3.410e+02 5.061e+02, threshold=5.817e+02, percent-clipped=0.0
+2024-09-01 02:27:54,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=103221.33333333333, ans=0.2
+2024-09-01 02:28:05,167 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 100, loss[loss=0.2608, simple_loss=0.2525, pruned_loss=0.1251, over 19157.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.2656, pruned_loss=0.1337, over 1473409.40 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 0.5
+2024-09-01 02:29:05,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103328.0, ans=0.1
+2024-09-01 02:29:05,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=103328.0, ans=0.125
+2024-09-01 02:30:03,385 INFO [dysarthria_finetune.py:1435] (1/4) (206241792, 34072559616)
+2024-09-01 02:30:03,386 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:30:03,459 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 02:30:19,017 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 0, loss[loss=0.273, simple_loss=0.2654, pruned_loss=0.1292, over 18435.00 frames. ], tot_loss[loss=0.273, simple_loss=0.2654, pruned_loss=0.1292, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:30:19,018 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:30:42,395 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 6, validation: loss=0.247, simple_loss=0.2415, pruned_loss=0.1137, over 1073944.00 frames.
+2024-09-01 02:30:51,758 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13802MB
+2024-09-01 02:32:21,839 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103477.33333333333, ans=0.1
+2024-09-01 02:32:55,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=103530.66666666667, ans=0.125
+2024-09-01 02:33:12,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=103584.0, ans=0.2
+2024-09-01 02:33:29,944 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=20.43 vs. limit=15.0
+2024-09-01 02:33:49,355 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 50, loss[loss=0.2351, simple_loss=0.2307, pruned_loss=0.1078, over 19041.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.2549, pruned_loss=0.1245, over 827399.35 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 0.5
+2024-09-01 02:33:52,846 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.59 vs. limit=15.0
+2024-09-01 02:34:02,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=103690.66666666667, ans=0.2
+2024-09-01 02:34:29,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=103744.0, ans=0.125
+2024-09-01 02:34:37,117 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=103744.0, ans=0.0
+2024-09-01 02:34:37,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103744.0, ans=0.1
+2024-09-01 02:34:40,101 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.401e+02 2.633e+02 2.975e+02 4.049e+02, threshold=5.266e+02, percent-clipped=0.0
+2024-09-01 02:34:42,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=103797.33333333333, ans=0.2
+2024-09-01 02:35:55,583 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 100, loss[loss=0.2186, simple_loss=0.2172, pruned_loss=0.09615, over 19066.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.2511, pruned_loss=0.1221, over 1471849.14 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:36:03,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103957.33333333333, ans=0.125
+2024-09-01 02:36:08,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=103957.33333333333, ans=0.5
+2024-09-01 02:36:15,328 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.74 vs. limit=15.0
+2024-09-01 02:36:58,487 INFO [dysarthria_finetune.py:1435] (1/4) (1099628544, 34072559616)
+2024-09-01 02:36:58,488 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:36:58,570 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 02:37:37,099 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 0, loss[loss=0.2803, simple_loss=0.2652, pruned_loss=0.1482, over 18532.00 frames. ], tot_loss[loss=0.2803, simple_loss=0.2652, pruned_loss=0.1482, over 18532.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:37:37,099 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:38:00,932 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 7, validation: loss=0.2303, simple_loss=0.2284, pruned_loss=0.1027, over 1073944.00 frames.
+2024-09-01 02:38:00,932 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13802MB
+2024-09-01 02:38:05,413 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=16.53 vs. limit=15.0
+2024-09-01 02:38:12,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=104106.66666666667, ans=0.0
+2024-09-01 02:38:48,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104213.33333333333, ans=0.1
+2024-09-01 02:38:50,861 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.93 vs. limit=22.5
+2024-09-01 02:39:00,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=104213.33333333333, ans=0.0
+2024-09-01 02:39:31,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104320.0, ans=0.125
+2024-09-01 02:39:39,116 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.248e+02 2.388e+02 2.643e+02 3.863e+02, threshold=4.776e+02, percent-clipped=0.0
+2024-09-01 02:39:40,397 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:39:54,673 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 50, loss[loss=0.2541, simple_loss=0.2467, pruned_loss=0.1236, over 19096.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.2481, pruned_loss=0.12, over 827950.42 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:40:02,794 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=17.78 vs. limit=15.0
+2024-09-01 02:40:27,014 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.27 vs. limit=8.0
+2024-09-01 02:40:30,884 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.27 vs. limit=12.0
+2024-09-01 02:40:39,251 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.20 vs. limit=15.0
+2024-09-01 02:40:39,449 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.09 vs. limit=15.0
+2024-09-01 02:41:15,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=104533.33333333333, ans=0.125
+2024-09-01 02:41:17,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=104533.33333333333, ans=0.0
+2024-09-01 02:41:20,117 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.78 vs. limit=6.0
+2024-09-01 02:41:41,952 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 100, loss[loss=0.2433, simple_loss=0.2411, pruned_loss=0.1111, over 19105.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.2432, pruned_loss=0.1163, over 1472811.51 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:41:43,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=104640.0, ans=0.125
+2024-09-01 02:42:06,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff3.min_abs, batch_count=104693.33333333333, ans=0.2
+2024-09-01 02:42:39,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten.whitening_limit, batch_count=104746.66666666667, ans=15.0
+2024-09-01 02:42:39,968 INFO [dysarthria_finetune.py:1435] (1/4) (13976141824, 34072559616)
+2024-09-01 02:42:39,969 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:42:40,006 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 02:42:52,884 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 0, loss[loss=0.21, simple_loss=0.2125, pruned_loss=0.08962, over 18679.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2125, pruned_loss=0.08962, over 18679.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 4.0
+2024-09-01 02:42:52,884 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:43:16,312 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 8, validation: loss=0.2224, simple_loss=0.2225, pruned_loss=0.09892, over 1073944.00 frames.
+2024-09-01 02:43:16,312 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13802MB
+2024-09-01 02:43:18,011 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.78 vs. limit=12.0
+2024-09-01 02:43:35,829 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.41 vs. limit=22.5
+2024-09-01 02:43:51,416 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.041e+02 2.195e+02 2.485e+02 3.530e+02, threshold=4.390e+02, percent-clipped=0.0
+2024-09-01 02:43:52,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=104842.66666666667, ans=0.125
+2024-09-01 02:44:26,493 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.97 vs. limit=22.5
+2024-09-01 02:45:00,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105002.66666666667, ans=0.1
+2024-09-01 02:45:06,401 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 50, loss[loss=0.2115, simple_loss=0.2135, pruned_loss=0.09232, over 19009.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2365, pruned_loss=0.1107, over 829068.39 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:45:14,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=105056.0, ans=0.125
+2024-09-01 02:46:34,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105109.33333333333, ans=0.125
+2024-09-01 02:46:43,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105109.33333333333, ans=0.1
+2024-09-01 02:46:43,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105109.33333333333, ans=0.1
+2024-09-01 02:47:46,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=17.03 vs. limit=15.0
+2024-09-01 02:47:55,802 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 100, loss[loss=0.2412, simple_loss=0.2401, pruned_loss=0.1112, over 19109.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.2354, pruned_loss=0.1097, over 1473116.98 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:48:01,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=105322.66666666667, ans=0.025
+2024-09-01 02:48:34,268 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 1.930e+02 2.062e+02 2.246e+02 3.148e+02, threshold=4.124e+02, percent-clipped=0.0
+2024-09-01 02:48:55,528 INFO [dysarthria_finetune.py:1435] (1/4) (2892693504, 34072559616)
+2024-09-01 02:48:55,529 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:48:55,597 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 02:49:09,691 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 0, loss[loss=0.2444, simple_loss=0.2384, pruned_loss=0.1195, over 18520.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.2384, pruned_loss=0.1195, over 18520.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:49:09,691 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:49:40,376 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 9, validation: loss=0.211, simple_loss=0.2147, pruned_loss=0.09159, over 1073944.00 frames.
+2024-09-01 02:49:40,377 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13802MB
+2024-09-01 02:49:47,083 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.78 vs. limit=22.5
+2024-09-01 02:50:36,220 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.01 vs. limit=22.5
+2024-09-01 02:50:50,932 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten.whitening_limit, batch_count=105584.0, ans=15.0
+2024-09-01 02:50:55,479 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.03 vs. limit=22.5
+2024-09-01 02:51:33,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:52:18,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=105690.66666666667, ans=0.1
+2024-09-01 02:52:30,683 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 50, loss[loss=0.2645, simple_loss=0.259, pruned_loss=0.1287, over 19008.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2272, pruned_loss=0.1031, over 827563.28 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:52:56,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=105744.0, ans=0.0
+2024-09-01 02:52:56,540 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=11.42 vs. limit=12.0
+2024-09-01 02:53:09,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105797.33333333333, ans=0.1
+2024-09-01 02:53:13,982 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.61 vs. limit=22.5
+2024-09-01 02:53:46,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:53:56,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:54:35,246 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 1.850e+02 1.979e+02 2.143e+02 2.885e+02, threshold=3.959e+02, percent-clipped=0.0
+2024-09-01 02:54:39,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105957.33333333333, ans=0.1
+2024-09-01 02:55:06,099 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 100, loss[loss=0.2255, simple_loss=0.2295, pruned_loss=0.0999, over 19113.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.2282, pruned_loss=0.1036, over 1473118.75 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:55:59,762 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.84 vs. limit=15.0
+2024-09-01 02:56:27,086 INFO [dysarthria_finetune.py:1435] (1/4) (879427584, 34072559616)
+2024-09-01 02:56:27,087 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 02:56:27,156 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 02:56:40,077 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 0, loss[loss=0.2391, simple_loss=0.2381, pruned_loss=0.1126, over 18522.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.2381, pruned_loss=0.1126, over 18522.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:56:40,078 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 02:56:45,717 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.3437, 1.5189, 3.2162, 3.0289], device='cuda:1')
+2024-09-01 02:57:03,512 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 10, validation: loss=0.2075, simple_loss=0.2129, pruned_loss=0.09054, over 1073944.00 frames.
+2024-09-01 02:57:03,512 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13802MB
+2024-09-01 02:57:11,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=106165.33333333333, ans=0.125
+2024-09-01 02:57:30,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-09-01 02:57:52,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106272.0, ans=0.1
+2024-09-01 02:57:55,131 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.90 vs. limit=22.5
+2024-09-01 02:58:03,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=106272.0, ans=0.025
+2024-09-01 02:58:12,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:58:14,390 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:58:18,797 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:58:54,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=106432.0, ans=0.125
+2024-09-01 02:58:54,139 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106432.0, ans=0.1
+2024-09-01 02:58:55,057 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 50, loss[loss=0.2182, simple_loss=0.2253, pruned_loss=0.09465, over 18973.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2246, pruned_loss=0.1008, over 826863.11 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:58:58,981 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=30.43 vs. limit=22.5
+2024-09-01 02:59:20,727 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.769e+02 1.897e+02 2.105e+02 2.891e+02, threshold=3.793e+02, percent-clipped=0.0
+2024-09-01 02:59:22,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106485.33333333333, ans=0.125
+2024-09-01 02:59:43,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=106538.66666666667, ans=0.0
+2024-09-01 02:59:44,106 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.31 vs. limit=15.0
+2024-09-01 03:00:09,285 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=106592.0, ans=0.09899494936611666
+2024-09-01 03:00:09,662 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.02 vs. limit=15.0
+2024-09-01 03:00:24,548 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.44 vs. limit=15.0
+2024-09-01 03:00:41,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=106698.66666666667, ans=0.125
+2024-09-01 03:00:42,960 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 100, loss[loss=0.2084, simple_loss=0.2207, pruned_loss=0.08569, over 19188.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2207, pruned_loss=0.09663, over 1472464.39 frames. ], batch size: 134, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 03:00:57,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106698.66666666667, ans=0.1
+2024-09-01 03:01:01,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=106698.66666666667, ans=0.125
+2024-09-01 03:01:46,984 INFO [dysarthria_finetune.py:1435] (1/4) (13751746560, 34072559616)
+2024-09-01 03:01:46,985 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:01:47,028 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
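The paired `(free_bytes, total_bytes)` lines around "Empty cache: before and after" bracket a `torch.cuda.empty_cache()` call at each epoch boundary: free memory rises here from ~13.8 GB to ~29.3 GB once PyTorch's cached blocks are returned to the driver. The pattern, sketched (the exact logger calls are assumptions):

```python
import logging
import torch

def empty_cache_with_logging(device: torch.device) -> None:
    """Release cached CUDA blocks, logging (free, total) bytes around the call."""
    logging.info("%s", torch.cuda.mem_get_info(device))  # e.g. (13751746560, 34072559616)
    logging.info("Empty cache: before and after")
    torch.cuda.empty_cache()                             # hand cached blocks back to the driver
    logging.info("%s", torch.cuda.mem_get_info(device))  # free count goes up
```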
+2024-09-01 03:08:00,726 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 0, loss[loss=0.2229, simple_loss=0.2207, pruned_loss=0.1079, over 18704.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2207, pruned_loss=0.1079, over 18704.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:08:00,726 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:08:32,550 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 11, validation: loss=0.2002, simple_loss=0.2088, pruned_loss=0.08618, over 1073944.00 frames.
+2024-09-01 03:08:32,551 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13802MB
+2024-09-01 03:09:34,688 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106906.66666666667, ans=0.125
+2024-09-01 03:09:39,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=106906.66666666667, ans=0.2
+2024-09-01 03:10:17,185 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.20 vs. limit=10.0
+2024-09-01 03:10:19,684 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=2.065e-02
+2024-09-01 03:10:42,588 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.721e+02 1.824e+02 2.016e+02 2.682e+02, threshold=3.648e+02, percent-clipped=0.0
+2024-09-01 03:10:51,141 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.97 vs. limit=15.0
+2024-09-01 03:11:27,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107066.66666666667, ans=0.0
+2024-09-01 03:12:03,915 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 50, loss[loss=0.2246, simple_loss=0.2287, pruned_loss=0.1031, over 18947.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2186, pruned_loss=0.09438, over 828704.78 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:12:09,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=107120.0, ans=0.07
+2024-09-01 03:12:17,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=107120.0, ans=0.125
+2024-09-01 03:13:48,074 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=17.65 vs. limit=15.0
+2024-09-01 03:13:52,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-09-01 03:13:52,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=107173.33333333333, ans=0.125
+2024-09-01 03:15:13,688 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107333.33333333333, ans=0.1
+2024-09-01 03:15:56,724 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 100, loss[loss=0.2048, simple_loss=0.2119, pruned_loss=0.09169, over 19147.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2164, pruned_loss=0.09262, over 1473582.76 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:16:04,623 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.76 vs. limit=15.0
+2024-09-01 03:17:23,526 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=9.56 vs. limit=12.0
+2024-09-01 03:17:56,543 INFO [dysarthria_finetune.py:1435] (1/4) (761987072, 34072559616)
+2024-09-01 03:17:56,543 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:17:56,610 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 03:18:09,585 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 0, loss[loss=0.2491, simple_loss=0.2421, pruned_loss=0.1257, over 18735.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.2421, pruned_loss=0.1257, over 18735.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:18:09,585 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:18:33,051 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 12, validation: loss=0.1929, simple_loss=0.2049, pruned_loss=0.0821, over 1073944.00 frames.
+2024-09-01 03:18:33,051 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 03:18:54,484 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 1.683e+02 1.764e+02 1.920e+02 2.754e+02, threshold=3.529e+02, percent-clipped=0.0
+2024-09-01 03:19:05,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=107594.66666666667, ans=0.125
+2024-09-01 03:19:13,069 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.42 vs. limit=15.0
+2024-09-01 03:19:20,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=107594.66666666667, ans=10.0
+2024-09-01 03:19:28,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-09-01 03:20:27,107 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.24 vs. limit=22.5
+2024-09-01 03:20:40,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-09-01 03:21:00,430 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.88 vs. limit=22.5
+2024-09-01 03:21:03,705 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 50, loss[loss=0.191, simple_loss=0.2034, pruned_loss=0.08188, over 18974.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2168, pruned_loss=0.09266, over 827168.58 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:21:26,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=107808.0, ans=0.125
+2024-09-01 03:22:11,991 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=107861.33333333333, ans=0.025
+2024-09-01 03:23:19,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107968.0, ans=0.1
+2024-09-01 03:23:36,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=108021.33333333333, ans=0.05
+2024-09-01 03:24:01,733 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 100, loss[loss=0.1954, simple_loss=0.2087, pruned_loss=0.08413, over 19114.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2153, pruned_loss=0.09222, over 1473649.48 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:24:05,439 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:24:21,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108074.66666666667, ans=0.1
+2024-09-01 03:24:21,991 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.650e+02 1.753e+02 1.928e+02 2.697e+02, threshold=3.507e+02, percent-clipped=0.0
+2024-09-01 03:25:11,655 INFO [dysarthria_finetune.py:1435] (1/4) (13963558912, 34072559616)
+2024-09-01 03:25:11,656 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:25:11,707 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 03:25:24,762 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 0, loss[loss=0.2518, simple_loss=0.2478, pruned_loss=0.1254, over 18361.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.2478, pruned_loss=0.1254, over 18361.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:25:24,763 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:25:48,266 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 13, validation: loss=0.1886, simple_loss=0.2026, pruned_loss=0.08078, over 1073944.00 frames.
+2024-09-01 03:25:48,267 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 03:26:03,848 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=11.36 vs. limit=15.0
+2024-09-01 03:26:17,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108282.66666666667, ans=0.1
+2024-09-01 03:26:25,348 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 03:26:34,428 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=108336.0, ans=0.09899494936611666
+2024-09-01 03:26:49,068 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.94 vs. limit=15.0
+2024-09-01 03:27:30,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108442.66666666667, ans=0.1
+2024-09-01 03:27:54,665 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 50, loss[loss=0.1767, simple_loss=0.2053, pruned_loss=0.0651, over 19011.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2082, pruned_loss=0.08648, over 828396.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 1.0
+2024-09-01 03:27:58,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=108496.0, ans=0.125
+2024-09-01 03:28:16,752 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.87 vs. limit=22.5
+2024-09-01 03:28:39,013 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108602.66666666667, ans=0.1
+2024-09-01 03:28:59,937 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108602.66666666667, ans=0.1
+2024-09-01 03:29:07,377 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.617e+02 1.723e+02 2.007e+02 2.594e+02, threshold=3.446e+02, percent-clipped=0.0
+2024-09-01 03:29:43,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 03:29:46,152 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 100, loss[loss=0.1983, simple_loss=0.2085, pruned_loss=0.08984, over 19217.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2076, pruned_loss=0.08571, over 1472353.64 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 2.0
+2024-09-01 03:29:47,411 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:30:04,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 03:30:40,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=108869.33333333333, ans=0.125
+2024-09-01 03:30:46,121 INFO [dysarthria_finetune.py:1435] (1/4) (13737066496, 34072559616)
+2024-09-01 03:30:46,122 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:30:46,171 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 03:30:58,986 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 0, loss[loss=0.2251, simple_loss=0.2279, pruned_loss=0.1084, over 18619.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2279, pruned_loss=0.1084, over 18619.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:30:58,986 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:31:18,493 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.0223, 4.3421, 4.7493, 3.7136], device='cuda:1')
+2024-09-01 03:31:23,178 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 14, validation: loss=0.1833, simple_loss=0.2, pruned_loss=0.07856, over 1073944.00 frames.
+2024-09-01 03:31:23,179 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 03:31:38,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 03:31:38,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=108917.33333333333, ans=0.0
+2024-09-01 03:31:40,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 03:31:47,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=108970.66666666667, ans=0.2
+2024-09-01 03:31:58,302 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.21 vs. limit=22.5
+2024-09-01 03:32:08,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=109024.0, ans=0.0
+2024-09-01 03:32:24,218 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109024.0, ans=0.1
+2024-09-01 03:32:57,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=109130.66666666667, ans=0.125
+2024-09-01 03:33:13,475 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 50, loss[loss=0.2199, simple_loss=0.2364, pruned_loss=0.09745, over 19004.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.2079, pruned_loss=0.0854, over 826629.84 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:33:19,742 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.619e+02 1.722e+02 1.984e+02 2.668e+02, threshold=3.445e+02, percent-clipped=0.0
+2024-09-01 03:33:59,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=109290.66666666667, ans=0.125
+2024-09-01 03:34:10,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=109290.66666666667, ans=0.125
+2024-09-01 03:34:41,075 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.34 vs. limit=15.0
+2024-09-01 03:35:00,688 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 100, loss[loss=0.1848, simple_loss=0.2069, pruned_loss=0.0775, over 19114.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2058, pruned_loss=0.08364, over 1472155.55 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 8.0
+2024-09-01 03:35:12,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=109450.66666666667, ans=0.2
+2024-09-01 03:35:14,752 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.28 vs. limit=15.0
+2024-09-01 03:35:16,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=109450.66666666667, ans=0.125
+2024-09-01 03:35:29,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109504.0, ans=0.125
+2024-09-01 03:35:35,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109504.0, ans=0.1
+2024-09-01 03:35:41,823 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109557.33333333333, ans=0.125
+2024-09-01 03:35:43,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=109557.33333333333, ans=0.0
+2024-09-01 03:35:50,275 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=2.634e-03
+2024-09-01 03:36:00,185 INFO [dysarthria_finetune.py:1435] (1/4) (13764329472, 34072559616)
+2024-09-01 03:36:00,186 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:36:00,233 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 03:36:14,210 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 0, loss[loss=0.2063, simple_loss=0.2099, pruned_loss=0.09989, over 18480.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2099, pruned_loss=0.09989, over 18480.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:36:14,210 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:36:45,412 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 15, validation: loss=0.1765, simple_loss=0.1963, pruned_loss=0.07531, over 1073944.00 frames.
+2024-09-01 03:36:45,413 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 03:36:52,292 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.42 vs. limit=6.0
+2024-09-01 03:37:05,815 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=109605.33333333333, ans=0.04949747468305833
+2024-09-01 03:37:18,330 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=11.08 vs. limit=12.0
+2024-09-01 03:37:54,896 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.64 vs. limit=12.0
+2024-09-01 03:38:03,357 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.579e+02 1.672e+02 1.908e+02 2.431e+02, threshold=3.343e+02, percent-clipped=0.0
+2024-09-01 03:38:04,830 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:38:05,279 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.71 vs. limit=10.0
+2024-09-01 03:38:07,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.64 vs. limit=6.0
+2024-09-01 03:38:52,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=109818.66666666667, ans=0.125
+2024-09-01 03:39:14,738 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 50, loss[loss=0.2065, simple_loss=0.2209, pruned_loss=0.09415, over 19020.00 frames. ], tot_loss[loss=0.1876, simple_loss=0.2044, pruned_loss=0.08297, over 827766.05 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:39:22,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109872.0, ans=0.1
+2024-09-01 03:39:26,044 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.28 vs. limit=15.0
+2024-09-01 03:41:37,698 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:41:54,226 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 100, loss[loss=0.1575, simple_loss=0.1791, pruned_loss=0.06648, over 19074.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2046, pruned_loss=0.0824, over 1471681.17 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 8.0
+2024-09-01 03:42:37,655 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.54 vs. limit=15.0
+2024-09-01 03:43:21,354 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.548e+02 1.650e+02 1.862e+02 2.617e+02, threshold=3.300e+02, percent-clipped=0.0
+2024-09-01 03:43:24,588 INFO [dysarthria_finetune.py:1435] (1/4) (751501312, 34072559616)
+2024-09-01 03:43:24,589 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:43:24,672 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 03:43:40,066 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 0, loss[loss=0.2092, simple_loss=0.2236, pruned_loss=0.0965, over 18847.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2236, pruned_loss=0.0965, over 18847.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:43:40,066 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:44:25,988 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 16, validation: loss=0.1763, simple_loss=0.1967, pruned_loss=0.07691, over 1073944.00 frames.
+2024-09-01 03:44:25,988 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 03:45:29,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=110346.66666666667, ans=0.125
+2024-09-01 03:46:19,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=110400.0, ans=0.125
+2024-09-01 03:46:31,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=110400.0, ans=0.025
+2024-09-01 03:47:45,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=110453.33333333333, ans=0.125
+2024-09-01 03:47:45,932 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=1.033e-02
+2024-09-01 03:48:04,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=110506.66666666667, ans=0.0
+2024-09-01 03:48:37,719 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 50, loss[loss=0.195, simple_loss=0.2132, pruned_loss=0.08816, over 19018.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2022, pruned_loss=0.08131, over 827868.27 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 03:48:48,135 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.42 vs. limit=15.0
+2024-09-01 03:49:14,931 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.15 vs. limit=15.0
+2024-09-01 03:49:45,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=110613.33333333333, ans=0.025
+2024-09-01 03:49:45,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=110613.33333333333, ans=0.125
+2024-09-01 03:51:00,306 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.55 vs. limit=15.0
+2024-09-01 03:52:02,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=110773.33333333333, ans=0.125
+2024-09-01 03:52:25,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-09-01 03:52:26,724 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.555e+02 1.657e+02 1.896e+02 2.445e+02, threshold=3.314e+02, percent-clipped=0.0
+2024-09-01 03:52:31,257 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 100, loss[loss=0.1459, simple_loss=0.1778, pruned_loss=0.05702, over 19118.00 frames. ], tot_loss[loss=0.1818, simple_loss=0.2022, pruned_loss=0.08043, over 1473208.83 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:52:49,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:52:53,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=110826.66666666667, ans=0.2
+2024-09-01 03:54:05,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=9.16 vs. limit=15.0
+2024-09-01 03:54:07,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=110933.33333333333, ans=0.0
+2024-09-01 03:54:10,877 INFO [dysarthria_finetune.py:1435] (1/4) (1116405760, 34072559616)
+2024-09-01 03:54:10,878 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 03:54:10,950 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 03:54:38,950 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 0, loss[loss=0.2221, simple_loss=0.2402, pruned_loss=0.102, over 18527.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2402, pruned_loss=0.102, over 18527.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:54:38,951 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 03:54:42,525 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.2105, 1.5260, 3.5489, 3.2683], device='cuda:1')
+2024-09-01 03:54:57,218 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([4.0398, 3.2854, 3.4219, 3.2284], device='cuda:1')
+2024-09-01 03:55:19,882 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 17, validation: loss=0.1671, simple_loss=0.1912, pruned_loss=0.07151, over 1073944.00 frames.
+2024-09-01 03:55:19,882 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 03:55:35,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=110981.33333333333, ans=0.125
+2024-09-01 03:58:57,666 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 50, loss[loss=0.1555, simple_loss=0.1911, pruned_loss=0.05993, over 19037.00 frames. ], tot_loss[loss=0.1791, simple_loss=0.1997, pruned_loss=0.07926, over 827680.99 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 04:00:12,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=111248.0, ans=0.0
+2024-09-01 04:00:12,891 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff3.min_abs, batch_count=111248.0, ans=0.2
+2024-09-01 04:01:02,144 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.555e+02 1.659e+02 1.888e+02 2.626e+02, threshold=3.319e+02, percent-clipped=0.0
+2024-09-01 04:01:58,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=111408.0, ans=0.125
+2024-09-01 04:02:01,734 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.77 vs. limit=15.0
+2024-09-01 04:02:48,335 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 100, loss[loss=0.1459, simple_loss=0.1774, pruned_loss=0.05723, over 19067.00 frames. ], tot_loss[loss=0.175, simple_loss=0.1964, pruned_loss=0.0768, over 1472664.14 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 04:04:10,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=111621.33333333333, ans=0.1
+2024-09-01 04:05:21,543 INFO [dysarthria_finetune.py:1435] (1/4) (13766426624, 34072559616)
+2024-09-01 04:05:21,544 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 04:05:21,588 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 04:05:29,553 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 04:05:38,878 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 0, loss[loss=0.2076, simple_loss=0.2183, pruned_loss=0.09838, over 18622.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2183, pruned_loss=0.09838, over 18622.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:05:38,879 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 04:06:14,844 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 18, validation: loss=0.1676, simple_loss=0.191, pruned_loss=0.07213, over 1073944.00 frames.
+2024-09-01 04:06:14,844 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 04:06:34,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=111669.33333333333, ans=0.125
+2024-09-01 04:08:04,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=111776.0, ans=0.0
+2024-09-01 04:09:16,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=111829.33333333333, ans=0.125
+2024-09-01 04:09:32,364 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.87 vs. limit=15.0
+2024-09-01 04:09:42,029 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 1.516e+02 1.635e+02 1.895e+02 3.024e+02, threshold=3.269e+02, percent-clipped=0.0
+2024-09-01 04:10:09,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=111936.0, ans=0.0
+2024-09-01 04:10:10,259 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 50, loss[loss=0.1811, simple_loss=0.2012, pruned_loss=0.08052, over 19026.00 frames. ], tot_loss[loss=0.1757, simple_loss=0.1963, pruned_loss=0.07754, over 826500.31 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:10:23,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111936.0, ans=0.1
+2024-09-01 04:13:28,356 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.98 vs. limit=15.0
+2024-09-01 04:13:38,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.55 vs. limit=22.5
+2024-09-01 04:13:44,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112149.33333333333, ans=0.0
+2024-09-01 04:14:13,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=112202.66666666667, ans=0.125
+2024-09-01 04:14:13,997 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 100, loss[loss=0.1574, simple_loss=0.1868, pruned_loss=0.06402, over 19036.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.1946, pruned_loss=0.07588, over 1471672.61 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:14:49,118 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=112256.0, ans=0.125
+2024-09-01 04:14:58,175 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.53 vs. limit=22.5
+2024-09-01 04:14:59,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=112256.0, ans=0.125
+2024-09-01 04:15:13,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112309.33333333333, ans=0.1
+2024-09-01 04:15:56,099 INFO [dysarthria_finetune.py:1435] (1/4) (403374080, 34072559616)
+2024-09-01 04:15:56,100 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 04:15:56,172 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 04:16:15,956 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 0, loss[loss=0.1961, simple_loss=0.2182, pruned_loss=0.08699, over 18691.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2182, pruned_loss=0.08699, over 18691.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:16:15,956 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 04:16:39,406 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 19, validation: loss=0.1638, simple_loss=0.1883, pruned_loss=0.06968, over 1073944.00 frames.
+2024-09-01 04:16:39,407 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 04:17:22,803 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.531e+02 1.615e+02 1.818e+02 2.373e+02, threshold=3.231e+02, percent-clipped=0.0
+2024-09-01 04:17:26,734 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.47 vs. limit=15.0
+2024-09-01 04:18:02,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112458.66666666667, ans=0.1
+2024-09-01 04:18:30,549 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=13.21 vs. limit=12.0
+2024-09-01 04:18:39,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=112565.33333333333, ans=0.07
+2024-09-01 04:18:43,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 04:19:10,122 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 50, loss[loss=0.1958, simple_loss=0.217, pruned_loss=0.08734, over 18976.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.1954, pruned_loss=0.07605, over 828010.25 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 4.0
+2024-09-01 04:19:25,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112618.66666666667, ans=0.1
+2024-09-01 04:19:38,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-09-01 04:19:53,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-09-01 04:19:58,615 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.67 vs. limit=15.0
+2024-09-01 04:20:16,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=112725.33333333333, ans=0.025
+2024-09-01 04:20:33,974 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.70 vs. limit=15.0
+2024-09-01 04:20:36,553 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.17 vs. limit=15.0
+2024-09-01 04:21:10,111 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 100, loss[loss=0.1588, simple_loss=0.182, pruned_loss=0.0678, over 19118.00 frames. ], tot_loss[loss=0.1723, simple_loss=0.1947, pruned_loss=0.07491, over 1474453.83 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:21:15,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 04:21:17,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-09-01 04:21:24,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=112885.33333333333, ans=0.07
+2024-09-01 04:21:38,233 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.62 vs. limit=6.0
+2024-09-01 04:21:40,254 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.21 vs. limit=15.0
+2024-09-01 04:21:54,610 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.501e+02 1.584e+02 1.820e+02 2.268e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-09-01 04:22:00,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=112992.0, ans=0.025
+2024-09-01 04:22:06,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=112992.0, ans=0.125
+2024-09-01 04:22:11,531 INFO [dysarthria_finetune.py:1435] (1/4) (72024064, 34072559616)
+2024-09-01 04:22:11,532 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 04:22:11,605 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 04:22:26,680 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 0, loss[loss=0.2134, simple_loss=0.2294, pruned_loss=0.09876, over 18758.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2294, pruned_loss=0.09876, over 18758.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:22:26,681 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-09-01 04:22:50,267 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 20, validation: loss=0.1638, simple_loss=0.1875, pruned_loss=0.07, over 1073944.00 frames.
+2024-09-01 04:22:50,268 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 14301MB
+2024-09-01 04:22:58,611 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.88 vs. limit=15.0
+2024-09-01 04:23:06,025 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.89 vs. limit=5.0
+2024-09-01 04:23:11,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=113093.33333333333, ans=0.125
+2024-09-01 04:23:39,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.80 vs. limit=8.0
+2024-09-01 04:24:22,629 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=113200.0, ans=0.0
+2024-09-01 04:24:57,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=113253.33333333333, ans=0.125
+2024-09-01 04:25:07,014 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 50, loss[loss=0.1729, simple_loss=0.1966, pruned_loss=0.07457, over 19069.00 frames. ], tot_loss[loss=0.1724, simple_loss=0.193, pruned_loss=0.07585, over 828644.17 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 4.0
+2024-09-01 04:26:18,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=113413.33333333333, ans=0.04949747468305833
+2024-09-01 04:26:50,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=113466.66666666667, ans=0.2
+2024-09-01 04:27:02,097 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=113520.0, ans=0.125
+2024-09-01 04:27:05,364 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.522e+02 1.605e+02 1.869e+02 2.652e+02, threshold=3.210e+02, percent-clipped=0.0
+2024-09-01 04:27:27,154 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 100, loss[loss=0.1396, simple_loss=0.1607, pruned_loss=0.05925, over 19104.00 frames. ], tot_loss[loss=0.17, simple_loss=0.1913, pruned_loss=0.07439, over 1473557.06 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:28:00,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=113626.66666666667, ans=0.0
+2024-09-01 04:28:06,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 04:28:26,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=113680.0, ans=0.0
+2024-09-01 04:28:43,103 INFO [dysarthria_finetune.py:1435] (1/4) (13755940864, 34072559616)
+2024-09-01 04:28:43,103 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-09-01 04:28:43,134 INFO [dysarthria_finetune.py:1440] (1/4) (29300031488, 34072559616)
+2024-09-01 04:28:43,134 INFO [dysarthria_finetune.py:1442] (1/4) Done!
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-2 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-2
new file mode 100644
index 0000000000000000000000000000000000000000..2ebac7c714efbce8c2788e0597f4e5edc242ecdf
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-2
@@ -0,0 +1,544 @@
+2024-08-31 13:16:10,955 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-31 13:16:10,986 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-31 13:16:10,986 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-31 13:16:11,955 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-31 13:16:11,956 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
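Given `base_lr 0.0001`, `lr_batches 100000.0`, and `lr_epochs 100.0` above, the lr values printed below (5.01e-05 at epoch 1 batch 0, 6.01e-05 by batch 100, then ~9.9e-05 late in training) are consistent, to within the last digit, with icefall's Eden schedule plus a short linear warmup. A reconstruction under the assumption of a 500-batch warmup starting at half the target rate:

```python
def eden_lr(base_lr: float, batch: int, epoch: float,
            lr_batches: float = 100000.0, lr_epochs: float = 100.0,
            warmup_batches: float = 500.0, warmup_start: float = 0.5) -> float:
    """Eden-style learning rate as a function of batch index and epoch."""
    decay = (((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
             * ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25)
    warmup = 1.0 if batch >= warmup_batches else (
        warmup_start + (1.0 - warmup_start) * batch / warmup_batches)
    return base_lr * decay * warmup

# eden_lr(1e-4, batch=0, epoch=1)   ~ 5.0e-05  (log: "lr: 5.01e-05")
# eden_lr(1e-4, batch=100, epoch=1) ~ 6.0e-05  (log: "lr: 6.01e-05")
```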
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-31 13:16:14,948 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 66110931
+2024-08-31 13:17:58,666 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-31 13:18:23,852 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-31 13:20:29,532 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-31 13:20:34,906 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:20:35,066 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-31 13:20:35,066 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-31 13:20:35,066 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-31 13:20:35,067 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-31 13:20:35,067 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-31 13:20:35,147 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-31 13:20:36,077 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-31 13:20:36,078 INFO [dysarthria_asr_datamodule.py:501] (2/4) About to get dev cuts
+2024-08-31 13:20:36,079 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-31 13:20:36,396 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-31 13:20:36,397 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
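The sanity check announced above pushes the most frame-heavy batch from each duration bucket through a forward/backward pass, so an out-of-memory failure surfaces immediately rather than hours into epoch 1 (the whitening and memory lines that follow are emitted during this scan). icefall names this `scan_pessimistic_batches_for_oom`; the version below is a sketch of the idea, and `compute_loss`, the batch layout, and the error message are placeholders, not the actual signatures:

```python
def scan_pessimistic_batches_for_oom(model, compute_loss, worst_batches) -> None:
    """Forward/backward the largest batch per bucket to provoke any OOM early."""
    for batch in worst_batches:
        try:
            loss = compute_loss(model, batch)
            loss.backward()
            model.zero_grad()
        except RuntimeError as e:
            if "CUDA out of memory" in str(e):
                raise RuntimeError(
                    "OOM on a maximum-size batch; reduce --max-duration"
                ) from e
            raise
```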
+2024-08-31 13:44:09,211 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=4.96 vs. limit=5.0
+2024-08-31 13:44:10,210 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.49 vs. limit=7.5
+2024-08-31 13:44:14,874 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-31 13:45:00,401 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.53 vs. limit=7.5
+2024-08-31 13:45:00,957 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-31 13:47:50,038 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-31 13:47:52,427 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-31 13:50:16,266 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.99 vs. limit=3.0
+2024-08-31 13:50:20,328 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-31 13:50:22,577 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 12660MB
+2024-08-31 13:51:23,098 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], tot_loss[loss=0.3828, simple_loss=0.3613, pruned_loss=0.2142, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 13:51:23,099 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-31 14:29:03,504 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-31 14:29:03,505 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19764MB
+2024-08-31 15:07:09,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-31 15:19:05,836 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=38.72 vs. limit=15.0
+2024-08-31 15:24:12,382 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=100053.33333333333, ans=0.0
+2024-08-31 15:24:16,876 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-31 15:52:39,439 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.96 vs. limit=15.0
+2024-08-31 15:52:42,142 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.685e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-31 16:23:44,790 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.29 vs. limit=15.0
+2024-08-31 16:29:14,744 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=22.00 vs. limit=15.0
+2024-08-31 16:32:13,906 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 6.192e+02 7.846e+02 8.685e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-31 16:38:41,438 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.02 vs. limit=15.0
+2024-08-31 17:00:11,169 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.24 vs. limit=22.5
+2024-08-31 17:02:30,137 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.4013, simple_loss=0.3799, pruned_loss=0.2136, over 19018.00 frames. ], tot_loss[loss=0.3889, simple_loss=0.367, pruned_loss=0.2174, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 17:07:59,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=100266.66666666667, ans=0.04949747468305833
+2024-08-31 17:10:25,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-31 18:05:58,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-08-31 18:12:52,331 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.982e+02 7.682e+02 8.607e+02 1.055e+03, threshold=1.536e+03, percent-clipped=0.0
+2024-08-31 18:12:52,368 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3537, simple_loss=0.3356, pruned_loss=0.1829, over 19117.00 frames. ], tot_loss[loss=0.3755, simple_loss=0.3547, pruned_loss=0.2061, over 1475925.13 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-31 18:34:00,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=100586.66666666667, ans=0.1
+2024-08-31 18:36:32,007 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.81 vs. limit=15.0
+2024-08-31 18:42:25,150 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.32 vs. limit=15.0
+2024-08-31 18:44:19,277 INFO [dysarthria_finetune.py:1435] (2/4) (10291445760, 34072559616)
+2024-08-31 18:44:19,278 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-31 18:44:19,335 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-08-31 18:46:01,814 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 0, loss[loss=0.3342, simple_loss=0.3158, pruned_loss=0.1813, over 18502.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3158, pruned_loss=0.1813, over 18502.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-31 18:46:01,814 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-31 19:10:08,813 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 2, validation: loss=0.3307, simple_loss=0.3141, pruned_loss=0.1687, over 1073944.00 frames.
+2024-08-31 19:10:08,814 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-08-31 19:45:24,956 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 19:45:32,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=100736.0, ans=0.125
+2024-08-31 19:46:04,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100736.0, ans=0.125
+2024-08-31 19:51:06,993 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-31 20:01:36,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-31 20:15:05,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=100842.66666666667, ans=0.0
+2024-08-31 20:18:22,898 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.34 vs. limit=6.0
+2024-08-31 20:20:09,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-31 20:23:29,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=100896.0, ans=0.125
+2024-08-31 20:23:30,000 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=7.39 vs. limit=12.0
+2024-08-31 20:31:44,822 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 50, loss[loss=0.3633, simple_loss=0.3437, pruned_loss=0.1937, over 18952.00 frames. ], tot_loss[loss=0.3583, simple_loss=0.3386, pruned_loss=0.1935, over 829638.79 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-31 20:33:22,761 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=20.15 vs. limit=15.0
+2024-08-31 20:42:33,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=101002.66666666667, ans=0.0
+2024-08-31 21:01:03,918 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=101056.0, ans=0.125
+2024-08-31 21:03:09,438 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.367e+02 4.995e+02 5.661e+02 6.268e+02 7.321e+02, threshold=1.132e+03, percent-clipped=0.0
+2024-08-31 21:18:49,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-31 21:20:09,275 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=35.40 vs. limit=22.5
+2024-08-31 21:20:57,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=101109.33333333333, ans=0.2
+2024-08-31 21:37:51,800 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=101162.66666666667, ans=0.2
+2024-08-31 21:42:13,844 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 100, loss[loss=0.2879, simple_loss=0.2751, pruned_loss=0.1406, over 19108.00 frames. ], tot_loss[loss=0.3452, simple_loss=0.3268, pruned_loss=0.1827, over 1476292.15 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 4.0
+2024-08-31 21:42:47,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=101216.0, ans=0.125
+2024-08-31 21:46:33,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=101216.0, ans=0.125
+2024-08-31 21:47:44,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=101216.0, ans=0.125
+2024-08-31 22:08:22,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=101269.33333333333, ans=0.2
+2024-08-31 22:10:11,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101269.33333333333, ans=0.1
+2024-08-31 22:10:43,024 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=14.18 vs. limit=12.0
+2024-08-31 22:17:06,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-08-31 22:21:40,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=101322.66666666667, ans=0.2
+2024-08-31 22:21:43,990 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-08-31 22:21:43,991 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-31 22:21:44,034 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-08-31 22:22:35,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-08-31 22:22:38,992 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 0, loss[loss=0.3511, simple_loss=0.3323, pruned_loss=0.186, over 18600.00 frames. ], tot_loss[loss=0.3511, simple_loss=0.3323, pruned_loss=0.186, over 18600.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 2.0
+2024-08-31 22:22:38,993 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-31 22:31:34,596 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 3, validation: loss=0.2979, simple_loss=0.2853, pruned_loss=0.1432, over 1073944.00 frames.
+2024-08-31 22:31:34,955 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-08-31 22:50:30,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=101424.0, ans=0.0
+2024-08-31 22:50:30,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=101424.0, ans=0.125
+2024-08-31 22:50:30,704 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.53 vs. limit=15.0
+2024-08-31 22:52:38,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=101424.0, ans=0.09899494936611666
+2024-08-31 22:53:17,913 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.02 vs. limit=15.0
+2024-08-31 23:00:56,715 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-08-31 23:26:27,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101584.0, ans=0.1
+2024-08-31 23:30:40,086 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.802e+02 3.787e+02 4.308e+02 4.929e+02 6.122e+02, threshold=8.616e+02, percent-clipped=0.0
+2024-08-31 23:32:42,301 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 50, loss[loss=0.3083, simple_loss=0.2955, pruned_loss=0.1491, over 19168.00 frames. ], tot_loss[loss=0.3293, simple_loss=0.313, pruned_loss=0.1694, over 828229.52 frames. ], batch size: 103, lr: 8.08e-05, grad_scale: 1.0
+2024-08-31 23:35:36,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=101637.33333333333, ans=0.025
+2024-08-31 23:41:39,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101637.33333333333, ans=0.125
+2024-08-31 23:54:21,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=101690.66666666667, ans=0.0
+2024-09-01 00:00:05,915 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=21.52 vs. limit=15.0
+2024-09-01 00:02:14,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=101744.0, ans=0.0
+2024-09-01 00:03:32,109 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.59 vs. limit=6.0
+2024-09-01 00:07:29,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101797.33333333333, ans=0.0
+2024-09-01 00:12:55,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=101850.66666666667, ans=0.125
+2024-09-01 00:13:17,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101850.66666666667, ans=0.1
+2024-09-01 00:17:08,370 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 100, loss[loss=0.2584, simple_loss=0.2495, pruned_loss=0.1197, over 19024.00 frames. ], tot_loss[loss=0.3203, simple_loss=0.3047, pruned_loss=0.1636, over 1476045.82 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 1.0
+2024-09-01 00:18:28,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=101904.0, ans=0.2
+2024-09-01 00:18:28,725 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.59 vs. limit=15.0
+2024-09-01 00:18:28,834 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.77 vs. limit=6.0
+2024-09-01 00:21:40,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=101957.33333333333, ans=0.0
+2024-09-01 00:22:17,901 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.89 vs. limit=15.0
+2024-09-01 00:26:12,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=102010.66666666667, ans=0.125
+2024-09-01 00:27:26,437 INFO [dysarthria_finetune.py:1435] (2/4) (10310320128, 34072559616)
+2024-09-01 00:27:26,438 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 00:27:26,476 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 00:27:42,756 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 0, loss[loss=0.3144, simple_loss=0.2996, pruned_loss=0.1597, over 18618.00 frames. ], tot_loss[loss=0.3144, simple_loss=0.2996, pruned_loss=0.1597, over 18618.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 2.0
+2024-09-01 00:27:42,756 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 00:46:27,436 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 4, validation: loss=0.279, simple_loss=0.2687, pruned_loss=0.1325, over 1073944.00 frames.
+2024-09-01 00:46:27,437 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 01:00:56,118 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102053.33333333333, ans=0.125
+2024-09-01 01:00:56,295 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.50 vs. limit=15.0
+2024-09-01 01:16:34,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-09-01 01:16:52,154 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.614e+02 3.221e+02 3.659e+02 4.077e+02 5.349e+02, threshold=7.318e+02, percent-clipped=0.0
+2024-09-01 01:20:07,287 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.53 vs. limit=6.0
+2024-09-01 01:21:05,790 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.63 vs. limit=12.0
+2024-09-01 01:29:34,895 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=17.51 vs. limit=15.0
+2024-09-01 01:39:48,725 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 50, loss[loss=0.3026, simple_loss=0.2892, pruned_loss=0.1517, over 18961.00 frames. ], tot_loss[loss=0.2986, simple_loss=0.2862, pruned_loss=0.1468, over 828488.26 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 0.5
+2024-09-01 01:43:03,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=102320.0, ans=0.2
+2024-09-01 01:43:14,348 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=102320.0, ans=0.07
+2024-09-01 01:45:34,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=102373.33333333333, ans=0.2
+2024-09-01 01:48:00,220 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:55:59,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=102480.0, ans=0.125
+2024-09-01 01:57:53,370 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=102533.33333333333, ans=0.025
+2024-09-01 01:58:37,621 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.73 vs. limit=15.0
+2024-09-01 01:59:02,839 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.68 vs. limit=15.0
+2024-09-01 02:00:16,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=102533.33333333333, ans=0.0
+2024-09-01 02:01:07,739 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 100, loss[loss=0.2707, simple_loss=0.2609, pruned_loss=0.1302, over 19090.00 frames. ], tot_loss[loss=0.2952, simple_loss=0.283, pruned_loss=0.1455, over 1476821.49 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 1.0
+2024-09-01 02:02:19,072 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.37 vs. limit=15.0
+2024-09-01 02:09:39,920 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 2.859e+02 3.213e+02 3.589e+02 4.738e+02, threshold=6.426e+02, percent-clipped=0.0
+2024-09-01 02:10:16,953 INFO [dysarthria_finetune.py:1435] (2/4) (10280960000, 34072559616)
+2024-09-01 02:10:16,953 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:10:16,994 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 02:10:37,119 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 0, loss[loss=0.2921, simple_loss=0.2801, pruned_loss=0.1447, over 18551.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.2801, pruned_loss=0.1447, over 18551.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 1.0
+2024-09-01 02:10:37,119 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:15:37,613 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 5, validation: loss=0.2588, simple_loss=0.2515, pruned_loss=0.1195, over 1073944.00 frames.
+2024-09-01 02:15:37,613 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 02:18:43,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 02:19:22,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=102848.0, ans=0.2
+2024-09-01 02:20:12,613 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.32 vs. limit=15.0
+2024-09-01 02:21:02,960 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.04 vs. limit=15.0
+2024-09-01 02:21:07,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=102901.33333333333, ans=0.125
+2024-09-01 02:21:10,006 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.45 vs. limit=15.0
+2024-09-01 02:23:25,250 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 50, loss[loss=0.2753, simple_loss=0.2647, pruned_loss=0.1354, over 19027.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.2748, pruned_loss=0.1388, over 828775.72 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 0.25
+2024-09-01 02:24:38,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=103061.33333333333, ans=0.025
+2024-09-01 02:25:52,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=103114.66666666667, ans=0.025
+2024-09-01 02:26:14,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=103114.66666666667, ans=0.0
+2024-09-01 02:26:22,733 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=103114.66666666667, ans=0.125
+2024-09-01 02:27:01,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=103168.0, ans=0.125
+2024-09-01 02:27:14,643 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=103168.0, ans=0.125
+2024-09-01 02:27:28,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=103221.33333333333, ans=0.015
+2024-09-01 02:27:48,829 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 2.619e+02 2.908e+02 3.410e+02 5.061e+02, threshold=5.817e+02, percent-clipped=0.0
+2024-09-01 02:28:03,065 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=13.14 vs. limit=12.0
+2024-09-01 02:28:05,197 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 100, loss[loss=0.2857, simple_loss=0.2739, pruned_loss=0.143, over 19114.00 frames. ], tot_loss[loss=0.2777, simple_loss=0.2677, pruned_loss=0.1352, over 1478197.42 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 0.5
+2024-09-01 02:29:09,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=103328.0, ans=0.2
+2024-09-01 02:29:32,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=103381.33333333333, ans=0.0
+2024-09-01 02:30:03,384 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 02:30:03,385 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:30:03,429 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 02:30:19,018 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 0, loss[loss=0.2542, simple_loss=0.2486, pruned_loss=0.1171, over 18783.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.2486, pruned_loss=0.1171, over 18783.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:30:19,018 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:30:42,394 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 6, validation: loss=0.247, simple_loss=0.2415, pruned_loss=0.1137, over 1073944.00 frames.
+2024-09-01 02:30:51,758 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 02:31:53,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=103424.0, ans=0.125
+2024-09-01 02:32:01,261 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.83 vs. limit=6.0
+2024-09-01 02:32:55,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=103530.66666666667, ans=0.125
+2024-09-01 02:33:05,853 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=103584.0, ans=0.0
+2024-09-01 02:33:31,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=103637.33333333333, ans=0.025
+2024-09-01 02:33:49,369 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 50, loss[loss=0.2716, simple_loss=0.2642, pruned_loss=0.129, over 19006.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.2514, pruned_loss=0.1224, over 828020.78 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 0.5
+2024-09-01 02:33:52,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=103690.66666666667, ans=0.125
+2024-09-01 02:34:07,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103690.66666666667, ans=0.1
+2024-09-01 02:34:17,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103744.0, ans=0.1
+2024-09-01 02:34:31,076 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=23.35 vs. limit=22.5
+2024-09-01 02:34:40,111 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.401e+02 2.633e+02 2.975e+02 4.049e+02, threshold=5.266e+02, percent-clipped=0.0
+2024-09-01 02:35:28,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=103850.66666666667, ans=0.025
+2024-09-01 02:35:55,580 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 100, loss[loss=0.2783, simple_loss=0.27, pruned_loss=0.1347, over 19060.00 frames. ], tot_loss[loss=0.259, simple_loss=0.2519, pruned_loss=0.1234, over 1475525.13 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:36:10,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=103957.33333333333, ans=0.0
+2024-09-01 02:36:48,558 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:36:58,495 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 02:36:58,495 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:36:58,549 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 02:37:37,099 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 0, loss[loss=0.2478, simple_loss=0.2452, pruned_loss=0.1115, over 18435.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.2452, pruned_loss=0.1115, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:37:37,099 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:38:00,928 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 7, validation: loss=0.2303, simple_loss=0.2284, pruned_loss=0.1027, over 1073944.00 frames.
+2024-09-01 02:38:00,928 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 02:38:30,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=104160.0, ans=0.125
+2024-09-01 02:38:48,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=104213.33333333333, ans=0.0
+2024-09-01 02:39:19,358 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.14 vs. limit=22.5
+2024-09-01 02:39:23,672 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.18 vs. limit=15.0
+2024-09-01 02:39:39,118 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.248e+02 2.388e+02 2.643e+02 3.863e+02, threshold=4.776e+02, percent-clipped=0.0
+2024-09-01 02:39:54,674 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 50, loss[loss=0.2454, simple_loss=0.2411, pruned_loss=0.1147, over 18970.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.2498, pruned_loss=0.1215, over 828175.40 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:40:24,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=104426.66666666667, ans=0.125
+2024-09-01 02:41:04,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=104533.33333333333, ans=0.125
+2024-09-01 02:41:19,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=104586.66666666667, ans=0.0
+2024-09-01 02:41:38,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=104586.66666666667, ans=0.125
+2024-09-01 02:41:38,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104586.66666666667, ans=0.125
+2024-09-01 02:41:41,952 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 100, loss[loss=0.2287, simple_loss=0.2303, pruned_loss=0.09875, over 19065.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.2443, pruned_loss=0.1179, over 1476190.89 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:41:45,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=104640.0, ans=0.125
+2024-09-01 02:42:02,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=104693.33333333333, ans=0.1
+2024-09-01 02:42:06,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=104693.33333333333, ans=0.125
+2024-09-01 02:42:39,952 INFO [dysarthria_finetune.py:1435] (2/4) (10280960000, 34072559616)
+2024-09-01 02:42:39,953 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:42:40,003 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 02:42:52,884 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 0, loss[loss=0.2157, simple_loss=0.2177, pruned_loss=0.09301, over 18635.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2177, pruned_loss=0.09301, over 18635.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 4.0
+2024-09-01 02:42:52,884 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:43:16,310 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 8, validation: loss=0.2224, simple_loss=0.2225, pruned_loss=0.09892, over 1073944.00 frames.
+2024-09-01 02:43:16,311 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 02:43:29,257 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.78 vs. limit=6.0
+2024-09-01 02:43:35,759 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=19.55 vs. limit=15.0
+2024-09-01 02:43:42,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=104842.66666666667, ans=0.125
+2024-09-01 02:43:51,414 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.041e+02 2.195e+02 2.485e+02 3.530e+02, threshold=4.390e+02, percent-clipped=0.0
+2024-09-01 02:43:52,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=104842.66666666667, ans=0.0
+2024-09-01 02:44:24,139 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=104949.33333333333, ans=0.025
+2024-09-01 02:44:37,030 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:45:06,421 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 50, loss[loss=0.24, simple_loss=0.241, pruned_loss=0.1067, over 19000.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2382, pruned_loss=0.1131, over 827531.12 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:46:34,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=105109.33333333333, ans=0.125
+2024-09-01 02:46:41,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105109.33333333333, ans=0.125
+2024-09-01 02:46:50,023 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105162.66666666667, ans=0.1
+2024-09-01 02:47:03,201 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.30 vs. limit=15.0
+2024-09-01 02:47:11,808 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.98 vs. limit=15.0
+2024-09-01 02:47:55,828 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 100, loss[loss=0.2265, simple_loss=0.228, pruned_loss=0.1008, over 19093.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2328, pruned_loss=0.1084, over 1475468.79 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:48:14,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=105322.66666666667, ans=0.05
+2024-09-01 02:48:27,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=105376.0, ans=0.0
+2024-09-01 02:48:34,259 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 1.930e+02 2.062e+02 2.246e+02 3.148e+02, threshold=4.124e+02, percent-clipped=0.0
+2024-09-01 02:48:55,542 INFO [dysarthria_finetune.py:1435] (2/4) (10314514432, 34072559616)
+2024-09-01 02:48:55,543 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:48:55,575 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 02:49:09,716 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 0, loss[loss=0.2438, simple_loss=0.2401, pruned_loss=0.1161, over 18461.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.2401, pruned_loss=0.1161, over 18461.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:49:09,716 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:49:40,375 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 9, validation: loss=0.211, simple_loss=0.2147, pruned_loss=0.09159, over 1073944.00 frames.
+2024-09-01 02:49:40,375 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 02:49:42,916 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=13.27 vs. limit=12.0
+2024-09-01 02:49:56,677 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.71 vs. limit=6.0
+2024-09-01 02:50:12,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=105477.33333333333, ans=10.0
+2024-09-01 02:50:26,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=105530.66666666667, ans=0.125
+2024-09-01 02:50:33,856 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.87 vs. limit=15.0
+2024-09-01 02:50:51,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105584.0, ans=0.1
+2024-09-01 02:50:55,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=105584.0, ans=0.125
+2024-09-01 02:51:27,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:51:45,169 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.47 vs. limit=15.0
+2024-09-01 02:51:45,383 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.56 vs. limit=10.0
+2024-09-01 02:52:30,678 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 50, loss[loss=0.2505, simple_loss=0.2527, pruned_loss=0.1126, over 18943.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2279, pruned_loss=0.1042, over 826909.81 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:53:34,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=105797.33333333333, ans=0.125
+2024-09-01 02:54:02,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:54:14,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=105904.0, ans=0.125
+2024-09-01 02:54:17,880 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=105904.0, ans=0.2
+2024-09-01 02:54:35,251 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 1.850e+02 1.979e+02 2.143e+02 2.885e+02, threshold=3.959e+02, percent-clipped=0.0
+2024-09-01 02:54:39,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=105957.33333333333, ans=0.0
+2024-09-01 02:55:01,574 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:55:06,133 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 100, loss[loss=0.2263, simple_loss=0.2316, pruned_loss=0.09879, over 19136.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2268, pruned_loss=0.1027, over 1474643.82 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:55:25,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:55:26,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106010.66666666667, ans=0.125
+2024-09-01 02:55:48,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=106064.0, ans=0.0
+2024-09-01 02:56:27,087 INFO [dysarthria_finetune.py:1435] (2/4) (10312417280, 34072559616)
+2024-09-01 02:56:27,088 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 02:56:27,131 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 02:56:40,077 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 0, loss[loss=0.2627, simple_loss=0.2586, pruned_loss=0.1271, over 18505.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.2586, pruned_loss=0.1271, over 18505.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:56:40,078 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 02:57:03,512 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 10, validation: loss=0.2075, simple_loss=0.2129, pruned_loss=0.09054, over 1073944.00 frames.
+2024-09-01 02:57:03,512 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 02:57:11,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106165.33333333333, ans=0.125
+2024-09-01 02:57:38,738 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=6.71 vs. limit=12.0
+2024-09-01 02:57:50,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106272.0, ans=0.125
+2024-09-01 02:58:10,295 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.39 vs. limit=22.5
+2024-09-01 02:58:16,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:58:55,056 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 50, loss[loss=0.2382, simple_loss=0.2379, pruned_loss=0.1121, over 19019.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2238, pruned_loss=0.1007, over 827816.98 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:59:07,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=106432.0, ans=0.0
+2024-09-01 02:59:20,729 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.769e+02 1.897e+02 2.105e+02 2.891e+02, threshold=3.793e+02, percent-clipped=0.0
+2024-09-01 02:59:30,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=106485.33333333333, ans=0.0
+2024-09-01 02:59:48,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=106538.66666666667, ans=0.125
+2024-09-01 02:59:54,892 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.66 vs. limit=15.0
+2024-09-01 03:00:01,245 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.65 vs. limit=22.5
+2024-09-01 03:00:15,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=106592.0, ans=0.125
+2024-09-01 03:00:33,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=106645.33333333333, ans=0.0
+2024-09-01 03:00:42,965 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 100, loss[loss=0.1829, simple_loss=0.1932, pruned_loss=0.07573, over 19070.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2201, pruned_loss=0.09702, over 1475821.74 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 03:01:46,973 INFO [dysarthria_finetune.py:1435] (2/4) (10280960000, 34072559616)
+2024-09-01 03:01:46,974 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:01:47,026 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:08:00,751 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 0, loss[loss=0.264, simple_loss=0.2522, pruned_loss=0.1368, over 18525.00 frames. ], tot_loss[loss=0.264, simple_loss=0.2522, pruned_loss=0.1368, over 18525.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:08:00,751 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:08:32,545 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 11, validation: loss=0.2002, simple_loss=0.2088, pruned_loss=0.08618, over 1073944.00 frames.
+2024-09-01 03:08:32,545 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:09:37,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106906.66666666667, ans=0.1
+2024-09-01 03:10:00,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=106960.0, ans=0.125
+2024-09-01 03:10:26,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106960.0, ans=0.1
+2024-09-01 03:10:32,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=107013.33333333333, ans=0.0
+2024-09-01 03:10:42,594 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.721e+02 1.824e+02 2.016e+02 2.682e+02, threshold=3.648e+02, percent-clipped=0.0
+2024-09-01 03:11:22,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 03:11:29,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=107066.66666666667, ans=0.025
+2024-09-01 03:12:03,920 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 50, loss[loss=0.2221, simple_loss=0.2243, pruned_loss=0.1036, over 19068.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2177, pruned_loss=0.09419, over 827285.47 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:12:43,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=107173.33333333333, ans=0.125
+2024-09-01 03:12:48,709 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.93 vs. limit=22.5
+2024-09-01 03:13:53,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107173.33333333333, ans=0.1
+2024-09-01 03:14:36,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=107226.66666666667, ans=0.125
+2024-09-01 03:15:56,744 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 100, loss[loss=0.1955, simple_loss=0.204, pruned_loss=0.08595, over 19059.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2156, pruned_loss=0.09201, over 1474809.38 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:16:46,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=107440.0, ans=0.125
+2024-09-01 03:17:08,535 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.51 vs. limit=10.0
+2024-09-01 03:17:56,538 INFO [dysarthria_finetune.py:1435] (2/4) (10280960000, 34072559616)
+2024-09-01 03:17:56,539 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:17:56,580 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:18:09,578 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 0, loss[loss=0.2182, simple_loss=0.2236, pruned_loss=0.09999, over 18505.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2236, pruned_loss=0.09999, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:18:09,579 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:18:33,054 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 12, validation: loss=0.1929, simple_loss=0.2049, pruned_loss=0.0821, over 1073944.00 frames.
+2024-09-01 03:18:33,055 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:18:54,482 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 1.683e+02 1.764e+02 1.920e+02 2.754e+02, threshold=3.529e+02, percent-clipped=0.0
+2024-09-01 03:18:59,020 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=107541.33333333333, ans=0.125
+2024-09-01 03:19:05,122 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=16.50 vs. limit=15.0
+2024-09-01 03:19:10,809 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.26 vs. limit=10.0
+2024-09-01 03:19:18,378 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.43 vs. limit=12.0
+2024-09-01 03:20:40,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107754.66666666667, ans=0.1
+2024-09-01 03:21:03,699 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 50, loss[loss=0.1801, simple_loss=0.2048, pruned_loss=0.06629, over 18979.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2128, pruned_loss=0.08997, over 828348.40 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:22:18,200 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.52 vs. limit=15.0
+2024-09-01 03:22:43,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=107914.66666666667, ans=0.2
+2024-09-01 03:23:19,302 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=107968.0, ans=22.5
+2024-09-01 03:23:36,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=108021.33333333333, ans=0.05
+2024-09-01 03:23:38,798 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=108021.33333333333, ans=0.0
+2024-09-01 03:23:46,706 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.09 vs. limit=22.5
+2024-09-01 03:24:01,730 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 100, loss[loss=0.1744, simple_loss=0.1978, pruned_loss=0.06586, over 19089.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2127, pruned_loss=0.08821, over 1475248.85 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:24:21,999 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.650e+02 1.753e+02 1.928e+02 2.697e+02, threshold=3.507e+02, percent-clipped=0.0
+2024-09-01 03:24:26,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=108128.0, ans=0.0
+2024-09-01 03:24:50,420 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=108181.33333333333, ans=0.125
+2024-09-01 03:25:11,657 INFO [dysarthria_finetune.py:1435] (2/4) (10280960000, 34072559616)
+2024-09-01 03:25:11,658 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:25:11,708 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:25:24,783 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 0, loss[loss=0.2224, simple_loss=0.2284, pruned_loss=0.1034, over 18540.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2284, pruned_loss=0.1034, over 18540.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:25:24,783 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:25:48,262 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 13, validation: loss=0.1886, simple_loss=0.2026, pruned_loss=0.08078, over 1073944.00 frames.
+2024-09-01 03:25:48,262 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:26:17,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=108282.66666666667, ans=0.125
+2024-09-01 03:26:23,174 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:27:08,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=108389.33333333333, ans=0.2
+2024-09-01 03:27:49,150 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=108442.66666666667, ans=0.035
+2024-09-01 03:27:54,660 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 50, loss[loss=0.2188, simple_loss=0.2283, pruned_loss=0.09965, over 18984.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2118, pruned_loss=0.08726, over 829065.08 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 1.0
+2024-09-01 03:28:37,122 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.26 vs. limit=15.0
+2024-09-01 03:29:02,098 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108656.0, ans=0.0
+2024-09-01 03:29:07,380 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.617e+02 1.723e+02 2.007e+02 2.594e+02, threshold=3.446e+02, percent-clipped=0.0
+2024-09-01 03:29:21,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=108656.0, ans=0.125
+2024-09-01 03:29:32,656 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.25 vs. limit=15.0
+2024-09-01 03:29:42,897 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-09-01 03:29:42,923 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-09-01 03:29:46,168 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 100, loss[loss=0.1808, simple_loss=0.2002, pruned_loss=0.0749, over 19116.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2129, pruned_loss=0.08897, over 1477011.25 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 2.0
+2024-09-01 03:30:00,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=108762.66666666667, ans=0.2
+2024-09-01 03:30:13,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=108816.0, ans=0.0
+2024-09-01 03:30:24,097 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=108816.0, ans=10.0
+2024-09-01 03:30:28,473 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=108869.33333333333, ans=0.1
+2024-09-01 03:30:46,141 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 03:30:46,142 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:30:46,175 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:30:59,024 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 0, loss[loss=0.2076, simple_loss=0.2113, pruned_loss=0.09931, over 18523.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2113, pruned_loss=0.09931, over 18523.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:30:59,025 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:31:19,198 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.3.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([3.9384, 4.1038, 3.9670, 4.0796, 4.1657, 3.8715, 3.9641, 3.6802],
+ device='cuda:2')
+2024-09-01 03:31:23,179 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 14, validation: loss=0.1833, simple_loss=0.2, pruned_loss=0.07856, over 1073944.00 frames.
+2024-09-01 03:31:23,180 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:31:51,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=108970.66666666667, ans=0.025
+2024-09-01 03:33:13,492 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 50, loss[loss=0.192, simple_loss=0.2094, pruned_loss=0.08317, over 18999.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2072, pruned_loss=0.08463, over 827850.18 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:33:16,722 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109184.0, ans=0.1
+2024-09-01 03:33:19,751 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.619e+02 1.722e+02 1.984e+02 2.668e+02, threshold=3.445e+02, percent-clipped=0.0
+2024-09-01 03:33:21,632 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=24.82 vs. limit=22.5
+2024-09-01 03:33:31,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=109184.0, ans=0.125
+2024-09-01 03:33:55,724 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:34:19,777 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.24 vs. limit=6.0
+2024-09-01 03:34:57,881 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=20.89 vs. limit=15.0
+2024-09-01 03:35:00,697 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 100, loss[loss=0.1792, simple_loss=0.2045, pruned_loss=0.07281, over 19059.00 frames. ], tot_loss[loss=0.1925, simple_loss=0.2078, pruned_loss=0.08484, over 1475617.37 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 8.0
+2024-09-01 03:35:02,327 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.29 vs. limit=15.0
+2024-09-01 03:35:43,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=109557.33333333333, ans=0.125
+2024-09-01 03:35:43,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=109557.33333333333, ans=0.05
+2024-09-01 03:35:46,192 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.23 vs. limit=15.0
+2024-09-01 03:36:00,195 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 03:36:00,197 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:36:00,232 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:36:14,209 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 0, loss[loss=0.2441, simple_loss=0.2491, pruned_loss=0.1178, over 18678.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.2491, pruned_loss=0.1178, over 18678.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:36:14,210 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:36:45,406 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 15, validation: loss=0.1765, simple_loss=0.1963, pruned_loss=0.07531, over 1073944.00 frames.
+2024-09-01 03:36:45,407 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:37:08,933 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-09-01 03:37:52,742 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=109712.0, ans=0.125
+2024-09-01 03:38:03,368 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.579e+02 1.672e+02 1.908e+02 2.431e+02, threshold=3.343e+02, percent-clipped=0.0
+2024-09-01 03:38:18,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=109765.33333333333, ans=0.125
+2024-09-01 03:38:26,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=109765.33333333333, ans=0.0
+2024-09-01 03:38:29,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=109765.33333333333, ans=0.125
+2024-09-01 03:39:14,736 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 50, loss[loss=0.2063, simple_loss=0.2281, pruned_loss=0.08976, over 18994.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2037, pruned_loss=0.08233, over 827605.34 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:39:52,839 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=9.40 vs. limit=15.0
+2024-09-01 03:40:10,289 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.44 vs. limit=15.0
+2024-09-01 03:40:15,693 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=109925.33333333333, ans=0.015
+2024-09-01 03:40:36,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=109978.66666666667, ans=0.125
+2024-09-01 03:40:41,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=109978.66666666667, ans=0.125
+2024-09-01 03:40:49,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=110032.0, ans=15.0
+2024-09-01 03:41:13,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=110032.0, ans=0.1
+2024-09-01 03:41:20,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=110032.0, ans=0.125
+2024-09-01 03:41:54,237 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 100, loss[loss=0.172, simple_loss=0.198, pruned_loss=0.07123, over 19062.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2033, pruned_loss=0.08134, over 1475114.53 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 8.0
+2024-09-01 03:42:02,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=110138.66666666667, ans=0.0
+2024-09-01 03:42:34,703 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.44 vs. limit=22.5
+2024-09-01 03:43:09,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=110245.33333333333, ans=0.125
+2024-09-01 03:43:21,356 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.548e+02 1.650e+02 1.862e+02 2.617e+02, threshold=3.300e+02, percent-clipped=0.0
+2024-09-01 03:43:24,583 INFO [dysarthria_finetune.py:1435] (2/4) (10280960000, 34072559616)
+2024-09-01 03:43:24,584 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:43:24,618 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:43:40,085 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 0, loss[loss=0.2076, simple_loss=0.2188, pruned_loss=0.09746, over 18504.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2188, pruned_loss=0.09746, over 18504.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:43:40,085 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:44:25,981 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 16, validation: loss=0.1763, simple_loss=0.1967, pruned_loss=0.07691, over 1073944.00 frames.
+2024-09-01 03:44:25,981 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:44:37,559 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.03 vs. limit=5.0
+2024-09-01 03:44:45,305 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.61 vs. limit=22.5
+2024-09-01 03:44:56,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=110293.33333333333, ans=0.125
+2024-09-01 03:45:09,868 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.55 vs. limit=15.0
+2024-09-01 03:45:14,301 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=110346.66666666667, ans=0.2
+2024-09-01 03:45:29,753 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=110346.66666666667, ans=0.2
+2024-09-01 03:45:36,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=110346.66666666667, ans=0.125
+2024-09-01 03:46:19,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=110400.0, ans=0.125
+2024-09-01 03:46:32,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110400.0, ans=0.1
+2024-09-01 03:48:06,678 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:48:09,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=110506.66666666667, ans=0.0
+2024-09-01 03:48:21,448 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.07 vs. limit=15.0
+2024-09-01 03:48:37,720 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 50, loss[loss=0.1825, simple_loss=0.2061, pruned_loss=0.07919, over 19044.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.1999, pruned_loss=0.0802, over 828171.03 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 03:49:19,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=110613.33333333333, ans=0.0
+2024-09-01 03:50:08,511 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=11.73 vs. limit=12.0
+2024-09-01 03:50:11,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=110666.66666666667, ans=0.125
+2024-09-01 03:52:26,726 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.555e+02 1.657e+02 1.896e+02 2.445e+02, threshold=3.314e+02, percent-clipped=0.0
+2024-09-01 03:52:30,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:52:31,279 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 100, loss[loss=0.1497, simple_loss=0.182, pruned_loss=0.0587, over 19090.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.1998, pruned_loss=0.07951, over 1476933.27 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:52:55,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=110826.66666666667, ans=0.025
+2024-09-01 03:53:40,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110880.0, ans=0.1
+2024-09-01 03:54:09,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110933.33333333333, ans=0.1
+2024-09-01 03:54:10,882 INFO [dysarthria_finetune.py:1435] (2/4) (10278862848, 34072559616)
+2024-09-01 03:54:10,883 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 03:54:10,927 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 03:54:38,956 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 0, loss[loss=0.1794, simple_loss=0.2036, pruned_loss=0.07765, over 18336.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.2036, pruned_loss=0.07765, over 18336.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:54:38,956 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 03:54:45,552 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.5097, 2.9556, 2.7725, 2.6922], device='cuda:2')
+2024-09-01 03:55:19,881 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 17, validation: loss=0.1671, simple_loss=0.1912, pruned_loss=0.07151, over 1073944.00 frames.
+2024-09-01 03:55:19,882 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 03:56:27,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=111034.66666666667, ans=0.2
+2024-09-01 03:57:04,619 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=111088.0, ans=0.0
+2024-09-01 03:58:10,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111194.66666666667, ans=0.125
+2024-09-01 03:58:57,673 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 50, loss[loss=0.159, simple_loss=0.1978, pruned_loss=0.06004, over 19057.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.1984, pruned_loss=0.07808, over 827125.84 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 04:00:43,848 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=111301.33333333333, ans=0.125
+2024-09-01 04:00:57,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=111354.66666666667, ans=0.5
+2024-09-01 04:01:01,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=111354.66666666667, ans=0.0
+2024-09-01 04:01:02,151 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.555e+02 1.659e+02 1.888e+02 2.626e+02, threshold=3.319e+02, percent-clipped=0.0
+2024-09-01 04:01:12,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=111354.66666666667, ans=0.125
+2024-09-01 04:01:24,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=111354.66666666667, ans=0.125
+2024-09-01 04:01:48,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=111408.0, ans=0.125
+2024-09-01 04:02:07,006 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111461.33333333333, ans=0.1
+2024-09-01 04:02:25,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=111461.33333333333, ans=0.125
+2024-09-01 04:02:48,337 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 100, loss[loss=0.1398, simple_loss=0.1729, pruned_loss=0.05339, over 19126.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.1971, pruned_loss=0.07661, over 1475165.47 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 04:03:58,722 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.43 vs. limit=15.0
+2024-09-01 04:04:19,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111621.33333333333, ans=0.125
+2024-09-01 04:05:21,534 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 04:05:21,536 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 04:05:21,587 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 04:05:29,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 04:05:38,881 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 0, loss[loss=0.2067, simple_loss=0.2219, pruned_loss=0.09575, over 18559.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2219, pruned_loss=0.09575, over 18559.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:05:38,882 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 04:06:14,838 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 18, validation: loss=0.1676, simple_loss=0.191, pruned_loss=0.07213, over 1073944.00 frames.
+2024-09-01 04:06:14,838 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 04:06:36,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=111669.33333333333, ans=0.0
+2024-09-01 04:08:59,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.87 vs. limit=15.0
+2024-09-01 04:09:42,035 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 1.516e+02 1.635e+02 1.895e+02 3.024e+02, threshold=3.269e+02, percent-clipped=0.0
+2024-09-01 04:10:10,275 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 50, loss[loss=0.195, simple_loss=0.2189, pruned_loss=0.08561, over 18975.00 frames. ], tot_loss[loss=0.1756, simple_loss=0.1979, pruned_loss=0.07661, over 827610.12 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:10:19,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=111936.0, ans=0.125
+2024-09-01 04:10:23,775 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.52 vs. limit=15.0
+2024-09-01 04:10:41,414 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=111936.0, ans=0.125
+2024-09-01 04:11:15,007 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.67 vs. limit=6.0
+2024-09-01 04:13:56,086 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=10.90 vs. limit=12.0
+2024-09-01 04:14:13,997 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 100, loss[loss=0.1737, simple_loss=0.1935, pruned_loss=0.07697, over 19135.00 frames. ], tot_loss[loss=0.1731, simple_loss=0.1957, pruned_loss=0.07525, over 1477220.69 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:14:24,056 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.87 vs. limit=22.5
+2024-09-01 04:14:49,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112256.0, ans=0.125
+2024-09-01 04:15:09,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=112256.0, ans=0.0
+2024-09-01 04:15:13,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112309.33333333333, ans=0.1
+2024-09-01 04:15:56,108 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 04:15:56,108 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 04:15:56,154 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 04:16:15,964 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 0, loss[loss=0.1853, simple_loss=0.203, pruned_loss=0.08377, over 18438.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.203, pruned_loss=0.08377, over 18438.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:16:15,965 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 04:16:39,407 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 19, validation: loss=0.1638, simple_loss=0.1883, pruned_loss=0.06968, over 1073944.00 frames.
+2024-09-01 04:16:39,407 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 04:16:45,881 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=19.75 vs. limit=15.0
+2024-09-01 04:17:15,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=112405.33333333333, ans=0.125
+2024-09-01 04:17:22,804 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.531e+02 1.615e+02 1.818e+02 2.373e+02, threshold=3.231e+02, percent-clipped=0.0
+2024-09-01 04:17:38,587 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.01 vs. limit=15.0
+2024-09-01 04:18:32,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 04:18:41,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-09-01 04:19:09,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=112618.66666666667, ans=0.0
+2024-09-01 04:19:10,123 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 50, loss[loss=0.1781, simple_loss=0.2083, pruned_loss=0.07392, over 19013.00 frames. ], tot_loss[loss=0.1746, simple_loss=0.1957, pruned_loss=0.07677, over 827262.88 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 4.0
+2024-09-01 04:19:22,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=112618.66666666667, ans=0.0
+2024-09-01 04:20:04,628 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.37 vs. limit=6.0
+2024-09-01 04:20:14,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=112725.33333333333, ans=0.125
+2024-09-01 04:20:29,610 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.78 vs. limit=10.0
+2024-09-01 04:20:33,711 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=112778.66666666667, ans=0.0
+2024-09-01 04:20:42,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=112778.66666666667, ans=0.0
+2024-09-01 04:20:53,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=112832.0, ans=0.125
+2024-09-01 04:21:07,193 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=19.85 vs. limit=15.0
+2024-09-01 04:21:10,111 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 100, loss[loss=0.1362, simple_loss=0.1624, pruned_loss=0.05503, over 19169.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.1959, pruned_loss=0.07689, over 1475351.90 frames. ], batch size: 134, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:21:54,612 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.501e+02 1.584e+02 1.820e+02 2.268e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-09-01 04:22:11,534 INFO [dysarthria_finetune.py:1435] (2/4) (10314514432, 34072559616)
+2024-09-01 04:22:11,535 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 04:22:11,577 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 04:22:26,683 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 0, loss[loss=0.1906, simple_loss=0.2139, pruned_loss=0.08364, over 18527.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2139, pruned_loss=0.08364, over 18527.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:22:26,684 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-09-01 04:22:50,260 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 20, validation: loss=0.1638, simple_loss=0.1875, pruned_loss=0.07, over 1073944.00 frames.
+2024-09-01 04:22:50,261 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19796MB
+2024-09-01 04:24:19,600 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.96 vs. limit=22.5
+2024-09-01 04:24:35,370 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.84 vs. limit=22.5
+2024-09-01 04:24:55,288 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=113253.33333333333, ans=0.0
+2024-09-01 04:24:57,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=113253.33333333333, ans=0.0
+2024-09-01 04:25:07,039 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 50, loss[loss=0.1885, simple_loss=0.2103, pruned_loss=0.08336, over 18968.00 frames. ], tot_loss[loss=0.1707, simple_loss=0.1931, pruned_loss=0.07421, over 828106.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 4.0
+2024-09-01 04:25:41,250 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=113360.0, ans=0.125
+2024-09-01 04:25:57,535 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=113360.0, ans=0.125
+2024-09-01 04:26:00,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=113360.0, ans=0.0
+2024-09-01 04:26:55,373 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.00 vs. limit=15.0
+2024-09-01 04:26:59,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=113520.0, ans=0.2
+2024-09-01 04:27:00,330 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.45 vs. limit=6.0
+2024-09-01 04:27:05,362 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.522e+02 1.605e+02 1.869e+02 2.652e+02, threshold=3.210e+02, percent-clipped=0.0
+2024-09-01 04:27:27,155 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 100, loss[loss=0.1403, simple_loss=0.1699, pruned_loss=0.05533, over 19074.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.1921, pruned_loss=0.07331, over 1476081.83 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:27:49,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=113573.33333333333, ans=0.2
+2024-09-01 04:28:00,604 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=113626.66666666667, ans=0.125
+2024-09-01 04:28:21,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 04:28:43,142 INFO [dysarthria_finetune.py:1435] (2/4) (10283057152, 34072559616)
+2024-09-01 04:28:43,143 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-09-01 04:28:43,165 INFO [dysarthria_finetune.py:1440] (2/4) (29109190656, 34072559616)
+2024-09-01 04:28:43,166 INFO [dysarthria_finetune.py:1442] (2/4) Done!
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-3 b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-3
new file mode 100644
index 0000000000000000000000000000000000000000..40680a4b55aa6d80db56d7e4b494707b787b5273
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/log/log-train-2024-08-31-13-16-10-3
@@ -0,0 +1,545 @@
+2024-08-31 13:16:10,943 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-31 13:16:10,986 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-31 13:16:10,986 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-31 13:16:11,947 INFO [dysarthria_finetune.py:1219] (3/4) (32783400960, 34072559616)
+2024-08-31 13:16:11,947 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-31 13:16:13,232 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/4b/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-31 13:16:13,233 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-31 13:16:14,935 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 66110931
+2024-08-31 13:16:14,936 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/streaming/exp/epoch-20.pt
+2024-08-31 13:18:23,851 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-31 13:20:29,534 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-31 13:20:29,665 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-31 13:20:29,919 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-31 13:20:29,920 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-31 13:20:31,921 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-31 13:20:32,874 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-31 13:20:32,876 INFO [dysarthria_asr_datamodule.py:501] (3/4) About to get dev cuts
+2024-08-31 13:20:33,113 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-31 13:20:33,463 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-31 13:20:33,464 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:44:09,209 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=5.07 vs. limit=5.0
+2024-08-31 13:44:10,210 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.46 vs. limit=7.5
+2024-08-31 13:44:14,876 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-31 13:45:00,383 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.37 vs. limit=7.5
+2024-08-31 13:45:00,951 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-31 13:47:50,042 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-31 13:47:52,422 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-31 13:50:17,118 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.87 vs. limit=3.0
+2024-08-31 13:50:20,328 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-31 13:50:22,579 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 12655MB
+2024-08-31 13:51:23,102 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], tot_loss[loss=0.3239, simple_loss=0.3072, pruned_loss=0.1668, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-31 13:51:23,103 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-31 14:29:03,503 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3678, simple_loss=0.3479, pruned_loss=0.1987, over 1073944.00 frames.
+2024-08-31 14:29:03,504 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14320MB
+2024-08-31 15:24:16,877 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.525e+02 8.969e+02 9.815e+02 1.002e+03 1.048e+03, threshold=3.926e+03, percent-clipped=0.0
+2024-08-31 15:49:37,290 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=100053.33333333333, ans=0.2
+2024-08-31 15:52:42,133 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.161e+02 8.685e+02 9.467e+02 1.002e+03 1.055e+03, threshold=3.787e+03, percent-clipped=0.0
+2024-08-31 16:00:43,033 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.01 vs. limit=15.0
+2024-08-31 16:20:00,718 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=100160.0, ans=0.2
+2024-08-31 16:22:14,352 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100160.0, ans=0.125
+2024-08-31 16:23:54,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=100160.0, ans=0.2
+2024-08-31 16:31:57,886 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:32:13,901 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 6.192e+02 7.846e+02 8.685e+02 9.467e+02 1.055e+03, threshold=3.474e+03, percent-clipped=0.0
+2024-08-31 16:54:29,233 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=100213.33333333333, ans=0.0
+2024-08-31 17:02:30,134 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.4509, simple_loss=0.4255, pruned_loss=0.2502, over 19001.00 frames. ], tot_loss[loss=0.3942, simple_loss=0.3718, pruned_loss=0.2222, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-31 17:08:28,670 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=44.12 vs. limit=22.5
+2024-08-31 17:11:49,972 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.06 vs. limit=22.5
+2024-08-31 17:23:51,440 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=17.48 vs. limit=15.0
+2024-08-31 17:26:14,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100320.0, ans=0.125
+2024-08-31 17:39:39,975 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=100373.33333333333, ans=0.125
+2024-08-31 17:57:52,208 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=21.41 vs. limit=15.0
+2024-08-31 17:58:07,261 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=100426.66666666667, ans=0.025
+2024-08-31 18:03:47,496 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=47.91 vs. limit=22.5
+2024-08-31 18:12:52,329 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.869e+02 6.982e+02 7.682e+02 8.607e+02 1.055e+03, threshold=1.536e+03, percent-clipped=0.0
+2024-08-31 18:12:52,367 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.358, simple_loss=0.3396, pruned_loss=0.1858, over 19146.00 frames. ], tot_loss[loss=0.3783, simple_loss=0.3572, pruned_loss=0.2086, over 1476162.18 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-31 18:34:03,778 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=100586.66666666667, ans=0.0
+2024-08-31 18:44:19,285 INFO [dysarthria_finetune.py:1435] (3/4) (13370064896, 34072559616)
+2024-08-31 18:44:19,285 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-31 18:44:19,339 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-08-31 18:46:01,812 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 0, loss[loss=0.3531, simple_loss=0.3343, pruned_loss=0.1873, over 18501.00 frames. ], tot_loss[loss=0.3531, simple_loss=0.3343, pruned_loss=0.1873, over 18501.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-31 18:46:01,812 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-31 19:10:08,822 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 2, validation: loss=0.3307, simple_loss=0.3141, pruned_loss=0.1687, over 1073944.00 frames.
+2024-08-31 19:10:08,823 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-08-31 19:50:38,481 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-31 20:01:04,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-31 20:05:23,665 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.36 vs. limit=22.5
+2024-08-31 20:15:22,480 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-31 20:20:10,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=100896.0, ans=0.125
+2024-08-31 20:23:39,352 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100896.0, ans=0.125
+2024-08-31 20:23:39,376 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-31 20:31:44,811 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 50, loss[loss=0.3496, simple_loss=0.3328, pruned_loss=0.1758, over 18956.00 frames. ], tot_loss[loss=0.3548, simple_loss=0.3356, pruned_loss=0.1898, over 828460.00 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 4.0
+2024-08-31 20:34:09,143 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.67 vs. limit=15.0
+2024-08-31 20:42:30,519 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.53 vs. limit=15.0
+2024-08-31 20:51:24,188 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=101002.66666666667, ans=0.125
+2024-08-31 21:03:09,447 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.367e+02 4.995e+02 5.661e+02 6.268e+02 7.321e+02, threshold=1.132e+03, percent-clipped=0.0
+2024-08-31 21:07:10,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=101056.0, ans=0.0
+2024-08-31 21:16:13,052 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101109.33333333333, ans=0.1
+2024-08-31 21:17:59,126 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-31 21:17:59,145 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-31 21:19:17,333 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.39 vs. limit=15.0
+2024-08-31 21:32:23,529 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.95 vs. limit=22.5
+2024-08-31 21:42:13,842 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 100, loss[loss=0.3303, simple_loss=0.3131, pruned_loss=0.173, over 19077.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.3244, pruned_loss=0.1792, over 1476919.42 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 4.0
+2024-08-31 21:46:29,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=101216.0, ans=0.025
+2024-08-31 22:07:16,155 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.97 vs. limit=6.0
+2024-08-31 22:21:43,996 INFO [dysarthria_finetune.py:1435] (3/4) (13187612672, 34072559616)
+2024-08-31 22:21:43,998 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-31 22:21:44,038 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-08-31 22:22:38,994 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 0, loss[loss=0.2899, simple_loss=0.2766, pruned_loss=0.1441, over 18579.00 frames. ], tot_loss[loss=0.2899, simple_loss=0.2766, pruned_loss=0.1441, over 18579.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 2.0
+2024-08-31 22:22:38,995 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-31 22:31:34,590 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 3, validation: loss=0.2979, simple_loss=0.2853, pruned_loss=0.1432, over 1073944.00 frames.
+2024-08-31 22:31:34,954 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-08-31 23:30:40,081 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.802e+02 3.787e+02 4.308e+02 4.929e+02 6.122e+02, threshold=8.616e+02, percent-clipped=0.0
+2024-08-31 23:32:08,311 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=13.29 vs. limit=15.0
+2024-08-31 23:32:42,308 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 50, loss[loss=0.3383, simple_loss=0.3247, pruned_loss=0.1617, over 19113.00 frames. ], tot_loss[loss=0.3251, simple_loss=0.3092, pruned_loss=0.166, over 827781.85 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 1.0
+2024-08-31 23:54:25,131 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=28.44 vs. limit=22.5
+2024-09-01 00:04:57,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=101797.33333333333, ans=0.95
+2024-09-01 00:07:16,971 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.99 vs. limit=15.0
+2024-09-01 00:13:15,691 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.00 vs. limit=15.0
+2024-09-01 00:17:08,367 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 100, loss[loss=0.3064, simple_loss=0.2918, pruned_loss=0.156, over 19145.00 frames. ], tot_loss[loss=0.3161, simple_loss=0.301, pruned_loss=0.1605, over 1476240.06 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 1.0
+2024-09-01 00:18:15,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101904.0, ans=0.125
+2024-09-01 00:19:35,495 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101904.0, ans=0.1
+2024-09-01 00:25:35,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102010.66666666667, ans=0.1
+2024-09-01 00:25:35,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=102010.66666666667, ans=0.125
+2024-09-01 00:25:35,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=102010.66666666667, ans=0.1
+2024-09-01 00:26:04,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=102010.66666666667, ans=0.125
+2024-09-01 00:27:08,600 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 00:27:26,416 INFO [dysarthria_finetune.py:1435] (3/4) (13179224064, 34072559616)
+2024-09-01 00:27:26,417 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 00:27:26,456 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 00:27:42,728 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 0, loss[loss=0.3503, simple_loss=0.3295, pruned_loss=0.1922, over 18645.00 frames. ], tot_loss[loss=0.3503, simple_loss=0.3295, pruned_loss=0.1922, over 18645.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 2.0
+2024-09-01 00:27:42,729 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 00:46:27,437 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 4, validation: loss=0.279, simple_loss=0.2687, pruned_loss=0.1325, over 1073944.00 frames.
+2024-09-01 00:46:27,438 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 00:59:45,234 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.85 vs. limit=22.5
+2024-09-01 01:06:46,635 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.22 vs. limit=15.0
+2024-09-01 01:16:41,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=102160.0, ans=0.0
+2024-09-01 01:16:52,151 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.614e+02 3.221e+02 3.659e+02 4.077e+02 5.349e+02, threshold=7.318e+02, percent-clipped=0.0
+2024-09-01 01:25:25,551 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=17.91 vs. limit=15.0
+2024-09-01 01:27:41,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=102213.33333333333, ans=0.025
+2024-09-01 01:28:22,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=102266.66666666667, ans=0.125
+2024-09-01 01:29:39,649 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=28.14 vs. limit=22.5
+2024-09-01 01:37:41,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102266.66666666667, ans=0.125
+2024-09-01 01:39:48,717 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 50, loss[loss=0.304, simple_loss=0.2895, pruned_loss=0.1553, over 18993.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.292, pruned_loss=0.1525, over 827748.42 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 0.5
+2024-09-01 01:40:37,068 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.57 vs. limit=15.0
+2024-09-01 01:40:37,135 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.27 vs. limit=15.0
+2024-09-01 01:44:00,952 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=102320.0, ans=0.0
+2024-09-01 01:46:06,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=102373.33333333333, ans=0.125
+2024-09-01 01:51:17,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-09-01 01:57:09,401 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 01:59:03,249 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=102533.33333333333, ans=0.125
+2024-09-01 01:59:47,281 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102533.33333333333, ans=0.1
+2024-09-01 02:01:07,724 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 100, loss[loss=0.3067, simple_loss=0.2926, pruned_loss=0.1558, over 19161.00 frames. ], tot_loss[loss=0.2956, simple_loss=0.2832, pruned_loss=0.1462, over 1475350.41 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 1.0
+2024-09-01 02:05:19,001 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.89 vs. limit=6.0
+2024-09-01 02:09:39,914 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 2.859e+02 3.213e+02 3.589e+02 4.738e+02, threshold=6.426e+02, percent-clipped=0.0
+2024-09-01 02:10:07,076 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-09-01 02:10:07,599 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.06 vs. limit=6.0
+2024-09-01 02:10:16,929 INFO [dysarthria_finetune.py:1435] (3/4) (13193904128, 34072559616)
+2024-09-01 02:10:16,930 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:10:16,959 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 02:10:37,117 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 0, loss[loss=0.26, simple_loss=0.2518, pruned_loss=0.1223, over 18566.00 frames. ], tot_loss[loss=0.26, simple_loss=0.2518, pruned_loss=0.1223, over 18566.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 1.0
+2024-09-01 02:10:37,117 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:15:37,620 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 5, validation: loss=0.2588, simple_loss=0.2515, pruned_loss=0.1195, over 1073944.00 frames.
+2024-09-01 02:15:37,621 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 02:17:57,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 02:18:25,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102794.66666666667, ans=0.1
+2024-09-01 02:18:28,630 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.47 vs. limit=6.0
+2024-09-01 02:18:43,790 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-09-01 02:19:22,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=102848.0, ans=0.2
+2024-09-01 02:19:46,796 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.58 vs. limit=15.0
+2024-09-01 02:21:03,374 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.22 vs. limit=15.0
+2024-09-01 02:23:25,248 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 50, loss[loss=0.3013, simple_loss=0.2886, pruned_loss=0.1509, over 18976.00 frames. ], tot_loss[loss=0.2803, simple_loss=0.27, pruned_loss=0.1363, over 827749.28 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 0.25
+2024-09-01 02:24:04,287 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.05 vs. limit=22.5
+2024-09-01 02:24:44,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-09-01 02:25:12,470 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-09-01 02:25:29,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-09-01 02:25:42,160 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:25:54,053 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.49 vs. limit=15.0
+2024-09-01 02:26:14,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103114.66666666667, ans=0.1
+2024-09-01 02:27:05,610 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:27:14,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=103168.0, ans=0.125
+2024-09-01 02:27:15,166 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.48 vs. limit=15.0
+2024-09-01 02:27:48,820 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 2.619e+02 2.908e+02 3.410e+02 5.061e+02, threshold=5.817e+02, percent-clipped=0.0
+2024-09-01 02:27:54,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=103221.33333333333, ans=0.2
+2024-09-01 02:28:05,158 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 100, loss[loss=0.277, simple_loss=0.2668, pruned_loss=0.1361, over 19091.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.2673, pruned_loss=0.1351, over 1475913.16 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 0.5
+2024-09-01 02:29:03,537 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.40 vs. limit=6.0
+2024-09-01 02:29:29,981 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=103328.0, ans=0.0
+2024-09-01 02:30:03,389 INFO [dysarthria_finetune.py:1435] (3/4) (13175029760, 34072559616)
+2024-09-01 02:30:03,390 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:30:03,431 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 02:30:19,016 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 0, loss[loss=0.2498, simple_loss=0.2427, pruned_loss=0.1183, over 18684.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.2427, pruned_loss=0.1183, over 18684.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:30:19,016 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:30:42,399 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 6, validation: loss=0.247, simple_loss=0.2415, pruned_loss=0.1137, over 1073944.00 frames.
+2024-09-01 02:30:51,758 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 02:32:19,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=103477.33333333333, ans=0.125
+2024-09-01 02:32:24,822 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=18.53 vs. limit=15.0
+2024-09-01 02:32:55,004 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103530.66666666667, ans=0.125
+2024-09-01 02:33:49,353 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 50, loss[loss=0.2401, simple_loss=0.2377, pruned_loss=0.1058, over 19058.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.2565, pruned_loss=0.1267, over 828493.81 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 0.5
+2024-09-01 02:34:10,173 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103690.66666666667, ans=0.1
+2024-09-01 02:34:39,191 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-09-01 02:34:39,629 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=28.48 vs. limit=22.5
+2024-09-01 02:34:40,098 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.401e+02 2.633e+02 2.975e+02 4.049e+02, threshold=5.266e+02, percent-clipped=0.0
+2024-09-01 02:34:59,015 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=103797.33333333333, ans=0.125
+2024-09-01 02:35:49,194 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=38.71 vs. limit=22.5
+2024-09-01 02:35:51,872 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:35:55,582 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 100, loss[loss=0.2423, simple_loss=0.2367, pruned_loss=0.114, over 19113.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.2554, pruned_loss=0.1262, over 1475249.23 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:36:30,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 02:36:30,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=104010.66666666667, ans=0.125
+2024-09-01 02:36:58,483 INFO [dysarthria_finetune.py:1435] (3/4) (380305408, 34072559616)
+2024-09-01 02:36:58,484 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:36:58,568 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 02:37:37,096 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 0, loss[loss=0.294, simple_loss=0.2799, pruned_loss=0.1521, over 18595.00 frames. ], tot_loss[loss=0.294, simple_loss=0.2799, pruned_loss=0.1521, over 18595.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:37:37,097 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:38:00,937 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 7, validation: loss=0.2303, simple_loss=0.2284, pruned_loss=0.1027, over 1073944.00 frames.
+2024-09-01 02:38:00,938 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 02:38:14,474 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.83 vs. limit=15.0
+2024-09-01 02:38:46,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104213.33333333333, ans=0.1
+2024-09-01 02:39:01,150 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=16.96 vs. limit=15.0
+2024-09-01 02:39:39,114 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.900e+02 2.248e+02 2.388e+02 2.643e+02 3.863e+02, threshold=4.776e+02, percent-clipped=0.0
+2024-09-01 02:39:54,666 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 50, loss[loss=0.2562, simple_loss=0.2514, pruned_loss=0.12, over 18963.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.2491, pruned_loss=0.1215, over 827887.87 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 1.0
+2024-09-01 02:40:12,889 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104373.33333333333, ans=0.1
+2024-09-01 02:40:37,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=104480.0, ans=0.07
+2024-09-01 02:40:56,987 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.09 vs. limit=15.0
+2024-09-01 02:41:04,692 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=104533.33333333333, ans=0.0
+2024-09-01 02:41:19,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=104586.66666666667, ans=0.125
+2024-09-01 02:41:41,952 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 100, loss[loss=0.225, simple_loss=0.224, pruned_loss=0.1012, over 19124.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.2438, pruned_loss=0.1179, over 1475075.17 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:41:47,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=104640.0, ans=0.2
+2024-09-01 02:41:47,696 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.22 vs. limit=22.5
+2024-09-01 02:41:55,804 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=104640.0, ans=10.0
+2024-09-01 02:42:00,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=104640.0, ans=0.125
+2024-09-01 02:42:39,968 INFO [dysarthria_finetune.py:1435] (3/4) (13219069952, 34072559616)
+2024-09-01 02:42:39,969 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:42:40,007 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 02:42:52,891 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 0, loss[loss=0.2285, simple_loss=0.2285, pruned_loss=0.1018, over 18547.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.2285, pruned_loss=0.1018, over 18547.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 4.0
+2024-09-01 02:42:52,891 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:43:16,310 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 8, validation: loss=0.2224, simple_loss=0.2225, pruned_loss=0.09892, over 1073944.00 frames.
+2024-09-01 02:43:16,311 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 02:43:17,989 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.60 vs. limit=15.0
+2024-09-01 02:43:19,888 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 02:43:51,418 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.041e+02 2.195e+02 2.485e+02 3.530e+02, threshold=4.390e+02, percent-clipped=0.0
+2024-09-01 02:44:24,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-09-01 02:44:28,204 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-09-01 02:45:06,402 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 50, loss[loss=0.2497, simple_loss=0.2442, pruned_loss=0.1204, over 18964.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2388, pruned_loss=0.1131, over 828441.23 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:45:07,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=105056.0, ans=0.125
+2024-09-01 02:46:20,132 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.76 vs. limit=12.0
+2024-09-01 02:46:26,192 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=105056.0, ans=0.125
+2024-09-01 02:46:47,791 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=105109.33333333333, ans=0.0
+2024-09-01 02:47:09,841 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=17.46 vs. limit=15.0
+2024-09-01 02:47:29,439 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.84 vs. limit=15.0
+2024-09-01 02:47:48,249 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=105269.33333333333, ans=0.125
+2024-09-01 02:47:55,804 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 100, loss[loss=0.2384, simple_loss=0.2394, pruned_loss=0.1071, over 19119.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.2364, pruned_loss=0.1121, over 1475727.62 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 2.0
+2024-09-01 02:48:10,448 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.68 vs. limit=15.0
+2024-09-01 02:48:16,179 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=105376.0, ans=0.125
+2024-09-01 02:48:29,226 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=105376.0, ans=0.09899494936611666
+2024-09-01 02:48:34,263 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 1.930e+02 2.062e+02 2.246e+02 3.148e+02, threshold=4.124e+02, percent-clipped=0.0
+2024-09-01 02:48:55,527 INFO [dysarthria_finetune.py:1435] (3/4) (13208584192, 34072559616)
+2024-09-01 02:48:55,528 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:48:55,571 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 02:49:09,690 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 0, loss[loss=0.2539, simple_loss=0.2518, pruned_loss=0.1187, over 18777.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.2518, pruned_loss=0.1187, over 18777.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:49:09,691 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:49:40,379 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 9, validation: loss=0.211, simple_loss=0.2147, pruned_loss=0.09159, over 1073944.00 frames.
+2024-09-01 02:49:40,380 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 02:50:05,355 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=105477.33333333333, ans=0.125
+2024-09-01 02:50:51,237 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.15 vs. limit=15.0
+2024-09-01 02:51:27,654 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=105637.33333333333, ans=0.2
+2024-09-01 02:51:33,281 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=105637.33333333333, ans=0.125
+2024-09-01 02:51:38,654 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=105637.33333333333, ans=0.09899494936611666
+2024-09-01 02:52:16,756 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=17.83 vs. limit=22.5
+2024-09-01 02:52:30,678 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 50, loss[loss=0.2246, simple_loss=0.2293, pruned_loss=0.09762, over 18965.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.2273, pruned_loss=0.1034, over 827503.70 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:53:44,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=105850.66666666667, ans=0.125
+2024-09-01 02:54:14,230 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.74 vs. limit=15.0
+2024-09-01 02:54:20,858 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.33 vs. limit=15.0
+2024-09-01 02:54:35,248 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 1.850e+02 1.979e+02 2.143e+02 2.885e+02, threshold=3.959e+02, percent-clipped=0.0
+2024-09-01 02:54:39,636 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=105957.33333333333, ans=0.125
+2024-09-01 02:55:01,903 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105957.33333333333, ans=0.1
+2024-09-01 02:55:06,093 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 100, loss[loss=0.2105, simple_loss=0.2145, pruned_loss=0.093, over 19159.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.226, pruned_loss=0.1037, over 1475225.92 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:56:27,085 INFO [dysarthria_finetune.py:1435] (3/4) (14370406400, 34072559616)
+2024-09-01 02:56:27,085 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 02:56:27,127 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 02:56:40,079 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 0, loss[loss=0.2173, simple_loss=0.2183, pruned_loss=0.1002, over 18587.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2183, pruned_loss=0.1002, over 18587.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 02:56:40,080 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 02:57:03,505 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 10, validation: loss=0.2075, simple_loss=0.2129, pruned_loss=0.09054, over 1073944.00 frames.
+2024-09-01 02:57:03,506 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 02:57:25,523 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.73 vs. limit=22.5
+2024-09-01 02:57:38,391 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.84 vs. limit=15.0
+2024-09-01 02:58:03,838 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.41 vs. limit=15.0
+2024-09-01 02:58:14,612 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=106325.33333333333, ans=0.125
+2024-09-01 02:58:18,863 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=106325.33333333333, ans=0.2
+2024-09-01 02:58:42,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106378.66666666667, ans=0.125
+2024-09-01 02:58:55,060 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 50, loss[loss=0.2416, simple_loss=0.2461, pruned_loss=0.1085, over 19101.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2253, pruned_loss=0.1024, over 827631.91 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 2.0
+2024-09-01 02:59:01,093 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=9.85 vs. limit=12.0
+2024-09-01 02:59:15,807 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.00 vs. limit=15.0
+2024-09-01 02:59:20,728 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.769e+02 1.897e+02 2.105e+02 2.891e+02, threshold=3.793e+02, percent-clipped=0.0
+2024-09-01 02:59:22,087 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=106485.33333333333, ans=0.0
+2024-09-01 02:59:46,211 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.69 vs. limit=15.0
+2024-09-01 03:00:07,137 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=106592.0, ans=0.5
+2024-09-01 03:00:24,818 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.44 vs. limit=15.0
+2024-09-01 03:00:40,067 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=16.09 vs. limit=15.0
+2024-09-01 03:00:42,969 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 100, loss[loss=0.209, simple_loss=0.215, pruned_loss=0.09248, over 19051.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2241, pruned_loss=0.1007, over 1475773.03 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 4.0
+2024-09-01 03:01:03,940 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106698.66666666667, ans=0.125
+2024-09-01 03:01:25,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106752.0, ans=0.0
+2024-09-01 03:01:40,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=106805.33333333333, ans=0.125
+2024-09-01 03:01:41,971 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=106805.33333333333, ans=0.0
+2024-09-01 03:01:46,077 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=106805.33333333333, ans=0.0
+2024-09-01 03:01:46,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=106805.33333333333, ans=0.125
+2024-09-01 03:01:46,974 INFO [dysarthria_finetune.py:1435] (3/4) (14313783296, 34072559616)
+2024-09-01 03:01:46,975 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:01:47,028 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:08:00,726 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 0, loss[loss=0.2372, simple_loss=0.2374, pruned_loss=0.1122, over 18604.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.2374, pruned_loss=0.1122, over 18604.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:08:00,726 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:08:32,552 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 11, validation: loss=0.2002, simple_loss=0.2088, pruned_loss=0.08618, over 1073944.00 frames.
+2024-09-01 03:08:32,552 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:09:36,984 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=106906.66666666667, ans=0.0
+2024-09-01 03:10:00,532 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-09-01 03:10:26,173 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=106960.0, ans=0.0
+2024-09-01 03:10:42,589 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.721e+02 1.824e+02 2.016e+02 2.682e+02, threshold=3.648e+02, percent-clipped=0.0
+2024-09-01 03:11:52,029 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-09-01 03:12:03,918 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 50, loss[loss=0.2191, simple_loss=0.2263, pruned_loss=0.09753, over 19110.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2148, pruned_loss=0.09185, over 828132.31 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:12:12,928 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.91 vs. limit=15.0
+2024-09-01 03:12:36,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107120.0, ans=0.125
+2024-09-01 03:14:04,156 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.39 vs. limit=15.0
+2024-09-01 03:14:36,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=107226.66666666667, ans=0.2
+2024-09-01 03:15:07,532 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.11 vs. limit=15.0
+2024-09-01 03:15:13,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=107333.33333333333, ans=0.05
+2024-09-01 03:15:18,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=107333.33333333333, ans=0.04949747468305833
+2024-09-01 03:15:56,722 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 100, loss[loss=0.1842, simple_loss=0.2024, pruned_loss=0.0718, over 19127.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2163, pruned_loss=0.09267, over 1475363.18 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:16:47,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=107440.0, ans=0.0
+2024-09-01 03:17:09,177 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.76 vs. limit=10.0
+2024-09-01 03:17:56,543 INFO [dysarthria_finetune.py:1435] (3/4) (13189709824, 34072559616)
+2024-09-01 03:17:56,544 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:17:56,583 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:18:09,579 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 0, loss[loss=0.2184, simple_loss=0.2228, pruned_loss=0.101, over 18650.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2228, pruned_loss=0.101, over 18650.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:18:09,579 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:18:33,054 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 12, validation: loss=0.1929, simple_loss=0.2049, pruned_loss=0.0821, over 1073944.00 frames.
+2024-09-01 03:18:33,055 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:18:41,680 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.01 vs. limit=15.0
+2024-09-01 03:18:47,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=107541.33333333333, ans=0.125
+2024-09-01 03:18:51,346 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.02 vs. limit=22.5
+2024-09-01 03:18:54,486 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 1.683e+02 1.764e+02 1.920e+02 2.754e+02, threshold=3.529e+02, percent-clipped=0.0
+2024-09-01 03:19:20,923 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:19:21,125 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.69 vs. limit=6.0
+2024-09-01 03:19:27,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-09-01 03:20:01,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=107648.0, ans=0.125
+2024-09-01 03:20:02,043 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.21 vs. limit=15.0
+2024-09-01 03:20:40,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=107754.66666666667, ans=0.2
+2024-09-01 03:20:41,418 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.31 vs. limit=22.5
+2024-09-01 03:21:02,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=107808.0, ans=0.0
+2024-09-01 03:21:03,694 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 50, loss[loss=0.2181, simple_loss=0.2283, pruned_loss=0.09669, over 19037.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2144, pruned_loss=0.0904, over 828666.57 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:21:07,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107808.0, ans=0.1
+2024-09-01 03:21:26,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=107808.0, ans=0.125
+2024-09-01 03:21:30,786 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.58 vs. limit=15.0
+2024-09-01 03:22:34,899 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 03:22:34,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-09-01 03:24:01,736 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 100, loss[loss=0.1706, simple_loss=0.1873, pruned_loss=0.06935, over 19142.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2134, pruned_loss=0.08897, over 1477170.44 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 4.0
+2024-09-01 03:24:21,999 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.650e+02 1.753e+02 1.928e+02 2.697e+02, threshold=3.507e+02, percent-clipped=0.0
+2024-09-01 03:24:38,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=108128.0, ans=0.0
+2024-09-01 03:24:50,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=108181.33333333333, ans=0.125
+2024-09-01 03:25:11,656 INFO [dysarthria_finetune.py:1435] (3/4) (13181321216, 34072559616)
+2024-09-01 03:25:11,656 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:25:11,709 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:25:24,761 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 0, loss[loss=0.1961, simple_loss=0.203, pruned_loss=0.08995, over 18629.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.203, pruned_loss=0.08995, over 18629.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:25:24,761 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:25:48,259 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 13, validation: loss=0.1886, simple_loss=0.2026, pruned_loss=0.08078, over 1073944.00 frames.
+2024-09-01 03:25:48,260 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:26:07,855 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108229.33333333333, ans=0.125
+2024-09-01 03:26:34,434 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=108336.0, ans=0.0
+2024-09-01 03:27:13,286 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=17.39 vs. limit=15.0
+2024-09-01 03:27:17,704 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.98 vs. limit=15.0
+2024-09-01 03:27:49,276 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:27:54,662 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 50, loss[loss=0.2111, simple_loss=0.2194, pruned_loss=0.09681, over 19050.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2108, pruned_loss=0.08718, over 828311.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 1.0
+2024-09-01 03:28:03,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=108496.0, ans=0.2
+2024-09-01 03:28:07,522 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=108496.0, ans=0.125
+2024-09-01 03:28:38,948 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108602.66666666667, ans=0.125
+2024-09-01 03:29:04,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=108656.0, ans=0.125
+2024-09-01 03:29:07,378 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.617e+02 1.723e+02 2.007e+02 2.594e+02, threshold=3.446e+02, percent-clipped=0.0
+2024-09-01 03:29:21,576 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=108656.0, ans=0.125
+2024-09-01 03:29:28,222 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.58 vs. limit=15.0
+2024-09-01 03:29:30,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-09-01 03:29:42,968 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108709.33333333333, ans=0.125
+2024-09-01 03:29:46,146 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 100, loss[loss=0.1923, simple_loss=0.2091, pruned_loss=0.08237, over 19095.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2103, pruned_loss=0.08593, over 1474662.24 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 2.0
+2024-09-01 03:29:58,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.whiten.whitening_limit, batch_count=108762.66666666667, ans=12.0
+2024-09-01 03:30:00,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=108762.66666666667, ans=0.125
+2024-09-01 03:30:09,389 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.49 vs. limit=15.0
+2024-09-01 03:30:13,159 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=108816.0, ans=0.125
+2024-09-01 03:30:18,035 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.25 vs. limit=15.0
+2024-09-01 03:30:19,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=108816.0, ans=0.125
+2024-09-01 03:30:19,847 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=108816.0, ans=0.2
+2024-09-01 03:30:26,200 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=108816.0, ans=0.0
+2024-09-01 03:30:36,975 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=108869.33333333333, ans=0.125
+2024-09-01 03:30:46,125 INFO [dysarthria_finetune.py:1435] (3/4) (13185515520, 34072559616)
+2024-09-01 03:30:46,126 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:30:46,174 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:30:58,987 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 0, loss[loss=0.2284, simple_loss=0.2338, pruned_loss=0.1083, over 18650.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.2338, pruned_loss=0.1083, over 18650.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:30:58,987 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:31:17,316 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.9378, 2.6222, 3.4462, 1.6604], device='cuda:3')
+2024-09-01 03:31:23,186 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 14, validation: loss=0.1833, simple_loss=0.2, pruned_loss=0.07856, over 1073944.00 frames.
+2024-09-01 03:31:23,187 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:31:26,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=108917.33333333333, ans=0.0
+2024-09-01 03:31:38,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=108917.33333333333, ans=0.125
+2024-09-01 03:31:38,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=108917.33333333333, ans=0.0
+2024-09-01 03:33:12,970 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=10.78 vs. limit=12.0
+2024-09-01 03:33:13,473 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 50, loss[loss=0.1708, simple_loss=0.1998, pruned_loss=0.0652, over 19012.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2075, pruned_loss=0.08365, over 829335.16 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 4.0
+2024-09-01 03:33:14,728 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=109184.0, ans=0.0
+2024-09-01 03:33:19,741 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.619e+02 1.722e+02 1.984e+02 2.668e+02, threshold=3.445e+02, percent-clipped=0.0
+2024-09-01 03:33:47,234 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.54 vs. limit=22.5
+2024-09-01 03:34:53,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=109397.33333333333, ans=0.0
+2024-09-01 03:35:00,698 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 100, loss[loss=0.1701, simple_loss=0.1978, pruned_loss=0.06686, over 19114.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2055, pruned_loss=0.08227, over 1476363.01 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 8.0
+2024-09-01 03:35:39,713 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=109504.0, ans=0.025
+2024-09-01 03:36:00,177 INFO [dysarthria_finetune.py:1435] (3/4) (14307491840, 34072559616)
+2024-09-01 03:36:00,178 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:36:00,229 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:36:14,209 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 0, loss[loss=0.1834, simple_loss=0.1993, pruned_loss=0.0811, over 18716.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.1993, pruned_loss=0.0811, over 18716.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:36:14,210 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:36:45,407 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 15, validation: loss=0.1765, simple_loss=0.1963, pruned_loss=0.07531, over 1073944.00 frames.
+2024-09-01 03:36:45,408 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:37:05,703 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.01 vs. limit=15.0
+2024-09-01 03:37:52,612 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=109712.0, ans=0.0
+2024-09-01 03:38:03,353 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.579e+02 1.672e+02 1.908e+02 2.431e+02, threshold=3.343e+02, percent-clipped=0.0
+2024-09-01 03:38:04,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=109712.0, ans=0.125
+2024-09-01 03:38:16,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109765.33333333333, ans=0.125
+2024-09-01 03:39:11,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=109818.66666666667, ans=0.125
+2024-09-01 03:39:14,743 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 50, loss[loss=0.1633, simple_loss=0.1826, pruned_loss=0.06987, over 19179.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2041, pruned_loss=0.08247, over 827713.24 frames. ], batch size: 103, lr: 9.95e-05, grad_scale: 4.0
+2024-09-01 03:39:25,723 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=109872.0, ans=0.0
+2024-09-01 03:39:37,646 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.42 vs. limit=15.0
+2024-09-01 03:40:49,892 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=110032.0, ans=0.5
+2024-09-01 03:41:15,404 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.88 vs. limit=15.0
+2024-09-01 03:41:54,230 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 100, loss[loss=0.1555, simple_loss=0.1843, pruned_loss=0.06161, over 19073.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2039, pruned_loss=0.08199, over 1475236.97 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 8.0
+2024-09-01 03:42:13,318 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.75 vs. limit=15.0
+2024-09-01 03:42:50,386 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.63 vs. limit=15.0
+2024-09-01 03:43:21,351 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.548e+02 1.650e+02 1.862e+02 2.617e+02, threshold=3.300e+02, percent-clipped=0.0
+2024-09-01 03:43:24,594 INFO [dysarthria_finetune.py:1435] (3/4) (604700672, 34072559616)
+2024-09-01 03:43:24,595 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:43:24,675 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:43:40,072 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 0, loss[loss=0.2067, simple_loss=0.2168, pruned_loss=0.09759, over 18560.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2168, pruned_loss=0.09759, over 18560.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:43:40,073 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:44:25,991 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 16, validation: loss=0.1763, simple_loss=0.1967, pruned_loss=0.07691, over 1073944.00 frames.
+2024-09-01 03:44:25,991 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:46:02,852 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:46:15,859 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=110400.0, ans=0.125
+2024-09-01 03:47:46,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=110453.33333333333, ans=0.125
+2024-09-01 03:47:59,869 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=110506.66666666667, ans=0.0
+2024-09-01 03:48:37,713 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 50, loss[loss=0.1725, simple_loss=0.2045, pruned_loss=0.06984, over 19044.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2028, pruned_loss=0.08122, over 827661.95 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 03:49:15,058 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.56 vs. limit=15.0
+2024-09-01 03:49:50,948 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:50:42,382 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=13.39 vs. limit=15.0
+2024-09-01 03:51:55,171 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-09-01 03:52:21,039 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.93 vs. limit=22.5
+2024-09-01 03:52:26,727 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.555e+02 1.657e+02 1.896e+02 2.445e+02, threshold=3.314e+02, percent-clipped=0.0
+2024-09-01 03:52:31,255 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 100, loss[loss=0.1832, simple_loss=0.2103, pruned_loss=0.07804, over 19120.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2022, pruned_loss=0.08081, over 1474935.70 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:52:41,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:52:44,914 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.02 vs. limit=22.5
+2024-09-01 03:52:47,471 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:52:52,989 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110826.66666666667, ans=0.125
+2024-09-01 03:53:00,565 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=10.38 vs. limit=12.0
+2024-09-01 03:53:08,413 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.86 vs. limit=15.0
+2024-09-01 03:53:34,812 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-09-01 03:54:07,461 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=110933.33333333333, ans=0.04949747468305833
+2024-09-01 03:54:07,967 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.63 vs. limit=6.0
+2024-09-01 03:54:10,883 INFO [dysarthria_finetune.py:1435] (3/4) (13185515520, 34072559616)
+2024-09-01 03:54:10,884 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 03:54:10,932 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 03:54:38,949 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 0, loss[loss=0.2429, simple_loss=0.2444, pruned_loss=0.1207, over 18583.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2444, pruned_loss=0.1207, over 18583.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 03:54:38,950 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 03:54:59,406 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.3.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([3.1051, 3.8526, 3.6111, 3.7194, 3.9271, 2.8770, 3.7905, 3.8597],
+ device='cuda:3')
+2024-09-01 03:55:19,878 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 17, validation: loss=0.1671, simple_loss=0.1912, pruned_loss=0.07151, over 1073944.00 frames.
+2024-09-01 03:55:19,879 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 03:55:22,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110981.33333333333, ans=0.1
+2024-09-01 03:56:06,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111034.66666666667, ans=0.125
+2024-09-01 03:56:27,191 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-09-01 03:56:30,360 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.83 vs. limit=15.0
+2024-09-01 03:56:42,693 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=18.82 vs. limit=15.0
+2024-09-01 03:57:04,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=111088.0, ans=0.125
+2024-09-01 03:57:20,469 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.53 vs. limit=22.5
+2024-09-01 03:57:33,471 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.18 vs. limit=22.5
+2024-09-01 03:58:12,446 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=111194.66666666667, ans=0.1
+2024-09-01 03:58:57,666 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 50, loss[loss=0.1765, simple_loss=0.1985, pruned_loss=0.0772, over 18982.00 frames. ], tot_loss[loss=0.176, simple_loss=0.1981, pruned_loss=0.0769, over 827806.80 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 4.0
+2024-09-01 04:00:54,007 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.39 vs. limit=15.0
+2024-09-01 04:00:57,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111354.66666666667, ans=0.1
+2024-09-01 04:01:02,143 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.555e+02 1.659e+02 1.888e+02 2.626e+02, threshold=3.319e+02, percent-clipped=0.0
+2024-09-01 04:02:24,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=111461.33333333333, ans=0.125
+2024-09-01 04:02:28,247 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111461.33333333333, ans=0.1
+2024-09-01 04:02:48,339 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 100, loss[loss=0.1706, simple_loss=0.1997, pruned_loss=0.07079, over 19078.00 frames. ], tot_loss[loss=0.1751, simple_loss=0.1977, pruned_loss=0.07629, over 1476033.73 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 8.0
+2024-09-01 04:03:18,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111514.66666666667, ans=0.1
+2024-09-01 04:03:23,501 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111514.66666666667, ans=0.125
+2024-09-01 04:05:21,540 INFO [dysarthria_finetune.py:1435] (3/4) (13177126912, 34072559616)
+2024-09-01 04:05:21,541 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 04:05:21,589 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 04:05:30,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=111669.33333333333, ans=0.2
+2024-09-01 04:05:38,880 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 0, loss[loss=0.2044, simple_loss=0.218, pruned_loss=0.09541, over 18613.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.218, pruned_loss=0.09541, over 18613.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:05:38,880 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 04:06:14,845 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 18, validation: loss=0.1676, simple_loss=0.191, pruned_loss=0.07213, over 1073944.00 frames.
+2024-09-01 04:06:14,846 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 04:06:24,774 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=7.608e-02
+2024-09-01 04:08:29,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.02 vs. limit=15.0
+2024-09-01 04:09:42,032 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 1.516e+02 1.635e+02 1.895e+02 3.024e+02, threshold=3.269e+02, percent-clipped=0.0
+2024-09-01 04:09:57,728 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.02 vs. limit=22.5
+2024-09-01 04:10:10,258 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 50, loss[loss=0.1768, simple_loss=0.2004, pruned_loss=0.07658, over 19004.00 frames. ], tot_loss[loss=0.1713, simple_loss=0.1938, pruned_loss=0.07445, over 828768.32 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:10:19,579 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=111936.0, ans=0.125
+2024-09-01 04:13:49,688 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.24 vs. limit=15.0
+2024-09-01 04:13:49,808 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.70 vs. limit=10.0
+2024-09-01 04:14:13,999 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 100, loss[loss=0.1394, simple_loss=0.1673, pruned_loss=0.05573, over 19084.00 frames. ], tot_loss[loss=0.1712, simple_loss=0.1932, pruned_loss=0.07458, over 1476677.05 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 8.0
+2024-09-01 04:14:30,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112202.66666666667, ans=0.125
+2024-09-01 04:15:02,847 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.55 vs. limit=15.0
+2024-09-01 04:15:13,929 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=112309.33333333333, ans=0.125
+2024-09-01 04:15:46,971 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=112309.33333333333, ans=0.0
+2024-09-01 04:15:56,105 INFO [dysarthria_finetune.py:1435] (3/4) (13172932608, 34072559616)
+2024-09-01 04:15:56,105 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 04:15:56,155 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 04:16:15,954 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 0, loss[loss=0.2243, simple_loss=0.2289, pruned_loss=0.1099, over 18562.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2289, pruned_loss=0.1099, over 18562.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:16:15,954 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 04:16:39,401 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 19, validation: loss=0.1638, simple_loss=0.1883, pruned_loss=0.06968, over 1073944.00 frames.
+2024-09-01 04:16:39,401 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 04:16:50,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112352.0, ans=0.1
+2024-09-01 04:16:56,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=112352.0, ans=0.0
+2024-09-01 04:17:22,798 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.531e+02 1.615e+02 1.818e+02 2.373e+02, threshold=3.231e+02, percent-clipped=0.0
+2024-09-01 04:17:38,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112458.66666666667, ans=0.125
+2024-09-01 04:18:07,741 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.74 vs. limit=6.0
+2024-09-01 04:18:32,320 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-09-01 04:18:32,839 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.53 vs. limit=6.0
+2024-09-01 04:18:39,171 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-09-01 04:19:09,368 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112618.66666666667, ans=0.125
+2024-09-01 04:19:10,127 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 50, loss[loss=0.1575, simple_loss=0.1829, pruned_loss=0.066, over 19015.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.193, pruned_loss=0.07382, over 829365.51 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 4.0
+2024-09-01 04:19:51,653 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.57 vs. limit=15.0
+2024-09-01 04:20:00,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=112725.33333333333, ans=0.2
+2024-09-01 04:20:00,816 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.35 vs. limit=10.0
+2024-09-01 04:20:10,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=112725.33333333333, ans=0.125
+2024-09-01 04:20:40,361 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=112778.66666666667, ans=0.09899494936611666
+2024-09-01 04:20:51,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=112832.0, ans=0.125
+2024-09-01 04:21:10,108 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 100, loss[loss=0.1561, simple_loss=0.1802, pruned_loss=0.06602, over 19083.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.1942, pruned_loss=0.07482, over 1476389.98 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 8.0
+2024-09-01 04:21:31,371 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=112938.66666666667, ans=0.2
+2024-09-01 04:21:50,066 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.56 vs. limit=12.0
+2024-09-01 04:21:54,607 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.501e+02 1.584e+02 1.820e+02 2.268e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-09-01 04:21:58,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=112992.0, ans=0.07
+2024-09-01 04:22:11,534 INFO [dysarthria_finetune.py:1435] (3/4) (13212778496, 34072559616)
+2024-09-01 04:22:11,535 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 04:22:11,579 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 04:22:26,676 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 0, loss[loss=0.2041, simple_loss=0.213, pruned_loss=0.09759, over 18436.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.213, pruned_loss=0.09759, over 18436.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:22:26,677 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-09-01 04:22:50,266 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 20, validation: loss=0.1638, simple_loss=0.1875, pruned_loss=0.07, over 1073944.00 frames.
+2024-09-01 04:22:50,267 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14321MB
+2024-09-01 04:22:56,343 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=113040.0, ans=0.125
+2024-09-01 04:23:19,572 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.07 vs. limit=15.0
+2024-09-01 04:23:20,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=113093.33333333333, ans=0.2
+2024-09-01 04:23:20,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=113093.33333333333, ans=0.0
+2024-09-01 04:23:27,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=113093.33333333333, ans=0.125
+2024-09-01 04:23:55,407 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=113146.66666666667, ans=0.2
+2024-09-01 04:24:57,413 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=113253.33333333333, ans=0.0
+2024-09-01 04:25:07,019 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 50, loss[loss=0.1742, simple_loss=0.2013, pruned_loss=0.0736, over 18942.00 frames. ], tot_loss[loss=0.171, simple_loss=0.192, pruned_loss=0.07499, over 827999.75 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 4.0
+2024-09-01 04:25:22,230 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=113306.66666666667, ans=0.0
+2024-09-01 04:25:57,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=113360.0, ans=0.125
+2024-09-01 04:26:11,882 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.23 vs. limit=15.0
+2024-09-01 04:27:04,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=113520.0, ans=10.0
+2024-09-01 04:27:05,365 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.522e+02 1.605e+02 1.869e+02 2.652e+02, threshold=3.210e+02, percent-clipped=0.0
+2024-09-01 04:27:27,157 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 100, loss[loss=0.1332, simple_loss=0.1609, pruned_loss=0.05269, over 19171.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.1908, pruned_loss=0.07332, over 1475487.52 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 8.0
+2024-09-01 04:27:34,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-09-01 04:28:11,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113626.66666666667, ans=0.1
+2024-09-01 04:28:22,543 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.08 vs. limit=6.0
+2024-09-01 04:28:26,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113680.0, ans=0.1
+2024-09-01 04:28:43,107 INFO [dysarthria_finetune.py:1435] (3/4) (13204389888, 34072559616)
+2024-09-01 04:28:43,108 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-09-01 04:28:43,135 INFO [dysarthria_finetune.py:1440] (3/4) (29977411584, 34072559616)
+2024-09-01 04:28:43,136 INFO [dysarthria_finetune.py:1442] (3/4) Done!
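
A note on reading the per-batch loss lines above: the logged `loss` is consistent with a pruned-transducer objective that combines the two reported components as `loss = simple_loss_scale * simple_loss + pruned_loss`, with `simple_loss_scale = 0.5` from the startup configuration (e.g. 0.5 * 0.1985 + 0.0772 = 0.1765, matching the Epoch 17, batch 50 line). The `grad_scale` field appears to be the mixed-precision loss-scale value, which is why it moves in powers of two. A minimal sketch, assuming that relationship rather than quoting the actual training code:

```python
def combined_loss(simple_loss: float, pruned_loss: float,
                  simple_loss_scale: float = 0.5) -> float:
    """Reconstruct the logged `loss` from its two reported components."""
    return simple_loss_scale * simple_loss + pruned_loss

# Check against a logged line: loss=0.1765, simple_loss=0.1985, pruned_loss=0.0772
assert abs(combined_loss(0.1985, 0.0772) - 0.1765) < 5e-4
```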
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724922176.cdr2545.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724922176.cdr2545.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..b69328ea649bed37d6c19c01909ac670cf5a8183
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724922176.cdr2545.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23a1db0cd13f9f343e91bdd0e39824b1bcd4787346a0bdd0353b0fbfc063a5e6
+size 795
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724954049.cdr2500.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724954049.cdr2500.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..5dade4a3675ef6b625311a8fbbead5f7dba998a1
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1724954049.cdr2500.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2d585a054cf6291c3056d8d4a538cd23d0d3bfafaa3826967de2f119edbe333
+size 2913
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725048942.cdr2558.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725048942.cdr2558.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..427271e264be9568ff3710910c4ff80f07656691
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725048942.cdr2558.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b9911ca5b48d87ab8f592f2b499cebe47052e960f21dc5b838293c6005ecfe6
+size 2913
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725075683.cdr2549.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725075683.cdr2549.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..e3c664fc38e3eae8f362198daa6063ba47e119d4
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725075683.cdr2549.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0debd6746a60033f489a0d99f2da1883b373fc2997d27e149f7752476786dbb2
+size 88
diff --git a/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725135371.cdr2558.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725135371.cdr2558.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..dba159547ec89e0b23d61d73650efe5ecc78f0af
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/causal/exp_finetune/tensorboard/events.out.tfevents.1725135371.cdr2558.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa9fdc485834f1a5eb2d1734f98436e8992ba3bdd9992fcab8e9f37ed98cd183
+size 33318
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/best-train-loss.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/best-train-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d6dc59a4b51dab51ffb6acb6b6bd8bcc7dac0097
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/best-train-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a02d980bab1cf51199f8a322afec45228f04c701bbc8e53ffa6b95dff3d1af32
+size 1049767630
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/best-valid-loss.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/best-valid-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d6dc59a4b51dab51ffb6acb6b6bd8bcc7dac0097
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/best-valid-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a02d980bab1cf51199f8a322afec45228f04c701bbc8e53ffa6b95dff3d1af32
+size 1049767630
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-1.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-1.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ce9e92c8bbf92a001a04760297a9d5efdd970714
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-1.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bf6591bbaa08df8deece1c131bb0f077fc947df16a02839a3cf6f5b8adb6f77
+size 1049764225
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-10.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-10.pt
new file mode 100644
index 0000000000000000000000000000000000000000..eb86675d4e19beabf017a6f1d0e23dc21a0dcbce
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-10.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d496a20aced6969ccce0127e7724f97d99e4d2e95515d1c4c5563b62a0fc726
+size 1049767182
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-11.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-11.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a0f7e10bb177da58af6a1c8fadd888400784fedb
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-11.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:325ebfa4ec186df868b2af7be88a6a491f110ec0a8618c66520e6cbc481936e5
+size 1049767246
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-12.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-12.pt
new file mode 100644
index 0000000000000000000000000000000000000000..571a58e49321353a918b34dc22fedeadc27e632c
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-12.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:465735360596882ff42ffcd52a951185d22f0edc6c870fb7bc8a02827cee4a51
+size 1049767246
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-13.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-13.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f8f3e76d53d378eae457daa4e9ff7c353f90c219
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-13.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78fd167d620ca2c8528072a2b63f69cacbc5d606ebd636245be9e235fba471f6
+size 1049767310
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-14.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-14.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0a7940f6fd6eaf5300b8a77755c62da7730fff5b
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-14.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f786bf51e47eeb03b13babbf96ed152e3ac422e2d0a3fe82b4f1ec49dc6a9fc5
+size 1049767374
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-15.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-15.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a6aa2ab661fdb5c79902f8985369b5bf05102dd1
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-15.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:330c6ef9a279b4eaf0ec13177f983b2cec1ddc1923a3cd796f2f92f155ee2819
+size 1049767438
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-16.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-16.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4baea69ec8804e73a5e2bca227c8296449d3db7c
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-16.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88d17ba2fd3abe76f889f33f7057b9ce8f28a8f5cf9cc506bb16e3e248e1d5e4
+size 1049767438
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-17.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-17.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0e5234b0a15cd46352805feb24a5359cc8ca06a1
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-17.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69ddee01425a498fccb9dd89d5c12338ae3d806cae7f0e65c5a491bc12ac0f09
+size 1049767502
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-18.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-18.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7b97caebb0dfaddd59e4568fa7835c70e27cda43
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-18.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52654bb82ba8b355114dec260b1ad3c6556b3ded5195b0b5547d7eb22c1c0639
+size 1049767566
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-19.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-19.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e49c66e876caf3f40b2b6479e23b708d2b459a12
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-19.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a911d925a8f5828417993c7abf5408d01e7a0600c6b2b752dad486e2e839ce0
+size 1049767630
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-2.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-2.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ea8ddd877cf36e4acf008f1277352528bff5f28a
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-2.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1eb3e7ba818fc718138d2ac2b9979f5c5489ce23997c64437ebc38ebe8732e14
+size 1049764353
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-20.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-20.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d6dc59a4b51dab51ffb6acb6b6bd8bcc7dac0097
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-20.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a02d980bab1cf51199f8a322afec45228f04c701bbc8e53ffa6b95dff3d1af32
+size 1049767630
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-3.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-3.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bfa6207347f66c428c6345daa2ceb2760ea69030
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-3.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffaf609e3f562b806178f9d2f8a4305522a5d319a5e0bd117f0cff49e3e2a609
+size 1049764417
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-4.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-4.pt
new file mode 100644
index 0000000000000000000000000000000000000000..28bb995d979c23e11bf471b2c5e4c3b170347195
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-4.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1553ef709b9d00e9baa2e07d4544148e968bf5ec9a89ea66c1dc998b9d692f74
+size 1049764481
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-5.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-5.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3aaf43a4f9466dfe7ad1042a4d130c0303243b96
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-5.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e47fd0bfc8e06cb590101e65e6e410fa9a666e4cd1c435bfb030dba5c9d7d64
+size 1049764481
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-6.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-6.pt
new file mode 100644
index 0000000000000000000000000000000000000000..53b4cb0e7f25c16f2a4c9458c75d0f687f020e93
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-6.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df1a6f2d87d44717d37e02a44f2f9cf3b563d2098e8c4300fde8a0136df40719
+size 1049764545
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-7.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-7.pt
new file mode 100644
index 0000000000000000000000000000000000000000..61b652f6b15ab34c53c7ad857d11c2580aff1243
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-7.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b309e45915dd30d1134d8c4919cd92e66bd987528f127e1d89d1b62feb3cd0f
+size 1049764609
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-8.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-8.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3b4ee0881e0a200d6779a0081ecfdfe50ad90c56
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-8.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b29753416e3775e314089448b443ac4dded7e5706b88965210e21809e0798be1
+size 1049764673
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-9.pt b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-9.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0d9aa37c3e809896cf974157c98a92866e6148fc
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/epoch-9.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa74c839e14cee9d19c78a6887d99e79e4a569dcd407602435309836a0d70210
+size 1049764673
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-0 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-0
new file mode 100644
index 0000000000000000000000000000000000000000..c0d0ea5436342c867fb1c0c8f8de1b70ef920f72
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-0
@@ -0,0 +1,66 @@
+2024-08-12 23:55:25,674 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-12 23:55:25,895 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-12 23:55:25,895 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-12 23:55:26,256 INFO [dysarthria_finetune.py:1219] (0/4) (33748090880, 34072559616)
+2024-08-12 23:55:27,783 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-12 23:55:28,274 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2650.int.cedar.computecanada.ca', 'IP address': '172.16.146.87'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-12 23:55:28,274 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-12 23:55:29,263 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65549011
+2024-08-12 23:55:29,824 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-12 23:55:39,907 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-12 23:55:48,042 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-12 23:55:48,316 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: ]
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-12 23:55:52,160 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-12 23:55:53,092 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-12 23:55:53,094 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-12 23:55:53,095 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-12 23:55:53,414 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-12 23:55:53,414 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-12 23:57:01,486 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.46 vs. limit=7.5
+2024-08-12 23:57:02,260 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 7499MB
+2024-08-12 23:57:03,576 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 7699MB
+2024-08-12 23:58:07,742 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 7699MB
+2024-08-12 23:58:09,122 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 7699MB
+2024-08-12 23:59:32,122 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 7699MB
+2024-08-12 23:59:33,640 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 7699MB
+2024-08-13 00:00:14,100 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.2996, simple_loss=0.2854, pruned_loss=0.1418, over 11720.00 frames. ], tot_loss[loss=0.2996, simple_loss=0.2854, pruned_loss=0.1418, over 11720.00 frames. ], batch size: 41, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 00:00:14,101 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-13 00:19:18,429 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 00:19:18,430 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 12287MB
+2024-08-13 00:29:11,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-13 00:29:25,442 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-13 00:31:33,245 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.808e+02 6.474e+02 7.040e+02 7.172e+02 7.430e+02, threshold=2.816e+03, percent-clipped=0.0
+2024-08-13 00:34:36,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=100033.33333333333, ans=0.07
+2024-08-13 00:41:15,030 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.070e+02 5.541e+02 6.558e+02 7.172e+02 7.522e+02, threshold=2.623e+03, percent-clipped=0.0
+2024-08-13 01:22:57,376 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100100.0, ans=0.125
+2024-08-13 01:25:52,229 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.705e+02 5.003e+02 5.937e+02 6.682e+02 7.522e+02, threshold=2.375e+03, percent-clipped=0.0
+2024-08-13 01:32:49,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=100133.33333333333, ans=0.125
+2024-08-13 01:37:51,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100133.33333333333, ans=0.125
+2024-08-13 01:38:09,238 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.3339, simple_loss=0.3132, pruned_loss=0.2013, over 11899.00 frames. ], tot_loss[loss=0.3486, simple_loss=0.3288, pruned_loss=0.1959, over 516580.24 frames. ], batch size: 64, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 01:39:07,620 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=26.11 vs. limit=15.0
+2024-08-13 01:39:42,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100166.66666666667, ans=0.0
+2024-08-13 01:57:38,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100233.33333333333, ans=0.125
+2024-08-13 01:57:38,555 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.35 vs. limit=15.0
+2024-08-13 02:08:03,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=100266.66666666667, ans=0.07
+2024-08-13 02:15:48,562 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.801e+02 3.979e+02 4.793e+02 5.640e+02 7.522e+02, threshold=9.587e+02, percent-clipped=0.0
+2024-08-13 02:15:48,597 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.3407, simple_loss=0.3206, pruned_loss=0.1939, over 11886.00 frames. ], tot_loss[loss=0.338, simple_loss=0.3189, pruned_loss=0.1886, over 916559.82 frames. ], batch size: 96, lr: 6.01e-05, grad_scale: 4.0
+2024-08-13 02:23:24,118 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=25.73 vs. limit=15.0
+2024-08-13 02:24:26,377 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.88 vs. limit=15.0
+2024-08-13 02:40:27,325 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100466.66666666667, ans=0.1
+2024-08-13 02:43:23,332 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 150, loss[loss=0.3101, simple_loss=0.2936, pruned_loss=0.1637, over 11781.00 frames. ], tot_loss[loss=0.3329, simple_loss=0.3143, pruned_loss=0.1838, over 1229700.22 frames. ], batch size: 69, lr: 6.51e-05, grad_scale: 4.0
+2024-08-13 02:46:56,396 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.28 vs. limit=15.0
+2024-08-13 02:53:02,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=100533.33333333333, ans=0.0
+2024-08-13 03:03:09,464 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.86 vs. limit=15.0
+2024-08-13 03:29:02,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=100633.33333333333, ans=0.0
+2024-08-13 03:30:44,924 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.239e+02 2.802e+02 3.112e+02 3.482e+02 4.513e+02, threshold=6.224e+02, percent-clipped=0.0
+2024-08-13 03:30:44,958 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 200, loss[loss=0.3584, simple_loss=0.3344, pruned_loss=0.214, over 11418.00 frames. ], tot_loss[loss=0.3249, simple_loss=0.307, pruned_loss=0.177, over 1472498.12 frames. ], batch size: 46, lr: 7.01e-05, grad_scale: 8.0
+2024-08-13 03:34:41,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100666.66666666667, ans=0.1
+2024-08-13 03:35:46,987 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-1.pt
+2024-08-13 03:41:32,251 INFO [dysarthria_finetune.py:1435] (0/4) (18644402176, 34072559616)
+2024-08-13 03:41:32,252 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-13 03:41:32,266 INFO [dysarthria_finetune.py:1440] (0/4) (30036131840, 34072559616)
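
The paired numbers printed around each "Empty cache: before and after" line read naturally as (free_bytes, total_bytes) for the GPU: 34072559616 bytes is about 31.7 GiB, i.e. a 32 GB device, and free memory jumps from roughly 18.6 GB to 30.0 GB across the flush. A hedged sketch of that logging pattern, assuming `torch.cuda.mem_get_info()` (which returns exactly such a (free, total) tuple in bytes) rather than the verbatim script:

```python
import logging
import torch

def log_empty_cache() -> None:
    # (free_bytes, total_bytes) for the current device, e.g. (18644402176, 34072559616)
    logging.info(torch.cuda.mem_get_info())
    logging.info("Empty cache: before and after")
    torch.cuda.empty_cache()  # release cached, unused blocks back to the driver
    logging.info(torch.cuda.mem_get_info())  # e.g. (30036131840, 34072559616)
```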
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-1 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-1
new file mode 100644
index 0000000000000000000000000000000000000000..99b476183fcfa8dc5c465ee7180e74639fb01379
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-1
@@ -0,0 +1,68 @@
+2024-08-12 23:55:25,903 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-12 23:55:33,367 INFO [dysarthria_finetune.py:1214] (1/4) (33106362368, 34072559616)
+2024-08-12 23:55:33,367 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-12 23:55:33,989 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-12 23:55:33,989 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-12 23:55:33,992 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2650.int.cedar.computecanada.ca', 'IP address': '172.16.146.87'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-12 23:55:33,992 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-12 23:55:34,683 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65549011
+2024-08-12 23:55:34,684 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-12 23:55:39,161 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-12 23:55:48,035 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-12 23:55:48,316 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-12 23:55:52,031 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-12 23:55:52,998 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-12 23:55:52,999 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-12 23:55:53,007 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-12 23:55:53,414 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-12 23:55:53,415 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-12 23:57:01,486 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.48 vs. limit=7.5
+2024-08-12 23:57:02,259 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 7522MB
+2024-08-12 23:57:03,572 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 7711MB
+2024-08-12 23:58:07,747 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 7711MB
+2024-08-12 23:58:09,127 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 7711MB
+2024-08-12 23:59:32,123 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 7711MB
+2024-08-12 23:59:33,642 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 7711MB
+2024-08-13 00:00:14,086 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.3398, simple_loss=0.3215, pruned_loss=0.1834, over 11438.00 frames. ], tot_loss[loss=0.3398, simple_loss=0.3215, pruned_loss=0.1834, over 11438.00 frames. ], batch size: 40, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 00:00:14,086 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-13 00:19:18,431 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 00:19:18,432 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 10510MB
+2024-08-13 00:23:11,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 00:27:15,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=100000.0, ans=0.125
+2024-08-13 00:28:44,694 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.24 vs. limit=22.5
+2024-08-13 00:29:15,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-13 00:31:33,245 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.808e+02 6.474e+02 7.040e+02 7.172e+02 7.430e+02, threshold=2.816e+03, percent-clipped=0.0
+2024-08-13 00:38:33,980 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.01 vs. limit=15.0
+2024-08-13 00:41:15,033 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.070e+02 5.541e+02 6.558e+02 7.172e+02 7.522e+02, threshold=2.623e+03, percent-clipped=0.0
+2024-08-13 00:54:00,363 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.10 vs. limit=15.0
+2024-08-13 01:25:52,224 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.705e+02 5.003e+02 5.937e+02 6.682e+02 7.522e+02, threshold=2.375e+03, percent-clipped=0.0
+2024-08-13 01:38:09,241 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.3026, simple_loss=0.287, pruned_loss=0.1565, over 11921.00 frames. ], tot_loss[loss=0.3528, simple_loss=0.3326, pruned_loss=0.2009, over 515986.49 frames. ], batch size: 64, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 01:40:15,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100166.66666666667, ans=0.125
+2024-08-13 01:51:38,184 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=17.91 vs. limit=15.0
+2024-08-13 01:59:35,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=100233.33333333333, ans=0.2
+2024-08-13 01:59:35,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=100233.33333333333, ans=0.125
+2024-08-13 02:08:27,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=100266.66666666667, ans=0.025
+2024-08-13 02:15:48,563 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.801e+02 3.979e+02 4.793e+02 5.640e+02 7.522e+02, threshold=9.587e+02, percent-clipped=0.0
+2024-08-13 02:15:48,612 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.359, simple_loss=0.3377, pruned_loss=0.205, over 11911.00 frames. ], tot_loss[loss=0.3413, simple_loss=0.322, pruned_loss=0.1909, over 915517.59 frames. ], batch size: 96, lr: 6.01e-05, grad_scale: 4.0
+2024-08-13 02:16:11,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100333.33333333333, ans=0.1
+2024-08-13 02:37:41,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.85 vs. limit=15.0
+2024-08-13 02:38:46,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=100433.33333333333, ans=0.2
+2024-08-13 02:39:45,215 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=25.95 vs. limit=15.0
+2024-08-13 02:43:23,335 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 150, loss[loss=0.3098, simple_loss=0.2931, pruned_loss=0.1654, over 11699.00 frames. ], tot_loss[loss=0.3335, simple_loss=0.3148, pruned_loss=0.1842, over 1227213.99 frames. ], batch size: 69, lr: 6.51e-05, grad_scale: 4.0
+2024-08-13 02:47:16,635 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=34.85 vs. limit=15.0
+2024-08-13 02:53:03,481 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.76 vs. limit=15.0
+2024-08-13 03:00:24,893 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=100566.66666666667, ans=0.0
+2024-08-13 03:03:10,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=100566.66666666667, ans=0.125
+2024-08-13 03:23:37,785 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-13 03:30:44,929 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.239e+02 2.802e+02 3.112e+02 3.482e+02 4.513e+02, threshold=6.224e+02, percent-clipped=0.0
+2024-08-13 03:30:44,964 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 200, loss[loss=0.2979, simple_loss=0.2832, pruned_loss=0.152, over 11607.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3076, pruned_loss=0.1775, over 1470998.27 frames. ], batch size: 46, lr: 7.01e-05, grad_scale: 8.0
+2024-08-13 03:31:26,378 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.58 vs. limit=15.0
+2024-08-13 03:34:39,530 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=100666.66666666667, ans=0.125
+2024-08-13 03:35:46,995 INFO [dysarthria_finetune.py:1435] (1/4) (18224971776, 34072559616)
+2024-08-13 03:35:46,996 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-13 03:35:47,028 INFO [dysarthria_finetune.py:1440] (1/4) (30065491968, 34072559616)
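
The `optim.py` warnings above give a five-number summary (min, 25%, median, 75%, max) of recent gradient norms, and the logged `threshold` matches `Clipping_scale` times the median (e.g. 2.0 * 1.659e+02 = 3.318e+02 against the logged 3.319e+02); `percent-clipped` then reports how often the norm exceeded that threshold. A small sketch of such a diagnostic, assuming this reconstruction rather than the actual optimizer code:

```python
import torch

def grad_norm_summary(norms: torch.Tensor, clipping_scale: float = 2.0) -> str:
    # Five-number summary matching the values after "grad-norm quartiles".
    qs = torch.quantile(norms, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
    threshold = clipping_scale * qs[2].item()  # assumption: scale the median
    pct = 100.0 * (norms > threshold).float().mean().item()
    quartiles = " ".join(f"{q.item():.3e}" for q in qs)
    return (f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
            f"threshold={threshold:.3e}, percent-clipped={pct:.1f}")
```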
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-2 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-2
new file mode 100644
index 0000000000000000000000000000000000000000..dcaad0f5f54db3a0a8e28d92783375643702e4ce
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-2
@@ -0,0 +1,76 @@
+2024-08-12 23:55:25,900 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-12 23:55:33,367 INFO [dysarthria_finetune.py:1214] (2/4) (33106362368, 34072559616)
+2024-08-12 23:55:33,367 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-12 23:55:33,979 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-12 23:55:33,980 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-12 23:55:33,983 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2650.int.cedar.computecanada.ca', 'IP address': '172.16.146.87'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-12 23:55:33,983 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-12 23:55:34,668 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65549011
+2024-08-12 23:55:34,668 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-12 23:55:39,209 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-12 23:55:48,035 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-12 23:55:48,316 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-12 23:55:52,031 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-12 23:55:52,973 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-12 23:55:52,980 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-12 23:55:53,007 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-12 23:55:53,413 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-12 23:55:53,414 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-12 23:57:01,485 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.64 vs. limit=7.5
+2024-08-12 23:57:02,259 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 7517MB
+2024-08-12 23:57:03,571 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 7703MB
+2024-08-12 23:58:07,741 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 7703MB
+2024-08-12 23:58:09,123 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 7703MB
+2024-08-12 23:59:32,122 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 7703MB
+2024-08-12 23:59:33,643 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 7703MB
+2024-08-13 00:00:07,019 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.39 vs. limit=15.0
+2024-08-13 00:00:14,081 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3133, simple_loss=0.2969, pruned_loss=0.1634, over 11286.00 frames. ], tot_loss[loss=0.3133, simple_loss=0.2969, pruned_loss=0.1634, over 11286.00 frames. ], batch size: 40, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 00:00:14,082 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-13 00:19:18,432 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 00:19:18,433 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 10601MB
+2024-08-13 00:23:10,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 00:27:26,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=100000.0, ans=0.0
+2024-08-13 00:29:24,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-13 00:31:33,245 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.808e+02 6.474e+02 7.040e+02 7.172e+02 7.430e+02, threshold=2.816e+03, percent-clipped=0.0
+2024-08-13 00:34:34,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=100033.33333333333, ans=0.2
+2024-08-13 00:39:52,000 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=22.06 vs. limit=15.0
+2024-08-13 00:41:15,029 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.070e+02 5.541e+02 6.558e+02 7.172e+02 7.522e+02, threshold=2.623e+03, percent-clipped=0.0
+2024-08-13 01:24:24,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=100100.0, ans=0.0
+2024-08-13 01:25:52,226 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.705e+02 5.003e+02 5.937e+02 6.682e+02 7.522e+02, threshold=2.375e+03, percent-clipped=0.0
+2024-08-13 01:32:50,365 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=51.69 vs. limit=15.0
+2024-08-13 01:38:07,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=100166.66666666667, ans=0.125
+2024-08-13 01:38:09,239 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.3475, simple_loss=0.3283, pruned_loss=0.19, over 11995.00 frames. ], tot_loss[loss=0.354, simple_loss=0.3337, pruned_loss=0.2009, over 516736.81 frames. ], batch size: 64, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 01:38:49,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100166.66666666667, ans=0.1
+2024-08-13 01:39:06,072 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.95 vs. limit=15.0
+2024-08-13 01:39:45,576 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=24.83 vs. limit=15.0
+2024-08-13 01:51:37,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=100200.0, ans=0.125
+2024-08-13 01:52:06,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=100200.0, ans=0.0
+2024-08-13 01:57:33,083 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=19.70 vs. limit=15.0
+2024-08-13 01:59:18,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=100233.33333333333, ans=0.2
+2024-08-13 02:08:25,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=100266.66666666667, ans=0.2
+2024-08-13 02:08:25,313 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=23.65 vs. limit=15.0
+2024-08-13 02:08:25,379 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=33.13 vs. limit=22.5
+2024-08-13 02:09:01,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-13 02:10:54,946 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=23.85 vs. limit=15.0
+2024-08-13 02:15:48,560 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.801e+02 3.979e+02 4.793e+02 5.640e+02 7.522e+02, threshold=9.587e+02, percent-clipped=0.0
+2024-08-13 02:15:48,594 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3244, simple_loss=0.3059, pruned_loss=0.1802, over 11924.00 frames. ], tot_loss[loss=0.3395, simple_loss=0.3204, pruned_loss=0.1889, over 916658.53 frames. ], batch size: 96, lr: 6.01e-05, grad_scale: 4.0
+2024-08-13 02:17:02,706 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=19.59 vs. limit=15.0
+2024-08-13 02:31:19,886 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100400.0, ans=0.1
+2024-08-13 02:33:11,677 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=100400.0, ans=0.125
+2024-08-13 02:43:23,334 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 150, loss[loss=0.352, simple_loss=0.3315, pruned_loss=0.1967, over 11776.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3141, pruned_loss=0.1828, over 1229118.95 frames. ], batch size: 69, lr: 6.51e-05, grad_scale: 4.0
+2024-08-13 03:00:25,170 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.91 vs. limit=15.0
+2024-08-13 03:03:09,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=100566.66666666667, ans=0.125
+2024-08-13 03:05:50,295 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-13 03:13:12,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=100600.0, ans=0.0
+2024-08-13 03:30:44,923 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.239e+02 2.802e+02 3.112e+02 3.482e+02 4.513e+02, threshold=6.224e+02, percent-clipped=0.0
+2024-08-13 03:30:44,957 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 200, loss[loss=0.2976, simple_loss=0.2823, pruned_loss=0.1549, over 11600.00 frames. ], tot_loss[loss=0.3241, simple_loss=0.3065, pruned_loss=0.1754, over 1472852.73 frames. ], batch size: 46, lr: 7.01e-05, grad_scale: 8.0
+2024-08-13 03:31:01,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=100666.66666666667, ans=0.125
+2024-08-13 03:34:30,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=100666.66666666667, ans=0.2
+2024-08-13 03:35:46,990 INFO [dysarthria_finetune.py:1435] (2/4) (19120455680, 34072559616)
+2024-08-13 03:35:46,990 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-13 03:35:47,028 INFO [dysarthria_finetune.py:1440] (2/4) (30036131840, 34072559616)
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-3 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-3
new file mode 100644
index 0000000000000000000000000000000000000000..bfef760ef3a224104dfa926bfb96fce374a4f8dd
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-12-23-55-25-3
@@ -0,0 +1,65 @@
+2024-08-12 23:55:25,898 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-12 23:55:31,887 INFO [dysarthria_finetune.py:1214] (3/4) (33427226624, 34072559616)
+2024-08-12 23:55:31,888 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-12 23:55:32,263 INFO [dysarthria_finetune.py:1219] (3/4) (33427226624, 34072559616)
+2024-08-12 23:55:32,264 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-12 23:55:32,267 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2650.int.cedar.computecanada.ca', 'IP address': '172.16.146.87'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-12 23:55:32,267 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-12 23:55:32,956 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65549011
+2024-08-12 23:55:32,956 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-12 23:55:39,186 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-12 23:55:48,038 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-12 23:55:48,316 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-12 23:55:49,218 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-12 23:55:49,219 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-12 23:55:49,219 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-12 23:55:49,219 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-12 23:55:49,219 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-12 23:55:52,031 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-12 23:55:52,973 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-12 23:55:52,980 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-12 23:55:53,007 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-12 23:55:53,411 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-12 23:55:53,411 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-12 23:57:01,485 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=9.13 vs. limit=7.5
+2024-08-12 23:57:02,259 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 7527MB
+2024-08-12 23:57:03,571 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 7720MB
+2024-08-12 23:58:07,741 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 7720MB
+2024-08-12 23:58:09,122 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 7720MB
+2024-08-12 23:59:32,122 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 7720MB
+2024-08-12 23:59:33,639 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 7720MB
+2024-08-13 00:00:04,254 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.84 vs. limit=15.0
+2024-08-13 00:00:14,080 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.3627, simple_loss=0.3426, pruned_loss=0.201, over 11386.00 frames. ], tot_loss[loss=0.3627, simple_loss=0.3426, pruned_loss=0.201, over 11386.00 frames. ], batch size: 40, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 00:00:14,080 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-13 00:19:18,431 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 00:19:18,432 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 9924MB
+2024-08-13 00:29:27,540 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=100000.0, ans=0.0
+2024-08-13 00:31:33,245 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.808e+02 6.474e+02 7.040e+02 7.172e+02 7.430e+02, threshold=2.816e+03, percent-clipped=0.0
+2024-08-13 00:34:40,774 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.38 vs. limit=22.5
+2024-08-13 00:39:47,601 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.97 vs. limit=15.0
+2024-08-13 00:41:15,032 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.070e+02 5.541e+02 6.558e+02 7.172e+02 7.522e+02, threshold=2.623e+03, percent-clipped=0.0
+2024-08-13 00:55:16,173 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.97 vs. limit=15.0
+2024-08-13 01:25:52,222 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.705e+02 5.003e+02 5.937e+02 6.682e+02 7.522e+02, threshold=2.375e+03, percent-clipped=0.0
+2024-08-13 01:37:51,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=100133.33333333333, ans=0.125
+2024-08-13 01:38:09,238 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.3556, simple_loss=0.3339, pruned_loss=0.2114, over 11934.00 frames. ], tot_loss[loss=0.35, simple_loss=0.3301, pruned_loss=0.1974, over 516465.20 frames. ], batch size: 64, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 01:51:19,753 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.63 vs. limit=15.0
+2024-08-13 01:57:33,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100233.33333333333, ans=0.0
+2024-08-13 02:15:46,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=100333.33333333333, ans=0.025
+2024-08-13 02:15:48,560 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.801e+02 3.979e+02 4.793e+02 5.640e+02 7.522e+02, threshold=9.587e+02, percent-clipped=0.0
+2024-08-13 02:15:48,594 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.3405, simple_loss=0.3201, pruned_loss=0.1962, over 11888.00 frames. ], tot_loss[loss=0.3384, simple_loss=0.3194, pruned_loss=0.1878, over 916989.95 frames. ], batch size: 96, lr: 6.01e-05, grad_scale: 4.0
+2024-08-13 02:16:07,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100333.33333333333, ans=0.1
+2024-08-13 02:31:17,452 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=100400.0, ans=0.125
+2024-08-13 02:38:48,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100433.33333333333, ans=0.125
+2024-08-13 02:39:58,091 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=100466.66666666667, ans=0.025
+2024-08-13 02:42:00,773 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.71 vs. limit=22.5
+2024-08-13 02:43:23,057 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=11.77 vs. limit=12.0
+2024-08-13 02:43:23,330 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 150, loss[loss=0.2971, simple_loss=0.282, pruned_loss=0.1533, over 11775.00 frames. ], tot_loss[loss=0.3328, simple_loss=0.3143, pruned_loss=0.1825, over 1227470.66 frames. ], batch size: 69, lr: 6.51e-05, grad_scale: 4.0
+2024-08-13 02:45:30,282 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.87 vs. limit=22.5
+2024-08-13 03:00:46,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=100566.66666666667, ans=0.0
+2024-08-13 03:13:14,031 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.74 vs. limit=22.5
+2024-08-13 03:27:59,823 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.45 vs. limit=10.0
+2024-08-13 03:30:44,927 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.239e+02 2.802e+02 3.112e+02 3.482e+02 4.513e+02, threshold=6.224e+02, percent-clipped=0.0
+2024-08-13 03:30:44,963 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 200, loss[loss=0.3044, simple_loss=0.2881, pruned_loss=0.1615, over 11055.00 frames. ], tot_loss[loss=0.3247, simple_loss=0.3069, pruned_loss=0.1758, over 1470172.09 frames. ], batch size: 23, lr: 7.01e-05, grad_scale: 8.0
+2024-08-13 03:35:46,991 INFO [dysarthria_finetune.py:1435] (3/4) (20447952896, 34072559616)
+2024-08-13 03:35:46,991 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-13 03:35:47,027 INFO [dysarthria_finetune.py:1440] (3/4) (30206001152, 34072559616)
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-50-0 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-50-0
new file mode 100644
index 0000000000000000000000000000000000000000..7c8d819824b184305b24776f453440d64e0b1824
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-50-0
@@ -0,0 +1,49 @@
+2024-08-13 20:03:50,982 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-13 20:03:51,865 INFO [dysarthria_finetune.py:1214] (0/4) (32783400960, 34072559616)
+2024-08-13 20:03:51,866 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-13 20:03:52,354 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-13 20:03:52,359 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-13 20:03:52,362 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 20:03:52,362 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-13 20:03:53,072 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65549011
+2024-08-13 20:03:53,634 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 20:09:02,288 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-13 20:09:05,915 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-13 20:09:06,006 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-13 20:09:06,260 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-13 20:09:06,260 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-13 20:09:06,260 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-13 20:09:06,260 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-13 20:09:06,261 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-13 20:09:07,385 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-13 20:09:08,328 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-13 20:09:08,330 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-13 20:09:08,452 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-13 20:09:08,941 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-13 20:09:08,942 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 20:09:46,138 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=24.02 vs. limit=7.5
+2024-08-13 20:09:46,398 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=18.77 vs. limit=7.5
+2024-08-13 20:09:47,191 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 20:09:48,993 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 20:12:21,529 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 20:12:23,563 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 20:17:32,565 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=8.74 vs. limit=5.0
+2024-08-13 20:17:33,028 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 20:17:35,209 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 20:19:00,520 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.28 vs. limit=12.0
+2024-08-13 20:19:23,403 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.288, simple_loss=0.2741, pruned_loss=0.1393, over 18513.00 frames. ], tot_loss[loss=0.288, simple_loss=0.2741, pruned_loss=0.1393, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 20:19:23,404 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-13 20:50:10,155 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 20:50:10,168 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-13 20:57:02,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100000.0, ans=0.0
+2024-08-13 20:57:02,921 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=8.07 vs. limit=15.0
+2024-08-13 21:11:38,979 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 21:19:40,364 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.297e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-13 21:53:14,155 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.448e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-13 22:20:37,285 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.221e+02 7.297e+02 9.456e+02 1.050e+03 1.319e+03, threshold=3.783e+03, percent-clipped=0.0
+2024-08-13 22:22:33,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-13 22:33:06,379 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=24.09 vs. limit=15.0
+2024-08-13 22:42:42,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=100266.66666666667, ans=0.0
+2024-08-13 22:42:43,527 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.3521, simple_loss=0.3326, pruned_loss=0.1925, over 18890.00 frames. ], tot_loss[loss=0.3518, simple_loss=0.3318, pruned_loss=0.1981, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 23:00:28,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-13 23:01:28,174 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.49 vs. limit=6.0
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-1 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-1
new file mode 100644
index 0000000000000000000000000000000000000000..d233d9fb220c0e306db2a08c64aee0225c9601c7
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-1
@@ -0,0 +1,47 @@
+2024-08-13 20:03:51,119 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-13 20:03:51,121 INFO [dysarthria_finetune.py:1214] (1/4) (33748090880, 34072559616)
+2024-08-13 20:03:51,121 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-13 20:03:51,865 INFO [dysarthria_finetune.py:1219] (1/4) (33414643712, 34072559616)
+2024-08-13 20:03:51,866 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-13 20:03:51,966 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 20:03:51,967 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-13 20:03:52,865 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65549011
+2024-08-13 20:03:52,865 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 20:09:01,829 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-13 20:09:05,914 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-13 20:09:06,006 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-13 20:09:07,252 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-13 20:09:08,186 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-13 20:09:08,192 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-13 20:09:08,452 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-13 20:09:08,933 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-13 20:09:08,933 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 20:09:46,148 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=19.93 vs. limit=7.5
+2024-08-13 20:09:46,398 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=16.77 vs. limit=7.5
+2024-08-13 20:09:47,196 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 20:09:48,988 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 20:12:21,524 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 20:12:23,562 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 20:17:33,027 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 20:17:35,210 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 20:19:23,405 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.343, simple_loss=0.3241, pruned_loss=0.1887, over 18549.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3241, pruned_loss=0.1887, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 20:19:23,405 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-13 20:50:10,151 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 20:50:10,168 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13098MB
+2024-08-13 20:52:19,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-13 20:56:44,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=100000.0, ans=0.125
+2024-08-13 21:08:48,181 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.62 vs. limit=15.0
+2024-08-13 21:19:40,364 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.297e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-13 21:37:57,730 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.79 vs. limit=15.0
+2024-08-13 21:49:24,259 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.96 vs. limit=10.0
+2024-08-13 21:53:14,153 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.448e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-13 22:17:29,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=100160.0, ans=0.0
+2024-08-13 22:20:37,286 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.221e+02 7.297e+02 9.456e+02 1.050e+03 1.319e+03, threshold=3.783e+03, percent-clipped=0.0
+2024-08-13 22:29:43,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-13 22:42:43,509 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.3541, simple_loss=0.3336, pruned_loss=0.2013, over 19042.00 frames. ], tot_loss[loss=0.3545, simple_loss=0.3342, pruned_loss=0.2019, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 23:00:01,624 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=31.42 vs. limit=15.0
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-2 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-2
new file mode 100644
index 0000000000000000000000000000000000000000..fd7cf3e37c1ab881a485f101ef0d84b97c6b63c7
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-2
@@ -0,0 +1,51 @@
+2024-08-13 20:03:51,121 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-13 20:03:51,121 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-13 20:03:51,122 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-13 20:03:51,865 INFO [dysarthria_finetune.py:1219] (2/4) (33427226624, 34072559616)
+2024-08-13 20:03:51,866 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-13 20:03:51,966 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 20:03:51,966 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-13 20:03:52,859 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65549011
+2024-08-13 20:03:52,859 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 20:09:01,828 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-13 20:09:05,910 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-13 20:09:06,006 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-13 20:09:07,254 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-13 20:09:08,185 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-13 20:09:08,192 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-13 20:09:08,452 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-13 20:09:08,931 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-13 20:09:08,932 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 20:09:46,142 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=22.66 vs. limit=7.5
+2024-08-13 20:09:46,399 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=17.80 vs. limit=7.5
+2024-08-13 20:09:47,191 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 20:09:48,990 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 20:12:21,524 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 20:12:23,562 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 20:17:33,028 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 20:17:35,210 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 20:19:23,426 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3479, simple_loss=0.3286, pruned_loss=0.1929, over 18533.00 frames. ], tot_loss[loss=0.3479, simple_loss=0.3286, pruned_loss=0.1929, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 20:19:23,427 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-13 20:50:10,155 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 20:50:10,169 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19754MB
+2024-08-13 20:52:07,422 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.11 vs. limit=22.5
+2024-08-13 20:52:19,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 20:56:34,203 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=15.08 vs. limit=15.0
+2024-08-13 21:11:02,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=100000.0, ans=0.09899494936611666
+2024-08-13 21:19:40,365 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.297e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-13 21:35:57,119 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.06 vs. limit=15.0
+2024-08-13 21:37:57,987 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=19.66 vs. limit=15.0
+2024-08-13 21:50:07,484 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.42 vs. limit=15.0
+2024-08-13 21:53:14,154 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.448e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-13 22:18:01,424 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.70 vs. limit=15.0
+2024-08-13 22:20:37,282 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.221e+02 7.297e+02 9.456e+02 1.050e+03 1.319e+03, threshold=3.783e+03, percent-clipped=0.0
+2024-08-13 22:31:43,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-13 22:42:35,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-13 22:42:43,511 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.3626, simple_loss=0.3409, pruned_loss=0.2122, over 19018.00 frames. ], tot_loss[loss=0.3542, simple_loss=0.3338, pruned_loss=0.2019, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 23:01:23,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100266.66666666667, ans=0.1
+2024-08-13 23:01:23,406 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=24.05 vs. limit=15.0
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-3 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-3
new file mode 100644
index 0000000000000000000000000000000000000000..1b0352c53ce1e6e2519901fda80e3c84d038aece
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-20-03-51-3
@@ -0,0 +1,47 @@
+2024-08-13 20:03:51,142 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-13 20:03:51,534 INFO [dysarthria_finetune.py:1214] (3/4) (33735507968, 34072559616)
+2024-08-13 20:03:51,534 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-13 20:03:52,261 INFO [dysarthria_finetune.py:1219] (3/4) (33093779456, 34072559616)
+2024-08-13 20:03:52,262 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-13 20:03:52,265 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 20:03:52,265 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-13 20:03:52,953 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65549011
+2024-08-13 20:03:52,953 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 20:09:01,804 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-13 20:09:05,910 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-13 20:09:06,006 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: <class 'dict'>]
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-13 20:09:06,258 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-13 20:09:07,252 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-13 20:09:08,194 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-13 20:09:08,195 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-13 20:09:08,452 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-13 20:09:08,932 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-13 20:09:08,932 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 20:09:46,142 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=17.38 vs. limit=7.5
+2024-08-13 20:09:46,399 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=19.31 vs. limit=7.5
+2024-08-13 20:09:47,191 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 20:09:48,992 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 20:12:21,527 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 20:12:23,566 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 20:17:33,029 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 20:17:35,212 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 20:19:23,427 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.2854, simple_loss=0.2712, pruned_loss=0.1421, over 18634.00 frames. ], tot_loss[loss=0.2854, simple_loss=0.2712, pruned_loss=0.1421, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 20:19:23,428 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-13 20:50:10,155 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 20:50:10,169 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14284MB
+2024-08-13 20:52:17,783 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.78 vs. limit=22.5
+2024-08-13 20:52:19,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 20:56:49,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=100000.0, ans=0.125
+2024-08-13 21:08:40,401 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=22.5
+2024-08-13 21:19:40,369 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.297e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-13 21:53:14,153 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.448e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-13 22:20:37,283 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.221e+02 7.297e+02 9.456e+02 1.050e+03 1.319e+03, threshold=3.783e+03, percent-clipped=0.0
+2024-08-13 22:29:37,077 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=100213.33333333333, ans=0.0
+2024-08-13 22:31:29,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=100213.33333333333, ans=0.025
+2024-08-13 22:37:35,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100213.33333333333, ans=0.1
+2024-08-13 22:42:43,511 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.3748, simple_loss=0.3525, pruned_loss=0.2175, over 19001.00 frames. ], tot_loss[loss=0.3538, simple_loss=0.3337, pruned_loss=0.1999, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-13 23:01:29,079 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=100266.66666666667, ans=0.125
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-0 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-0
new file mode 100644
index 0000000000000000000000000000000000000000..d99eb9d3b921bcc92417c034b90e071385056a36
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-0
@@ -0,0 +1,565 @@
+2024-08-13 23:24:47,605 INFO [dysarthria_finetune.py:1212] (0/4) Training started
+2024-08-13 23:24:47,914 INFO [dysarthria_finetune.py:1214] (0/4) (33748090880, 34072559616)
+2024-08-13 23:24:47,914 INFO [dysarthria_finetune.py:1215] (0/4) Empty cache: before and after
+2024-08-13 23:24:48,924 INFO [dysarthria_finetune.py:1219] (0/4) (32783400960, 34072559616)
+2024-08-13 23:24:48,929 INFO [dysarthria_finetune.py:1229] (0/4) Device: cuda:0
+2024-08-13 23:24:48,990 INFO [dysarthria_finetune.py:1241] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 23:24:48,990 INFO [dysarthria_finetune.py:1243] (0/4) About to create model
+2024-08-13 23:24:49,990 INFO [dysarthria_finetune.py:1247] (0/4) Number of model parameters: 65549011
+2024-08-13 23:24:50,530 INFO [dysarthria_finetune.py:769] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 23:25:00,373 INFO [dysarthria_finetune.py:1275] (0/4) Using DDP
+2024-08-13 23:25:17,950 INFO [dysarthria_asr_datamodule.py:494] (0/4) About to get train cuts
+2024-08-13 23:25:18,291 INFO [dysarthria_finetune.py:1319] (0/4) CutSet(len=62255) [underlying data type: <class '...'>]
+2024-08-13 23:25:18,620 INFO [dysarthria_asr_datamodule.py:239] (0/4) Disable MUSAN
+2024-08-13 23:25:18,620 INFO [dysarthria_asr_datamodule.py:257] (0/4) Enable SpecAugment
+2024-08-13 23:25:18,621 INFO [dysarthria_asr_datamodule.py:258] (0/4) Time warp factor: 80
+2024-08-13 23:25:18,621 INFO [dysarthria_asr_datamodule.py:268] (0/4) Num frame mask: 10
+2024-08-13 23:25:18,621 INFO [dysarthria_asr_datamodule.py:281] (0/4) About to create train dataset
+2024-08-13 23:25:19,390 INFO [dysarthria_asr_datamodule.py:308] (0/4) Using DynamicBucketingSampler.
+2024-08-13 23:25:20,348 INFO [dysarthria_asr_datamodule.py:325] (0/4) About to create train dataloader
+2024-08-13 23:25:24,505 INFO [dysarthria_asr_datamodule.py:500] (0/4) About to get dev cuts
+2024-08-13 23:25:24,698 INFO [dysarthria_asr_datamodule.py:356] (0/4) About to create dev dataset
+2024-08-13 23:25:28,032 INFO [dysarthria_asr_datamodule.py:373] (0/4) About to create dev dataloader
+2024-08-13 23:25:28,032 INFO [dysarthria_finetune.py:1490] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 23:27:16,792 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=24.02 vs. limit=7.5
+2024-08-13 23:27:17,110 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=18.77 vs. limit=7.5
+2024-08-13 23:27:17,919 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 23:27:19,736 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 23:32:34,804 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 23:32:36,803 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 23:35:37,938 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=8.74 vs. limit=5.0
+2024-08-13 23:35:38,400 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 23:35:41,667 INFO [dysarthria_finetune.py:1518] (0/4) Maximum memory allocated so far is 11707MB
+2024-08-13 23:36:57,313 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.28 vs. limit=12.0
+2024-08-13 23:36:58,730 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 0, loss[loss=0.288, simple_loss=0.2741, pruned_loss=0.1393, over 18513.00 frames. ], tot_loss[loss=0.288, simple_loss=0.2741, pruned_loss=0.1393, over 18513.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 23:36:58,732 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-13 23:49:47,046 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 23:49:47,367 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-13 23:56:08,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100000.0, ans=0.0
+2024-08-13 23:56:08,878 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=8.07 vs. limit=15.0
+2024-08-14 00:02:40,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-14 00:15:49,520 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.298e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-14 00:23:15,347 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.450e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-14 00:29:53,827 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.241e+02 7.298e+02 9.450e+02 1.050e+03 1.319e+03, threshold=3.780e+03, percent-clipped=0.0
+2024-08-14 00:30:11,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-14 00:34:46,097 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=24.15 vs. limit=15.0
+2024-08-14 00:36:59,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=100266.66666666667, ans=0.0
+2024-08-14 00:37:01,628 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 50, loss[loss=0.352, simple_loss=0.3326, pruned_loss=0.1925, over 18890.00 frames. ], tot_loss[loss=0.3517, simple_loss=0.3318, pruned_loss=0.198, over 828692.51 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-14 00:42:26,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-14 00:42:53,124 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.49 vs. limit=6.0
+2024-08-14 00:46:02,623 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.91 vs. limit=10.0
+2024-08-14 00:46:59,382 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.53 vs. limit=15.0
+2024-08-14 01:01:58,595 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.501e+02 5.963e+02 7.298e+02 8.800e+02 1.319e+03, threshold=1.460e+03, percent-clipped=0.0
+2024-08-14 01:01:58,631 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 1, batch 100, loss[loss=0.3331, simple_loss=0.3136, pruned_loss=0.1882, over 19293.00 frames. ], tot_loss[loss=0.3347, simple_loss=0.316, pruned_loss=0.1854, over 1474004.25 frames. ], batch size: 144, lr: 6.01e-05, grad_scale: 4.0
+2024-08-14 01:10:23,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100640.0, ans=0.1
+2024-08-14 01:10:23,456 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.39 vs. limit=15.0
+2024-08-14 01:11:48,029 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-1.pt
+2024-08-14 01:12:16,343 INFO [dysarthria_finetune.py:1435] (0/4) (910884864, 34072559616)
+2024-08-14 01:12:16,343 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:12:16,370 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:12:35,400 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 0, loss[loss=0.2799, simple_loss=0.2683, pruned_loss=0.1286, over 18874.00 frames. ], tot_loss[loss=0.2799, simple_loss=0.2683, pruned_loss=0.1286, over 18874.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-14 01:12:35,401 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:16:55,991 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 2, validation: loss=0.2907, simple_loss=0.276, pruned_loss=0.149, over 1073944.00 frames.
+2024-08-14 01:16:55,992 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:19:27,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=100736.0, ans=0.025
+2024-08-14 01:20:03,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=100736.0, ans=0.125
+2024-08-14 01:20:03,731 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.23 vs. limit=15.0
+2024-08-14 01:20:52,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-14 01:21:45,029 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=29.10 vs. limit=15.0
+2024-08-14 01:21:59,778 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-14 01:22:26,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100896.0, ans=0.125
+2024-08-14 01:22:31,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-14 01:24:54,901 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 50, loss[loss=0.3168, simple_loss=0.2976, pruned_loss=0.1804, over 18964.00 frames. ], tot_loss[loss=0.3212, simple_loss=0.3039, pruned_loss=0.1711, over 826819.73 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-08-14 01:25:01,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=100949.33333333333, ans=0.04949747468305833
+2024-08-14 01:26:04,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101002.66666666667, ans=0.1
+2024-08-14 01:26:13,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101002.66666666667, ans=0.125
+2024-08-14 01:27:29,015 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.249e+02 4.347e+02 4.852e+02 5.543e+02 7.043e+02, threshold=9.703e+02, percent-clipped=0.0
+2024-08-14 01:27:52,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101056.0, ans=0.125
+2024-08-14 01:28:07,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-14 01:28:13,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.92 vs. limit=15.0
+2024-08-14 01:28:52,976 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.66 vs. limit=15.0
+2024-08-14 01:29:00,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=101216.0, ans=0.125
+2024-08-14 01:29:06,635 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 2, batch 100, loss[loss=0.3529, simple_loss=0.3322, pruned_loss=0.1957, over 19229.00 frames. ], tot_loss[loss=0.3105, simple_loss=0.2942, pruned_loss=0.163, over 1473154.80 frames. ], batch size: 144, lr: 7.29e-05, grad_scale: 8.0
+2024-08-14 01:29:19,189 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.97 vs. limit=6.0
+2024-08-14 01:29:33,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101216.0, ans=0.125
+2024-08-14 01:29:44,800 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:30:25,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101322.66666666667, ans=0.1
+2024-08-14 01:30:32,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101322.66666666667, ans=0.1
+2024-08-14 01:30:42,041 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-2.pt
+2024-08-14 01:30:46,426 INFO [dysarthria_finetune.py:1435] (0/4) (856358912, 34072559616)
+2024-08-14 01:30:46,427 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:30:46,453 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:30:54,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-08-14 01:30:55,539 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 0, loss[loss=0.2949, simple_loss=0.2798, pruned_loss=0.1535, over 18603.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.2798, pruned_loss=0.1535, over 18603.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-08-14 01:30:55,540 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:31:18,579 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 3, validation: loss=0.2682, simple_loss=0.2564, pruned_loss=0.1309, over 1073944.00 frames.
+2024-08-14 01:31:18,579 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:31:50,826 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101424.0, ans=0.1
+2024-08-14 01:32:09,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=101477.33333333333, ans=0.05
+2024-08-14 01:32:14,107 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.10 vs. limit=22.5
+2024-08-14 01:32:25,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=101477.33333333333, ans=0.0
+2024-08-14 01:32:41,640 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.53 vs. limit=15.0
+2024-08-14 01:32:57,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101584.0, ans=0.1
+2024-08-14 01:32:58,606 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.574e+02 3.350e+02 3.692e+02 4.154e+02 5.648e+02, threshold=7.384e+02, percent-clipped=0.0
+2024-08-14 01:33:11,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101584.0, ans=0.125
+2024-08-14 01:33:11,829 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=101584.0, ans=0.2
+2024-08-14 01:33:14,925 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 50, loss[loss=0.3053, simple_loss=0.2895, pruned_loss=0.1595, over 18964.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.28, pruned_loss=0.1503, over 827741.27 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-08-14 01:33:16,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=101637.33333333333, ans=0.2
+2024-08-14 01:33:18,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=101637.33333333333, ans=0.0
+2024-08-14 01:33:58,719 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101690.66666666667, ans=0.1
+2024-08-14 01:34:29,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.91 vs. limit=15.0
+2024-08-14 01:34:33,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=101797.33333333333, ans=0.5
+2024-08-14 01:34:33,328 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=26.82 vs. limit=22.5
+2024-08-14 01:34:46,263 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.93 vs. limit=15.0
+2024-08-14 01:34:59,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=101850.66666666667, ans=0.2
+2024-08-14 01:35:02,107 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.23 vs. limit=22.5
+2024-08-14 01:35:08,700 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 3, batch 100, loss[loss=0.2793, simple_loss=0.2658, pruned_loss=0.1428, over 19231.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.2737, pruned_loss=0.1451, over 1473938.15 frames. ], batch size: 144, lr: 8.58e-05, grad_scale: 16.0
+2024-08-14 01:35:27,432 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.83 vs. limit=22.5
+2024-08-14 01:35:53,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=102010.66666666667, ans=0.0
+2024-08-14 01:36:03,991 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-3.pt
+2024-08-14 01:36:11,345 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 01:36:11,346 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:36:11,389 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:36:20,154 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 0, loss[loss=0.2718, simple_loss=0.2572, pruned_loss=0.144, over 18523.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.2572, pruned_loss=0.144, over 18523.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-08-14 01:36:20,154 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:36:43,053 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 4, validation: loss=0.2499, simple_loss=0.241, pruned_loss=0.1173, over 1073944.00 frames.
+2024-08-14 01:36:43,054 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:36:57,294 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.85 vs. limit=6.0
+2024-08-14 01:37:14,066 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 2.841e+02 3.076e+02 3.396e+02 5.357e+02, threshold=6.153e+02, percent-clipped=0.0
+2024-08-14 01:37:15,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=102106.66666666667, ans=0.1
+2024-08-14 01:37:21,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=102106.66666666667, ans=0.125
+2024-08-14 01:37:34,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=102160.0, ans=6.0
+2024-08-14 01:37:37,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=102160.0, ans=0.125
+2024-08-14 01:37:49,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=102213.33333333333, ans=0.04949747468305833
+2024-08-14 01:37:57,304 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=102213.33333333333, ans=0.2
+2024-08-14 01:38:22,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=102320.0, ans=0.2
+2024-08-14 01:38:23,458 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 50, loss[loss=0.3128, simple_loss=0.2964, pruned_loss=0.1644, over 18961.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.2618, pruned_loss=0.1348, over 828586.64 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-08-14 01:38:30,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=102320.0, ans=0.0
+2024-08-14 01:38:44,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=102373.33333333333, ans=0.125
+2024-08-14 01:39:17,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=102426.66666666667, ans=0.2
+2024-08-14 01:39:21,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=102480.0, ans=0.0
+2024-08-14 01:39:25,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=102480.0, ans=0.0
+2024-08-14 01:39:39,295 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:39:54,665 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.20 vs. limit=15.0
+2024-08-14 01:40:00,898 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 4, batch 100, loss[loss=0.2781, simple_loss=0.2654, pruned_loss=0.1412, over 19286.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.2562, pruned_loss=0.1315, over 1474147.24 frames. ], batch size: 144, lr: 9.86e-05, grad_scale: 32.0
+2024-08-14 01:40:30,560 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.524e+02 2.719e+02 2.975e+02 4.617e+02, threshold=5.438e+02, percent-clipped=0.0
+2024-08-14 01:40:43,539 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.28 vs. limit=6.0
+2024-08-14 01:40:50,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-14 01:40:55,799 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-4.pt
+2024-08-14 01:41:02,216 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 01:41:02,216 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:41:02,245 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:41:11,068 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 0, loss[loss=0.2267, simple_loss=0.2196, pruned_loss=0.1067, over 18549.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2196, pruned_loss=0.1067, over 18549.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:41:11,069 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:41:34,534 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 5, validation: loss=0.2343, simple_loss=0.2283, pruned_loss=0.1066, over 1073944.00 frames.
+2024-08-14 01:41:34,534 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:42:12,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=102794.66666666667, ans=0.125
+2024-08-14 01:42:18,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=102794.66666666667, ans=0.0
+2024-08-14 01:42:28,401 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=12.34 vs. limit=15.0
+2024-08-14 01:42:57,680 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.60 vs. limit=22.5
+2024-08-14 01:43:29,161 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 50, loss[loss=0.2442, simple_loss=0.2374, pruned_loss=0.1139, over 19008.00 frames. ], tot_loss[loss=0.256, simple_loss=0.2474, pruned_loss=0.1226, over 828355.03 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:43:32,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=103008.0, ans=0.125
+2024-08-14 01:44:14,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-08-14 01:44:14,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=103061.33333333333, ans=15.0
+2024-08-14 01:44:20,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-08-14 01:44:28,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=103114.66666666667, ans=0.125
+2024-08-14 01:44:43,822 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.73 vs. limit=6.0
+2024-08-14 01:44:54,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=103168.0, ans=0.125
+2024-08-14 01:44:59,446 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.063e+02 2.398e+02 2.550e+02 2.967e+02 4.732e+02, threshold=5.099e+02, percent-clipped=0.0
+2024-08-14 01:45:00,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103168.0, ans=0.1
+2024-08-14 01:45:14,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=103221.33333333333, ans=0.125
+2024-08-14 01:45:27,207 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 5, batch 100, loss[loss=0.2437, simple_loss=0.2337, pruned_loss=0.1217, over 19287.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.2424, pruned_loss=0.1193, over 1473652.43 frames. ], batch size: 144, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:45:32,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103274.66666666667, ans=0.1
+2024-08-14 01:45:34,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103274.66666666667, ans=0.125
+2024-08-14 01:45:39,940 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=10.20 vs. limit=12.0
+2024-08-14 01:46:02,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=103328.0, ans=0.2
+2024-08-14 01:46:18,154 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-5.pt
+2024-08-14 01:46:22,554 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 01:46:22,554 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:46:22,581 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:46:32,108 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 0, loss[loss=0.2526, simple_loss=0.247, pruned_loss=0.1163, over 18610.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.247, pruned_loss=0.1163, over 18610.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:46:32,109 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:46:55,695 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 6, validation: loss=0.2214, simple_loss=0.2182, pruned_loss=0.09842, over 1073944.00 frames.
+2024-08-14 01:46:55,696 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:47:15,971 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.57 vs. limit=15.0
+2024-08-14 01:47:47,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=103477.33333333333, ans=0.07
+2024-08-14 01:48:17,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=103584.0, ans=0.0
+2024-08-14 01:48:20,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=103584.0, ans=0.125
+2024-08-14 01:48:22,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103584.0, ans=0.1
+2024-08-14 01:48:25,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=103584.0, ans=0.125
+2024-08-14 01:48:40,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=103584.0, ans=0.125
+2024-08-14 01:48:48,427 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.49 vs. limit=6.0
+2024-08-14 01:48:55,622 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.02 vs. limit=12.0
+2024-08-14 01:49:09,886 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 50, loss[loss=0.2322, simple_loss=0.2277, pruned_loss=0.1068, over 19047.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.2385, pruned_loss=0.1161, over 829577.21 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:49:16,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-08-14 01:49:28,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103690.66666666667, ans=0.1
+2024-08-14 01:49:32,950 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.293e+02 2.374e+02 2.625e+02 4.193e+02, threshold=4.747e+02, percent-clipped=0.0
+2024-08-14 01:49:35,328 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.22 vs. limit=15.0
+2024-08-14 01:49:42,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=103744.0, ans=0.125
+2024-08-14 01:49:56,699 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=14.45 vs. limit=15.0
+2024-08-14 01:51:17,782 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 6, batch 100, loss[loss=0.2261, simple_loss=0.2199, pruned_loss=0.1083, over 19232.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.2305, pruned_loss=0.11, over 1476247.28 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:51:52,505 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.08 vs. limit=15.0
+2024-08-14 01:51:55,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=104010.66666666667, ans=0.125
+2024-08-14 01:52:05,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104010.66666666667, ans=0.1
+2024-08-14 01:52:05,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys.whitening_limit, batch_count=104010.66666666667, ans=6.0
+2024-08-14 01:52:24,235 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-6.pt
+2024-08-14 01:52:28,823 INFO [dysarthria_finetune.py:1435] (0/4) (856358912, 34072559616)
+2024-08-14 01:52:28,823 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:52:28,850 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:52:37,503 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 0, loss[loss=0.236, simple_loss=0.2346, pruned_loss=0.1043, over 18570.00 frames. ], tot_loss[loss=0.236, simple_loss=0.2346, pruned_loss=0.1043, over 18570.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:52:37,504 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:53:01,318 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 7, validation: loss=0.2103, simple_loss=0.2098, pruned_loss=0.0916, over 1073944.00 frames.
+2024-08-14 01:53:01,318 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:54:01,265 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.137e+02 2.271e+02 2.445e+02 3.999e+02, threshold=4.542e+02, percent-clipped=0.0
+2024-08-14 01:54:04,804 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.35 vs. limit=15.0
+2024-08-14 01:54:22,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=104320.0, ans=0.125
+2024-08-14 01:54:28,186 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.66 vs. limit=15.0
+2024-08-14 01:54:40,882 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 50, loss[loss=0.2262, simple_loss=0.2282, pruned_loss=0.09549, over 18968.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.2243, pruned_loss=0.1043, over 827907.61 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:54:54,527 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=18.07 vs. limit=22.5
+2024-08-14 01:55:00,089 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:55:05,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=104426.66666666667, ans=0.0
+2024-08-14 01:55:46,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.55 vs. limit=5.0
+2024-08-14 01:55:57,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=104533.33333333333, ans=0.125
+2024-08-14 01:56:18,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=104640.0, ans=0.0
+2024-08-14 01:56:18,960 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 7, batch 100, loss[loss=0.2147, simple_loss=0.2139, pruned_loss=0.09623, over 19302.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2204, pruned_loss=0.1017, over 1473040.93 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:56:22,400 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.63 vs. limit=22.5
+2024-08-14 01:56:24,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=104640.0, ans=0.125
+2024-08-14 01:56:33,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=104640.0, ans=0.125
+2024-08-14 01:57:10,868 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-7.pt
+2024-08-14 01:57:16,254 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 01:57:16,255 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 01:57:16,281 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 01:57:24,749 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 0, loss[loss=0.2382, simple_loss=0.2347, pruned_loss=0.1114, over 18485.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.2347, pruned_loss=0.1114, over 18485.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:57:24,750 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 01:57:48,461 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 8, validation: loss=0.2004, simple_loss=0.2027, pruned_loss=0.08579, over 1073944.00 frames.
+2024-08-14 01:57:48,462 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 01:57:54,667 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.054e+02 2.212e+02 2.317e+02 3.796e+02, threshold=4.423e+02, percent-clipped=0.0
+2024-08-14 01:58:08,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=10.76 vs. limit=12.0
+2024-08-14 01:58:51,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104949.33333333333, ans=0.1
+2024-08-14 01:59:44,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=105002.66666666667, ans=0.125
+2024-08-14 01:59:59,173 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 50, loss[loss=0.2117, simple_loss=0.2122, pruned_loss=0.09446, over 18938.00 frames. ], tot_loss[loss=0.213, simple_loss=0.213, pruned_loss=0.09554, over 828565.55 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:00:08,722 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.95 vs. limit=15.0
+2024-08-14 02:01:25,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105269.33333333333, ans=0.125
+2024-08-14 02:01:36,492 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 8, batch 100, loss[loss=0.2034, simple_loss=0.2076, pruned_loss=0.08666, over 19222.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2117, pruned_loss=0.09462, over 1474444.14 frames. ], batch size: 144, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:01:42,320 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.040e+02 2.200e+02 2.368e+02 3.520e+02, threshold=4.401e+02, percent-clipped=0.0
+2024-08-14 02:01:51,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105322.66666666667, ans=0.125
+2024-08-14 02:01:53,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=105322.66666666667, ans=0.125
+2024-08-14 02:01:54,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=105376.0, ans=0.125
+2024-08-14 02:02:28,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=105429.33333333333, ans=0.125
+2024-08-14 02:02:29,650 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-8.pt
+2024-08-14 02:02:33,930 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:02:33,930 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:02:33,956 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:02:42,910 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 0, loss[loss=0.2174, simple_loss=0.2178, pruned_loss=0.09882, over 18596.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2178, pruned_loss=0.09882, over 18596.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:02:42,910 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:03:19,140 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 9, validation: loss=0.1911, simple_loss=0.1962, pruned_loss=0.08053, over 1073944.00 frames.
+2024-08-14 02:03:19,141 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:03:31,830 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.47 vs. limit=15.0
+2024-08-14 02:03:51,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=105530.66666666667, ans=0.0
+2024-08-14 02:03:57,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=105530.66666666667, ans=0.0
+2024-08-14 02:04:16,827 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=13.34 vs. limit=12.0
+2024-08-14 02:04:19,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=105584.0, ans=0.125
+2024-08-14 02:04:19,265 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.67 vs. limit=12.0
+2024-08-14 02:04:40,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=105637.33333333333, ans=0.0
+2024-08-14 02:06:19,734 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 50, loss[loss=0.2134, simple_loss=0.2166, pruned_loss=0.0944, over 19065.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2102, pruned_loss=0.09185, over 828972.56 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:06:34,780 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=105744.0, ans=0.125
+2024-08-14 02:06:48,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=105797.33333333333, ans=0.025
+2024-08-14 02:07:13,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=105850.66666666667, ans=0.2
+2024-08-14 02:07:13,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=105850.66666666667, ans=0.125
+2024-08-14 02:07:22,053 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.86 vs. limit=22.5
+2024-08-14 02:07:22,960 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.009e+02 2.115e+02 2.263e+02 3.410e+02, threshold=4.229e+02, percent-clipped=0.0
+2024-08-14 02:07:37,520 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=105904.0, ans=0.125
+2024-08-14 02:08:11,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105957.33333333333, ans=0.1
+2024-08-14 02:08:25,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105957.33333333333, ans=0.1
+2024-08-14 02:08:30,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106010.66666666667, ans=0.125
+2024-08-14 02:08:32,109 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 9, batch 100, loss[loss=0.1651, simple_loss=0.1733, pruned_loss=0.06696, over 19269.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.206, pruned_loss=0.08884, over 1474236.32 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 16.0
+2024-08-14 02:08:46,097 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=106010.66666666667, ans=0.025
+2024-08-14 02:09:08,038 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.38 vs. limit=15.0
+2024-08-14 02:09:32,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=106117.33333333333, ans=0.125
+2024-08-14 02:09:37,399 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-9.pt
+2024-08-14 02:09:44,335 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:09:44,336 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:09:44,365 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:09:53,509 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 0, loss[loss=0.1795, simple_loss=0.1871, pruned_loss=0.07487, over 18682.00 frames. ], tot_loss[loss=0.1795, simple_loss=0.1871, pruned_loss=0.07487, over 18682.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:09:53,510 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:10:16,414 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 10, validation: loss=0.1833, simple_loss=0.191, pruned_loss=0.07653, over 1073944.00 frames.
+2024-08-14 02:10:16,415 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:10:54,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=106218.66666666667, ans=0.2
+2024-08-14 02:11:19,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-08-14 02:11:47,861 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 1.913e+02 2.021e+02 2.184e+02 3.494e+02, threshold=4.042e+02, percent-clipped=0.0
+2024-08-14 02:11:55,859 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 50, loss[loss=0.2065, simple_loss=0.2146, pruned_loss=0.08811, over 19012.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.204, pruned_loss=0.08796, over 829104.52 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:11:57,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=106432.0, ans=0.025
+2024-08-14 02:12:02,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=106432.0, ans=0.125
+2024-08-14 02:12:02,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=106432.0, ans=0.125
+2024-08-14 02:12:22,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=106485.33333333333, ans=0.05
+2024-08-14 02:12:22,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=106485.33333333333, ans=15.0
+2024-08-14 02:13:07,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=106592.0, ans=0.0
+2024-08-14 02:13:20,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106645.33333333333, ans=0.125
+2024-08-14 02:13:33,246 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 10, batch 100, loss[loss=0.1794, simple_loss=0.19, pruned_loss=0.0738, over 19226.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2022, pruned_loss=0.08657, over 1474931.95 frames. ], batch size: 144, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:14:07,177 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=106752.0, ans=0.05
+2024-08-14 02:14:11,322 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.95 vs. limit=6.0
+2024-08-14 02:14:16,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=106805.33333333333, ans=0.125
+2024-08-14 02:14:22,339 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.whiten.whitening_limit, batch_count=106805.33333333333, ans=12.0
+2024-08-14 02:14:26,602 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-10.pt
+2024-08-14 02:14:30,910 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:14:30,910 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:14:30,937 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:14:39,663 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 0, loss[loss=0.1989, simple_loss=0.2076, pruned_loss=0.08558, over 18505.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2076, pruned_loss=0.08558, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:14:39,664 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:15:02,461 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 11, validation: loss=0.1768, simple_loss=0.1869, pruned_loss=0.07357, over 1073944.00 frames.
+2024-08-14 02:15:02,461 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:15:33,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=106906.66666666667, ans=0.0
+2024-08-14 02:15:35,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=106906.66666666667, ans=0.0
+2024-08-14 02:15:35,905 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 1.865e+02 1.931e+02 2.118e+02 3.052e+02, threshold=3.863e+02, percent-clipped=0.0
+2024-08-14 02:15:58,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=106960.0, ans=0.09899494936611666
+2024-08-14 02:15:59,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-08-14 02:16:28,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=107066.66666666667, ans=0.2
+2024-08-14 02:16:30,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-08-14 02:16:32,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=107066.66666666667, ans=0.07
+2024-08-14 02:16:45,030 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 50, loss[loss=0.1831, simple_loss=0.1916, pruned_loss=0.07922, over 19023.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.1983, pruned_loss=0.08405, over 827570.26 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:16:52,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=107120.0, ans=0.125
+2024-08-14 02:17:13,652 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:17:21,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-08-14 02:18:19,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=107333.33333333333, ans=0.05
+2024-08-14 02:18:21,864 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:18:50,454 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 11, batch 100, loss[loss=0.1647, simple_loss=0.1749, pruned_loss=0.06974, over 19237.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.1961, pruned_loss=0.08256, over 1473115.37 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:19:24,168 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-14 02:19:36,311 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=107440.0, ans=0.0
+2024-08-14 02:19:36,870 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.754e+02 1.842e+02 1.998e+02 3.456e+02, threshold=3.684e+02, percent-clipped=0.0
+2024-08-14 02:19:38,568 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.23 vs. limit=10.0
+2024-08-14 02:19:59,996 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-11.pt
+2024-08-14 02:20:04,361 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:20:04,361 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:20:04,388 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:20:13,224 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 0, loss[loss=0.185, simple_loss=0.1932, pruned_loss=0.08157, over 18585.00 frames. ], tot_loss[loss=0.185, simple_loss=0.1932, pruned_loss=0.08157, over 18585.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:20:13,225 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:20:15,420 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([6.3439, 5.7683, 5.6063, 6.2187], device='cuda:0')
+2024-08-14 02:20:42,008 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 12, validation: loss=0.1712, simple_loss=0.1836, pruned_loss=0.0713, over 1073944.00 frames.
+2024-08-14 02:20:42,009 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:21:18,481 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=107594.66666666667, ans=0.125
+2024-08-14 02:21:44,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=107594.66666666667, ans=0.125
+2024-08-14 02:21:57,660 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.83 vs. limit=15.0
+2024-08-14 02:22:20,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=107701.33333333333, ans=0.0
+2024-08-14 02:22:35,188 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107701.33333333333, ans=0.1
+2024-08-14 02:23:41,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-14 02:24:24,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=107808.0, ans=0.125
+2024-08-14 02:24:25,102 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 50, loss[loss=0.1659, simple_loss=0.1811, pruned_loss=0.06732, over 18986.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.1888, pruned_loss=0.07557, over 829307.75 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:24:36,012 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.13 vs. limit=15.0
+2024-08-14 02:25:14,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=107861.33333333333, ans=0.0
+2024-08-14 02:26:41,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=107914.66666666667, ans=0.5
+2024-08-14 02:27:09,000 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 1.754e+02 1.846e+02 2.049e+02 2.889e+02, threshold=3.691e+02, percent-clipped=0.0
+2024-08-14 02:27:37,282 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 12, batch 100, loss[loss=0.1443, simple_loss=0.1574, pruned_loss=0.05951, over 19194.00 frames. ], tot_loss[loss=0.178, simple_loss=0.1885, pruned_loss=0.07713, over 1473409.16 frames. ], batch size: 144, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:27:38,463 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=108074.66666666667, ans=0.125
+2024-08-14 02:28:06,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=108128.0, ans=0.025
+2024-08-14 02:28:06,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108128.0, ans=0.1
+2024-08-14 02:28:45,320 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.38 vs. limit=6.0
+2024-08-14 02:28:48,209 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-12.pt
+2024-08-14 02:28:52,634 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:28:52,635 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:28:52,664 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:29:01,626 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 0, loss[loss=0.2097, simple_loss=0.2152, pruned_loss=0.09757, over 18643.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2152, pruned_loss=0.09757, over 18643.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:29:01,627 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:29:09,015 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.4.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.9204, 6.1498, 5.9117, 6.0544], device='cuda:0')
+2024-08-14 02:29:24,536 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 13, validation: loss=0.1662, simple_loss=0.1808, pruned_loss=0.06949, over 1073944.00 frames.
+2024-08-14 02:29:24,536 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:29:30,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=108229.33333333333, ans=0.125
+2024-08-14 02:29:38,960 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=17.16 vs. limit=15.0
+2024-08-14 02:29:51,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=108282.66666666667, ans=0.125
+2024-08-14 02:30:29,819 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.29 vs. limit=22.5
+2024-08-14 02:30:48,010 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108442.66666666667, ans=0.125
+2024-08-14 02:31:06,615 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 50, loss[loss=0.1534, simple_loss=0.1716, pruned_loss=0.06137, over 19011.00 frames. ], tot_loss[loss=0.1763, simple_loss=0.1882, pruned_loss=0.07679, over 829773.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:31:10,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=108496.0, ans=0.07
+2024-08-14 02:31:16,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=108496.0, ans=0.07
+2024-08-14 02:31:20,512 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108496.0, ans=0.125
+2024-08-14 02:31:24,776 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=14.57 vs. limit=15.0
+2024-08-14 02:31:27,170 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.723e+02 1.826e+02 1.962e+02 2.693e+02, threshold=3.652e+02, percent-clipped=0.0
+2024-08-14 02:31:48,565 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:32:08,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108656.0, ans=0.0
+2024-08-14 02:32:09,909 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108656.0, ans=0.1
+2024-08-14 02:32:09,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-08-14 02:32:17,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108656.0, ans=0.1
+2024-08-14 02:32:25,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-08-14 02:32:30,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=108709.33333333333, ans=0.125
+2024-08-14 02:32:45,027 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 13, batch 100, loss[loss=0.1293, simple_loss=0.146, pruned_loss=0.05157, over 19225.00 frames. ], tot_loss[loss=0.175, simple_loss=0.1875, pruned_loss=0.07626, over 1474982.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:32:46,440 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.55 vs. limit=10.0
+2024-08-14 02:32:55,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=108762.66666666667, ans=0.2
+2024-08-14 02:32:57,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=108762.66666666667, ans=0.05
+2024-08-14 02:33:15,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=108816.0, ans=0.0
+2024-08-14 02:33:17,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=108816.0, ans=0.0
+2024-08-14 02:33:17,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.06 vs. limit=22.5
+2024-08-14 02:33:24,826 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=108869.33333333333, ans=0.2
+2024-08-14 02:33:38,653 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-13.pt
+2024-08-14 02:33:43,014 INFO [dysarthria_finetune.py:1435] (0/4) (856358912, 34072559616)
+2024-08-14 02:33:43,014 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:33:43,042 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:33:51,753 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 0, loss[loss=0.186, simple_loss=0.2056, pruned_loss=0.07781, over 18695.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2056, pruned_loss=0.07781, over 18695.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:33:51,754 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:34:15,235 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 14, validation: loss=0.1615, simple_loss=0.1782, pruned_loss=0.06778, over 1073944.00 frames.
+2024-08-14 02:34:15,235 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:34:28,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=108917.33333333333, ans=0.125
+2024-08-14 02:35:16,114 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.678e+02 1.779e+02 1.987e+02 2.879e+02, threshold=3.559e+02, percent-clipped=0.0
+2024-08-14 02:35:23,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109077.33333333333, ans=0.1
+2024-08-14 02:35:42,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=109130.66666666667, ans=0.0
+2024-08-14 02:35:48,100 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:35:52,353 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.28 vs. limit=6.0
+2024-08-14 02:35:52,703 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 50, loss[loss=0.1508, simple_loss=0.1741, pruned_loss=0.05901, over 18964.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.1828, pruned_loss=0.07314, over 828263.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:35:53,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=109184.0, ans=0.0
+2024-08-14 02:36:07,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=109184.0, ans=0.125
+2024-08-14 02:36:26,945 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109237.33333333333, ans=0.1
+2024-08-14 02:37:28,723 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 14, batch 100, loss[loss=0.1549, simple_loss=0.1676, pruned_loss=0.06858, over 19207.00 frames. ], tot_loss[loss=0.167, simple_loss=0.1824, pruned_loss=0.07218, over 1474261.28 frames. ], batch size: 144, lr: 9.96e-05, grad_scale: 16.0
+2024-08-14 02:37:43,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=109450.66666666667, ans=0.025
+2024-08-14 02:37:50,971 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109504.0, ans=0.1
+2024-08-14 02:38:02,446 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.61 vs. limit=15.0
+2024-08-14 02:38:03,999 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=109504.0, ans=0.125
+2024-08-14 02:38:21,631 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-14.pt
+2024-08-14 02:38:26,154 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:38:26,154 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:38:26,182 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:38:34,953 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 0, loss[loss=0.2081, simple_loss=0.2223, pruned_loss=0.09439, over 18509.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2223, pruned_loss=0.09439, over 18509.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:38:34,954 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:38:57,684 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 15, validation: loss=0.1571, simple_loss=0.176, pruned_loss=0.06629, over 1073944.00 frames.
+2024-08-14 02:38:57,684 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:38:59,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-08-14 02:39:03,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-08-14 02:39:07,270 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 1.642e+02 1.752e+02 1.914e+02 2.610e+02, threshold=3.503e+02, percent-clipped=0.0
+2024-08-14 02:39:16,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109605.33333333333, ans=0.1
+2024-08-14 02:39:39,007 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=9.54 vs. limit=12.0
+2024-08-14 02:40:04,604 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.78 vs. limit=22.5
+2024-08-14 02:40:57,612 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 50, loss[loss=0.1567, simple_loss=0.1791, pruned_loss=0.06481, over 19011.00 frames. ], tot_loss[loss=0.164, simple_loss=0.18, pruned_loss=0.07181, over 827942.50 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:42:51,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=110032.0, ans=0.125
+2024-08-14 02:43:05,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=110085.33333333333, ans=0.0
+2024-08-14 02:43:17,713 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 15, batch 100, loss[loss=0.1361, simple_loss=0.1549, pruned_loss=0.05733, over 19251.00 frames. ], tot_loss[loss=0.1623, simple_loss=0.179, pruned_loss=0.07093, over 1473903.80 frames. ], batch size: 144, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:43:19,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=110138.66666666667, ans=0.2
+2024-08-14 02:43:23,631 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.639e+02 1.741e+02 1.916e+02 2.571e+02, threshold=3.482e+02, percent-clipped=0.0
+2024-08-14 02:44:29,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=110245.33333333333, ans=0.125
+2024-08-14 02:44:39,964 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-15.pt
+2024-08-14 02:44:45,329 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:44:45,329 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:44:45,355 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:44:54,086 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 0, loss[loss=0.1827, simple_loss=0.2028, pruned_loss=0.08034, over 18729.00 frames. ], tot_loss[loss=0.1827, simple_loss=0.2028, pruned_loss=0.08034, over 18729.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:44:54,087 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:45:16,882 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 16, validation: loss=0.1529, simple_loss=0.1739, pruned_loss=0.06493, over 1073944.00 frames.
+2024-08-14 02:45:16,883 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:45:32,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110293.33333333333, ans=0.1
+2024-08-14 02:46:13,518 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=10.67 vs. limit=12.0
+2024-08-14 02:46:34,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=110506.66666666667, ans=0.025
+2024-08-14 02:47:33,730 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 50, loss[loss=0.1377, simple_loss=0.1638, pruned_loss=0.05546, over 18988.00 frames. ], tot_loss[loss=0.1596, simple_loss=0.1787, pruned_loss=0.06963, over 828175.61 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:48:14,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=110666.66666666667, ans=0.0
+2024-08-14 02:48:18,596 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.72 vs. limit=15.0
+2024-08-14 02:48:22,829 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.614e+02 1.779e+02 1.933e+02 2.621e+02, threshold=3.558e+02, percent-clipped=0.0
+2024-08-14 02:49:22,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-08-14 02:49:33,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=110826.66666666667, ans=0.125
+2024-08-14 02:49:34,517 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 16, batch 100, loss[loss=0.1376, simple_loss=0.1575, pruned_loss=0.05883, over 19270.00 frames. ], tot_loss[loss=0.1576, simple_loss=0.1772, pruned_loss=0.0688, over 1473314.28 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 16.0
+2024-08-14 02:50:07,771 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.50 vs. limit=10.0
+2024-08-14 02:50:17,105 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=9.14 vs. limit=12.0
+2024-08-14 02:50:28,477 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.15 vs. limit=6.0
+2024-08-14 02:50:28,874 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-16.pt
+2024-08-14 02:50:39,327 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 02:50:39,327 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:50:39,354 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:50:47,961 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 0, loss[loss=0.1732, simple_loss=0.1852, pruned_loss=0.08058, over 18739.00 frames. ], tot_loss[loss=0.1732, simple_loss=0.1852, pruned_loss=0.08058, over 18739.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:50:47,962 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:50:56,791 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.0.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.2244, 4.9887, 5.1861, 5.1181], device='cuda:0')
+2024-08-14 02:51:11,069 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 17, validation: loss=0.1498, simple_loss=0.1721, pruned_loss=0.06377, over 1073944.00 frames.
+2024-08-14 02:51:11,070 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:51:31,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111034.66666666667, ans=0.125
+2024-08-14 02:51:40,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=111034.66666666667, ans=0.2
+2024-08-14 02:51:42,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=111034.66666666667, ans=0.2
+2024-08-14 02:51:42,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-08-14 02:52:59,544 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.599e+02 1.701e+02 1.889e+02 2.501e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-14 02:53:00,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=111194.66666666667, ans=0.2
+2024-08-14 02:53:07,421 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 50, loss[loss=0.1463, simple_loss=0.177, pruned_loss=0.0578, over 19028.00 frames. ], tot_loss[loss=0.1566, simple_loss=0.1754, pruned_loss=0.06896, over 827378.67 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:53:37,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=111301.33333333333, ans=0.0
+2024-08-14 02:53:38,292 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=11.34 vs. limit=15.0
+2024-08-14 02:53:49,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=111354.66666666667, ans=0.125
+2024-08-14 02:53:49,616 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=6.33 vs. limit=12.0
+2024-08-14 02:54:06,548 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:54:14,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=111408.0, ans=0.04949747468305833
+2024-08-14 02:54:15,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=111408.0, ans=0.125
+2024-08-14 02:54:43,505 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.27 vs. limit=15.0
+2024-08-14 02:55:04,515 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 17, batch 100, loss[loss=0.1196, simple_loss=0.1429, pruned_loss=0.04812, over 19218.00 frames. ], tot_loss[loss=0.1569, simple_loss=0.1762, pruned_loss=0.06882, over 1473529.96 frames. ], batch size: 144, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:55:45,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=111621.33333333333, ans=0.125
+2024-08-14 02:56:14,222 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-17.pt
+2024-08-14 02:56:19,092 INFO [dysarthria_finetune.py:1435] (0/4) (856358912, 34072559616)
+2024-08-14 02:56:19,092 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 02:56:19,118 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 02:56:27,749 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 0, loss[loss=0.1748, simple_loss=0.1932, pruned_loss=0.07825, over 18538.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.1932, pruned_loss=0.07825, over 18538.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 02:56:27,750 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 02:56:58,388 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 18, validation: loss=0.1479, simple_loss=0.1705, pruned_loss=0.06271, over 1073944.00 frames.
+2024-08-14 02:56:58,389 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 02:57:05,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111669.33333333333, ans=0.07
+2024-08-14 02:57:44,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=111669.33333333333, ans=0.05
+2024-08-14 02:58:03,299 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.609e+02 1.680e+02 1.858e+02 2.812e+02, threshold=3.359e+02, percent-clipped=0.0
+2024-08-14 02:58:35,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=111776.0, ans=0.125
+2024-08-14 02:59:54,697 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 50, loss[loss=0.1382, simple_loss=0.1639, pruned_loss=0.0563, over 18998.00 frames. ], tot_loss[loss=0.1533, simple_loss=0.1739, pruned_loss=0.06641, over 828205.61 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:01:13,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=111989.33333333333, ans=0.125
+2024-08-14 03:01:57,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=112042.66666666667, ans=10.0
+2024-08-14 03:03:17,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=112149.33333333333, ans=0.125
+2024-08-14 03:03:56,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112149.33333333333, ans=0.1
+2024-08-14 03:04:01,394 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.10 vs. limit=6.0
+2024-08-14 03:04:53,085 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 18, batch 100, loss[loss=0.1273, simple_loss=0.1522, pruned_loss=0.05118, over 19294.00 frames. ], tot_loss[loss=0.1519, simple_loss=0.1732, pruned_loss=0.06535, over 1473690.24 frames. ], batch size: 144, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:05:52,848 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.561e+02 1.643e+02 1.812e+02 2.261e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-14 03:06:41,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=112256.0, ans=0.125
+2024-08-14 03:07:01,005 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.95 vs. limit=6.0
+2024-08-14 03:07:12,805 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-18.pt
+2024-08-14 03:07:17,223 INFO [dysarthria_finetune.py:1435] (0/4) (856358912, 34072559616)
+2024-08-14 03:07:17,223 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 03:07:17,251 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 03:07:26,410 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 0, loss[loss=0.168, simple_loss=0.1928, pruned_loss=0.07154, over 18598.00 frames. ], tot_loss[loss=0.168, simple_loss=0.1928, pruned_loss=0.07154, over 18598.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:07:26,411 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 03:07:58,726 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 19, validation: loss=0.1464, simple_loss=0.169, pruned_loss=0.06188, over 1073944.00 frames.
+2024-08-14 03:07:58,727 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 03:08:43,327 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112405.33333333333, ans=0.125
+2024-08-14 03:09:18,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112458.66666666667, ans=0.0
+2024-08-14 03:10:15,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=112512.0, ans=0.0
+2024-08-14 03:10:39,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-08-14 03:10:40,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=112565.33333333333, ans=0.2
+2024-08-14 03:10:40,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=112565.33333333333, ans=0.025
+2024-08-14 03:10:57,788 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 50, loss[loss=0.1412, simple_loss=0.1633, pruned_loss=0.05958, over 19038.00 frames. ], tot_loss[loss=0.1476, simple_loss=0.169, pruned_loss=0.06306, over 827203.46 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:11:00,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112618.66666666667, ans=0.125
+2024-08-14 03:11:02,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=112618.66666666667, ans=0.125
+2024-08-14 03:11:55,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:11:57,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:12:19,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:12:23,987 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.60 vs. limit=15.0
+2024-08-14 03:12:35,221 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.78 vs. limit=22.5
+2024-08-14 03:12:51,638 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=112778.66666666667, ans=0.2
+2024-08-14 03:13:01,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=112778.66666666667, ans=0.2
+2024-08-14 03:13:05,924 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=112778.66666666667, ans=0.0
+2024-08-14 03:13:37,507 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.593e+02 1.694e+02 1.909e+02 3.031e+02, threshold=3.389e+02, percent-clipped=0.0
+2024-08-14 03:13:45,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112832.0, ans=0.1
+2024-08-14 03:14:02,406 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 19, batch 100, loss[loss=0.1316, simple_loss=0.1542, pruned_loss=0.05448, over 19274.00 frames. ], tot_loss[loss=0.1482, simple_loss=0.169, pruned_loss=0.06375, over 1472434.33 frames. ], batch size: 144, lr: 9.92e-05, grad_scale: 16.0
+2024-08-14 03:14:05,521 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 03:15:24,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=112938.66666666667, ans=0.125
+2024-08-14 03:15:42,704 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.01 vs. limit=15.0
+2024-08-14 03:15:54,673 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-19.pt
+2024-08-14 03:16:00,521 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 03:16:00,522 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 03:16:00,550 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 03:16:41,825 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 0, loss[loss=0.1428, simple_loss=0.1684, pruned_loss=0.05857, over 18599.00 frames. ], tot_loss[loss=0.1428, simple_loss=0.1684, pruned_loss=0.05857, over 18599.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:16:41,826 INFO [dysarthria_finetune.py:1165] (0/4) Computing validation loss on speech
+2024-08-14 03:17:15,216 INFO [dysarthria_finetune.py:1174] (0/4) Validation on speech: Epoch 20, validation: loss=0.1449, simple_loss=0.1677, pruned_loss=0.0611, over 1073944.00 frames.
+2024-08-14 03:17:15,217 INFO [dysarthria_finetune.py:1177] (0/4) Maximum memory allocated so far is 26678MB
+2024-08-14 03:17:30,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=113040.0, ans=0.125
+2024-08-14 03:17:42,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=113040.0, ans=0.125
+2024-08-14 03:17:50,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=113093.33333333333, ans=0.2
+2024-08-14 03:19:34,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=113200.0, ans=0.09899494936611666
+2024-08-14 03:20:51,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=113253.33333333333, ans=0.2
+2024-08-14 03:21:08,396 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 50, loss[loss=0.1408, simple_loss=0.1682, pruned_loss=0.05677, over 18985.00 frames. ], tot_loss[loss=0.1489, simple_loss=0.1695, pruned_loss=0.0642, over 828130.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:21:49,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=113306.66666666667, ans=0.0
+2024-08-14 03:21:59,150 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=9.27 vs. limit=12.0
+2024-08-14 03:22:10,070 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.567e+02 1.664e+02 1.868e+02 2.522e+02, threshold=3.327e+02, percent-clipped=0.0
+2024-08-14 03:23:21,307 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.31 vs. limit=6.0
+2024-08-14 03:24:28,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=113520.0, ans=0.07
+2024-08-14 03:24:58,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=113573.33333333333, ans=0.125
+2024-08-14 03:24:59,420 INFO [dysarthria_finetune.py:1141] (0/4) Epoch 20, batch 100, loss[loss=0.1312, simple_loss=0.1537, pruned_loss=0.05428, over 19321.00 frames. ], tot_loss[loss=0.1479, simple_loss=0.1688, pruned_loss=0.06343, over 1472900.97 frames. ], batch size: 144, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:25:17,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=113573.33333333333, ans=0.125
+2024-08-14 03:25:17,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=113573.33333333333, ans=0.125
+2024-08-14 03:25:23,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-08-14 03:26:28,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=113626.66666666667, ans=0.125
+2024-08-14 03:26:45,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=113626.66666666667, ans=0.125
+2024-08-14 03:27:20,911 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune/epoch-20.pt
+2024-08-14 03:27:42,250 INFO [dysarthria_finetune.py:1435] (0/4) (858456064, 34072559616)
+2024-08-14 03:27:42,250 INFO [dysarthria_finetune.py:1436] (0/4) Empty cache: before and after
+2024-08-14 03:27:42,277 INFO [dysarthria_finetune.py:1440] (0/4) (29306322944, 34072559616)
+2024-08-14 03:27:42,277 INFO [dysarthria_finetune.py:1442] (0/4) Done!
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-1 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-1
new file mode 100644
index 0000000000000000000000000000000000000000..ff59f956ef2c6a32fb32c90b9f3ad09028338d81
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-1
@@ -0,0 +1,533 @@
+2024-08-13 23:24:47,920 INFO [dysarthria_finetune.py:1212] (1/4) Training started
+2024-08-13 23:24:47,960 INFO [dysarthria_finetune.py:1214] (1/4) (33735507968, 34072559616)
+2024-08-13 23:24:47,960 INFO [dysarthria_finetune.py:1215] (1/4) Empty cache: before and after
+2024-08-13 23:24:48,946 INFO [dysarthria_finetune.py:1219] (1/4) (32783400960, 34072559616)
+2024-08-13 23:24:48,947 INFO [dysarthria_finetune.py:1229] (1/4) Device: cuda:1
+2024-08-13 23:24:48,990 INFO [dysarthria_finetune.py:1241] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 23:24:48,991 INFO [dysarthria_finetune.py:1243] (1/4) About to create model
+2024-08-13 23:24:50,010 INFO [dysarthria_finetune.py:1247] (1/4) Number of model parameters: 65549011
+2024-08-13 23:24:50,011 INFO [dysarthria_finetune.py:769] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 23:24:59,882 INFO [dysarthria_finetune.py:1275] (1/4) Using DDP
+2024-08-13 23:25:17,971 INFO [dysarthria_asr_datamodule.py:494] (1/4) About to get train cuts
+2024-08-13 23:25:18,291 INFO [dysarthria_finetune.py:1319] (1/4) CutSet(len=62255) [underlying data type: ]
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:239] (1/4) Disable MUSAN
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:257] (1/4) Enable SpecAugment
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:258] (1/4) Time warp factor: 80
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:268] (1/4) Num frame mask: 10
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:281] (1/4) About to create train dataset
+2024-08-13 23:25:19,281 INFO [dysarthria_asr_datamodule.py:308] (1/4) Using DynamicBucketingSampler.
+2024-08-13 23:25:20,229 INFO [dysarthria_asr_datamodule.py:325] (1/4) About to create train dataloader
+2024-08-13 23:25:20,234 INFO [dysarthria_asr_datamodule.py:500] (1/4) About to get dev cuts
+2024-08-13 23:25:24,698 INFO [dysarthria_asr_datamodule.py:356] (1/4) About to create dev dataset
+2024-08-13 23:25:28,026 INFO [dysarthria_asr_datamodule.py:373] (1/4) About to create dev dataloader
+2024-08-13 23:25:28,026 INFO [dysarthria_finetune.py:1490] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 23:27:16,793 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=512, metric=19.93 vs. limit=7.5
+2024-08-13 23:27:17,109 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=192, metric=16.77 vs. limit=7.5
+2024-08-13 23:27:17,919 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 23:27:19,736 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 23:32:34,802 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 23:32:36,803 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 23:35:38,399 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 23:35:41,667 INFO [dysarthria_finetune.py:1518] (1/4) Maximum memory allocated so far is 11776MB
+2024-08-13 23:36:58,726 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 0, loss[loss=0.343, simple_loss=0.3241, pruned_loss=0.1887, over 18549.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3241, pruned_loss=0.1887, over 18549.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 23:36:58,726 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-13 23:49:47,041 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 23:49:47,367 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13098MB
+2024-08-13 23:51:42,535 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100000.0, ans=0.125
+2024-08-13 23:53:30,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=100000.0, ans=0.125
+2024-08-14 00:00:01,105 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.62 vs. limit=15.0
+2024-08-14 00:15:49,520 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.298e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-14 00:20:36,358 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.79 vs. limit=15.0
+2024-08-14 00:22:16,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.96 vs. limit=10.0
+2024-08-14 00:23:15,347 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.450e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-14 00:27:43,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=100160.0, ans=0.0
+2024-08-14 00:29:53,824 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.241e+02 7.298e+02 9.450e+02 1.050e+03 1.319e+03, threshold=3.780e+03, percent-clipped=0.0
+2024-08-14 00:31:20,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-14 00:37:01,644 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 50, loss[loss=0.3541, simple_loss=0.3336, pruned_loss=0.2013, over 19042.00 frames. ], tot_loss[loss=0.3545, simple_loss=0.3342, pruned_loss=0.2019, over 827432.33 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-14 00:42:06,328 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.17 vs. limit=15.0
+2024-08-14 00:42:13,540 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=100266.66666666667, ans=0.2
+2024-08-14 00:43:30,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=100320.0, ans=0.0
+2024-08-14 00:47:09,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=100320.0, ans=0.0
+2024-08-14 00:57:33,325 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=26.97 vs. limit=15.0
+2024-08-14 00:58:13,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100480.0, ans=0.0
+2024-08-14 01:01:58,600 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.501e+02 5.963e+02 7.298e+02 8.800e+02 1.319e+03, threshold=1.460e+03, percent-clipped=0.0
+2024-08-14 01:01:58,635 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 1, batch 100, loss[loss=0.3387, simple_loss=0.3196, pruned_loss=0.1871, over 19093.00 frames. ], tot_loss[loss=0.341, simple_loss=0.3218, pruned_loss=0.1899, over 1470684.91 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-14 01:11:48,008 INFO [dysarthria_finetune.py:1435] (1/4) (13820952576, 34072559616)
+2024-08-14 01:11:48,009 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:11:48,058 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:12:35,408 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 0, loss[loss=0.2894, simple_loss=0.2745, pruned_loss=0.1501, over 18746.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.2745, pruned_loss=0.1501, over 18746.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-14 01:12:35,408 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:16:55,986 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 2, validation: loss=0.2907, simple_loss=0.276, pruned_loss=0.149, over 1073944.00 frames.
+2024-08-14 01:16:55,986 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:19:23,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=100736.0, ans=0.5
+2024-08-14 01:20:02,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=100736.0, ans=0.125
+2024-08-14 01:20:05,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=100736.0, ans=0.2
+2024-08-14 01:20:57,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-14 01:20:57,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=100789.33333333333, ans=0.125
+2024-08-14 01:21:13,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=100789.33333333333, ans=0.2
+2024-08-14 01:21:57,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.74 vs. limit=15.0
+2024-08-14 01:21:59,516 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100842.66666666667, ans=0.125
+2024-08-14 01:22:26,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=100896.0, ans=0.125
+2024-08-14 01:22:31,958 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:24:53,691 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.49 vs. limit=22.5
+2024-08-14 01:24:54,903 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 50, loss[loss=0.345, simple_loss=0.325, pruned_loss=0.1911, over 19071.00 frames. ], tot_loss[loss=0.3192, simple_loss=0.3021, pruned_loss=0.17, over 827854.65 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-08-14 01:25:44,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=100949.33333333333, ans=0.125
+2024-08-14 01:26:11,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=101002.66666666667, ans=0.09899494936611666
+2024-08-14 01:27:07,286 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=101002.66666666667, ans=0.2
+2024-08-14 01:27:29,018 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.249e+02 4.347e+02 4.852e+02 5.543e+02 7.043e+02, threshold=9.703e+02, percent-clipped=0.0
+2024-08-14 01:27:52,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=101056.0, ans=0.0
+2024-08-14 01:28:04,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-14 01:28:31,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-14 01:28:39,701 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:28:52,699 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.08 vs. limit=22.5
+2024-08-14 01:28:58,129 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.47 vs. limit=15.0
+2024-08-14 01:29:06,633 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 2, batch 100, loss[loss=0.3062, simple_loss=0.2903, pruned_loss=0.1601, over 19090.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.2943, pruned_loss=0.1633, over 1472213.55 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-08-14 01:29:10,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101216.0, ans=0.0
+2024-08-14 01:29:30,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=101216.0, ans=0.125
+2024-08-14 01:30:16,030 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.39 vs. limit=10.0
+2024-08-14 01:30:20,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=101322.66666666667, ans=0.125
+2024-08-14 01:30:25,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=101322.66666666667, ans=0.2
+2024-08-14 01:30:33,203 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-08-14 01:30:42,019 INFO [dysarthria_finetune.py:1435] (1/4) (665518080, 34072559616)
+2024-08-14 01:30:42,019 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:30:42,089 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:30:49,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-08-14 01:30:55,556 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 0, loss[loss=0.3208, simple_loss=0.3029, pruned_loss=0.1732, over 18511.00 frames. ], tot_loss[loss=0.3208, simple_loss=0.3029, pruned_loss=0.1732, over 18511.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-08-14 01:30:55,556 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:31:18,577 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 3, validation: loss=0.2682, simple_loss=0.2564, pruned_loss=0.1309, over 1073944.00 frames.
+2024-08-14 01:31:18,578 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:31:23,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-08-14 01:32:09,888 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=101477.33333333333, ans=0.025
+2024-08-14 01:32:19,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=101477.33333333333, ans=0.04949747468305833
+2024-08-14 01:32:30,691 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.07 vs. limit=15.0
+2024-08-14 01:32:58,609 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.574e+02 3.350e+02 3.692e+02 4.154e+02 5.648e+02, threshold=7.384e+02, percent-clipped=0.0
+2024-08-14 01:33:12,584 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.57 vs. limit=10.0
+2024-08-14 01:33:14,921 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 50, loss[loss=0.2928, simple_loss=0.2799, pruned_loss=0.1447, over 19005.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.2825, pruned_loss=0.1522, over 828905.42 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-08-14 01:33:23,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101637.33333333333, ans=0.1
+2024-08-14 01:34:00,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=101690.66666666667, ans=0.125
+2024-08-14 01:34:25,521 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=8.78 vs. limit=12.0
+2024-08-14 01:34:38,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=101797.33333333333, ans=0.5
+2024-08-14 01:34:40,556 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=25.39 vs. limit=15.0
+2024-08-14 01:34:46,257 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.06 vs. limit=15.0
+2024-08-14 01:35:01,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=101850.66666666667, ans=0.0
+2024-08-14 01:35:08,704 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 3, batch 100, loss[loss=0.26, simple_loss=0.25, pruned_loss=0.1238, over 19133.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.2733, pruned_loss=0.145, over 1474266.40 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-08-14 01:35:10,556 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.02 vs. limit=15.0
+2024-08-14 01:36:03,995 INFO [dysarthria_finetune.py:1435] (1/4) (803930112, 34072559616)
+2024-08-14 01:36:04,626 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:36:04,696 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:36:20,153 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 0, loss[loss=0.2541, simple_loss=0.2436, pruned_loss=0.1242, over 18466.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.2436, pruned_loss=0.1242, over 18466.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-08-14 01:36:20,154 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:36:43,054 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 4, validation: loss=0.2499, simple_loss=0.241, pruned_loss=0.1173, over 1073944.00 frames.
+2024-08-14 01:36:43,055 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:37:14,070 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 2.841e+02 3.076e+02 3.396e+02 5.357e+02, threshold=6.153e+02, percent-clipped=0.0
+2024-08-14 01:37:19,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102106.66666666667, ans=0.125
+2024-08-14 01:37:23,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-08-14 01:37:33,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=102160.0, ans=0.0
+2024-08-14 01:37:36,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=102160.0, ans=0.5
+2024-08-14 01:37:39,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=102160.0, ans=0.2
+2024-08-14 01:37:57,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.24 vs. limit=6.0
+2024-08-14 01:38:18,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=102266.66666666667, ans=0.125
+2024-08-14 01:38:23,454 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 50, loss[loss=0.2644, simple_loss=0.2537, pruned_loss=0.1293, over 18961.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.2613, pruned_loss=0.1331, over 827373.05 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-08-14 01:38:36,626 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=21.77 vs. limit=15.0
+2024-08-14 01:38:40,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=102320.0, ans=0.125
+2024-08-14 01:39:39,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=102480.0, ans=0.0
+2024-08-14 01:39:54,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=102533.33333333333, ans=0.125
+2024-08-14 01:39:58,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=102533.33333333333, ans=10.0
+2024-08-14 01:39:58,559 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.12 vs. limit=15.0
+2024-08-14 01:40:00,899 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 4, batch 100, loss[loss=0.268, simple_loss=0.258, pruned_loss=0.1298, over 19038.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.2566, pruned_loss=0.1309, over 1472261.06 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-08-14 01:40:18,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=102586.66666666667, ans=0.05
+2024-08-14 01:40:20,699 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.02 vs. limit=6.0
+2024-08-14 01:40:30,558 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.524e+02 2.719e+02 2.975e+02 4.617e+02, threshold=5.438e+02, percent-clipped=0.0
+2024-08-14 01:40:37,551 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=102640.0, ans=0.125
+2024-08-14 01:40:49,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=8.70 vs. limit=12.0
+2024-08-14 01:40:51,041 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=20.30 vs. limit=15.0
+2024-08-14 01:40:55,060 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-14 01:40:55,789 INFO [dysarthria_finetune.py:1435] (1/4) (1640693760, 34072559616)
+2024-08-14 01:40:55,789 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:40:55,862 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:41:11,086 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 0, loss[loss=0.2389, simple_loss=0.2328, pruned_loss=0.1088, over 18670.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2328, pruned_loss=0.1088, over 18670.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:41:11,086 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:41:34,534 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 5, validation: loss=0.2343, simple_loss=0.2283, pruned_loss=0.1066, over 1073944.00 frames.
+2024-08-14 01:41:34,535 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:41:46,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=102741.33333333333, ans=0.0
+2024-08-14 01:42:12,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=102794.66666666667, ans=0.125
+2024-08-14 01:42:39,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=102848.0, ans=0.125
+2024-08-14 01:43:07,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=102901.33333333333, ans=0.2
+2024-08-14 01:43:29,161 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 50, loss[loss=0.248, simple_loss=0.2377, pruned_loss=0.1238, over 18968.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.2454, pruned_loss=0.1204, over 828630.89 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:44:16,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=103061.33333333333, ans=0.125
+2024-08-14 01:44:22,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-08-14 01:44:30,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=103114.66666666667, ans=0.025
+2024-08-14 01:44:59,445 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.063e+02 2.398e+02 2.550e+02 2.967e+02 4.732e+02, threshold=5.099e+02, percent-clipped=0.0
+2024-08-14 01:45:02,679 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:45:27,206 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 5, batch 100, loss[loss=0.2347, simple_loss=0.2292, pruned_loss=0.108, over 19157.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.2408, pruned_loss=0.1176, over 1473409.40 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:45:34,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=103274.66666666667, ans=0.1
+2024-08-14 01:46:18,160 INFO [dysarthria_finetune.py:1435] (1/4) (443219968, 34072559616)
+2024-08-14 01:46:18,161 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:46:18,212 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:46:32,125 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 0, loss[loss=0.2545, simple_loss=0.2475, pruned_loss=0.1201, over 18435.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.2475, pruned_loss=0.1201, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:46:32,126 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:46:55,697 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 6, validation: loss=0.2214, simple_loss=0.2182, pruned_loss=0.09842, over 1073944.00 frames.
+2024-08-14 01:46:55,697 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:47:15,546 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.80 vs. limit=15.0
+2024-08-14 01:48:02,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=103530.66666666667, ans=0.2
+2024-08-14 01:48:27,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=103584.0, ans=0.0
+2024-08-14 01:48:55,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103637.33333333333, ans=0.125
+2024-08-14 01:49:09,903 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 50, loss[loss=0.2199, simple_loss=0.2163, pruned_loss=0.09993, over 19041.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2333, pruned_loss=0.1112, over 827399.35 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:49:30,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=103690.66666666667, ans=0.125
+2024-08-14 01:49:32,949 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.293e+02 2.374e+02 2.625e+02 4.193e+02, threshold=4.747e+02, percent-clipped=0.0
+2024-08-14 01:49:34,926 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:49:56,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=103744.0, ans=0.2
+2024-08-14 01:50:03,632 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-08-14 01:50:08,109 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=16.67 vs. limit=15.0
+2024-08-14 01:50:10,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.57 vs. limit=15.0
+2024-08-14 01:50:21,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=103797.33333333333, ans=0.0
+2024-08-14 01:50:21,517 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.78 vs. limit=15.0
+2024-08-14 01:51:17,784 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 6, batch 100, loss[loss=0.2029, simple_loss=0.2011, pruned_loss=0.09017, over 19066.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2289, pruned_loss=0.108, over 1471849.14 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:51:29,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=103957.33333333333, ans=0.0
+2024-08-14 01:51:54,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104010.66666666667, ans=0.125
+2024-08-14 01:51:57,360 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=104010.66666666667, ans=0.125
+2024-08-14 01:52:24,216 INFO [dysarthria_finetune.py:1435] (1/4) (1141571584, 34072559616)
+2024-08-14 01:52:24,216 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:52:24,300 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:52:37,503 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 0, loss[loss=0.2495, simple_loss=0.2397, pruned_loss=0.1253, over 18532.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.2397, pruned_loss=0.1253, over 18532.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:52:37,503 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:53:01,318 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 7, validation: loss=0.2103, simple_loss=0.2098, pruned_loss=0.0916, over 1073944.00 frames.
+2024-08-14 01:53:01,319 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:53:13,298 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.12 vs. limit=15.0
+2024-08-14 01:53:43,057 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.93 vs. limit=15.0
+2024-08-14 01:54:01,265 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.137e+02 2.271e+02 2.445e+02 3.999e+02, threshold=4.542e+02, percent-clipped=0.0
+2024-08-14 01:54:06,548 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=15.98 vs. limit=15.0
+2024-08-14 01:54:40,899 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 50, loss[loss=0.2189, simple_loss=0.2164, pruned_loss=0.09995, over 19096.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2229, pruned_loss=0.1029, over 827950.42 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:55:10,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=104426.66666666667, ans=0.125
+2024-08-14 01:55:16,524 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:56:18,959 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 7, batch 100, loss[loss=0.2146, simple_loss=0.2143, pruned_loss=0.09547, over 19105.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2197, pruned_loss=0.1002, over 1472811.51 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:56:22,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=104640.0, ans=0.07
+2024-08-14 01:56:30,027 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.34 vs. limit=15.0
+2024-08-14 01:56:31,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=104640.0, ans=0.2
+2024-08-14 01:56:39,626 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.82 vs. limit=15.0
+2024-08-14 01:56:45,476 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104693.33333333333, ans=0.125
+2024-08-14 01:56:56,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=104746.66666666667, ans=0.09899494936611666
+2024-08-14 01:57:06,706 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.07 vs. limit=15.0
+2024-08-14 01:57:10,846 INFO [dysarthria_finetune.py:1435] (1/4) (14915665920, 34072559616)
+2024-08-14 01:57:10,846 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 01:57:10,897 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 01:57:24,750 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 0, loss[loss=0.2019, simple_loss=0.2048, pruned_loss=0.08546, over 18679.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2048, pruned_loss=0.08546, over 18679.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:57:24,750 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 01:57:48,462 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 8, validation: loss=0.2004, simple_loss=0.2027, pruned_loss=0.08579, over 1073944.00 frames.
+2024-08-14 01:57:48,462 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 01:57:54,670 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.054e+02 2.212e+02 2.317e+02 3.796e+02, threshold=4.423e+02, percent-clipped=0.0
+2024-08-14 01:58:12,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=104842.66666666667, ans=15.0
+2024-08-14 01:58:18,077 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.13 vs. limit=15.0
+2024-08-14 01:58:46,932 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-08-14 01:58:50,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104949.33333333333, ans=0.1
+2024-08-14 01:59:00,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-08-14 01:59:59,177 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 50, loss[loss=0.1938, simple_loss=0.1951, pruned_loss=0.08525, over 19009.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2158, pruned_loss=0.09784, over 829068.39 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:00:16,459 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.69 vs. limit=15.0
+2024-08-14 02:01:01,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105216.0, ans=0.1
+2024-08-14 02:01:10,961 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.39 vs. limit=15.0
+2024-08-14 02:01:11,033 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.02 vs. limit=6.0
+2024-08-14 02:01:22,193 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=105269.33333333333, ans=0.1
+2024-08-14 02:01:36,489 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 8, batch 100, loss[loss=0.1994, simple_loss=0.2026, pruned_loss=0.08622, over 19109.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2135, pruned_loss=0.09549, over 1473116.98 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:01:42,321 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.040e+02 2.200e+02 2.368e+02 3.520e+02, threshold=4.401e+02, percent-clipped=0.0
+2024-08-14 02:01:51,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=105322.66666666667, ans=0.125
+2024-08-14 02:01:52,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=105322.66666666667, ans=0.04949747468305833
+2024-08-14 02:02:08,440 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.13 vs. limit=6.0
+2024-08-14 02:02:28,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105429.33333333333, ans=0.1
+2024-08-14 02:02:29,646 INFO [dysarthria_finetune.py:1435] (1/4) (2469068800, 34072559616)
+2024-08-14 02:02:29,647 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:02:29,705 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:02:42,909 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 0, loss[loss=0.2088, simple_loss=0.2112, pruned_loss=0.09219, over 18520.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2112, pruned_loss=0.09219, over 18520.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:02:42,910 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:03:19,141 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 9, validation: loss=0.1911, simple_loss=0.1962, pruned_loss=0.08053, over 1073944.00 frames.
+2024-08-14 02:03:19,142 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:03:45,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.00 vs. limit=22.5
+2024-08-14 02:04:40,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105637.33333333333, ans=0.125
+2024-08-14 02:05:05,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=105637.33333333333, ans=0.025
+2024-08-14 02:05:27,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=105690.66666666667, ans=0.125
+2024-08-14 02:05:32,389 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:06:19,732 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 50, loss[loss=0.2236, simple_loss=0.2257, pruned_loss=0.1003, over 19008.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2062, pruned_loss=0.09046, over 827563.28 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:06:30,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=105744.0, ans=0.09899494936611666
+2024-08-14 02:07:11,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=105850.66666666667, ans=0.0
+2024-08-14 02:07:13,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=105850.66666666667, ans=0.0
+2024-08-14 02:07:22,962 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.009e+02 2.115e+02 2.263e+02 3.410e+02, threshold=4.229e+02, percent-clipped=0.0
+2024-08-14 02:07:37,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=105904.0, ans=0.125
+2024-08-14 02:07:41,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=105904.0, ans=0.0
+2024-08-14 02:08:28,390 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=105957.33333333333, ans=0.125
+2024-08-14 02:08:32,107 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 9, batch 100, loss[loss=0.1983, simple_loss=0.2047, pruned_loss=0.08455, over 19113.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2053, pruned_loss=0.08865, over 1473118.75 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 16.0
+2024-08-14 02:08:55,919 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106064.0, ans=0.0
+2024-08-14 02:09:37,379 INFO [dysarthria_finetune.py:1435] (1/4) (789250048, 34072559616)
+2024-08-14 02:09:37,380 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:09:37,449 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:09:53,516 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 0, loss[loss=0.2102, simple_loss=0.2164, pruned_loss=0.09095, over 18522.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2164, pruned_loss=0.09095, over 18522.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:09:53,517 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:10:16,415 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 10, validation: loss=0.1833, simple_loss=0.191, pruned_loss=0.07653, over 1073944.00 frames.
+2024-08-14 02:10:16,416 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:10:24,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=106165.33333333333, ans=0.125
+2024-08-14 02:10:40,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=106218.66666666667, ans=0.125
+2024-08-14 02:10:41,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=106218.66666666667, ans=6.0
+2024-08-14 02:11:14,407 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.02 vs. limit=22.5
+2024-08-14 02:11:19,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106325.33333333333, ans=0.0
+2024-08-14 02:11:43,480 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.26 vs. limit=22.5
+2024-08-14 02:11:47,860 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 1.913e+02 2.021e+02 2.184e+02 3.494e+02, threshold=4.042e+02, percent-clipped=0.0
+2024-08-14 02:11:55,860 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 50, loss[loss=0.1712, simple_loss=0.1864, pruned_loss=0.06374, over 18973.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.202, pruned_loss=0.08725, over 826863.11 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:12:00,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=106432.0, ans=0.125
+2024-08-14 02:12:20,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106485.33333333333, ans=0.0
+2024-08-14 02:13:19,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106645.33333333333, ans=0.125
+2024-08-14 02:13:33,251 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 10, batch 100, loss[loss=0.1914, simple_loss=0.1991, pruned_loss=0.08243, over 19188.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.1986, pruned_loss=0.08399, over 1472464.39 frames. ], batch size: 134, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:13:44,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=106698.66666666667, ans=0.125
+2024-08-14 02:14:05,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=106752.0, ans=0.0
+2024-08-14 02:14:22,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=106805.33333333333, ans=0.125
+2024-08-14 02:14:26,590 INFO [dysarthria_finetune.py:1435] (1/4) (13749649408, 34072559616)
+2024-08-14 02:14:26,590 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:14:26,635 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:14:39,660 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 0, loss[loss=0.1895, simple_loss=0.1933, pruned_loss=0.08603, over 18704.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.1933, pruned_loss=0.08603, over 18704.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:14:39,660 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:15:02,461 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 11, validation: loss=0.1768, simple_loss=0.1869, pruned_loss=0.07357, over 1073944.00 frames.
+2024-08-14 02:15:02,462 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:15:35,905 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 1.865e+02 1.931e+02 2.118e+02 3.052e+02, threshold=3.863e+02, percent-clipped=0.0
+2024-08-14 02:15:37,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=106906.66666666667, ans=0.025
+2024-08-14 02:15:50,459 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=106960.0, ans=0.125
+2024-08-14 02:16:01,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-08-14 02:16:30,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=107066.66666666667, ans=22.5
+2024-08-14 02:16:34,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=107066.66666666667, ans=0.125
+2024-08-14 02:16:45,029 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 50, loss[loss=0.1769, simple_loss=0.1871, pruned_loss=0.07456, over 18947.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.1966, pruned_loss=0.0828, over 828704.78 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:16:50,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=107120.0, ans=0.2
+2024-08-14 02:17:23,484 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=107226.66666666667, ans=0.05
+2024-08-14 02:17:23,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=107226.66666666667, ans=0.04949747468305833
+2024-08-14 02:18:09,671 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.08 vs. limit=15.0
+2024-08-14 02:18:50,465 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 11, batch 100, loss[loss=0.1781, simple_loss=0.1863, pruned_loss=0.07788, over 19147.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.1942, pruned_loss=0.08063, over 1473582.76 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:19:08,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=107386.66666666667, ans=0.0
+2024-08-14 02:19:36,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.754e+02 1.842e+02 1.998e+02 3.456e+02, threshold=3.684e+02, percent-clipped=0.0
+2024-08-14 02:19:46,439 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.93 vs. limit=15.0
+2024-08-14 02:20:00,007 INFO [dysarthria_finetune.py:1435] (1/4) (847970304, 34072559616)
+2024-08-14 02:20:00,008 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:20:00,053 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:20:13,228 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 0, loss[loss=0.2045, simple_loss=0.2089, pruned_loss=0.09434, over 18735.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2089, pruned_loss=0.09434, over 18735.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:20:13,229 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:20:15,597 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([6.3578, 5.7049, 5.7704, 6.2608], device='cuda:1')
+2024-08-14 02:20:42,007 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 12, validation: loss=0.1712, simple_loss=0.1836, pruned_loss=0.0713, over 1073944.00 frames.
+2024-08-14 02:20:42,007 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:21:12,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-08-14 02:22:17,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=107648.0, ans=0.125
+2024-08-14 02:23:15,913 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=9.78 vs. limit=12.0
+2024-08-14 02:23:52,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=107754.66666666667, ans=0.95
+2024-08-14 02:24:25,106 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 50, loss[loss=0.1513, simple_loss=0.166, pruned_loss=0.06077, over 18974.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.1923, pruned_loss=0.07962, over 827168.58 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:25:10,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-08-14 02:25:14,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-08-14 02:26:24,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=107914.66666666667, ans=0.07
+2024-08-14 02:26:34,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=107914.66666666667, ans=0.0
+2024-08-14 02:26:44,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-14 02:27:01,951 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=9.24 vs. limit=12.0
+2024-08-14 02:27:09,002 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 1.754e+02 1.846e+02 2.049e+02 2.889e+02, threshold=3.691e+02, percent-clipped=0.0
+2024-08-14 02:27:10,584 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.27 vs. limit=15.0
+2024-08-14 02:27:37,286 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 12, batch 100, loss[loss=0.1658, simple_loss=0.1814, pruned_loss=0.06789, over 19114.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.1909, pruned_loss=0.07896, over 1473649.48 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:27:40,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=108074.66666666667, ans=0.05
+2024-08-14 02:28:06,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108128.0, ans=0.1
+2024-08-14 02:28:48,223 INFO [dysarthria_finetune.py:1435] (1/4) (994770944, 34072559616)
+2024-08-14 02:28:48,224 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:28:48,267 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:29:01,627 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 0, loss[loss=0.2181, simple_loss=0.2197, pruned_loss=0.1047, over 18361.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2197, pruned_loss=0.1047, over 18361.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:29:01,628 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:29:08,177 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([5.1633, 6.1968, 6.2010, 6.0783], device='cuda:1')
+2024-08-14 02:29:24,534 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 13, validation: loss=0.1662, simple_loss=0.1808, pruned_loss=0.06949, over 1073944.00 frames.
+2024-08-14 02:29:24,535 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:29:42,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=108229.33333333333, ans=0.0
+2024-08-14 02:30:05,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=108336.0, ans=0.0
+2024-08-14 02:30:21,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=108336.0, ans=0.0
+2024-08-14 02:30:27,938 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.23 vs. limit=22.5
+2024-08-14 02:30:56,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=8.06 vs. limit=10.0
+2024-08-14 02:31:06,613 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 50, loss[loss=0.1459, simple_loss=0.1716, pruned_loss=0.05217, over 19011.00 frames. ], tot_loss[loss=0.1729, simple_loss=0.1843, pruned_loss=0.0756, over 828396.70 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:31:10,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=108496.0, ans=0.2
+2024-08-14 02:31:12,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=108496.0, ans=6.0
+2024-08-14 02:31:15,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108496.0, ans=0.125
+2024-08-14 02:31:16,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=108496.0, ans=0.09899494936611666
+2024-08-14 02:31:27,172 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.723e+02 1.826e+02 1.962e+02 2.693e+02, threshold=3.652e+02, percent-clipped=0.0
+2024-08-14 02:31:56,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=108602.66666666667, ans=0.025
+2024-08-14 02:32:06,265 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=108656.0, ans=15.0
+2024-08-14 02:32:08,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108656.0, ans=0.125
+2024-08-14 02:32:13,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=108656.0, ans=0.0
+2024-08-14 02:32:45,046 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 13, batch 100, loss[loss=0.1582, simple_loss=0.173, pruned_loss=0.06706, over 19217.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.1845, pruned_loss=0.07492, over 1472353.64 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:32:46,293 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:32:57,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=108762.66666666667, ans=0.0
+2024-08-14 02:32:59,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=108762.66666666667, ans=0.2
+2024-08-14 02:33:07,577 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:33:11,245 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108816.0, ans=0.1
+2024-08-14 02:33:15,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=108816.0, ans=0.0
+2024-08-14 02:33:22,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108869.33333333333, ans=0.125
+2024-08-14 02:33:34,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=108869.33333333333, ans=0.125
+2024-08-14 02:33:38,661 INFO [dysarthria_finetune.py:1435] (1/4) (583729152, 34072559616)
+2024-08-14 02:33:38,662 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:33:38,712 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:33:51,753 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 0, loss[loss=0.1986, simple_loss=0.2028, pruned_loss=0.09453, over 18619.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2028, pruned_loss=0.09453, over 18619.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:33:51,753 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:34:15,236 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 14, validation: loss=0.1615, simple_loss=0.1782, pruned_loss=0.06778, over 1073944.00 frames.
+2024-08-14 02:34:15,237 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:34:56,219 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=109024.0, ans=0.2
+2024-08-14 02:35:16,114 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.678e+02 1.779e+02 1.987e+02 2.879e+02, threshold=3.559e+02, percent-clipped=0.0
+2024-08-14 02:35:38,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=109130.66666666667, ans=0.125
+2024-08-14 02:35:48,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=109130.66666666667, ans=0.125
+2024-08-14 02:35:52,730 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 50, loss[loss=0.1723, simple_loss=0.1937, pruned_loss=0.07089, over 19004.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.183, pruned_loss=0.07312, over 826629.84 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:35:56,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.94 vs. limit=6.0
+2024-08-14 02:36:00,252 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.04 vs. limit=22.5
+2024-08-14 02:36:03,815 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109184.0, ans=0.1
+2024-08-14 02:36:11,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=109237.33333333333, ans=0.125
+2024-08-14 02:36:36,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=109290.66666666667, ans=0.125
+2024-08-14 02:36:53,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=109344.0, ans=0.0
+2024-08-14 02:37:10,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_ff2.min_abs, batch_count=109397.33333333333, ans=0.1
+2024-08-14 02:37:28,739 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 14, batch 100, loss[loss=0.1575, simple_loss=0.1762, pruned_loss=0.06615, over 19114.00 frames. ], tot_loss[loss=0.168, simple_loss=0.1825, pruned_loss=0.0733, over 1472155.55 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 16.0
+2024-08-14 02:37:29,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109450.66666666667, ans=0.125
+2024-08-14 02:37:47,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=109504.0, ans=0.025
+2024-08-14 02:37:54,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109504.0, ans=0.1
+2024-08-14 02:37:58,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=11.20 vs. limit=12.0
+2024-08-14 02:38:02,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=109504.0, ans=0.07
+2024-08-14 02:38:07,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109557.33333333333, ans=0.125
+2024-08-14 02:38:09,611 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=109557.33333333333, ans=0.04949747468305833
+2024-08-14 02:38:21,630 INFO [dysarthria_finetune.py:1435] (1/4) (14963900416, 34072559616)
+2024-08-14 02:38:21,630 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:38:21,674 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:38:34,972 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 0, loss[loss=0.1867, simple_loss=0.1909, pruned_loss=0.08974, over 18480.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.1909, pruned_loss=0.08974, over 18480.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:38:34,972 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:38:57,685 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 15, validation: loss=0.1571, simple_loss=0.176, pruned_loss=0.06629, over 1073944.00 frames.
+2024-08-14 02:38:57,685 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:38:59,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-08-14 02:39:02,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-08-14 02:39:07,270 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 1.642e+02 1.752e+02 1.914e+02 2.610e+02, threshold=3.503e+02, percent-clipped=0.0
+2024-08-14 02:39:13,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=109605.33333333333, ans=0.125
+2024-08-14 02:39:16,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=109605.33333333333, ans=0.125
+2024-08-14 02:39:38,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=109658.66666666667, ans=0.09899494936611666
+2024-08-14 02:39:38,888 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.02 vs. limit=15.0
+2024-08-14 02:39:50,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109712.0, ans=0.1
+2024-08-14 02:40:26,204 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.11 vs. limit=8.0
+2024-08-14 02:40:31,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=109765.33333333333, ans=0.125
+2024-08-14 02:40:57,612 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 50, loss[loss=0.1749, simple_loss=0.1913, pruned_loss=0.07731, over 19020.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.1824, pruned_loss=0.07406, over 827766.05 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:42:37,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110032.0, ans=0.1
+2024-08-14 02:42:53,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=110085.33333333333, ans=0.125
+2024-08-14 02:43:07,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=110085.33333333333, ans=0.125
+2024-08-14 02:43:17,728 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 15, batch 100, loss[loss=0.145, simple_loss=0.1596, pruned_loss=0.06408, over 19074.00 frames. ], tot_loss[loss=0.1653, simple_loss=0.182, pruned_loss=0.07244, over 1471681.17 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:43:23,630 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.639e+02 1.741e+02 1.916e+02 2.571e+02, threshold=3.482e+02, percent-clipped=0.0
+2024-08-14 02:43:34,758 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.34 vs. limit=15.0
+2024-08-14 02:44:39,950 INFO [dysarthria_finetune.py:1435] (1/4) (2863333376, 34072559616)
+2024-08-14 02:44:39,950 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:44:40,034 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:44:54,093 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 0, loss[loss=0.176, simple_loss=0.1983, pruned_loss=0.07578, over 18847.00 frames. ], tot_loss[loss=0.176, simple_loss=0.1983, pruned_loss=0.07578, over 18847.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:44:54,093 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:45:16,881 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 16, validation: loss=0.1529, simple_loss=0.1739, pruned_loss=0.06493, over 1073944.00 frames.
+2024-08-14 02:45:16,882 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:45:32,265 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-08-14 02:45:50,057 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.86 vs. limit=22.5
+2024-08-14 02:46:26,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=110453.33333333333, ans=0.125
+2024-08-14 02:46:36,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=110506.66666666667, ans=0.125
+2024-08-14 02:46:38,491 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:47:33,746 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 50, loss[loss=0.1623, simple_loss=0.1838, pruned_loss=0.07012, over 19018.00 frames. ], tot_loss[loss=0.1599, simple_loss=0.1783, pruned_loss=0.07014, over 827868.27 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:47:42,977 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=15.0
+2024-08-14 02:47:44,759 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:47:50,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=110560.0, ans=0.125
+2024-08-14 02:48:12,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=110666.66666666667, ans=0.05
+2024-08-14 02:48:12,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=110666.66666666667, ans=0.0
+2024-08-14 02:48:22,827 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.614e+02 1.779e+02 1.933e+02 2.621e+02, threshold=3.558e+02, percent-clipped=0.0
+2024-08-14 02:49:16,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-08-14 02:49:34,534 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 16, batch 100, loss[loss=0.1414, simple_loss=0.164, pruned_loss=0.05939, over 19118.00 frames. ], tot_loss[loss=0.1606, simple_loss=0.1793, pruned_loss=0.07065, over 1473208.83 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 16.0
+2024-08-14 02:49:35,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=110826.66666666667, ans=0.125
+2024-08-14 02:49:36,023 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.49 vs. limit=22.5
+2024-08-14 02:50:22,678 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.88 vs. limit=15.0
+2024-08-14 02:50:28,860 INFO [dysarthria_finetune.py:1435] (1/4) (371916800, 34072559616)
+2024-08-14 02:50:28,861 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:50:28,931 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:50:47,962 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 0, loss[loss=0.1927, simple_loss=0.2145, pruned_loss=0.08548, over 18527.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2145, pruned_loss=0.08548, over 18527.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:50:47,963 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:50:53,048 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([4.6480, 4.1175, 4.0232, 4.5265], device='cuda:1')
+2024-08-14 02:51:11,068 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 17, validation: loss=0.1498, simple_loss=0.1721, pruned_loss=0.06377, over 1073944.00 frames.
+2024-08-14 02:51:11,069 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13100MB
+2024-08-14 02:51:27,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=110981.33333333333, ans=0.125
+2024-08-14 02:51:31,139 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111034.66666666667, ans=0.125
+2024-08-14 02:51:44,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111034.66666666667, ans=0.125
+2024-08-14 02:52:04,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=111088.0, ans=0.125
+2024-08-14 02:52:08,383 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=111088.0, ans=0.025
+2024-08-14 02:52:59,543 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.599e+02 1.701e+02 1.889e+02 2.501e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-14 02:53:07,423 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 50, loss[loss=0.1487, simple_loss=0.1722, pruned_loss=0.06262, over 19037.00 frames. ], tot_loss[loss=0.1571, simple_loss=0.1768, pruned_loss=0.06867, over 827680.99 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:53:10,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=111248.0, ans=0.0
+2024-08-14 02:54:15,767 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=111408.0, ans=0.0
+2024-08-14 02:54:49,163 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.66 vs. limit=22.5
+2024-08-14 02:55:04,519 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 17, batch 100, loss[loss=0.1296, simple_loss=0.1546, pruned_loss=0.05235, over 19067.00 frames. ], tot_loss[loss=0.1534, simple_loss=0.1737, pruned_loss=0.06649, over 1472664.14 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:55:45,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=111621.33333333333, ans=0.125
+2024-08-14 02:56:14,212 INFO [dysarthria_finetune.py:1435] (1/4) (1472921600, 34072559616)
+2024-08-14 02:56:14,212 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 02:56:14,282 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 02:56:27,749 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 0, loss[loss=0.1738, simple_loss=0.1889, pruned_loss=0.07937, over 18622.00 frames. ], tot_loss[loss=0.1738, simple_loss=0.1889, pruned_loss=0.07937, over 18622.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 02:56:27,750 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 02:56:58,387 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 18, validation: loss=0.1479, simple_loss=0.1705, pruned_loss=0.06271, over 1073944.00 frames.
+2024-08-14 02:56:58,388 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-14 02:57:05,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111669.33333333333, ans=0.07
+2024-08-14 02:57:59,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=111722.66666666667, ans=0.0
+2024-08-14 02:58:03,299 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.609e+02 1.680e+02 1.858e+02 2.812e+02, threshold=3.359e+02, percent-clipped=0.0
+2024-08-14 02:58:17,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111776.0, ans=0.07
+2024-08-14 02:58:31,849 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.69 vs. limit=5.0
+2024-08-14 02:58:52,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=111829.33333333333, ans=0.125
+2024-08-14 02:58:58,250 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=6.83 vs. limit=12.0
+2024-08-14 02:59:52,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=111936.0, ans=0.125
+2024-08-14 02:59:54,714 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 50, loss[loss=0.1593, simple_loss=0.1816, pruned_loss=0.06857, over 19026.00 frames. ], tot_loss[loss=0.1545, simple_loss=0.1741, pruned_loss=0.06749, over 826500.31 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:00:00,908 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.43 vs. limit=15.0
+2024-08-14 03:00:06,484 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=111936.0, ans=0.125
+2024-08-14 03:04:04,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112149.33333333333, ans=0.1
+2024-08-14 03:04:53,114 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 18, batch 100, loss[loss=0.1446, simple_loss=0.1667, pruned_loss=0.06129, over 19036.00 frames. ], tot_loss[loss=0.1531, simple_loss=0.1729, pruned_loss=0.06666, over 1471672.61 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:05:52,852 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.561e+02 1.643e+02 1.812e+02 2.261e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-14 03:07:12,790 INFO [dysarthria_finetune.py:1435] (1/4) (1055588352, 34072559616)
+2024-08-14 03:07:12,790 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 03:07:12,861 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 03:07:26,409 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 0, loss[loss=0.1685, simple_loss=0.1947, pruned_loss=0.07114, over 18691.00 frames. ], tot_loss[loss=0.1685, simple_loss=0.1947, pruned_loss=0.07114, over 18691.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:07:26,409 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 03:07:58,726 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 19, validation: loss=0.1464, simple_loss=0.169, pruned_loss=0.06188, over 1073944.00 frames.
+2024-08-14 03:07:58,727 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-14 03:08:43,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112405.33333333333, ans=0.0
+2024-08-14 03:09:05,115 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.45 vs. limit=15.0
+2024-08-14 03:09:10,839 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.63 vs. limit=22.5
+2024-08-14 03:09:19,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=112458.66666666667, ans=0.125
+2024-08-14 03:09:59,486 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=9.02 vs. limit=12.0
+2024-08-14 03:10:21,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112512.0, ans=0.1
+2024-08-14 03:10:41,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-08-14 03:10:57,785 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 50, loss[loss=0.1563, simple_loss=0.1807, pruned_loss=0.06597, over 18976.00 frames. ], tot_loss[loss=0.1543, simple_loss=0.1734, pruned_loss=0.06759, over 828010.25 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:11:02,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112618.66666666667, ans=0.1
+2024-08-14 03:11:55,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:11:57,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:12:19,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-08-14 03:12:51,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=112778.66666666667, ans=10.0
+2024-08-14 03:13:01,839 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=112778.66666666667, ans=0.0
+2024-08-14 03:13:37,507 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.593e+02 1.694e+02 1.909e+02 3.031e+02, threshold=3.389e+02, percent-clipped=0.0
+2024-08-14 03:13:45,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112832.0, ans=0.125
+2024-08-14 03:14:02,405 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 19, batch 100, loss[loss=0.1267, simple_loss=0.1481, pruned_loss=0.05264, over 19118.00 frames. ], tot_loss[loss=0.1514, simple_loss=0.1722, pruned_loss=0.06526, over 1474453.83 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 16.0
+2024-08-14 03:14:07,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=112885.33333333333, ans=0.125
+2024-08-14 03:14:10,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=112885.33333333333, ans=0.2
+2024-08-14 03:14:19,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=112885.33333333333, ans=0.125
+2024-08-14 03:15:47,494 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=10.85 vs. limit=15.0
+2024-08-14 03:15:54,658 INFO [dysarthria_finetune.py:1435] (1/4) (877330432, 34072559616)
+2024-08-14 03:15:54,659 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 03:15:54,731 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 03:16:41,856 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 0, loss[loss=0.1886, simple_loss=0.2068, pruned_loss=0.08519, over 18758.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2068, pruned_loss=0.08519, over 18758.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:16:41,857 INFO [dysarthria_finetune.py:1165] (1/4) Computing validation loss on speech
+2024-08-14 03:17:15,217 INFO [dysarthria_finetune.py:1174] (1/4) Validation on speech: Epoch 20, validation: loss=0.1449, simple_loss=0.1677, pruned_loss=0.0611, over 1073944.00 frames.
+2024-08-14 03:17:15,218 INFO [dysarthria_finetune.py:1177] (1/4) Maximum memory allocated so far is 13133MB
+2024-08-14 03:17:42,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=113040.0, ans=0.125
+2024-08-14 03:17:46,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=113093.33333333333, ans=0.125
+2024-08-14 03:19:26,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113146.66666666667, ans=0.1
+2024-08-14 03:19:36,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=113200.0, ans=0.07
+2024-08-14 03:19:39,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113200.0, ans=0.125
+2024-08-14 03:19:42,198 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 03:19:42,360 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.60 vs. limit=15.0
+2024-08-14 03:20:43,496 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=113253.33333333333, ans=0.1
+2024-08-14 03:20:55,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=113253.33333333333, ans=0.125
+2024-08-14 03:21:08,395 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 50, loss[loss=0.1513, simple_loss=0.1714, pruned_loss=0.06561, over 19069.00 frames. ], tot_loss[loss=0.1516, simple_loss=0.1707, pruned_loss=0.06624, over 828644.17 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:22:10,071 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.567e+02 1.664e+02 1.868e+02 2.522e+02, threshold=3.327e+02, percent-clipped=0.0
+2024-08-14 03:24:34,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=113520.0, ans=0.0
+2024-08-14 03:24:59,416 INFO [dysarthria_finetune.py:1141] (1/4) Epoch 20, batch 100, loss[loss=0.1247, simple_loss=0.1401, pruned_loss=0.0547, over 19104.00 frames. ], tot_loss[loss=0.1492, simple_loss=0.169, pruned_loss=0.06475, over 1473557.06 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:25:15,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-08-14 03:25:32,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=113626.66666666667, ans=0.125
+2024-08-14 03:26:31,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=113626.66666666667, ans=0.0
+2024-08-14 03:26:46,486 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.03 vs. limit=15.0
+2024-08-14 03:26:50,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=113626.66666666667, ans=0.025
+2024-08-14 03:26:55,963 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=11.75 vs. limit=15.0
+2024-08-14 03:27:02,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113680.0, ans=0.1
+2024-08-14 03:27:02,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=113680.0, ans=0.0
+2024-08-14 03:27:20,905 INFO [dysarthria_finetune.py:1435] (1/4) (1347092480, 34072559616)
+2024-08-14 03:27:20,905 INFO [dysarthria_finetune.py:1436] (1/4) Empty cache: before and after
+2024-08-14 03:27:20,968 INFO [dysarthria_finetune.py:1440] (1/4) (29283254272, 34072559616)
+2024-08-14 03:27:20,968 INFO [dysarthria_finetune.py:1442] (1/4) Done!
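The log file above (and the one that follows) records, per rank, a `tot_loss[loss=…]` entry every 50 training batches and a `Validation on speech: … loss=…` entry at the start of each epoch. A minimal sketch for pulling those loss curves out of such a file — the regexes are inferred from the line format above and the path in `__main__` is just a placeholder, none of this is code from this repo:

```python
# Illustrative sketch: extract train/validation loss trajectories from an
# icefall-style log-train-* file such as the ones added in this diff.
import re

# Matches e.g. "Epoch 14, batch 100, loss[...], tot_loss[loss=0.168, ..."
TRAIN_RE = re.compile(r"Epoch (\d+), batch (\d+), .*?tot_loss\[loss=([0-9.]+)")
# Matches e.g. "Validation on speech: Epoch 14, validation: loss=0.1615, ..."
VALID_RE = re.compile(r"Validation on speech: Epoch (\d+), validation: loss=([0-9.]+)")

def parse_log(path):
    """Return (train, valid): train as (epoch, batch, tot_loss) tuples,
    valid as (epoch, loss) tuples, in file order."""
    train, valid = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            m = TRAIN_RE.search(line)
            if m:
                train.append((int(m[1]), int(m[2]), float(m[3])))
                continue
            m = VALID_RE.search(line)
            if m:
                valid.append((int(m[1]), float(m[2])))
    return train, valid

if __name__ == "__main__":
    # Placeholder path; point this at any of the log files in this diff.
    train, valid = parse_log("log-train-2024-08-13-23-24-47-2")
    for epoch, loss in valid:
        print(f"epoch {epoch:2d}  validation loss {loss:.4f}")
```

Run against the file above, this would reproduce the per-epoch validation losses seen in the log (e.g. 0.1615 at epoch 14 down to 0.1449 at epoch 20), which is a quick way to sanity-check convergence across the four ranks without a TensorBoard instance.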
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-2 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-2
new file mode 100644
index 0000000000000000000000000000000000000000..05c10f62bc4123006f5a32438e5b346310383cc5
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-2
@@ -0,0 +1,527 @@
+2024-08-13 23:24:47,931 INFO [dysarthria_finetune.py:1212] (2/4) Training started
+2024-08-13 23:24:47,960 INFO [dysarthria_finetune.py:1214] (2/4) (33748090880, 34072559616)
+2024-08-13 23:24:47,960 INFO [dysarthria_finetune.py:1215] (2/4) Empty cache: before and after
+2024-08-13 23:24:48,945 INFO [dysarthria_finetune.py:1219] (2/4) (32783400960, 34072559616)
+2024-08-13 23:24:48,946 INFO [dysarthria_finetune.py:1229] (2/4) Device: cuda:2
+2024-08-13 23:24:48,990 INFO [dysarthria_finetune.py:1241] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2024-08-13 23:24:48,991 INFO [dysarthria_finetune.py:1243] (2/4) About to create model
+2024-08-13 23:24:50,075 INFO [dysarthria_finetune.py:1247] (2/4) Number of model parameters: 65549011
+2024-08-13 23:24:50,075 INFO [dysarthria_finetune.py:769] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 23:24:59,919 INFO [dysarthria_finetune.py:1275] (2/4) Using DDP
+2024-08-13 23:25:17,950 INFO [dysarthria_asr_datamodule.py:494] (2/4) About to get train cuts
+2024-08-13 23:25:18,291 INFO [dysarthria_finetune.py:1319] (2/4) CutSet(len=62255) [underlying data type: ]
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:239] (2/4) Disable MUSAN
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:257] (2/4) Enable SpecAugment
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:258] (2/4) Time warp factor: 80
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:268] (2/4) Num frame mask: 10
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:281] (2/4) About to create train dataset
+2024-08-13 23:25:19,282 INFO [dysarthria_asr_datamodule.py:308] (2/4) Using DynamicBucketingSampler.
+2024-08-13 23:25:20,228 INFO [dysarthria_asr_datamodule.py:325] (2/4) About to create train dataloader
+2024-08-13 23:25:20,234 INFO [dysarthria_asr_datamodule.py:500] (2/4) About to get dev cuts
+2024-08-13 23:25:24,698 INFO [dysarthria_asr_datamodule.py:356] (2/4) About to create dev dataset
+2024-08-13 23:25:28,033 INFO [dysarthria_asr_datamodule.py:373] (2/4) About to create dev dataloader
+2024-08-13 23:25:28,034 INFO [dysarthria_finetune.py:1490] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-13 23:27:16,791 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=22.66 vs. limit=7.5
+2024-08-13 23:27:17,110 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=17.80 vs. limit=7.5
+2024-08-13 23:27:17,919 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 23:27:19,738 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 23:32:34,805 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 23:32:36,803 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 23:35:38,399 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 23:35:41,668 INFO [dysarthria_finetune.py:1518] (2/4) Maximum memory allocated so far is 11790MB
+2024-08-13 23:36:58,732 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 0, loss[loss=0.3479, simple_loss=0.3286, pruned_loss=0.1929, over 18533.00 frames. ], tot_loss[loss=0.3479, simple_loss=0.3286, pruned_loss=0.1929, over 18533.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 23:36:58,733 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-13 23:49:47,044 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 23:49:47,367 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19754MB
+2024-08-13 23:51:29,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.11 vs. limit=22.5
+2024-08-13 23:51:42,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 23:53:32,690 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=15.08 vs. limit=15.0
+2024-08-14 00:02:25,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=100000.0, ans=0.09899494936611666
+2024-08-14 00:15:49,521 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.298e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-14 00:19:46,065 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.06 vs. limit=15.0
+2024-08-14 00:20:37,578 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=19.66 vs. limit=15.0
+2024-08-14 00:22:20,358 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.42 vs. limit=15.0
+2024-08-14 00:23:15,345 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.450e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-14 00:28:13,089 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.71 vs. limit=15.0
+2024-08-14 00:29:53,825 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.241e+02 7.298e+02 9.450e+02 1.050e+03 1.319e+03, threshold=3.780e+03, percent-clipped=0.0
+2024-08-14 00:33:57,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-14 00:37:00,966 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=100266.66666666667, ans=0.125
+2024-08-14 00:37:01,625 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 50, loss[loss=0.3626, simple_loss=0.3408, pruned_loss=0.2122, over 19018.00 frames. ], tot_loss[loss=0.3542, simple_loss=0.3338, pruned_loss=0.2019, over 827419.58 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-14 00:42:55,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100266.66666666667, ans=0.1
+2024-08-14 00:42:55,082 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=24.05 vs. limit=15.0
+2024-08-14 00:46:57,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100320.0, ans=0.125
+2024-08-14 00:47:09,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=100320.0, ans=0.125
+2024-08-14 00:49:05,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=7.62 vs. limit=6.0
+2024-08-14 00:52:23,139 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.54 vs. limit=22.5
+2024-08-14 00:57:45,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100480.0, ans=0.125
+2024-08-14 01:01:58,596 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.501e+02 5.963e+02 7.298e+02 8.800e+02 1.319e+03, threshold=1.460e+03, percent-clipped=0.0
+2024-08-14 01:01:58,632 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 1, batch 100, loss[loss=0.3197, simple_loss=0.302, pruned_loss=0.1735, over 19117.00 frames. ], tot_loss[loss=0.3405, simple_loss=0.3212, pruned_loss=0.1903, over 1475925.13 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-14 01:03:28,403 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=14.96 vs. limit=15.0
+2024-08-14 01:11:48,006 INFO [dysarthria_finetune.py:1435] (2/4) (6835339264, 34072559616)
+2024-08-14 01:11:48,007 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:11:48,057 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:12:35,403 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 0, loss[loss=0.2944, simple_loss=0.2803, pruned_loss=0.1466, over 18502.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.2803, pruned_loss=0.1466, over 18502.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-14 01:12:35,404 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:16:55,990 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 2, validation: loss=0.2907, simple_loss=0.276, pruned_loss=0.149, over 1073944.00 frames.
+2024-08-14 01:16:55,991 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:20:02,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100736.0, ans=0.1
+2024-08-14 01:20:03,118 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.04 vs. limit=15.0
+2024-08-14 01:20:52,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=100789.33333333333, ans=15.0
+2024-08-14 01:22:32,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=100896.0, ans=0.2
+2024-08-14 01:24:54,898 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 50, loss[loss=0.3095, simple_loss=0.2935, pruned_loss=0.1612, over 18952.00 frames. ], tot_loss[loss=0.3217, simple_loss=0.3039, pruned_loss=0.1742, over 829638.79 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-08-14 01:27:07,120 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=101002.66666666667, ans=0.0
+2024-08-14 01:27:16,529 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:27:29,014 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.249e+02 4.347e+02 4.852e+02 5.543e+02 7.043e+02, threshold=9.703e+02, percent-clipped=0.0
+2024-08-14 01:27:52,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101056.0, ans=0.1
+2024-08-14 01:28:04,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten.whitening_limit, batch_count=101109.33333333333, ans=15.0
+2024-08-14 01:28:12,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-14 01:28:12,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-14 01:28:20,554 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.23 vs. limit=22.5
+2024-08-14 01:28:36,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.64 vs. limit=15.0
+2024-08-14 01:28:52,973 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.51 vs. limit=6.0
+2024-08-14 01:28:57,355 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=15.65 vs. limit=15.0
+2024-08-14 01:29:06,627 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 2, batch 100, loss[loss=0.2613, simple_loss=0.2493, pruned_loss=0.1292, over 19108.00 frames. ], tot_loss[loss=0.31, simple_loss=0.2936, pruned_loss=0.1641, over 1476292.15 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-08-14 01:29:10,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101216.0, ans=0.0
+2024-08-14 01:30:15,840 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101322.66666666667, ans=0.0
+2024-08-14 01:30:33,596 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=101322.66666666667, ans=0.125
+2024-08-14 01:30:42,018 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 01:30:42,018 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:30:42,060 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:30:55,539 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 0, loss[loss=0.3069, simple_loss=0.2898, pruned_loss=0.1657, over 18600.00 frames. ], tot_loss[loss=0.3069, simple_loss=0.2898, pruned_loss=0.1657, over 18600.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-08-14 01:30:55,539 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:31:18,578 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 3, validation: loss=0.2682, simple_loss=0.2564, pruned_loss=0.1309, over 1073944.00 frames.
+2024-08-14 01:31:18,578 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:31:50,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=101424.0, ans=0.025
+2024-08-14 01:31:50,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101424.0, ans=0.125
+2024-08-14 01:32:00,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=101424.0, ans=0.2
+2024-08-14 01:32:24,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=101477.33333333333, ans=0.2
+2024-08-14 01:32:25,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=101477.33333333333, ans=0.125
+2024-08-14 01:32:32,516 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=101530.66666666667, ans=0.125
+2024-08-14 01:32:58,604 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.574e+02 3.350e+02 3.692e+02 4.154e+02 5.648e+02, threshold=7.384e+02, percent-clipped=0.0
+2024-08-14 01:33:11,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101584.0, ans=0.1
+2024-08-14 01:33:14,922 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 50, loss[loss=0.2893, simple_loss=0.2752, pruned_loss=0.148, over 19168.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.2827, pruned_loss=0.152, over 828229.52 frames. ], batch size: 103, lr: 8.08e-05, grad_scale: 16.0
+2024-08-14 01:33:16,185 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101637.33333333333, ans=0.1
+2024-08-14 01:33:18,215 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=101637.33333333333, ans=0.125
+2024-08-14 01:35:08,698 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 3, batch 100, loss[loss=0.2503, simple_loss=0.2407, pruned_loss=0.1192, over 19024.00 frames. ], tot_loss[loss=0.2889, simple_loss=0.2751, pruned_loss=0.1467, over 1476045.82 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-08-14 01:35:55,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=102010.66666666667, ans=0.125
+2024-08-14 01:36:03,976 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 01:36:04,625 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:36:04,670 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:36:20,152 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 0, loss[loss=0.2799, simple_loss=0.2699, pruned_loss=0.1314, over 18618.00 frames. ], tot_loss[loss=0.2799, simple_loss=0.2699, pruned_loss=0.1314, over 18618.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-08-14 01:36:20,152 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:36:43,052 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 4, validation: loss=0.2499, simple_loss=0.241, pruned_loss=0.1173, over 1073944.00 frames.
+2024-08-14 01:36:43,053 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:37:05,281 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=12.46 vs. limit=15.0
+2024-08-14 01:37:11,682 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=6.15 vs. limit=12.0
+2024-08-14 01:37:13,146 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=102106.66666666667, ans=0.0
+2024-08-14 01:37:14,067 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 2.841e+02 3.076e+02 3.396e+02 5.357e+02, threshold=6.153e+02, percent-clipped=0.0
+2024-08-14 01:38:23,454 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 50, loss[loss=0.2688, simple_loss=0.2584, pruned_loss=0.1302, over 18961.00 frames. ], tot_loss[loss=0.273, simple_loss=0.2618, pruned_loss=0.1339, over 828488.26 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-08-14 01:38:30,698 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.16 vs. limit=15.0
+2024-08-14 01:38:40,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=102320.0, ans=0.125
+2024-08-14 01:38:58,601 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.41 vs. limit=22.5
+2024-08-14 01:39:23,499 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=102480.0, ans=0.125
+2024-08-14 01:39:29,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102480.0, ans=0.0
+2024-08-14 01:39:43,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=102533.33333333333, ans=0.035
+2024-08-14 01:39:52,485 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_ff2.min_abs, batch_count=102533.33333333333, ans=0.1
+2024-08-14 01:40:00,371 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=20.48 vs. limit=15.0
+2024-08-14 01:40:00,893 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 4, batch 100, loss[loss=0.2498, simple_loss=0.2395, pruned_loss=0.1238, over 19090.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.2584, pruned_loss=0.1321, over 1476821.49 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-08-14 01:40:22,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=102640.0, ans=22.5
+2024-08-14 01:40:30,559 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.524e+02 2.719e+02 2.975e+02 4.617e+02, threshold=5.438e+02, percent-clipped=0.0
+2024-08-14 01:40:34,051 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.58 vs. limit=6.0
+2024-08-14 01:40:48,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-14 01:40:50,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-14 01:40:51,225 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.84 vs. limit=22.5
+2024-08-14 01:40:55,780 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 01:40:55,781 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:40:55,824 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:41:11,067 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 0, loss[loss=0.2592, simple_loss=0.25, pruned_loss=0.1247, over 18551.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.25, pruned_loss=0.1247, over 18551.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:41:11,068 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:41:34,533 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 5, validation: loss=0.2343, simple_loss=0.2283, pruned_loss=0.1066, over 1073944.00 frames.
+2024-08-14 01:41:34,534 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:42:38,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=102848.0, ans=0.0
+2024-08-14 01:43:11,226 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.62 vs. limit=15.0
+2024-08-14 01:43:29,159 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 50, loss[loss=0.2364, simple_loss=0.2296, pruned_loss=0.1108, over 19027.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.2492, pruned_loss=0.1229, over 828775.72 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:44:12,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=103061.33333333333, ans=0.05
+2024-08-14 01:44:16,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=103061.33333333333, ans=0.125
+2024-08-14 01:44:26,287 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.78 vs. limit=22.5
+2024-08-14 01:44:29,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103114.66666666667, ans=0.1
+2024-08-14 01:44:59,445 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.063e+02 2.398e+02 2.550e+02 2.967e+02 4.732e+02, threshold=5.099e+02, percent-clipped=0.0
+2024-08-14 01:45:02,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103168.0, ans=0.1
+2024-08-14 01:45:06,500 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=103221.33333333333, ans=0.5
+2024-08-14 01:45:14,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-08-14 01:45:18,278 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.64 vs. limit=10.0
+2024-08-14 01:45:23,003 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.27 vs. limit=15.0
+2024-08-14 01:45:27,201 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 5, batch 100, loss[loss=0.2459, simple_loss=0.2379, pruned_loss=0.1181, over 19114.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.2416, pruned_loss=0.1182, over 1478197.42 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:46:02,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=103328.0, ans=0.1
+2024-08-14 01:46:08,381 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.60 vs. limit=22.5
+2024-08-14 01:46:18,141 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 01:46:18,141 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:46:18,185 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:46:32,107 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 0, loss[loss=0.2436, simple_loss=0.2385, pruned_loss=0.1115, over 18783.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2385, pruned_loss=0.1115, over 18783.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:46:32,108 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:46:55,695 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 6, validation: loss=0.2214, simple_loss=0.2182, pruned_loss=0.09842, over 1073944.00 frames.
+2024-08-14 01:46:55,696 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:47:15,259 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.55 vs. limit=15.0
+2024-08-14 01:47:47,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103477.33333333333, ans=0.1
+2024-08-14 01:47:47,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103477.33333333333, ans=0.1
+2024-08-14 01:47:53,789 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.08 vs. limit=15.0
+2024-08-14 01:47:56,786 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.64 vs. limit=22.5
+2024-08-14 01:48:12,421 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=17.78 vs. limit=15.0
+2024-08-14 01:48:17,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=103584.0, ans=0.025
+2024-08-14 01:48:23,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=103584.0, ans=0.025
+2024-08-14 01:48:25,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=103584.0, ans=0.125
+2024-08-14 01:49:09,883 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 50, loss[loss=0.2439, simple_loss=0.238, pruned_loss=0.1143, over 19006.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.2314, pruned_loss=0.1106, over 828020.78 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:49:32,269 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.23 vs. limit=22.5
+2024-08-14 01:49:32,949 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.293e+02 2.374e+02 2.625e+02 4.193e+02, threshold=4.747e+02, percent-clipped=0.0
+2024-08-14 01:49:56,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103744.0, ans=0.125
+2024-08-14 01:49:59,451 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.12 vs. limit=15.0
+2024-08-14 01:50:03,134 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=103797.33333333333, ans=0.5
+2024-08-14 01:50:08,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=103797.33333333333, ans=0.09899494936611666
+2024-08-14 01:50:13,258 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.06 vs. limit=15.0
+2024-08-14 01:50:21,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103797.33333333333, ans=0.1
+2024-08-14 01:50:49,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=103850.66666666667, ans=0.0
+2024-08-14 01:50:50,015 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.93 vs. limit=6.0
+2024-08-14 01:51:17,783 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 6, batch 100, loss[loss=0.2473, simple_loss=0.2428, pruned_loss=0.1142, over 19060.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2305, pruned_loss=0.1097, over 1475525.13 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:51:31,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=103957.33333333333, ans=0.95
+2024-08-14 01:51:54,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=104010.66666666667, ans=0.2
+2024-08-14 01:52:24,215 INFO [dysarthria_finetune.py:1435] (2/4) (10763304960, 34072559616)
+2024-08-14 01:52:24,216 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:52:24,255 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:52:37,500 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 0, loss[loss=0.2387, simple_loss=0.2368, pruned_loss=0.1063, over 18435.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.2368, pruned_loss=0.1063, over 18435.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:52:37,500 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:53:01,317 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 7, validation: loss=0.2103, simple_loss=0.2098, pruned_loss=0.0916, over 1073944.00 frames.
+2024-08-14 01:53:01,318 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:53:52,784 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=104213.33333333333, ans=0.125
+2024-08-14 01:53:52,925 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.41 vs. limit=12.0
+2024-08-14 01:53:54,744 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.59 vs. limit=15.0
+2024-08-14 01:54:01,264 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.137e+02 2.271e+02 2.445e+02 3.999e+02, threshold=4.542e+02, percent-clipped=0.0
+2024-08-14 01:54:22,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=104320.0, ans=0.125
+2024-08-14 01:54:40,882 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 50, loss[loss=0.2052, simple_loss=0.2075, pruned_loss=0.08584, over 18970.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.227, pruned_loss=0.1064, over 828175.40 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:55:47,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104533.33333333333, ans=0.1
+2024-08-14 01:55:57,254 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=17.06 vs. limit=15.0
+2024-08-14 01:55:59,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=104586.66666666667, ans=0.125
+2024-08-14 01:56:02,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=104586.66666666667, ans=0.2
+2024-08-14 01:56:18,958 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 7, batch 100, loss[loss=0.2159, simple_loss=0.2161, pruned_loss=0.09522, over 19065.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2222, pruned_loss=0.1029, over 1476190.89 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:56:20,141 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=104640.0, ans=0.125
+2024-08-14 01:56:28,349 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.03 vs. limit=22.5
+2024-08-14 01:57:02,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=104746.66666666667, ans=0.0
+2024-08-14 01:57:10,846 INFO [dysarthria_finetune.py:1435] (2/4) (10763304960, 34072559616)
+2024-08-14 01:57:10,847 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 01:57:10,898 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 01:57:24,750 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 0, loss[loss=0.2036, simple_loss=0.2073, pruned_loss=0.08498, over 18635.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2073, pruned_loss=0.08498, over 18635.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:57:24,751 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 01:57:48,460 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 8, validation: loss=0.2004, simple_loss=0.2027, pruned_loss=0.08579, over 1073944.00 frames.
+2024-08-14 01:57:48,461 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 01:57:54,666 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.054e+02 2.212e+02 2.317e+02 3.796e+02, threshold=4.423e+02, percent-clipped=0.0
+2024-08-14 01:58:14,379 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.69 vs. limit=15.0
+2024-08-14 01:58:33,448 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=104896.0, ans=0.0
+2024-08-14 01:58:49,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=104949.33333333333, ans=0.125
+2024-08-14 01:59:02,823 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=104949.33333333333, ans=0.0
+2024-08-14 01:59:46,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=105002.66666666667, ans=0.125
+2024-08-14 01:59:59,169 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 50, loss[loss=0.2061, simple_loss=0.212, pruned_loss=0.08423, over 19000.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2169, pruned_loss=0.09951, over 827531.12 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:00:26,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105109.33333333333, ans=0.1
+2024-08-14 02:00:29,102 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.58 vs. limit=22.5
+2024-08-14 02:00:51,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=105162.66666666667, ans=0.2
+2024-08-14 02:01:36,491 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 8, batch 100, loss[loss=0.202, simple_loss=0.2028, pruned_loss=0.09074, over 19093.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2112, pruned_loss=0.0944, over 1475468.79 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:01:42,319 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.040e+02 2.200e+02 2.368e+02 3.520e+02, threshold=4.401e+02, percent-clipped=0.0
+2024-08-14 02:01:47,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=105322.66666666667, ans=0.125
+2024-08-14 02:01:54,874 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=105376.0, ans=0.2
+2024-08-14 02:01:58,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=105376.0, ans=0.0
+2024-08-14 02:01:58,923 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.71 vs. limit=15.0
+2024-08-14 02:02:21,885 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.28 vs. limit=15.0
+2024-08-14 02:02:29,637 INFO [dysarthria_finetune.py:1435] (2/4) (10763304960, 34072559616)
+2024-08-14 02:02:29,637 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:02:29,681 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:02:42,908 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 0, loss[loss=0.2207, simple_loss=0.2212, pruned_loss=0.1001, over 18461.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2212, pruned_loss=0.1001, over 18461.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:02:42,909 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:03:19,142 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 9, validation: loss=0.1911, simple_loss=0.1962, pruned_loss=0.08053, over 1073944.00 frames.
+2024-08-14 02:03:19,143 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:04:18,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=105584.0, ans=0.0
+2024-08-14 02:04:22,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105584.0, ans=0.1
+2024-08-14 02:04:25,009 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.87 vs. limit=15.0
+2024-08-14 02:04:43,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=105637.33333333333, ans=0.125
+2024-08-14 02:05:08,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=105637.33333333333, ans=0.05
+2024-08-14 02:06:19,439 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=7.51 vs. limit=12.0
+2024-08-14 02:06:19,732 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 50, loss[loss=0.2093, simple_loss=0.2165, pruned_loss=0.08741, over 18943.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2055, pruned_loss=0.0898, over 826909.81 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:07:11,131 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105850.66666666667, ans=0.1
+2024-08-14 02:07:19,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=105850.66666666667, ans=0.2
+2024-08-14 02:07:19,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=105850.66666666667, ans=0.125
+2024-08-14 02:07:22,957 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.009e+02 2.115e+02 2.263e+02 3.410e+02, threshold=4.229e+02, percent-clipped=0.0
+2024-08-14 02:07:25,031 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=11.41 vs. limit=12.0
+2024-08-14 02:07:33,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=105904.0, ans=0.0
+2024-08-14 02:08:32,106 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 9, batch 100, loss[loss=0.1913, simple_loss=0.1994, pruned_loss=0.07928, over 19136.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.204, pruned_loss=0.08796, over 1474643.82 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 16.0
+2024-08-14 02:08:33,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=106010.66666666667, ans=0.2
+2024-08-14 02:08:40,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=106010.66666666667, ans=0.2
+2024-08-14 02:08:43,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=106010.66666666667, ans=0.0
+2024-08-14 02:09:28,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=106117.33333333333, ans=0.035
+2024-08-14 02:09:37,378 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 02:09:37,379 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:09:37,422 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:09:53,511 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 0, loss[loss=0.2298, simple_loss=0.2323, pruned_loss=0.1042, over 18505.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.2323, pruned_loss=0.1042, over 18505.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:09:53,512 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:10:16,413 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 10, validation: loss=0.1833, simple_loss=0.191, pruned_loss=0.07653, over 1073944.00 frames.
+2024-08-14 02:10:16,414 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:10:17,967 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.18 vs. limit=15.0
+2024-08-14 02:10:24,202 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=106165.33333333333, ans=0.2
+2024-08-14 02:10:54,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-08-14 02:11:19,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=106325.33333333333, ans=0.0
+2024-08-14 02:11:35,614 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.13 vs. limit=15.0
+2024-08-14 02:11:47,860 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 1.913e+02 2.021e+02 2.184e+02 3.494e+02, threshold=4.042e+02, percent-clipped=0.0
+2024-08-14 02:11:55,858 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 50, loss[loss=0.1915, simple_loss=0.1991, pruned_loss=0.08149, over 19019.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2011, pruned_loss=0.08652, over 827816.98 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:11:57,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=106432.0, ans=0.2
+2024-08-14 02:12:06,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=106432.0, ans=0.0
+2024-08-14 02:13:05,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=106592.0, ans=0.95
+2024-08-14 02:13:09,002 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=106592.0, ans=0.09899494936611666
+2024-08-14 02:13:33,245 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 10, batch 100, loss[loss=0.1694, simple_loss=0.1776, pruned_loss=0.07144, over 19070.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.1969, pruned_loss=0.08306, over 1475821.74 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:14:09,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=106752.0, ans=0.125
+2024-08-14 02:14:18,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=106805.33333333333, ans=0.125
+2024-08-14 02:14:26,593 INFO [dysarthria_finetune.py:1435] (2/4) (10696196096, 34072559616)
+2024-08-14 02:14:26,593 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:14:26,636 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:14:39,660 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 0, loss[loss=0.2263, simple_loss=0.2236, pruned_loss=0.1098, over 18525.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2236, pruned_loss=0.1098, over 18525.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:14:39,661 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:15:02,460 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 11, validation: loss=0.1768, simple_loss=0.1869, pruned_loss=0.07357, over 1073944.00 frames.
+2024-08-14 02:15:02,460 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:15:33,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106906.66666666667, ans=0.125
+2024-08-14 02:15:35,905 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 1.865e+02 1.931e+02 2.118e+02 3.052e+02, threshold=3.863e+02, percent-clipped=0.0
+2024-08-14 02:15:58,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106960.0, ans=0.1
+2024-08-14 02:16:34,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=107066.66666666667, ans=0.025
+2024-08-14 02:16:45,031 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 50, loss[loss=0.1874, simple_loss=0.1936, pruned_loss=0.08339, over 19068.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.1953, pruned_loss=0.08196, over 827285.47 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:16:48,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=107120.0, ans=0.125
+2024-08-14 02:17:21,376 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff3.min_abs, batch_count=107173.33333333333, ans=0.2
+2024-08-14 02:17:21,381 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=107173.33333333333, ans=0.0
+2024-08-14 02:17:51,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=107226.66666666667, ans=0.125
+2024-08-14 02:18:20,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=107333.33333333333, ans=0.2
+2024-08-14 02:18:50,455 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 11, batch 100, loss[loss=0.1721, simple_loss=0.1814, pruned_loss=0.07399, over 19059.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.1937, pruned_loss=0.08056, over 1474809.38 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:19:04,203 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.71 vs. limit=6.0
+2024-08-14 02:19:14,605 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.70 vs. limit=22.5
+2024-08-14 02:19:36,871 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.754e+02 1.842e+02 1.998e+02 3.456e+02, threshold=3.684e+02, percent-clipped=0.0
+2024-08-14 02:19:41,106 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=10.52 vs. limit=15.0
+2024-08-14 02:19:59,984 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 02:19:59,985 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:20:00,025 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:20:13,223 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 0, loss[loss=0.1858, simple_loss=0.1971, pruned_loss=0.07927, over 18505.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.1971, pruned_loss=0.07927, over 18505.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:20:13,223 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:20:42,008 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 12, validation: loss=0.1712, simple_loss=0.1836, pruned_loss=0.0713, over 1073944.00 frames.
+2024-08-14 02:20:42,009 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:21:16,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=107594.66666666667, ans=0.125
+2024-08-14 02:22:18,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=107648.0, ans=0.125
+2024-08-14 02:23:13,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=107701.33333333333, ans=0.125
+2024-08-14 02:23:32,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=107754.66666666667, ans=0.125
+2024-08-14 02:23:46,562 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=107754.66666666667, ans=0.2
+2024-08-14 02:24:25,100 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 50, loss[loss=0.174, simple_loss=0.1882, pruned_loss=0.07209, over 18979.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.1897, pruned_loss=0.07895, over 828348.40 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:26:15,344 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.85 vs. limit=6.0
+2024-08-14 02:26:31,767 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.79 vs. limit=10.0
+2024-08-14 02:26:42,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=107914.66666666667, ans=0.125
+2024-08-14 02:27:04,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=107968.0, ans=0.2
+2024-08-14 02:27:08,998 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 1.754e+02 1.846e+02 2.049e+02 2.889e+02, threshold=3.691e+02, percent-clipped=0.0
+2024-08-14 02:27:13,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=108021.33333333333, ans=0.0
+2024-08-14 02:27:13,683 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.37 vs. limit=15.0
+2024-08-14 02:27:37,285 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 12, batch 100, loss[loss=0.1595, simple_loss=0.176, pruned_loss=0.06412, over 19089.00 frames. ], tot_loss[loss=0.1794, simple_loss=0.1899, pruned_loss=0.07782, over 1475248.85 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:28:03,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=108128.0, ans=0.2
+2024-08-14 02:28:03,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=108128.0, ans=0.125
+2024-08-14 02:28:48,198 INFO [dysarthria_finetune.py:1435] (2/4) (10763304960, 34072559616)
+2024-08-14 02:28:48,199 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:28:48,236 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:29:01,627 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 0, loss[loss=0.2041, simple_loss=0.2147, pruned_loss=0.09096, over 18540.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2147, pruned_loss=0.09096, over 18540.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:29:01,628 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:29:09,427 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([6.5372, 5.8372, 6.1243, 5.9362], device='cuda:2')
+2024-08-14 02:29:24,534 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 13, validation: loss=0.1662, simple_loss=0.1808, pruned_loss=0.06949, over 1073944.00 frames.
+2024-08-14 02:29:24,535 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:29:38,896 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=15.67 vs. limit=15.0
+2024-08-14 02:29:51,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=108282.66666666667, ans=0.0
+2024-08-14 02:30:04,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=108336.0, ans=0.0
+2024-08-14 02:30:11,723 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.20 vs. limit=10.0
+2024-08-14 02:30:21,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108336.0, ans=0.125
+2024-08-14 02:30:33,611 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.19 vs. limit=22.5
+2024-08-14 02:30:35,908 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.68 vs. limit=15.0
+2024-08-14 02:30:46,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=108442.66666666667, ans=0.125
+2024-08-14 02:31:06,619 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 50, loss[loss=0.1903, simple_loss=0.2024, pruned_loss=0.08382, over 18984.00 frames. ], tot_loss[loss=0.1743, simple_loss=0.1876, pruned_loss=0.07481, over 829065.08 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:31:27,171 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.723e+02 1.826e+02 1.962e+02 2.693e+02, threshold=3.652e+02, percent-clipped=0.0
+2024-08-14 02:31:46,873 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=9.70 vs. limit=12.0
+2024-08-14 02:31:48,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=108602.66666666667, ans=0.025
+2024-08-14 02:32:13,937 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.84 vs. limit=6.0
+2024-08-14 02:32:17,613 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=108656.0, ans=0.0
+2024-08-14 02:32:27,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=108709.33333333333, ans=0.0
+2024-08-14 02:32:32,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=108709.33333333333, ans=0.125
+2024-08-14 02:32:32,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=108709.33333333333, ans=0.2
+2024-08-14 02:32:45,025 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 13, batch 100, loss[loss=0.1596, simple_loss=0.1771, pruned_loss=0.06582, over 19116.00 frames. ], tot_loss[loss=0.1752, simple_loss=0.1881, pruned_loss=0.07597, over 1477011.25 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:32:49,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=108762.66666666667, ans=0.2
+2024-08-14 02:33:03,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=108816.0, ans=0.04949747468305833
+2024-08-14 02:33:09,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108816.0, ans=0.125
+2024-08-14 02:33:17,036 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=108816.0, ans=0.125
+2024-08-14 02:33:17,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=108816.0, ans=0.125
+2024-08-14 02:33:21,088 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=108816.0, ans=10.0
+2024-08-14 02:33:28,880 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.32 vs. limit=15.0
+2024-08-14 02:33:38,639 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 02:33:38,640 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:33:38,680 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:33:51,753 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 0, loss[loss=0.1893, simple_loss=0.1956, pruned_loss=0.08854, over 18523.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.1956, pruned_loss=0.08854, over 18523.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:33:51,754 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:34:15,234 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 14, validation: loss=0.1615, simple_loss=0.1782, pruned_loss=0.06778, over 1073944.00 frames.
+2024-08-14 02:34:15,235 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:34:28,641 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108917.33333333333, ans=0.125
+2024-08-14 02:35:13,451 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:35:16,114 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.678e+02 1.779e+02 1.987e+02 2.879e+02, threshold=3.559e+02, percent-clipped=0.0
+2024-08-14 02:35:23,178 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=109077.33333333333, ans=0.125
+2024-08-14 02:35:42,040 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:35:44,129 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109130.66666666667, ans=0.1
+2024-08-14 02:35:52,700 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 50, loss[loss=0.1562, simple_loss=0.175, pruned_loss=0.06467, over 18999.00 frames. ], tot_loss[loss=0.1687, simple_loss=0.1832, pruned_loss=0.07324, over 827850.18 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:36:07,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=109184.0, ans=0.125
+2024-08-14 02:37:28,721 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 14, batch 100, loss[loss=0.1649, simple_loss=0.1814, pruned_loss=0.07122, over 19059.00 frames. ], tot_loss[loss=0.1697, simple_loss=0.1844, pruned_loss=0.07397, over 1475617.37 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 16.0
+2024-08-14 02:37:43,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=109450.66666666667, ans=0.0
+2024-08-14 02:37:50,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109504.0, ans=0.1
+2024-08-14 02:37:56,550 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109504.0, ans=0.125
+2024-08-14 02:38:03,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=109504.0, ans=0.125
+2024-08-14 02:38:04,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=109504.0, ans=0.0
+2024-08-14 02:38:21,623 INFO [dysarthria_finetune.py:1435] (2/4) (10700390400, 34072559616)
+2024-08-14 02:38:21,624 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:38:21,674 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:38:34,954 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 0, loss[loss=0.2223, simple_loss=0.2309, pruned_loss=0.1047, over 18678.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2309, pruned_loss=0.1047, over 18678.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:38:34,955 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:38:57,683 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 15, validation: loss=0.1571, simple_loss=0.176, pruned_loss=0.06629, over 1073944.00 frames.
+2024-08-14 02:38:57,683 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:39:00,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-08-14 02:39:02,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-08-14 02:39:07,269 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 1.642e+02 1.752e+02 1.914e+02 2.610e+02, threshold=3.503e+02, percent-clipped=0.0
+2024-08-14 02:39:16,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=109605.33333333333, ans=0.125
+2024-08-14 02:39:40,796 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.35 vs. limit=15.0
+2024-08-14 02:39:44,999 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=109658.66666666667, ans=0.125
+2024-08-14 02:40:31,774 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=109765.33333333333, ans=0.0
+2024-08-14 02:40:39,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109818.66666666667, ans=0.125
+2024-08-14 02:40:57,611 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 50, loss[loss=0.164, simple_loss=0.1872, pruned_loss=0.06803, over 18994.00 frames. ], tot_loss[loss=0.1662, simple_loss=0.1822, pruned_loss=0.0729, over 827605.34 frames. ], batch size: 102, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:42:00,924 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=109925.33333333333, ans=0.0
+2024-08-14 02:42:07,167 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=109925.33333333333, ans=0.0
+2024-08-14 02:42:24,875 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.35 vs. limit=15.0
+2024-08-14 02:42:31,109 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.02 vs. limit=15.0
+2024-08-14 02:42:35,363 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=110032.0, ans=0.125
+2024-08-14 02:42:39,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=110032.0, ans=0.0
+2024-08-14 02:43:17,711 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 15, batch 100, loss[loss=0.1574, simple_loss=0.18, pruned_loss=0.06593, over 19062.00 frames. ], tot_loss[loss=0.1644, simple_loss=0.1815, pruned_loss=0.07178, over 1475114.53 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:43:23,630 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.639e+02 1.741e+02 1.916e+02 2.571e+02, threshold=3.482e+02, percent-clipped=0.0
+2024-08-14 02:44:29,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=110245.33333333333, ans=0.125
+2024-08-14 02:44:39,951 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 02:44:39,952 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:44:39,990 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:44:54,086 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 0, loss[loss=0.1862, simple_loss=0.1953, pruned_loss=0.08789, over 18504.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.1953, pruned_loss=0.08789, over 18504.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:44:54,086 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:45:16,882 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 16, validation: loss=0.1529, simple_loss=0.1739, pruned_loss=0.06493, over 1073944.00 frames.
+2024-08-14 02:45:16,882 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:45:28,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=110293.33333333333, ans=0.0
+2024-08-14 02:45:46,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=110346.66666666667, ans=0.0
+2024-08-14 02:46:26,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=110453.33333333333, ans=0.125
+2024-08-14 02:46:26,896 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=110453.33333333333, ans=0.2
+2024-08-14 02:46:34,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110506.66666666667, ans=0.1
+2024-08-14 02:47:33,727 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 50, loss[loss=0.1601, simple_loss=0.1806, pruned_loss=0.06948, over 19044.00 frames. ], tot_loss[loss=0.1582, simple_loss=0.1764, pruned_loss=0.06947, over 828171.03 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:48:12,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=110666.66666666667, ans=0.125
+2024-08-14 02:48:22,826 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.614e+02 1.779e+02 1.933e+02 2.621e+02, threshold=3.558e+02, percent-clipped=0.0
+2024-08-14 02:48:32,054 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.88 vs. limit=6.0
+2024-08-14 02:49:16,544 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=110773.33333333333, ans=0.2
+2024-08-14 02:49:20,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=110773.33333333333, ans=0.125
+2024-08-14 02:49:31,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=110773.33333333333, ans=0.2
+2024-08-14 02:49:34,513 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 16, batch 100, loss[loss=0.125, simple_loss=0.1537, pruned_loss=0.04821, over 19090.00 frames. ], tot_loss[loss=0.1586, simple_loss=0.1773, pruned_loss=0.06966, over 1476933.27 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 16.0
+2024-08-14 02:50:28,861 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 02:50:28,862 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:50:28,907 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:50:47,962 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 0, loss[loss=0.1627, simple_loss=0.1872, pruned_loss=0.06912, over 18336.00 frames. ], tot_loss[loss=0.1627, simple_loss=0.1872, pruned_loss=0.06912, over 18336.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:50:47,962 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:50:53,043 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([4.6270, 4.0767, 3.9122, 4.4789], device='cuda:2')
+2024-08-14 02:51:11,069 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 17, validation: loss=0.1498, simple_loss=0.1721, pruned_loss=0.06377, over 1073944.00 frames.
+2024-08-14 02:51:11,070 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:51:27,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=110981.33333333333, ans=0.125
+2024-08-14 02:51:40,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=111034.66666666667, ans=0.0
+2024-08-14 02:51:40,999 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=111034.66666666667, ans=0.125
+2024-08-14 02:51:42,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=111034.66666666667, ans=0.95
+2024-08-14 02:51:44,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=111034.66666666667, ans=0.2
+2024-08-14 02:52:08,414 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=111088.0, ans=0.0
+2024-08-14 02:52:10,488 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.44 vs. limit=15.0
+2024-08-14 02:52:21,963 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.59 vs. limit=15.0
+2024-08-14 02:52:58,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111194.66666666667, ans=0.125
+2024-08-14 02:52:59,543 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.599e+02 1.701e+02 1.889e+02 2.501e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-14 02:53:07,418 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 50, loss[loss=0.1407, simple_loss=0.174, pruned_loss=0.05366, over 19057.00 frames. ], tot_loss[loss=0.1569, simple_loss=0.1769, pruned_loss=0.06846, over 827125.84 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:53:35,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=111301.33333333333, ans=0.0
+2024-08-14 02:54:15,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=111408.0, ans=0.0
+2024-08-14 02:54:51,060 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.94 vs. limit=15.0
+2024-08-14 02:55:04,514 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 17, batch 100, loss[loss=0.1347, simple_loss=0.1598, pruned_loss=0.0548, over 19126.00 frames. ], tot_loss[loss=0.1553, simple_loss=0.1754, pruned_loss=0.06756, over 1475165.47 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:55:20,806 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=111514.66666666667, ans=0.125
+2024-08-14 02:55:21,239 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=12.50 vs. limit=15.0
+2024-08-14 02:55:34,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=111568.0, ans=0.0
+2024-08-14 02:55:36,483 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.04 vs. limit=15.0
+2024-08-14 02:55:54,769 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.91 vs. limit=6.0
+2024-08-14 02:56:14,213 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 02:56:14,213 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 02:56:14,265 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 02:56:27,748 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 0, loss[loss=0.1608, simple_loss=0.1858, pruned_loss=0.06784, over 18559.00 frames. ], tot_loss[loss=0.1608, simple_loss=0.1858, pruned_loss=0.06784, over 18559.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 02:56:27,748 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 02:56:58,388 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 18, validation: loss=0.1479, simple_loss=0.1705, pruned_loss=0.06271, over 1073944.00 frames.
+2024-08-14 02:56:58,388 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 02:57:05,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111669.33333333333, ans=0.07
+2024-08-14 02:57:07,690 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.60 vs. limit=10.0
+2024-08-14 02:57:43,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111669.33333333333, ans=0.125
+2024-08-14 02:58:00,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111722.66666666667, ans=0.1
+2024-08-14 02:58:03,297 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.609e+02 1.680e+02 1.858e+02 2.812e+02, threshold=3.359e+02, percent-clipped=0.0
+2024-08-14 02:58:59,586 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=111829.33333333333, ans=0.0
+2024-08-14 02:59:05,828 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=111829.33333333333, ans=0.125
+2024-08-14 02:59:54,694 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 50, loss[loss=0.1692, simple_loss=0.1913, pruned_loss=0.07357, over 18975.00 frames. ], tot_loss[loss=0.1546, simple_loss=0.1751, pruned_loss=0.06711, over 827610.12 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:00:02,811 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=111936.0, ans=0.0
+2024-08-14 03:01:19,082 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.27 vs. limit=15.0
+2024-08-14 03:01:54,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=112042.66666666667, ans=0.025
+2024-08-14 03:03:07,592 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 03:04:53,082 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 18, batch 100, loss[loss=0.141, simple_loss=0.1592, pruned_loss=0.06137, over 19135.00 frames. ], tot_loss[loss=0.1521, simple_loss=0.173, pruned_loss=0.06556, over 1477220.69 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:05:52,850 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.561e+02 1.643e+02 1.812e+02 2.261e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-14 03:06:41,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=112256.0, ans=0.015
+2024-08-14 03:06:41,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112256.0, ans=0.0
+2024-08-14 03:07:12,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=112309.33333333333, ans=0.125
+2024-08-14 03:07:12,791 INFO [dysarthria_finetune.py:1435] (2/4) (10761207808, 34072559616)
+2024-08-14 03:07:12,792 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 03:07:12,839 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 03:07:26,409 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 0, loss[loss=0.1619, simple_loss=0.1806, pruned_loss=0.07156, over 18438.00 frames. ], tot_loss[loss=0.1619, simple_loss=0.1806, pruned_loss=0.07156, over 18438.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:07:26,409 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 03:07:58,727 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 19, validation: loss=0.1464, simple_loss=0.169, pruned_loss=0.06188, over 1073944.00 frames.
+2024-08-14 03:07:58,728 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 03:08:08,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=112352.0, ans=0.0
+2024-08-14 03:08:15,783 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.88 vs. limit=15.0
+2024-08-14 03:08:43,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=112405.33333333333, ans=0.2
+2024-08-14 03:09:15,492 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.59 vs. limit=15.0
+2024-08-14 03:09:18,996 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=112458.66666666667, ans=0.125
+2024-08-14 03:09:56,934 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.91 vs. limit=15.0
+2024-08-14 03:10:24,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112512.0, ans=0.1
+2024-08-14 03:10:40,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-08-14 03:10:57,784 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 50, loss[loss=0.1517, simple_loss=0.1768, pruned_loss=0.06333, over 19013.00 frames. ], tot_loss[loss=0.1524, simple_loss=0.1722, pruned_loss=0.06632, over 827262.88 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:11:54,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:11:57,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-08-14 03:12:18,670 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-08-14 03:12:19,036 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.36 vs. limit=15.0
+2024-08-14 03:12:41,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=112725.33333333333, ans=0.2
+2024-08-14 03:12:46,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=112725.33333333333, ans=0.125
+2024-08-14 03:13:01,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=112778.66666666667, ans=0.025
+2024-08-14 03:13:37,508 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.593e+02 1.694e+02 1.909e+02 3.031e+02, threshold=3.389e+02, percent-clipped=0.0
+2024-08-14 03:14:02,404 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 19, batch 100, loss[loss=0.1167, simple_loss=0.1383, pruned_loss=0.0476, over 19169.00 frames. ], tot_loss[loss=0.1528, simple_loss=0.1726, pruned_loss=0.06654, over 1475351.90 frames. ], batch size: 134, lr: 9.92e-05, grad_scale: 16.0
+2024-08-14 03:14:05,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.64 vs. limit=22.5
+2024-08-14 03:14:08,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=112885.33333333333, ans=0.025
+2024-08-14 03:14:10,814 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=112885.33333333333, ans=0.125
+2024-08-14 03:14:10,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=112885.33333333333, ans=0.125
+2024-08-14 03:14:11,080 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.53 vs. limit=15.0
+2024-08-14 03:15:24,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=112938.66666666667, ans=0.125
+2024-08-14 03:15:54,661 INFO [dysarthria_finetune.py:1435] (2/4) (10759110656, 34072559616)
+2024-08-14 03:15:54,662 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 03:15:54,706 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 03:16:41,826 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 0, loss[loss=0.1682, simple_loss=0.1915, pruned_loss=0.07247, over 18527.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.1915, pruned_loss=0.07247, over 18527.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:16:41,827 INFO [dysarthria_finetune.py:1165] (2/4) Computing validation loss on speech
+2024-08-14 03:17:15,215 INFO [dysarthria_finetune.py:1174] (2/4) Validation on speech: Epoch 20, validation: loss=0.1449, simple_loss=0.1677, pruned_loss=0.0611, over 1073944.00 frames.
+2024-08-14 03:17:15,216 INFO [dysarthria_finetune.py:1177] (2/4) Maximum memory allocated so far is 19757MB
+2024-08-14 03:17:42,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=113040.0, ans=0.125
+2024-08-14 03:17:49,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113093.33333333333, ans=0.125
+2024-08-14 03:19:25,757 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.27 vs. limit=22.5
+2024-08-14 03:19:36,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113200.0, ans=0.1
+2024-08-14 03:19:39,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=113200.0, ans=0.1
+2024-08-14 03:21:08,393 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 50, loss[loss=0.1562, simple_loss=0.1756, pruned_loss=0.06841, over 18968.00 frames. ], tot_loss[loss=0.1489, simple_loss=0.1697, pruned_loss=0.06406, over 828106.18 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:22:10,067 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.567e+02 1.664e+02 1.868e+02 2.522e+02, threshold=3.327e+02, percent-clipped=0.0
+2024-08-14 03:22:48,923 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.40 vs. limit=22.5
+2024-08-14 03:24:59,418 INFO [dysarthria_finetune.py:1141] (2/4) Epoch 20, batch 100, loss[loss=0.1274, simple_loss=0.1493, pruned_loss=0.05271, over 19074.00 frames. ], tot_loss[loss=0.1476, simple_loss=0.1688, pruned_loss=0.06321, over 1476081.83 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:25:17,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=113573.33333333333, ans=0.0
+2024-08-14 03:25:19,980 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 03:26:46,575 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=113626.66666666667, ans=0.125
+2024-08-14 03:27:20,896 INFO [dysarthria_finetune.py:1435] (2/4) (10763304960, 34072559616)
+2024-08-14 03:27:20,897 INFO [dysarthria_finetune.py:1436] (2/4) Empty cache: before and after
+2024-08-14 03:27:20,940 INFO [dysarthria_finetune.py:1440] (2/4) (29522329600, 34072559616)
+2024-08-14 03:27:20,941 INFO [dysarthria_finetune.py:1442] (2/4) Done!
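
That "Done!" closes the rank-2 log after epoch 20. To read the convergence off files like these without scrolling, the per-epoch "Validation on speech" lines can be scraped with a few lines of Python (a standalone sketch; point it at any of the log files in exp_finetune/log/):

```python
import re
from pathlib import Path

VALID = re.compile(r"Validation on speech: Epoch (\d+), validation: loss=([\d.]+)")

def validation_curve(log_path: str) -> dict[int, float]:
    """Map epoch -> validation loss for one rank's log file."""
    curve = {}
    for line in Path(log_path).read_text().splitlines():
        if (m := VALID.search(line)):
            curve[int(m.group(1))] = float(m.group(2))
    return curve

# e.g. validation_curve("log-train-2024-08-13-23-24-47-3")
# -> {1: 0.3215, 2: 0.2907, 3: 0.2682, ..., 10: 0.1833, ...}
```
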
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-3 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-3
new file mode 100644
index 0000000000000000000000000000000000000000..925cea856c2cdcccc6a4a32d252af2769dc30730
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/log/log-train-2024-08-13-23-24-47-3
@@ -0,0 +1,559 @@
+2024-08-13 23:24:47,922 INFO [dysarthria_finetune.py:1212] (3/4) Training started
+2024-08-13 23:24:47,960 INFO [dysarthria_finetune.py:1214] (3/4) (33748090880, 34072559616)
+2024-08-13 23:24:47,960 INFO [dysarthria_finetune.py:1215] (3/4) Empty cache: before and after
+2024-08-13 23:24:48,946 INFO [dysarthria_finetune.py:1219] (3/4) (32783400960, 34072559616)
+2024-08-13 23:24:48,947 INFO [dysarthria_finetune.py:1229] (3/4) Device: cuda:3
+2024-08-13 23:24:48,990 INFO [dysarthria_finetune.py:1241] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp_finetune'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.0001, 'lr_batches': 100000.0, 'lr_epochs': 100.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': False, 'do_finetune': True, 'use_mux': False, 'init_modules': None, 'finetune_ckpt': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt', 'full_libri': False, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/data/speech_accessibility/manifests'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 20, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': True, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 0, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
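
The learning rates logged at each point below ("lr: 5.01e-05" climbing to "lr: 1.00e-04", then slowly decaying) follow icefall's Eden schedule under the base_lr=0.0001, lr_batches=100000.0, lr_epochs=100.0 settings above. A standalone sketch of that rule (assuming Eden's default 500-batch warmup; see icefall's optim.py for the real scheduler class):

```python
def eden_lr(batch: int, epoch: int, base_lr: float = 1e-4,
            lr_batches: float = 100_000.0, lr_epochs: float = 100.0,
            warmup_batches: float = 500.0) -> float:
    # Smooth decay in both batch count and epoch, with a linear warmup
    # from 0.5x to 1.0x of base_lr over the first warmup_batches steps.
    factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25 \
           * ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    warmup = 1.0 if batch >= warmup_batches else 0.5 + 0.5 * batch / warmup_batches
    return base_lr * factor * warmup

print(f"{eden_lr(batch=1, epoch=1):.2e}")    # ~5.01e-05, as at epoch 1, batch 0
print(f"{eden_lr(batch=600, epoch=4):.2e}")  # ~1.00e-04 once warmup has finished
```
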
+2024-08-13 23:24:48,991 INFO [dysarthria_finetune.py:1243] (3/4) About to create model
+2024-08-13 23:24:49,988 INFO [dysarthria_finetune.py:1247] (3/4) Number of model parameters: 65549011
+2024-08-13 23:24:49,988 INFO [dysarthria_finetune.py:769] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/libri/exp/epoch-20.pt
+2024-08-13 23:24:59,905 INFO [dysarthria_finetune.py:1275] (3/4) Using DDP
+2024-08-13 23:25:17,957 INFO [dysarthria_asr_datamodule.py:494] (3/4) About to get train cuts
+2024-08-13 23:25:18,291 INFO [dysarthria_finetune.py:1319] (3/4) CutSet(len=62255) [underlying data type: ]
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:239] (3/4) Disable MUSAN
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:257] (3/4) Enable SpecAugment
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:258] (3/4) Time warp factor: 80
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:268] (3/4) Num frame mask: 10
+2024-08-13 23:25:18,622 INFO [dysarthria_asr_datamodule.py:281] (3/4) About to create train dataset
+2024-08-13 23:25:19,282 INFO [dysarthria_asr_datamodule.py:308] (3/4) Using DynamicBucketingSampler.
+2024-08-13 23:25:20,228 INFO [dysarthria_asr_datamodule.py:325] (3/4) About to create train dataloader
+2024-08-13 23:25:20,234 INFO [dysarthria_asr_datamodule.py:500] (3/4) About to get dev cuts
+2024-08-13 23:25:24,698 INFO [dysarthria_asr_datamodule.py:356] (3/4) About to create dev dataset
+2024-08-13 23:25:28,030 INFO [dysarthria_asr_datamodule.py:373] (3/4) About to create dev dataloader
+2024-08-13 23:25:28,030 INFO [dysarthria_finetune.py:1490] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
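
The sanity check announced here pushes the most pessimistic (longest) batches of epoch 1 through a forward/backward pass before training proper, so an out-of-memory failure surfaces in minutes rather than hours in; the "Maximum memory allocated" lines that follow are its footprint report. A plain-PyTorch sketch of the idea (compute_loss and big_batches are caller-supplied placeholders, not the recipe's actual helpers):

```python
import torch

def oom_sanity_check(model, compute_loss, big_batches) -> None:
    # big_batches: a handful of worst-case batches chosen by the caller.
    # Run forward + backward once each so an OOM shows up immediately,
    # then drop the gradients and any cached activations.
    model.train()
    for batch in big_batches:
        loss = compute_loss(model, batch)
        loss.backward()
        model.zero_grad(set_to_none=True)
    torch.cuda.empty_cache()
```
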
+2024-08-13 23:27:16,791 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=17.38 vs. limit=7.5
+2024-08-13 23:27:17,110 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=19.31 vs. limit=7.5
+2024-08-13 23:27:17,920 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 23:27:19,742 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 23:32:34,804 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 23:32:36,808 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 23:35:38,405 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 23:35:41,667 INFO [dysarthria_finetune.py:1518] (3/4) Maximum memory allocated so far is 11778MB
+2024-08-13 23:36:58,745 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 0, loss[loss=0.2854, simple_loss=0.2712, pruned_loss=0.1421, over 18634.00 frames. ], tot_loss[loss=0.2854, simple_loss=0.2712, pruned_loss=0.1421, over 18634.00 frames. ], batch size: 65, lr: 5.01e-05, grad_scale: 2.0
+2024-08-13 23:36:58,746 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-13 23:49:47,045 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 1, validation: loss=0.3215, simple_loss=0.3039, pruned_loss=0.1764, over 1073944.00 frames.
+2024-08-13 23:49:47,367 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14284MB
+2024-08-13 23:51:36,335 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.78 vs. limit=22.5
+2024-08-13 23:51:42,549 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100000.0, ans=0.1
+2024-08-13 23:53:41,364 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=100000.0, ans=0.125
+2024-08-13 23:59:58,835 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=22.5
+2024-08-14 00:15:49,520 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.298e+02 1.050e+03 1.114e+03 1.201e+03 1.245e+03, threshold=4.457e+03, percent-clipped=0.0
+2024-08-14 00:23:15,349 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.960e+02 9.450e+02 1.050e+03 1.152e+03 1.319e+03, threshold=4.200e+03, percent-clipped=0.0
+2024-08-14 00:27:45,410 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.30 vs. limit=15.0
+2024-08-14 00:29:33,642 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=45.82 vs. limit=15.0
+2024-08-14 00:29:33,937 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=15.71 vs. limit=15.0
+2024-08-14 00:29:53,831 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.241e+02 7.298e+02 9.450e+02 1.050e+03 1.319e+03, threshold=3.780e+03, percent-clipped=0.0
+2024-08-14 00:31:20,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=100213.33333333333, ans=0.07
+2024-08-14 00:33:53,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=100213.33333333333, ans=0.1
+2024-08-14 00:35:42,823 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=100213.33333333333, ans=0.125
+2024-08-14 00:37:01,624 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 50, loss[loss=0.3685, simple_loss=0.3467, pruned_loss=0.2133, over 19001.00 frames. ], tot_loss[loss=0.3534, simple_loss=0.3333, pruned_loss=0.1995, over 828973.50 frames. ], batch size: 102, lr: 5.51e-05, grad_scale: 2.0
+2024-08-14 00:42:02,060 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.16 vs. limit=6.0
+2024-08-14 00:42:12,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=100266.66666666667, ans=22.5
+2024-08-14 00:47:09,378 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=100320.0, ans=0.2
+2024-08-14 00:49:53,844 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.60 vs. limit=6.0
+2024-08-14 00:52:25,355 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=100373.33333333333, ans=0.125
+2024-08-14 00:53:36,533 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.72 vs. limit=15.0
+2024-08-14 00:55:10,396 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 00:58:17,519 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.64 vs. limit=22.5
+2024-08-14 01:01:58,593 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.501e+02 5.963e+02 7.298e+02 8.800e+02 1.319e+03, threshold=1.460e+03, percent-clipped=0.0
+2024-08-14 01:01:58,627 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 1, batch 100, loss[loss=0.3293, simple_loss=0.3105, pruned_loss=0.1828, over 19146.00 frames. ], tot_loss[loss=0.3393, simple_loss=0.3202, pruned_loss=0.1893, over 1476162.18 frames. ], batch size: 133, lr: 6.01e-05, grad_scale: 4.0
+2024-08-14 01:10:39,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=100640.0, ans=0.0
+2024-08-14 01:11:48,001 INFO [dysarthria_finetune.py:1435] (3/4) (13995016192, 34072559616)
+2024-08-14 01:11:48,001 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:11:48,057 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:12:35,401 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 0, loss[loss=0.3084, simple_loss=0.2937, pruned_loss=0.153, over 18501.00 frames. ], tot_loss[loss=0.3084, simple_loss=0.2937, pruned_loss=0.153, over 18501.00 frames. ], batch size: 65, lr: 6.29e-05, grad_scale: 8.0
+2024-08-14 01:12:35,402 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:16:55,988 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 2, validation: loss=0.2907, simple_loss=0.276, pruned_loss=0.149, over 1073944.00 frames.
+2024-08-14 01:16:55,989 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:18:48,591 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=22.13 vs. limit=15.0
+2024-08-14 01:20:02,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=100736.0, ans=0.0
+2024-08-14 01:20:02,979 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=100736.0, ans=0.125
+2024-08-14 01:20:19,621 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=100789.33333333333, ans=0.025
+2024-08-14 01:20:51,646 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=100789.33333333333, ans=0.0
+2024-08-14 01:20:51,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100789.33333333333, ans=0.1
+2024-08-14 01:22:24,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100896.0, ans=0.0
+2024-08-14 01:22:24,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=100896.0, ans=0.125
+2024-08-14 01:22:29,974 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=100896.0, ans=0.125
+2024-08-14 01:24:54,914 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 50, loss[loss=0.3246, simple_loss=0.3073, pruned_loss=0.1718, over 18956.00 frames. ], tot_loss[loss=0.3216, simple_loss=0.3039, pruned_loss=0.1733, over 828460.00 frames. ], batch size: 102, lr: 6.79e-05, grad_scale: 8.0
+2024-08-14 01:25:46,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=100949.33333333333, ans=0.0
+2024-08-14 01:26:11,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=101002.66666666667, ans=0.0
+2024-08-14 01:27:29,014 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.249e+02 4.347e+02 4.852e+02 5.543e+02 7.043e+02, threshold=9.703e+02, percent-clipped=0.0
+2024-08-14 01:27:48,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=101056.0, ans=0.0
+2024-08-14 01:27:53,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=101056.0, ans=0.125
+2024-08-14 01:28:17,298 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=101109.33333333333, ans=0.0
+2024-08-14 01:28:19,615 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.38 vs. limit=6.0
+2024-08-14 01:28:31,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101109.33333333333, ans=0.125
+2024-08-14 01:28:33,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=101162.66666666667, ans=0.0
+2024-08-14 01:29:04,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=101216.0, ans=0.125
+2024-08-14 01:29:06,626 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 2, batch 100, loss[loss=0.2677, simple_loss=0.2549, pruned_loss=0.1349, over 19077.00 frames. ], tot_loss[loss=0.3107, simple_loss=0.2943, pruned_loss=0.1643, over 1476919.42 frames. ], batch size: 133, lr: 7.29e-05, grad_scale: 8.0
+2024-08-14 01:29:10,198 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=101216.0, ans=0.125
+2024-08-14 01:30:03,664 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=29.02 vs. limit=22.5
+2024-08-14 01:30:15,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101322.66666666667, ans=0.125
+2024-08-14 01:30:34,219 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=17.92 vs. limit=22.5
+2024-08-14 01:30:36,800 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:30:42,018 INFO [dysarthria_finetune.py:1435] (3/4) (13936295936, 34072559616)
+2024-08-14 01:30:42,018 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:30:42,061 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:30:49,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=101370.66666666667, ans=0.0
+2024-08-14 01:30:55,538 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 0, loss[loss=0.2691, simple_loss=0.2578, pruned_loss=0.1294, over 18579.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.2578, pruned_loss=0.1294, over 18579.00 frames. ], batch size: 65, lr: 7.58e-05, grad_scale: 16.0
+2024-08-14 01:30:55,538 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:31:18,579 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 3, validation: loss=0.2682, simple_loss=0.2564, pruned_loss=0.1309, over 1073944.00 frames.
+2024-08-14 01:31:18,580 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:31:23,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=101370.66666666667, ans=0.125
+2024-08-14 01:31:55,143 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.57 vs. limit=10.0
+2024-08-14 01:32:00,499 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=101424.0, ans=0.0
+2024-08-14 01:32:19,762 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=101477.33333333333, ans=0.025
+2024-08-14 01:32:58,605 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.574e+02 3.350e+02 3.692e+02 4.154e+02 5.648e+02, threshold=7.384e+02, percent-clipped=0.0
+2024-08-14 01:33:14,949 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 50, loss[loss=0.2948, simple_loss=0.283, pruned_loss=0.141, over 19113.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.2815, pruned_loss=0.1506, over 827781.85 frames. ], batch size: 102, lr: 8.08e-05, grad_scale: 16.0
+2024-08-14 01:33:18,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=101637.33333333333, ans=0.2
+2024-08-14 01:33:58,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=101690.66666666667, ans=0.2
+2024-08-14 01:34:25,415 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.15 vs. limit=6.0
+2024-08-14 01:34:46,031 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=101797.33333333333, ans=0.2
+2024-08-14 01:35:00,141 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.10 vs. limit=6.0
+2024-08-14 01:35:08,697 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 3, batch 100, loss[loss=0.2637, simple_loss=0.254, pruned_loss=0.124, over 19145.00 frames. ], tot_loss[loss=0.2875, simple_loss=0.2739, pruned_loss=0.1457, over 1476240.06 frames. ], batch size: 133, lr: 8.58e-05, grad_scale: 16.0
+2024-08-14 01:35:16,165 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:35:29,340 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.75 vs. limit=6.0
+2024-08-14 01:35:53,425 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=102010.66666666667, ans=0.125
+2024-08-14 01:36:03,539 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=8.23 vs. limit=12.0
+2024-08-14 01:36:03,974 INFO [dysarthria_finetune.py:1435] (3/4) (13969850368, 34072559616)
+2024-08-14 01:36:04,625 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:36:04,673 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:36:20,153 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 0, loss[loss=0.3092, simple_loss=0.2951, pruned_loss=0.1555, over 18645.00 frames. ], tot_loss[loss=0.3092, simple_loss=0.2951, pruned_loss=0.1555, over 18645.00 frames. ], batch size: 65, lr: 8.86e-05, grad_scale: 32.0
+2024-08-14 01:36:20,153 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:36:43,053 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 4, validation: loss=0.2499, simple_loss=0.241, pruned_loss=0.1173, over 1073944.00 frames.
+2024-08-14 01:36:43,054 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:36:57,268 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.15 vs. limit=15.0
+2024-08-14 01:37:14,065 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 2.841e+02 3.076e+02 3.396e+02 5.357e+02, threshold=6.153e+02, percent-clipped=0.0
+2024-08-14 01:37:21,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=102106.66666666667, ans=0.125
+2024-08-14 01:37:25,534 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=14.57 vs. limit=15.0
+2024-08-14 01:37:34,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=102160.0, ans=0.0
+2024-08-14 01:37:37,755 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=102160.0, ans=10.0
+2024-08-14 01:37:39,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-08-14 01:37:43,587 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=26.14 vs. limit=15.0
+2024-08-14 01:37:49,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=102213.33333333333, ans=0.0
+2024-08-14 01:38:09,265 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.11 vs. limit=10.0
+2024-08-14 01:38:23,456 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 50, loss[loss=0.2704, simple_loss=0.2583, pruned_loss=0.136, over 18993.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.2647, pruned_loss=0.1366, over 827748.42 frames. ], batch size: 102, lr: 9.36e-05, grad_scale: 32.0
+2024-08-14 01:38:36,641 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=22.38 vs. limit=15.0
+2024-08-14 01:38:40,461 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=102320.0, ans=0.125
+2024-08-14 01:38:46,947 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=12.37 vs. limit=15.0
+2024-08-14 01:38:58,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=102373.33333333333, ans=0.1
+2024-08-14 01:39:04,051 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102426.66666666667, ans=0.1
+2024-08-14 01:39:11,984 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=102426.66666666667, ans=0.125
+2024-08-14 01:39:12,323 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.65 vs. limit=15.0
+2024-08-14 01:39:19,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102426.66666666667, ans=0.1
+2024-08-14 01:39:45,266 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.79 vs. limit=15.0
+2024-08-14 01:39:48,685 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:39:54,424 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=102533.33333333333, ans=0.025
+2024-08-14 01:40:00,893 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 4, batch 100, loss[loss=0.2655, simple_loss=0.2537, pruned_loss=0.1338, over 19161.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.2576, pruned_loss=0.1315, over 1475350.41 frames. ], batch size: 133, lr: 9.86e-05, grad_scale: 32.0
+2024-08-14 01:40:04,250 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.92 vs. limit=15.0
+2024-08-14 01:40:18,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=102586.66666666667, ans=0.05
+2024-08-14 01:40:30,558 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.524e+02 2.719e+02 2.975e+02 4.617e+02, threshold=5.438e+02, percent-clipped=0.0
+2024-08-14 01:40:39,684 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.43 vs. limit=15.0
+2024-08-14 01:40:53,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=102693.33333333333, ans=0.125
+2024-08-14 01:40:54,999 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-14 01:40:55,781 INFO [dysarthria_finetune.py:1435] (3/4) (147521536, 34072559616)
+2024-08-14 01:40:55,782 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:40:55,864 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:41:11,067 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 0, loss[loss=0.2403, simple_loss=0.2335, pruned_loss=0.1112, over 18566.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2335, pruned_loss=0.1112, over 18566.00 frames. ], batch size: 65, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:41:11,068 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:41:34,535 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 5, validation: loss=0.2343, simple_loss=0.2283, pruned_loss=0.1066, over 1073944.00 frames.
+2024-08-14 01:41:34,536 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:42:12,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=102794.66666666667, ans=0.125
+2024-08-14 01:42:32,198 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=102848.0, ans=0.0
+2024-08-14 01:43:29,177 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 50, loss[loss=0.2675, simple_loss=0.2579, pruned_loss=0.1299, over 18976.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.2464, pruned_loss=0.1215, over 827749.28 frames. ], batch size: 102, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:43:34,279 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=103008.0, ans=0.0
+2024-08-14 01:44:16,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=103061.33333333333, ans=0.0
+2024-08-14 01:44:56,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=103168.0, ans=0.125
+2024-08-14 01:44:59,449 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.063e+02 2.398e+02 2.550e+02 2.967e+02 4.732e+02, threshold=5.099e+02, percent-clipped=0.0
+2024-08-14 01:45:02,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103168.0, ans=0.1
+2024-08-14 01:45:16,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=103221.33333333333, ans=0.0
+2024-08-14 01:45:27,200 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 5, batch 100, loss[loss=0.2397, simple_loss=0.2332, pruned_loss=0.1122, over 19091.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.2432, pruned_loss=0.1201, over 1475913.16 frames. ], batch size: 133, lr: 1.00e-04, grad_scale: 32.0
+2024-08-14 01:45:34,028 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103274.66666666667, ans=0.1
+2024-08-14 01:46:04,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=103381.33333333333, ans=0.0
+2024-08-14 01:46:10,054 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 01:46:18,141 INFO [dysarthria_finetune.py:1435] (3/4) (13957267456, 34072559616)
+2024-08-14 01:46:18,141 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:46:18,187 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:46:32,108 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 0, loss[loss=0.239, simple_loss=0.2331, pruned_loss=0.1115, over 18684.00 frames. ], tot_loss[loss=0.239, simple_loss=0.2331, pruned_loss=0.1115, over 18684.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:46:32,108 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:46:55,695 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 6, validation: loss=0.2214, simple_loss=0.2182, pruned_loss=0.09842, over 1073944.00 frames.
+2024-08-14 01:46:55,695 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:47:15,409 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.76 vs. limit=15.0
+2024-08-14 01:47:20,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=103424.0, ans=6.0
+2024-08-14 01:48:02,068 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=103530.66666666667, ans=0.125
+2024-08-14 01:48:15,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103530.66666666667, ans=0.1
+2024-08-14 01:48:20,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=103584.0, ans=0.2
+2024-08-14 01:48:27,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=103584.0, ans=0.125
+2024-08-14 01:49:03,615 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=103637.33333333333, ans=0.125
+2024-08-14 01:49:09,884 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 50, loss[loss=0.2246, simple_loss=0.2213, pruned_loss=0.1012, over 19058.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.2337, pruned_loss=0.1123, over 828493.81 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:49:16,743 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=103690.66666666667, ans=0.0
+2024-08-14 01:49:30,220 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=103690.66666666667, ans=0.125
+2024-08-14 01:49:32,953 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.293e+02 2.374e+02 2.625e+02 4.193e+02, threshold=4.747e+02, percent-clipped=0.0
+2024-08-14 01:49:59,462 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=11.70 vs. limit=15.0
+2024-08-14 01:49:59,513 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.33 vs. limit=6.0
+2024-08-14 01:51:17,787 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 6, batch 100, loss[loss=0.2107, simple_loss=0.2083, pruned_loss=0.09466, over 19113.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.2319, pruned_loss=0.1108, over 1475249.23 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:51:57,680 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=104010.66666666667, ans=0.025
+2024-08-14 01:52:10,358 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=104064.0, ans=0.125
+2024-08-14 01:52:24,217 INFO [dysarthria_finetune.py:1435] (3/4) (413859840, 34072559616)
+2024-08-14 01:52:24,218 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:52:24,301 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:52:37,505 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 0, loss[loss=0.2662, simple_loss=0.2552, pruned_loss=0.1346, over 18595.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.2552, pruned_loss=0.1346, over 18595.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:52:37,506 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:53:01,317 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 7, validation: loss=0.2103, simple_loss=0.2098, pruned_loss=0.0916, over 1073944.00 frames.
+2024-08-14 01:53:01,317 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:53:42,715 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=104213.33333333333, ans=0.125
+2024-08-14 01:54:01,266 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.137e+02 2.271e+02 2.445e+02 3.999e+02, threshold=4.542e+02, percent-clipped=0.0
+2024-08-14 01:54:40,880 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 50, loss[loss=0.2308, simple_loss=0.2286, pruned_loss=0.1046, over 18963.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2277, pruned_loss=0.107, over 827887.87 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:55:05,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=104426.66666666667, ans=0.1
+2024-08-14 01:55:12,281 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=104426.66666666667, ans=0.0
+2024-08-14 01:55:45,621 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=104533.33333333333, ans=0.025
+2024-08-14 01:55:59,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=104586.66666666667, ans=0.125
+2024-08-14 01:56:12,675 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.34 vs. limit=22.5
+2024-08-14 01:56:18,976 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 7, batch 100, loss[loss=0.2045, simple_loss=0.2048, pruned_loss=0.08995, over 19124.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2218, pruned_loss=0.1026, over 1475075.17 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:56:41,480 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.14 vs. limit=15.0
+2024-08-14 01:57:10,852 INFO [dysarthria_finetune.py:1435] (3/4) (12835291136, 34072559616)
+2024-08-14 01:57:10,852 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 01:57:10,899 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 01:57:24,754 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 0, loss[loss=0.2156, simple_loss=0.2149, pruned_loss=0.09707, over 18547.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2149, pruned_loss=0.09707, over 18547.00 frames. ], batch size: 65, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 01:57:24,754 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 01:57:48,461 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 8, validation: loss=0.2004, simple_loss=0.2027, pruned_loss=0.08579, over 1073944.00 frames.
+2024-08-14 01:57:48,462 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 01:57:54,664 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.054e+02 2.212e+02 2.317e+02 3.796e+02, threshold=4.423e+02, percent-clipped=0.0
+2024-08-14 01:58:08,267 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=13.63 vs. limit=12.0
+2024-08-14 01:58:25,980 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.16 vs. limit=15.0
+2024-08-14 01:58:47,515 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.97 vs. limit=15.0
+2024-08-14 01:59:10,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=105002.66666666667, ans=0.125
+2024-08-14 01:59:59,173 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 50, loss[loss=0.2288, simple_loss=0.2264, pruned_loss=0.1064, over 18964.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2165, pruned_loss=0.09738, over 828441.23 frames. ], batch size: 102, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:00:24,583 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.45 vs. limit=15.0
+2024-08-14 02:01:24,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=105269.33333333333, ans=0.0
+2024-08-14 02:01:36,505 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 8, batch 100, loss[loss=0.2167, simple_loss=0.2185, pruned_loss=0.09597, over 19119.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2137, pruned_loss=0.09557, over 1475727.62 frames. ], batch size: 133, lr: 9.99e-05, grad_scale: 32.0
+2024-08-14 02:01:42,324 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.040e+02 2.200e+02 2.368e+02 3.520e+02, threshold=4.401e+02, percent-clipped=0.0
+2024-08-14 02:01:43,879 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=31.83 vs. limit=22.5
+2024-08-14 02:01:49,429 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.37 vs. limit=10.0
+2024-08-14 02:01:53,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105322.66666666667, ans=0.1
+2024-08-14 02:02:00,443 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=105376.0, ans=0.0
+2024-08-14 02:02:15,879 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=8.07 vs. limit=12.0
+2024-08-14 02:02:28,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=105429.33333333333, ans=0.0
+2024-08-14 02:02:29,636 INFO [dysarthria_finetune.py:1435] (3/4) (12810125312, 34072559616)
+2024-08-14 02:02:29,636 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:02:29,684 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:02:42,914 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 0, loss[loss=0.2212, simple_loss=0.2252, pruned_loss=0.09584, over 18777.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2252, pruned_loss=0.09584, over 18777.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:02:42,914 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:03:19,142 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 9, validation: loss=0.1911, simple_loss=0.1962, pruned_loss=0.08053, over 1073944.00 frames.
+2024-08-14 02:03:19,143 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:04:18,991 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=105584.0, ans=0.125
+2024-08-14 02:05:05,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=105637.33333333333, ans=0.025
+2024-08-14 02:05:12,564 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.98 vs. limit=10.0
+2024-08-14 02:06:19,736 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 50, loss[loss=0.2134, simple_loss=0.2172, pruned_loss=0.09364, over 18965.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2074, pruned_loss=0.09092, over 827503.70 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:06:34,725 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=105744.0, ans=0.025
+2024-08-14 02:07:03,078 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105797.33333333333, ans=0.125
+2024-08-14 02:07:14,431 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=105850.66666666667, ans=0.09899494936611666
+2024-08-14 02:07:22,958 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.009e+02 2.115e+02 2.263e+02 3.410e+02, threshold=4.229e+02, percent-clipped=0.0
+2024-08-14 02:07:37,412 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=105904.0, ans=0.025
+2024-08-14 02:07:41,830 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105904.0, ans=0.125
+2024-08-14 02:08:31,693 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.22 vs. limit=22.5
+2024-08-14 02:08:32,109 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 9, batch 100, loss[loss=0.1813, simple_loss=0.1873, pruned_loss=0.07712, over 19159.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2048, pruned_loss=0.08975, over 1475225.92 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 16.0
+2024-08-14 02:08:33,696 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=106010.66666666667, ans=0.125
+2024-08-14 02:08:43,430 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=106010.66666666667, ans=0.05
+2024-08-14 02:08:53,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=106010.66666666667, ans=0.2
+2024-08-14 02:09:26,614 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.68 vs. limit=22.5
+2024-08-14 02:09:32,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=106117.33333333333, ans=0.0
+2024-08-14 02:09:37,395 INFO [dysarthria_finetune.py:1435] (3/4) (13986627584, 34072559616)
+2024-08-14 02:09:37,395 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:09:37,436 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:09:53,509 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 0, loss[loss=0.1941, simple_loss=0.1986, pruned_loss=0.08522, over 18587.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.1986, pruned_loss=0.08522, over 18587.00 frames. ], batch size: 65, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:09:53,509 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:10:16,415 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 10, validation: loss=0.1833, simple_loss=0.191, pruned_loss=0.07653, over 1073944.00 frames.
+2024-08-14 02:10:16,416 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:10:36,472 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.15 vs. limit=22.5
+2024-08-14 02:10:40,819 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=106218.66666666667, ans=0.2
+2024-08-14 02:10:40,964 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106218.66666666667, ans=0.125
+2024-08-14 02:10:47,024 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.03 vs. limit=6.0
+2024-08-14 02:11:17,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=106325.33333333333, ans=0.125
+2024-08-14 02:11:47,864 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 1.913e+02 2.021e+02 2.184e+02 3.494e+02, threshold=4.042e+02, percent-clipped=0.0
+2024-08-14 02:11:55,159 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=106432.0, ans=0.0
+2024-08-14 02:11:55,875 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 50, loss[loss=0.2107, simple_loss=0.2173, pruned_loss=0.09176, over 19101.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2016, pruned_loss=0.08822, over 827631.91 frames. ], batch size: 102, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:11:59,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=106432.0, ans=0.125
+2024-08-14 02:11:59,068 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=106432.0, ans=0.2
+2024-08-14 02:12:03,045 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=106432.0, ans=15.0
+2024-08-14 02:12:18,735 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=106485.33333333333, ans=0.025
+2024-08-14 02:12:20,599 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=106485.33333333333, ans=0.125
+2024-08-14 02:13:33,244 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 10, batch 100, loss[loss=0.1796, simple_loss=0.1869, pruned_loss=0.07732, over 19051.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2004, pruned_loss=0.08596, over 1475773.03 frames. ], batch size: 133, lr: 9.98e-05, grad_scale: 32.0
+2024-08-14 02:13:42,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-08-14 02:13:46,118 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=106698.66666666667, ans=0.0
+2024-08-14 02:14:20,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106805.33333333333, ans=0.125
+2024-08-14 02:14:20,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=106805.33333333333, ans=0.0
+2024-08-14 02:14:22,089 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=106805.33333333333, ans=0.2
+2024-08-14 02:14:26,593 INFO [dysarthria_finetune.py:1435] (3/4) (158007296, 34072559616)
+2024-08-14 02:14:26,594 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:14:26,665 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:14:39,664 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 0, loss[loss=0.2055, simple_loss=0.2098, pruned_loss=0.09303, over 18604.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2098, pruned_loss=0.09303, over 18604.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:14:39,664 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:15:02,461 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 11, validation: loss=0.1768, simple_loss=0.1869, pruned_loss=0.07357, over 1073944.00 frames.
+2024-08-14 02:15:02,462 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:15:31,966 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.92 vs. limit=15.0
+2024-08-14 02:15:33,373 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=106906.66666666667, ans=0.0
+2024-08-14 02:15:35,909 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 1.865e+02 1.931e+02 2.118e+02 3.052e+02, threshold=3.863e+02, percent-clipped=0.0
+2024-08-14 02:15:50,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106960.0, ans=0.125
+2024-08-14 02:15:52,630 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.67 vs. limit=15.0
+2024-08-14 02:15:58,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106960.0, ans=0.1
+2024-08-14 02:16:15,598 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.03 vs. limit=22.5
+2024-08-14 02:16:21,467 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=11.73 vs. limit=12.0
+2024-08-14 02:16:30,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=107066.66666666667, ans=0.125
+2024-08-14 02:16:32,524 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107066.66666666667, ans=0.125
+2024-08-14 02:16:45,046 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 50, loss[loss=0.1846, simple_loss=0.1953, pruned_loss=0.07778, over 19110.00 frames. ], tot_loss[loss=0.181, simple_loss=0.1902, pruned_loss=0.07711, over 828132.31 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:16:46,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=107120.0, ans=0.125
+2024-08-14 02:17:19,535 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=107173.33333333333, ans=0.025
+2024-08-14 02:17:19,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=107173.33333333333, ans=0.2
+2024-08-14 02:18:50,472 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 11, batch 100, loss[loss=0.1525, simple_loss=0.1703, pruned_loss=0.05702, over 19127.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.1926, pruned_loss=0.07929, over 1475363.18 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 16.0
+2024-08-14 02:19:36,870 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.754e+02 1.842e+02 1.998e+02 3.456e+02, threshold=3.684e+02, percent-clipped=0.0
+2024-08-14 02:19:51,213 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.10 vs. limit=15.0
+2024-08-14 02:19:59,981 INFO [dysarthria_finetune.py:1435] (3/4) (13980336128, 34072559616)
+2024-08-14 02:19:59,982 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:20:00,025 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:20:13,223 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 0, loss[loss=0.1876, simple_loss=0.1997, pruned_loss=0.0795, over 18650.00 frames. ], tot_loss[loss=0.1876, simple_loss=0.1997, pruned_loss=0.0795, over 18650.00 frames. ], batch size: 65, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:20:13,223 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:20:42,014 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 12, validation: loss=0.1712, simple_loss=0.1836, pruned_loss=0.0713, over 1073944.00 frames.
+2024-08-14 02:20:42,014 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:21:13,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-08-14 02:21:41,815 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107594.66666666667, ans=0.1
+2024-08-14 02:23:50,836 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=107754.66666666667, ans=0.95
+2024-08-14 02:24:17,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-14 02:24:25,100 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 50, loss[loss=0.1629, simple_loss=0.1835, pruned_loss=0.06146, over 19037.00 frames. ], tot_loss[loss=0.1809, simple_loss=0.1912, pruned_loss=0.07819, over 828666.57 frames. ], batch size: 102, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:24:37,079 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.99 vs. limit=15.0
+2024-08-14 02:25:06,226 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=107861.33333333333, ans=0.0
+2024-08-14 02:26:20,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=107914.66666666667, ans=0.125
+2024-08-14 02:26:30,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=107914.66666666667, ans=0.125
+2024-08-14 02:26:51,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-14 02:27:04,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107968.0, ans=0.1
+2024-08-14 02:27:09,000 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 1.754e+02 1.846e+02 2.049e+02 2.889e+02, threshold=3.691e+02, percent-clipped=0.0
+2024-08-14 02:27:13,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=108021.33333333333, ans=0.025
+2024-08-14 02:27:36,591 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=108074.66666666667, ans=0.0
+2024-08-14 02:27:37,278 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 12, batch 100, loss[loss=0.1505, simple_loss=0.1636, pruned_loss=0.0625, over 19142.00 frames. ], tot_loss[loss=0.1784, simple_loss=0.1898, pruned_loss=0.07656, over 1477170.44 frames. ], batch size: 133, lr: 9.97e-05, grad_scale: 32.0
+2024-08-14 02:28:03,813 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=108128.0, ans=0.125
+2024-08-14 02:28:48,193 INFO [dysarthria_finetune.py:1435] (3/4) (13997113344, 34072559616)
+2024-08-14 02:28:48,194 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:28:48,236 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:29:01,644 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 0, loss[loss=0.1797, simple_loss=0.1891, pruned_loss=0.08004, over 18629.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.1891, pruned_loss=0.08004, over 18629.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:29:01,644 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:29:08,193 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([6.5646, 5.8579, 6.1448, 5.9949], device='cuda:3')
+2024-08-14 02:29:24,537 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 13, validation: loss=0.1662, simple_loss=0.1808, pruned_loss=0.06949, over 1073944.00 frames.
+2024-08-14 02:29:24,538 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:29:28,339 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=19.60 vs. limit=15.0
+2024-08-14 02:29:30,175 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108229.33333333333, ans=0.125
+2024-08-14 02:29:32,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=108229.33333333333, ans=0.0
+2024-08-14 02:29:42,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108229.33333333333, ans=0.0
+2024-08-14 02:29:51,579 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=108282.66666666667, ans=0.125
+2024-08-14 02:30:15,208 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=108336.0, ans=0.0
+2024-08-14 02:30:19,757 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=108336.0, ans=0.0
+2024-08-14 02:30:27,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=108389.33333333333, ans=0.0
+2024-08-14 02:31:06,629 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 50, loss[loss=0.1766, simple_loss=0.1889, pruned_loss=0.07699, over 19050.00 frames. ], tot_loss[loss=0.1754, simple_loss=0.1875, pruned_loss=0.07624, over 828311.79 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:31:14,013 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=108496.0, ans=0.125
+2024-08-14 02:31:14,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=108496.0, ans=0.2
+2024-08-14 02:31:26,704 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.68 vs. limit=22.5
+2024-08-14 02:31:27,175 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.723e+02 1.826e+02 1.962e+02 2.693e+02, threshold=3.652e+02, percent-clipped=0.0
+2024-08-14 02:31:30,426 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=108549.33333333333, ans=0.0
+2024-08-14 02:32:02,312 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108602.66666666667, ans=0.125
+2024-08-14 02:32:06,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=108656.0, ans=0.025
+2024-08-14 02:32:11,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108656.0, ans=0.0
+2024-08-14 02:32:28,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=108709.33333333333, ans=0.2
+2024-08-14 02:32:28,842 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108709.33333333333, ans=0.125
+2024-08-14 02:32:45,024 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 13, batch 100, loss[loss=0.1724, simple_loss=0.1871, pruned_loss=0.07406, over 19095.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.1868, pruned_loss=0.07517, over 1474662.24 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:32:55,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=108762.66666666667, ans=0.125
+2024-08-14 02:33:01,518 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:33:05,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=108816.0, ans=0.09899494936611666
+2024-08-14 02:33:07,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=108816.0, ans=0.125
+2024-08-14 02:33:17,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=108816.0, ans=0.0
+2024-08-14 02:33:21,028 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=108816.0, ans=0.125
+2024-08-14 02:33:26,698 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=108869.33333333333, ans=0.0
+2024-08-14 02:33:30,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=108869.33333333333, ans=10.0
+2024-08-14 02:33:38,638 INFO [dysarthria_finetune.py:1435] (3/4) (13955170304, 34072559616)
+2024-08-14 02:33:38,639 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:33:38,682 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:33:51,769 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 0, loss[loss=0.2076, simple_loss=0.2169, pruned_loss=0.09542, over 18650.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2169, pruned_loss=0.09542, over 18650.00 frames. ], batch size: 65, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:33:51,770 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:34:15,234 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 14, validation: loss=0.1615, simple_loss=0.1782, pruned_loss=0.06778, over 1073944.00 frames.
+2024-08-14 02:34:15,235 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:34:28,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108917.33333333333, ans=0.1
+2024-08-14 02:34:52,450 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.77 vs. limit=15.0
+2024-08-14 02:35:16,113 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.678e+02 1.779e+02 1.987e+02 2.879e+02, threshold=3.559e+02, percent-clipped=0.0
+2024-08-14 02:35:44,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=109130.66666666667, ans=0.125
+2024-08-14 02:35:50,321 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.18 vs. limit=15.0
+2024-08-14 02:35:52,700 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 50, loss[loss=0.1424, simple_loss=0.1678, pruned_loss=0.0535, over 19012.00 frames. ], tot_loss[loss=0.1715, simple_loss=0.186, pruned_loss=0.07465, over 829335.16 frames. ], batch size: 102, lr: 9.96e-05, grad_scale: 32.0
+2024-08-14 02:36:09,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109184.0, ans=0.2
+2024-08-14 02:36:17,543 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.19 vs. limit=6.0
+2024-08-14 02:36:28,979 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109237.33333333333, ans=0.1
+2024-08-14 02:36:30,742 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 02:36:59,457 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.54 vs. limit=15.0
+2024-08-14 02:37:28,028 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=109450.66666666667, ans=0.0
+2024-08-14 02:37:28,721 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 14, batch 100, loss[loss=0.1599, simple_loss=0.1767, pruned_loss=0.06846, over 19114.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.1837, pruned_loss=0.07297, over 1476363.01 frames. ], batch size: 133, lr: 9.96e-05, grad_scale: 16.0
+2024-08-14 02:37:45,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=109450.66666666667, ans=0.0
+2024-08-14 02:37:52,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=109504.0, ans=0.0
+2024-08-14 02:38:05,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=109557.33333333333, ans=0.125
+2024-08-14 02:38:07,741 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=109557.33333333333, ans=0.0
+2024-08-14 02:38:21,621 INFO [dysarthria_finetune.py:1435] (3/4) (13988724736, 34072559616)
+2024-08-14 02:38:21,622 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:38:21,675 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:38:34,952 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 0, loss[loss=0.1615, simple_loss=0.1796, pruned_loss=0.06895, over 18716.00 frames. ], tot_loss[loss=0.1615, simple_loss=0.1796, pruned_loss=0.06895, over 18716.00 frames. ], batch size: 65, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:38:34,952 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:38:57,684 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 15, validation: loss=0.1571, simple_loss=0.176, pruned_loss=0.06629, over 1073944.00 frames.
+2024-08-14 02:38:57,685 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:39:00,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=109605.33333333333, ans=0.2
+2024-08-14 02:39:03,446 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-08-14 02:39:07,274 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 1.642e+02 1.752e+02 1.914e+02 2.610e+02, threshold=3.503e+02, percent-clipped=0.0
+2024-08-14 02:39:16,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=109605.33333333333, ans=0.0
+2024-08-14 02:39:50,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=109712.0, ans=0.125
+2024-08-14 02:40:07,375 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.90 vs. limit=15.0
+2024-08-14 02:40:19,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=109765.33333333333, ans=0.04949747468305833
+2024-08-14 02:40:41,953 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=109818.66666666667, ans=0.125
+2024-08-14 02:40:49,729 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=109818.66666666667, ans=0.07
+2024-08-14 02:40:57,638 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 50, loss[loss=0.1329, simple_loss=0.1539, pruned_loss=0.05384, over 19179.00 frames. ], tot_loss[loss=0.1639, simple_loss=0.1805, pruned_loss=0.07142, over 827713.24 frames. ], batch size: 103, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:42:05,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=109925.33333333333, ans=0.125
+2024-08-14 02:42:24,803 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.43 vs. limit=15.0
+2024-08-14 02:42:43,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=110032.0, ans=0.125
+2024-08-14 02:42:55,232 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=110085.33333333333, ans=0.0
+2024-08-14 02:43:17,709 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 15, batch 100, loss[loss=0.1376, simple_loss=0.1601, pruned_loss=0.05611, over 19073.00 frames. ], tot_loss[loss=0.1619, simple_loss=0.1795, pruned_loss=0.07025, over 1475236.97 frames. ], batch size: 133, lr: 9.95e-05, grad_scale: 32.0
+2024-08-14 02:43:23,632 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.639e+02 1.741e+02 1.916e+02 2.571e+02, threshold=3.482e+02, percent-clipped=0.0
+2024-08-14 02:43:24,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110138.66666666667, ans=0.125
+2024-08-14 02:43:55,587 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.69 vs. limit=22.5
+2024-08-14 02:44:39,969 INFO [dysarthria_finetune.py:1435] (3/4) (434831360, 34072559616)
+2024-08-14 02:44:39,970 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:44:40,037 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:44:54,087 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 0, loss[loss=0.1765, simple_loss=0.1964, pruned_loss=0.07736, over 18560.00 frames. ], tot_loss[loss=0.1765, simple_loss=0.1964, pruned_loss=0.07736, over 18560.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:44:54,088 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:45:16,883 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 16, validation: loss=0.1529, simple_loss=0.1739, pruned_loss=0.06493, over 1073944.00 frames.
+2024-08-14 02:45:16,884 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:45:30,447 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.90 vs. limit=15.0
+2024-08-14 02:45:38,664 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.18 vs. limit=15.0
+2024-08-14 02:45:48,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=110346.66666666667, ans=0.2
+2024-08-14 02:45:56,172 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.63 vs. limit=15.0
+2024-08-14 02:46:29,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=110453.33333333333, ans=0.125
+2024-08-14 02:46:30,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=110453.33333333333, ans=0.125
+2024-08-14 02:46:36,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=110506.66666666667, ans=0.125
+2024-08-14 02:47:33,728 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 50, loss[loss=0.1504, simple_loss=0.1786, pruned_loss=0.06072, over 19044.00 frames. ], tot_loss[loss=0.1608, simple_loss=0.1795, pruned_loss=0.07052, over 827661.95 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:47:43,224 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.66 vs. limit=8.0
+2024-08-14 02:48:16,647 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.23 vs. limit=22.5
+2024-08-14 02:48:22,830 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.614e+02 1.779e+02 1.933e+02 2.621e+02, threshold=3.558e+02, percent-clipped=0.0
+2024-08-14 02:49:20,579 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=110773.33333333333, ans=0.125
+2024-08-14 02:49:26,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=110773.33333333333, ans=0.0
+2024-08-14 02:49:34,038 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=19.14 vs. limit=22.5
+2024-08-14 02:49:34,513 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 16, batch 100, loss[loss=0.1544, simple_loss=0.1834, pruned_loss=0.06267, over 19120.00 frames. ], tot_loss[loss=0.1599, simple_loss=0.1791, pruned_loss=0.07008, over 1474935.70 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 16.0
+2024-08-14 02:49:35,682 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110826.66666666667, ans=0.1
+2024-08-14 02:49:39,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=110826.66666666667, ans=0.025
+2024-08-14 02:50:11,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=110880.0, ans=0.0
+2024-08-14 02:50:28,867 INFO [dysarthria_finetune.py:1435] (3/4) (13967753216, 34072559616)
+2024-08-14 02:50:28,868 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:50:28,918 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:50:47,978 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 0, loss[loss=0.2102, simple_loss=0.2204, pruned_loss=0.1, over 18583.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2204, pruned_loss=0.1, over 18583.00 frames. ], batch size: 65, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:50:47,979 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:51:11,071 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 17, validation: loss=0.1498, simple_loss=0.1721, pruned_loss=0.06377, over 1073944.00 frames.
+2024-08-14 02:51:11,071 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:51:27,514 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=12.41 vs. limit=15.0
+2024-08-14 02:51:31,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=111034.66666666667, ans=0.025
+2024-08-14 02:51:31,441 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=14.24 vs. limit=12.0
+2024-08-14 02:51:48,993 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.83 vs. limit=15.0
+2024-08-14 02:52:04,496 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=111088.0, ans=0.0
+2024-08-14 02:52:51,149 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=111194.66666666667, ans=0.125
+2024-08-14 02:52:59,547 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.599e+02 1.701e+02 1.889e+02 2.501e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-14 02:53:02,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111194.66666666667, ans=0.125
+2024-08-14 02:53:07,418 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 50, loss[loss=0.1637, simple_loss=0.1822, pruned_loss=0.07261, over 18982.00 frames. ], tot_loss[loss=0.1565, simple_loss=0.1761, pruned_loss=0.06846, over 827806.80 frames. ], batch size: 102, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:53:20,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=111248.0, ans=0.0
+2024-08-14 02:53:22,534 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.18 vs. limit=10.0
+2024-08-14 02:53:51,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=111354.66666666667, ans=10.0
+2024-08-14 02:54:41,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111408.0, ans=0.125
+2024-08-14 02:55:04,514 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 17, batch 100, loss[loss=0.1366, simple_loss=0.1629, pruned_loss=0.05516, over 19078.00 frames. ], tot_loss[loss=0.1544, simple_loss=0.1749, pruned_loss=0.06694, over 1476033.73 frames. ], batch size: 133, lr: 9.94e-05, grad_scale: 32.0
+2024-08-14 02:55:26,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=111568.0, ans=0.0
+2024-08-14 02:55:39,968 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=111568.0, ans=0.125
+2024-08-14 02:56:14,229 INFO [dysarthria_finetune.py:1435] (3/4) (13955170304, 34072559616)
+2024-08-14 02:56:14,230 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 02:56:14,277 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 02:56:27,747 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 0, loss[loss=0.1721, simple_loss=0.1919, pruned_loss=0.07617, over 18613.00 frames. ], tot_loss[loss=0.1721, simple_loss=0.1919, pruned_loss=0.07617, over 18613.00 frames. ], batch size: 65, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 02:56:27,748 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 02:56:58,391 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 18, validation: loss=0.1479, simple_loss=0.1705, pruned_loss=0.06271, over 1073944.00 frames.
+2024-08-14 02:56:58,392 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 02:57:05,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111669.33333333333, ans=0.07
+2024-08-14 02:57:09,945 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=111669.33333333333, ans=0.0
+2024-08-14 02:58:00,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=111722.66666666667, ans=0.0
+2024-08-14 02:58:03,301 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.609e+02 1.680e+02 1.858e+02 2.812e+02, threshold=3.359e+02, percent-clipped=0.0
+2024-08-14 02:58:09,258 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.51 vs. limit=10.0
+2024-08-14 02:58:36,042 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=111776.0, ans=0.0
+2024-08-14 02:58:56,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=111829.33333333333, ans=0.0
+2024-08-14 02:59:01,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=111829.33333333333, ans=0.0
+2024-08-14 02:59:10,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111829.33333333333, ans=0.125
+2024-08-14 02:59:10,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=111829.33333333333, ans=0.125
+2024-08-14 02:59:29,674 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.02 vs. limit=15.0
+2024-08-14 02:59:54,694 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 50, loss[loss=0.1436, simple_loss=0.168, pruned_loss=0.0596, over 19004.00 frames. ], tot_loss[loss=0.1507, simple_loss=0.1712, pruned_loss=0.06509, over 828768.32 frames. ], batch size: 102, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 02:59:58,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=111936.0, ans=0.2
+2024-08-14 03:00:05,779 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=111936.0, ans=0.125
+2024-08-14 03:01:15,307 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.66 vs. limit=15.0
+2024-08-14 03:02:00,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=112096.0, ans=0.2
+2024-08-14 03:02:16,383 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=112096.0, ans=10.0
+2024-08-14 03:03:28,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=112149.33333333333, ans=0.125
+2024-08-14 03:04:12,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112149.33333333333, ans=0.1
+2024-08-14 03:04:53,080 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 18, batch 100, loss[loss=0.13, simple_loss=0.1511, pruned_loss=0.05441, over 19084.00 frames. ], tot_loss[loss=0.1506, simple_loss=0.1709, pruned_loss=0.06518, over 1476677.05 frames. ], batch size: 133, lr: 9.93e-05, grad_scale: 32.0
+2024-08-14 03:05:04,103 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 03:05:52,850 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.561e+02 1.643e+02 1.812e+02 2.261e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-14 03:06:42,024 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=11.19 vs. limit=12.0
+2024-08-14 03:06:47,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=112309.33333333333, ans=0.0
+2024-08-14 03:06:51,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=112309.33333333333, ans=0.125
+2024-08-14 03:06:55,031 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.57 vs. limit=15.0
+2024-08-14 03:07:12,793 INFO [dysarthria_finetune.py:1435] (3/4) (12814319616, 34072559616)
+2024-08-14 03:07:12,793 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 03:07:12,855 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 03:07:26,411 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 0, loss[loss=0.1931, simple_loss=0.2064, pruned_loss=0.08986, over 18562.00 frames. ], tot_loss[loss=0.1931, simple_loss=0.2064, pruned_loss=0.08986, over 18562.00 frames. ], batch size: 65, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:07:26,412 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 03:07:58,727 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 19, validation: loss=0.1464, simple_loss=0.169, pruned_loss=0.06188, over 1073944.00 frames.
+2024-08-14 03:07:58,728 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 03:08:25,389 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=112352.0, ans=0.125
+2024-08-14 03:08:44,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=112405.33333333333, ans=0.125
+2024-08-14 03:09:56,015 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=112458.66666666667, ans=0.025
+2024-08-14 03:10:23,993 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112512.0, ans=0.1
+2024-08-14 03:10:38,613 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=112565.33333333333, ans=0.125
+2024-08-14 03:10:40,972 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=112565.33333333333, ans=0.0
+2024-08-14 03:10:40,978 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=112565.33333333333, ans=0.025
+2024-08-14 03:10:57,804 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 50, loss[loss=0.1358, simple_loss=0.1555, pruned_loss=0.05803, over 19015.00 frames. ], tot_loss[loss=0.1517, simple_loss=0.1719, pruned_loss=0.06576, over 829365.51 frames. ], batch size: 102, lr: 9.92e-05, grad_scale: 32.0
+2024-08-14 03:11:17,601 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-14 03:11:55,653 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=112672.0, ans=0.5
+2024-08-14 03:12:07,201 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=112672.0, ans=0.125
+2024-08-14 03:12:10,278 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=7.22 vs. limit=10.0
+2024-08-14 03:12:19,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112672.0, ans=0.1
+2024-08-14 03:12:40,889 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=112725.33333333333, ans=0.0
+2024-08-14 03:12:57,233 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.10 vs. limit=15.0
+2024-08-14 03:13:05,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=112778.66666666667, ans=0.0
+2024-08-14 03:13:37,512 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.593e+02 1.694e+02 1.909e+02 3.031e+02, threshold=3.389e+02, percent-clipped=0.0
+2024-08-14 03:13:43,254 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.96 vs. limit=15.0
+2024-08-14 03:14:02,408 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 19, batch 100, loss[loss=0.1263, simple_loss=0.149, pruned_loss=0.05176, over 19083.00 frames. ], tot_loss[loss=0.1515, simple_loss=0.1719, pruned_loss=0.06554, over 1476389.98 frames. ], batch size: 133, lr: 9.92e-05, grad_scale: 16.0
+2024-08-14 03:14:15,417 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=112885.33333333333, ans=0.125
+2024-08-14 03:14:15,507 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-08-14 03:14:22,487 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=112885.33333333333, ans=0.0
+2024-08-14 03:15:07,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=112938.66666666667, ans=0.125
+2024-08-14 03:15:47,736 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.64 vs. limit=10.0
+2024-08-14 03:15:54,662 INFO [dysarthria_finetune.py:1435] (3/4) (13942587392, 34072559616)
+2024-08-14 03:15:54,662 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 03:15:54,714 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 03:16:41,826 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 0, loss[loss=0.1815, simple_loss=0.194, pruned_loss=0.0845, over 18436.00 frames. ], tot_loss[loss=0.1815, simple_loss=0.194, pruned_loss=0.0845, over 18436.00 frames. ], batch size: 65, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:16:41,827 INFO [dysarthria_finetune.py:1165] (3/4) Computing validation loss on speech
+2024-08-14 03:17:15,217 INFO [dysarthria_finetune.py:1174] (3/4) Validation on speech: Epoch 20, validation: loss=0.1449, simple_loss=0.1677, pruned_loss=0.0611, over 1073944.00 frames.
+2024-08-14 03:17:15,218 INFO [dysarthria_finetune.py:1177] (3/4) Maximum memory allocated so far is 14287MB
+2024-08-14 03:17:41,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=113040.0, ans=0.125
+2024-08-14 03:17:47,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=113093.33333333333, ans=0.2
+2024-08-14 03:19:14,131 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=113146.66666666667, ans=0.2
+2024-08-14 03:19:25,359 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113146.66666666667, ans=0.1
+2024-08-14 03:19:25,697 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.46 vs. limit=15.0
+2024-08-14 03:19:34,792 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=113200.0, ans=0.04949747468305833
+2024-08-14 03:19:40,036 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113200.0, ans=0.125
+2024-08-14 03:20:55,646 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=113253.33333333333, ans=0.125
+2024-08-14 03:21:08,410 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 50, loss[loss=0.1451, simple_loss=0.1708, pruned_loss=0.05966, over 18942.00 frames. ], tot_loss[loss=0.1497, simple_loss=0.1697, pruned_loss=0.06485, over 827999.75 frames. ], batch size: 102, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:22:09,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=113360.0, ans=0.0
+2024-08-14 03:22:10,067 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.567e+02 1.664e+02 1.868e+02 2.522e+02, threshold=3.327e+02, percent-clipped=0.0
+2024-08-14 03:22:28,116 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.96 vs. limit=22.5
+2024-08-14 03:22:37,124 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=113360.0, ans=0.2
+2024-08-14 03:23:59,328 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.05 vs. limit=22.5
+2024-08-14 03:24:26,404 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.90 vs. limit=22.5
+2024-08-14 03:24:59,418 INFO [dysarthria_finetune.py:1141] (3/4) Epoch 20, batch 100, loss[loss=0.1083, simple_loss=0.1327, pruned_loss=0.04189, over 19171.00 frames. ], tot_loss[loss=0.1476, simple_loss=0.1684, pruned_loss=0.06338, over 1475487.52 frames. ], batch size: 133, lr: 9.91e-05, grad_scale: 32.0
+2024-08-14 03:25:22,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=113573.33333333333, ans=0.2
+2024-08-14 03:26:34,725 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=113626.66666666667, ans=0.2
+2024-08-14 03:27:20,901 INFO [dysarthria_finetune.py:1435] (3/4) (13946781696, 34072559616)
+2024-08-14 03:27:20,902 INFO [dysarthria_finetune.py:1436] (3/4) Empty cache: before and after
+2024-08-14 03:27:20,945 INFO [dysarthria_finetune.py:1440] (3/4) (29576855552, 34072559616)
+2024-08-14 03:27:20,945 INFO [dysarthria_finetune.py:1442] (3/4) Done!
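The fine-tuning log above repeats a fixed per-epoch skeleton: batch 0 of every epoch triggers a validation pass ("Computing validation loss on speech", then a "Validation on speech: Epoch N, validation: loss=..." line and a peak-memory report), and between epochs the script prints a (free, total) device-memory tuple on either side of an "Empty cache: before and after" step. The loss/simple_loss/pruned_loss triples are consistent with k2's pruned RNN-T objective, where the total loss combines a "simple" and a "pruned" term. The actual dysarthria_finetune.py source is not part of this diff, so the following is only a minimal sketch of that logging skeleton in plain PyTorch; train_one_batch and compute_validation_loss are hypothetical stubs standing in for the real routines:

```python
import logging
import torch

logging.basicConfig(level=logging.INFO)

def train_one_batch(model, batch, optimizer):
    """Hypothetical stub for the real training step (forward, backward, clipped update)."""

def compute_validation_loss(model, valid_loader):
    """Hypothetical stub for the real validation pass; returns (loss, simple, pruned)."""
    return 0.0, 0.0, 0.0

def run_finetune(model, train_loader, valid_loader, optimizer, num_epochs):
    for epoch in range(1, num_epochs + 1):
        for batch_idx, batch in enumerate(train_loader):
            if batch_idx == 0:
                # Batch 0 of every epoch starts with a validation pass,
                # as in the "Computing validation loss on speech" lines above.
                logging.info("Computing validation loss on speech")
                loss, simple, pruned = compute_validation_loss(model, valid_loader)
                logging.info(
                    "Validation on speech: Epoch %d, validation: loss=%.4g, "
                    "simple_loss=%.4g, pruned_loss=%.4g",
                    epoch, loss, simple, pruned)
                logging.info("Maximum memory allocated so far is %dMB",
                             torch.cuda.max_memory_allocated() // 2**20)
            train_one_batch(model, batch, optimizer)
        # Between epochs the log reports (free, total) device memory in bytes
        # around an explicit CUDA cache flush.
        logging.info("%s", torch.cuda.mem_get_info())
        logging.info("Empty cache: before and after")
        torch.cuda.empty_cache()
        logging.info("%s", torch.cuda.mem_get_info())
```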
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723532126.cdr2650.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723532126.cdr2650.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..43457d3d00ead825f53a1753c80f84d1d2d314ec
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723532126.cdr2650.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8540c7763e9059b44d17c8091a697d3bb2068106b265b9131478919f1388c52
+size 2713
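Each of the binary artifacts below (tensorboard event files and model checkpoints) is committed as a Git LFS pointer rather than as the blob itself: a three-line text file giving the spec version, a sha256 object id, and the size in bytes of the real file, not of the pointer (so `size 2713` here describes the event file, while the checkpoint pointers further down describe roughly 1.06 GB blobs). A minimal sketch of reading such a pointer, assuming only the documented pointer layout:

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # A Git LFS pointer is three "key value" lines: the spec version URL,
    # "oid sha256:<hash>", and "size <bytes>" of the underlying blob.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Against the event-file pointer added just above, this would return
# {"version": "https://git-lfs.github.com/spec/v1",
#  "oid": "sha256:b8540c7763e9...", "size": "2713"}
```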
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723604632.cdr2649.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723604632.cdr2649.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..3eebb10fa2084a1def07128d3e0466011afe12a1
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723604632.cdr2649.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d8938ce5a041661ca003bd7730a36b90bdedc0ad588df390dc0acdf54c69b9c
+size 1258
diff --git a/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723616688.cdr2649.int.cedar.computecanada.ca.70.0 b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723616688.cdr2649.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..d64b436add8c713235bc3c82169b2991b8dfd374
--- /dev/null
+++ b/zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard/events.out.tfevents.1723616688.cdr2649.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1ee011530ded5f6a641c63bcbd842da7a170d5172b9882cdb0d62d986cb5409
+size 33318
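Once the LFS blobs have been fetched (for example with `git lfs pull`), these event files can be inspected with TensorBoard's Python API rather than the UI. A small sketch, assuming the binaries are present locally; the scalar tag names are not visible in this diff, so they are left generic:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator(
    "zipformer/finetuned/non_ctc/non_causal/exp_finetune/tensorboard")
acc.Reload()
print(acc.Tags()["scalars"])  # scalar tags logged during fine-tuning
# for event in acc.Scalars("<some tag>"):
#     print(event.step, event.value)
```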
diff --git a/zipformer/pretrained/ctc/causal/exp/best-train-loss.pt b/zipformer/pretrained/ctc/causal/exp/best-train-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1545fe340294e2d827f41eeda5833e8ec6710e00
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/best-train-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67cc7bf27b506c8f8e8f15235f3e4bf6d77b4a02ece1b03df8369bcd03b531f0
+size 1062964046
diff --git a/zipformer/pretrained/ctc/causal/exp/best-valid-loss.pt b/zipformer/pretrained/ctc/causal/exp/best-valid-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1545fe340294e2d827f41eeda5833e8ec6710e00
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/best-valid-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67cc7bf27b506c8f8e8f15235f3e4bf6d77b4a02ece1b03df8369bcd03b531f0
+size 1062964046
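Note that best-train-loss.pt and best-valid-loss.pt carry identical pointers (the same sha256 oid 67cc7bf2... and the same 1,062,964,046-byte size), so both names resolve to the same underlying checkpoint; presumably the state that achieved the best training loss also achieved the best validation loss, or the recipe saved one state under both names. Reusing the parse_lfs_pointer helper sketched earlier, the check is one comparison:

```python
a = parse_lfs_pointer("zipformer/pretrained/ctc/causal/exp/best-train-loss.pt")
b = parse_lfs_pointer("zipformer/pretrained/ctc/causal/exp/best-valid-loss.pt")
assert a["oid"] == b["oid"] and a["size"] == b["size"]  # same underlying blob
```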
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-12000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-12000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5f9a4e196e4bdaca3fffdb4e63a52f3009b72c91
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-12000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:daa0d052a0cbd03f076d94a67ff518ce67a60fe5b353f9bb448558366639a3f2
+size 1062981526
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-16000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-16000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a797a812cd930aeb7dc34c400421d3aebead29ac
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-16000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:850993b35913b34ddf7c7523dbe3319aa05ebb22bc059821c75e7d51fac7dbdc
+size 1062981654
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-20000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-20000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..48d76a5610f935eacdf97f5cf82b4094f1ffc228
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-20000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ae35c57baa90792cf6df7e37311ae9511495642ea032a041027e5ed2c0f9164
+size 1062981718
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-24000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-24000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dcf2c47843c9108da1259363f29d2bbebe510201
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-24000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13fe0b09f4259d5396b44719b08a92119f32f865bc3d1a5c67e15fe362d39451
+size 1062981782
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-28000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-28000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b91a88a6fc501a615b87ca4acbfddad6394cbe23
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-28000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33ae62b50c47c15408bb9ec97328ae87b942d46771fd6233b76d118a4089abd3
+size 1062981910
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-32000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-32000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bbc546500321fea8e0138cf550a5d03f9c8659a7
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-32000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c67271df2d530a600d308014bd3b8006ef37462d3339eed7b75ec0c8a0b5c854
+size 1062981910
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-36000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-36000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..993a09180bab900808eae4f7ece2cabc9701879b
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-36000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c8a8716468f5078a6ed84b3396fea73cedcfc1674a831062e177b4f3b7b9879
+size 1062981462
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-4000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-4000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bfe31e79de51ad830c9c6bfe25e897c7114e494e
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-4000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7282c284c0fe3f0de14775219ee0557fe4a23bddd8f1d4f614df9092fc7acf6
+size 1062979229
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-40000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-40000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2949aadce4aaf0e2d2d0d7ddb0ab2936b5d5d647
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-40000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e28573be2b8cd387ad6c4e0555fa5cef97461dd1fee8cca3974c0f8f34371d63
+size 1062981398
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-44000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-44000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b8760cfd7c008cbebe3fdbbd3de41e0c01f925ac
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-44000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a56f79a7ad4bbf46eb01c8295d8bee29961a993fb487530f9ba1372d12deb8f8
+size 1062981398
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-48000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-48000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6c3b135bd69c443d551ff6b99340d562e19fdd5f
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-48000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c06f614ef6af29291a42c0889e9adaab77a601439c27a0e431b85ca024b6721f
+size 1062981526
diff --git a/zipformer/pretrained/ctc/causal/exp/checkpoint-8000.pt b/zipformer/pretrained/ctc/causal/exp/checkpoint-8000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..834500fd5b4ba090aed05836db55795102320148
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/checkpoint-8000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4934bf4f850b2d9d8b54af562b3b4d6a6bf30559e9b3b5c016576e441d6fc358
+size 1062979293
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-1.pt b/zipformer/pretrained/ctc/causal/exp/epoch-1.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d29617597252f8f4aae7f77df2cf07c6098c624b
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-1.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3b5d7655b8e150e919e2dfe4f25dd230f20af2d9fc5d0e8f2fe8477eb0958bf
+size 1062961173
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-10.pt b/zipformer/pretrained/ctc/causal/exp/epoch-10.pt
new file mode 100644
index 0000000000000000000000000000000000000000..490a56adb3b797fa4d105fe631277c0fe64ad1eb
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-10.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce210f71dde53db846470f4438ca8981a5bb5862504e9bb621c2abefe16b801f
+size 1062964302
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-11.pt b/zipformer/pretrained/ctc/causal/exp/epoch-11.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c4daec9011a40a641c632902409e69cc47b99ad9
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-11.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4207336a54cf4fd8ebf28b906b2a4bf71a29cde577c8447778a1a6f1c2dff40a
+size 1062964366
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-12.pt b/zipformer/pretrained/ctc/causal/exp/epoch-12.pt
new file mode 100644
index 0000000000000000000000000000000000000000..be448be1335da4d0deca266f3b5de975fea323d6
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-12.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c7ba168264ea2e6ba9eed0e04ff7045f236834fce410d3588c049abca887fdf
+size 1062964366
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-13.pt b/zipformer/pretrained/ctc/causal/exp/epoch-13.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dfa93c3f60990e8915b08e6d35b99516b89562b2
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-13.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:caf32501297129597767ab5656d3a9bba610baccbfcf42c208315f8a87d20546
+size 1062964430
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-14.pt b/zipformer/pretrained/ctc/causal/exp/epoch-14.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d7b3e01e33e6cbda330833837ae21857fb912275
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-14.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4741ab625e55a8bf10b267dbfceaea1aa44b8c5a6b590c6afa2a8802117839d8
+size 1062963918
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-15.pt b/zipformer/pretrained/ctc/causal/exp/epoch-15.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c3398606ad29edf5bd2a0a8f06dfdca842ad07b1
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-15.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab16e338dee355d8583067604d9cf505abee05c784db0b638e3d4708b8433efe
+size 1062963982
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-16.pt b/zipformer/pretrained/ctc/causal/exp/epoch-16.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e3823c164b6c6b8408fe3a2f9f14112eeeebc063
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-16.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7487753fada1b15a96bbe85ef051ae77db988bb9d2c721c1ac5c52d39a982c1f
+size 1062964046
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-17.pt b/zipformer/pretrained/ctc/causal/exp/epoch-17.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4ec1cec180dde3fd0154d7c96c9a0eb09e432333
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-17.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4190acd166d549f6ff45f92c5ec0e468d3453181793030d13995afb4651676f0
+size 1062963918
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-18.pt b/zipformer/pretrained/ctc/causal/exp/epoch-18.pt
new file mode 100644
index 0000000000000000000000000000000000000000..45632e2dab0b42efd7b73cc5584e303f4ac64534
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-18.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53bcafdb1bb7b03e7803cbb9e94bcfa9a5ccc028e9a1211ad7d2387e6473c6b3
+size 1062963918
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-19.pt b/zipformer/pretrained/ctc/causal/exp/epoch-19.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5bac506833a5482c9d53c3de3692d24d4c6c9b37
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-19.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3965981c384b21858edd0ba79fa963b830634de4e5609e53d15d9491dbe73e8c
+size 1062963982
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-2.pt b/zipformer/pretrained/ctc/causal/exp/epoch-2.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8c0d988c1e28e933ed0e5b9828a7af0a06cf9028
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-2.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140dd0e719c2564e63254777fcb5a5e809b45c524761d99e848939b79d2a85fa
+size 1062961237
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-20.pt b/zipformer/pretrained/ctc/causal/exp/epoch-20.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1545fe340294e2d827f41eeda5833e8ec6710e00
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-20.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67cc7bf27b506c8f8e8f15235f3e4bf6d77b4a02ece1b03df8369bcd03b531f0
+size 1062964046
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-3.pt b/zipformer/pretrained/ctc/causal/exp/epoch-3.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fdae6ee05fd6506da8b35fba443adde7d72d21e4
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-3.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2125d40afd9da39b03fee242d410af37e4b02cb26db2ad74002a7b90099f9b98
+size 1062961301
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-4.pt b/zipformer/pretrained/ctc/causal/exp/epoch-4.pt
new file mode 100644
index 0000000000000000000000000000000000000000..18699c2e1dae1e7b392606c51ea911d23c423b85
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-4.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a74e0311bf6e996d56f4c035be74c301f4294d99e7d7293b10f8f256ee1c34d0
+size 1062961301
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-5.pt b/zipformer/pretrained/ctc/causal/exp/epoch-5.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3c895c5c743c519c2e756c1f5599bd2953121853
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-5.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aefc7d7f2f07711438c8e45938564ed4a6765512ba0c5f038d7d96aaad4e77cd
+size 1062961365
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-6.pt b/zipformer/pretrained/ctc/causal/exp/epoch-6.pt
new file mode 100644
index 0000000000000000000000000000000000000000..799b4c8a99f73b714a9f4dfdbe350f6ee9bfcec4
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-6.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d585e81ee4b02dd85dade0635a90c1c9557d8333d613bb4fa5de1df439340edf
+size 1062961429
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-7.pt b/zipformer/pretrained/ctc/causal/exp/epoch-7.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1c05b9dfdd85984843e1e93f0fe883cb9503009e
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-7.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00f567efe03a64bbb2b4e6aea83d0e843eda4b82e6dc9368d055eb5c51fc8eff
+size 1062961493
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-8.pt b/zipformer/pretrained/ctc/causal/exp/epoch-8.pt
new file mode 100644
index 0000000000000000000000000000000000000000..18acc172465cacb94486351b260be7ef695cc492
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-8.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea516f04be5ccabe53945e12247a30feeb8400a27b613386a6ceed80d1ae6a16
+size 1062961493
diff --git a/zipformer/pretrained/ctc/causal/exp/epoch-9.pt b/zipformer/pretrained/ctc/causal/exp/epoch-9.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0e836f01ae06eac50707e1aa9d98d7ceab9aec82
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/epoch-9.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80c9c071f902eeec395bbf6364e395e6f2d4ee030c7b6e7bc4755e084fab3c7c
+size 1062961557
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-0
new file mode 100644
index 0000000000000000000000000000000000000000..5fb13f1f285c78f91e4493dc2619180f75751555
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-0
@@ -0,0 +1,4886 @@
+2024-08-25 02:23:27,399 INFO [train.py:1182] (0/4) Training started
+2024-08-25 02:23:46,264 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-25 02:23:46,266 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-25 02:23:46,266 INFO [train.py:1212] (0/4) About to create model
+2024-08-25 02:23:46,944 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-25 02:23:47,739 INFO [train.py:1231] (0/4) Using DDP
+2024-08-25 02:23:51,127 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-25 02:23:51,498 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-25 02:23:51,498 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-25 02:23:51,498 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-25 02:23:51,498 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-25 02:23:53,043 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-25 02:23:53,051 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-25 02:23:53,293 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-25 02:23:53,346 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-25 02:23:53,656 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-25 02:23:53,656 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 02:27:50,706 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12287MB
+2024-08-25 02:27:52,173 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12375MB
+2024-08-25 02:28:01,911 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12375MB
+2024-08-25 02:28:03,369 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12375MB
+2024-08-25 02:28:25,874 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=256, metric=42.50 vs. limit=7.5
+2024-08-25 02:28:26,151 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12375MB
+2024-08-25 02:28:27,775 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12611MB
+2024-08-25 02:29:16,119 INFO [train.py:1114] (0/4) Epoch 1, batch 0, loss[loss=8.844, simple_loss=7.212, pruned_loss=6.79, ctc_loss=4.757, over 19814.00 frames. ], tot_loss[loss=8.844, simple_loss=7.212, pruned_loss=6.79, ctc_loss=4.757, over 19814.00 frames. ], batch size: 49, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 02:29:16,120 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 02:29:29,442 INFO [train.py:1146] (0/4) Epoch 1, validation: loss=8.973, simple_loss=7.311, pruned_loss=6.819, ctc_loss=4.895, over 944034.00 frames.
+2024-08-25 02:29:29,443 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 12611MB
+2024-08-25 02:29:31,175 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.82 vs. limit=7.5
+2024-08-25 02:29:40,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=0.0, ans=7.5
+2024-08-25 02:30:23,438 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.714e+03 3.750e+03 4.817e+03 5.615e+03 6.551e+03, threshold=1.927e+04, percent-clipped=0.0
+2024-08-25 02:30:45,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=53.333333333333336, ans=0.4975
+2024-08-25 02:32:20,508 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=105.09 vs. limit=7.54
+2024-08-25 02:32:26,052 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.867e+02 1.019e+03 3.714e+03 5.063e+03 6.846e+03, threshold=1.486e+04, percent-clipped=0.0
+2024-08-25 02:32:40,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106.66666666666667, ans=0.29893333333333333
+2024-08-25 02:32:40,594 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=238.44 vs. limit=7.54
+2024-08-25 02:33:18,707 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=297.01 vs. limit=7.56
+2024-08-25 02:33:29,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=213.33333333333334, ans=0.04933333333333333
+2024-08-25 02:33:33,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=213.33333333333334, ans=0.192
+2024-08-25 02:33:34,370 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.10 vs. limit=7.66
+2024-08-25 02:33:36,096 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=237.78 vs. limit=7.66
+2024-08-25 02:33:36,811 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.544e+02 7.649e+02 1.076e+03 3.731e+03 6.846e+03, threshold=4.304e+03, percent-clipped=0.0
+2024-08-25 02:33:47,866 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=17.69 vs. limit=4.085333333333334
+2024-08-25 02:33:48,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=213.33333333333334, ans=7.58
+2024-08-25 02:34:02,351 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=38.61 vs. limit=7.6
+2024-08-25 02:34:04,703 INFO [train.py:1114] (0/4) Epoch 1, batch 50, loss[loss=1.365, simple_loss=1.015, pruned_loss=1.182, ctc_loss=1.089, over 19697.00 frames. ], tot_loss[loss=3.548, simple_loss=2.93, pruned_loss=2.55, ctc_loss=1.778, over 845725.26 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 02:34:09,888 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=83.60 vs. limit=5.133333333333334
+2024-08-25 02:34:32,595 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.82 vs. limit=3.048
+2024-08-25 02:34:32,665 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=35.84 vs. limit=7.62
+2024-08-25 02:34:35,317 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=320.0, ans=7.74
+2024-08-25 02:35:14,687 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=20.72 vs. limit=5.093333333333334
+2024-08-25 02:35:25,159 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=26.31 vs. limit=5.093333333333334
+2024-08-25 02:35:28,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=426.6666666666667, ans=0.226
+2024-08-25 02:35:28,805 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=50.79 vs. limit=7.66
+2024-08-25 02:37:25,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten.whitening_limit, batch_count=480.0, ans=7.68
+2024-08-25 02:37:42,232 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=41.10 vs. limit=7.86
+2024-08-25 02:37:48,165 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=130.03 vs. limit=7.68
+2024-08-25 02:37:49,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=40.94 vs. limit=7.68
+2024-08-25 02:37:51,541 INFO [train.py:1114] (0/4) Epoch 1, batch 100, loss[loss=1.353, simple_loss=0.9669, pruned_loss=1.236, ctc_loss=1.153, over 19718.00 frames. ], tot_loss[loss=2.407, simple_loss=1.911, pruned_loss=1.859, ctc_loss=1.468, over 1499439.12 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 02:37:55,737 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.639e+01 1.517e+02 3.832e+02 1.019e+03 9.054e+03, threshold=7.665e+02, percent-clipped=2.0
+2024-08-25 02:38:01,167 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=22.74 vs. limit=4.213333333333333
+2024-08-25 02:38:02,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=533.3333333333334, ans=0.43333333333333335
+2024-08-25 02:38:05,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=533.3333333333334, ans=5.333333333333333
+2024-08-25 02:38:07,352 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=193.72 vs. limit=5.293333333333333
+2024-08-25 02:38:10,874 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=14.22 vs. limit=4.234666666666667
+2024-08-25 02:38:11,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=586.6666666666666, ans=7.72
+2024-08-25 02:38:19,489 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=328.05 vs. limit=7.72
+2024-08-25 02:38:25,751 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=20.39 vs. limit=5.1466666666666665
+2024-08-25 02:38:35,059 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=22.09 vs. limit=7.72
+2024-08-25 02:38:39,219 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=44.24 vs. limit=7.74
+2024-08-25 02:38:40,439 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=37.69 vs. limit=7.74
+2024-08-25 02:38:47,923 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=40.95 vs. limit=7.98
+2024-08-25 02:38:55,783 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=160.72 vs. limit=5.346666666666667
+2024-08-25 02:39:07,282 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=105.56 vs. limit=7.76
+2024-08-25 02:39:09,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=746.6666666666666, ans=0.46499999999999997
+2024-08-25 02:39:13,784 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.33 vs. limit=3.112
+2024-08-25 02:39:14,719 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=746.6666666666666, ans=0.24253333333333332
+2024-08-25 02:39:15,218 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=63.04 vs. limit=8.06
+2024-08-25 02:39:16,495 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=191.76 vs. limit=7.78
+2024-08-25 02:39:22,873 INFO [train.py:1114] (0/4) Epoch 1, batch 150, loss[loss=1.132, simple_loss=0.783, pruned_loss=0.9897, ctc_loss=1.062, over 19717.00 frames. ], tot_loss[loss=1.939, simple_loss=1.489, pruned_loss=1.561, ctc_loss=1.343, over 2027737.98 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 02:39:26,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=800.0, ans=0.872
+2024-08-25 02:39:26,319 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=30.59 vs. limit=8.1
+2024-08-25 02:39:30,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=800.0, ans=0.872
+2024-08-25 02:39:41,718 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=25.35 vs. limit=8.14
+2024-08-25 02:39:56,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=91.94 vs. limit=7.84
+2024-08-25 02:40:15,218 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=51.79 vs. limit=7.86
+2024-08-25 02:40:20,617 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=13.95 vs. limit=5.253333333333333
+2024-08-25 02:40:21,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=1013.3333333333334, ans=0.5
+2024-08-25 02:40:32,731 INFO [train.py:1114] (0/4) Epoch 1, batch 200, loss[loss=1.257, simple_loss=0.8673, pruned_loss=1.006, ctc_loss=1.207, over 18215.00 frames. ], tot_loss[loss=1.687, simple_loss=1.262, pruned_loss=1.373, ctc_loss=1.278, over 2435361.75 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 02:40:34,775 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=13.03 vs. limit=4.426666666666667
+2024-08-25 02:40:35,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.whiten.whitening_limit, batch_count=1066.6666666666667, ans=4.426666666666667
+2024-08-25 02:40:36,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=1066.6666666666667, ans=5.533333333333333
+2024-08-25 02:40:36,940 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.587e+01 1.185e+02 1.545e+02 1.999e+02 4.229e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-25 02:41:02,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=21.53 vs. limit=4.426666666666667
+2024-08-25 02:41:08,261 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=1120.0, ans=0.8608
+2024-08-25 02:41:17,214 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.37 vs. limit=8.34
+2024-08-25 02:41:18,497 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.91 vs. limit=4.448
+2024-08-25 02:41:18,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=117.54 vs. limit=5.5600000000000005
+2024-08-25 02:41:24,174 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.09 vs. limit=8.38
+2024-08-25 02:41:50,714 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=155.74 vs. limit=7.96
+2024-08-25 02:42:01,541 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.83 vs. limit=8.46
+2024-08-25 02:42:11,858 INFO [train.py:1114] (0/4) Epoch 1, batch 250, loss[loss=1.253, simple_loss=0.8473, pruned_loss=0.9942, ctc_loss=1.236, over 19443.00 frames. ], tot_loss[loss=1.535, simple_loss=1.123, pruned_loss=1.249, ctc_loss=1.244, over 2755446.34 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 02:42:20,284 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=101.13 vs. limit=8.0
+2024-08-25 02:42:22,920 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=46.16 vs. limit=8.0
+2024-08-25 02:42:24,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=20.24 vs. limit=8.5
+2024-08-25 02:42:43,615 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=7.29 vs. limit=5.36
+2024-08-25 02:42:44,773 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=44.28 vs. limit=8.04
+2024-08-25 02:42:45,029 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.12 vs. limit=3.216
+2024-08-25 02:42:54,445 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.82 vs. limit=8.58
+2024-08-25 02:42:55,736 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=225.64 vs. limit=8.06
+2024-08-25 02:42:57,008 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=87.74 vs. limit=8.06
+2024-08-25 02:42:58,737 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=29.18 vs. limit=8.06
+2024-08-25 02:43:02,344 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=116.38 vs. limit=5.746666666666666
+2024-08-25 02:43:18,838 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=60.41 vs. limit=8.08
+2024-08-25 02:43:22,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=1600.0, ans=0.425
+2024-08-25 02:43:23,909 INFO [train.py:1114] (0/4) Epoch 1, batch 300, loss[loss=1.223, simple_loss=0.8178, pruned_loss=0.9622, ctc_loss=1.204, over 19507.00 frames. ], tot_loss[loss=1.435, simple_loss=1.029, pruned_loss=1.159, ctc_loss=1.221, over 2998983.33 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 02:43:27,980 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 8.516e+01 1.281e+02 1.784e+02 2.457e+02 1.092e+03, threshold=3.568e+02, percent-clipped=12.0
+2024-08-25 02:43:36,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=1653.3333333333333, ans=0.4225
+2024-08-25 02:43:38,433 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=90.18 vs. limit=8.12
+2024-08-25 02:43:43,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=1653.3333333333333, ans=0.4225
+2024-08-25 02:43:46,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=1653.3333333333333, ans=0.157
+2024-08-25 02:43:58,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=1706.6666666666667, ans=0.42
+2024-08-25 02:44:12,446 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.44 vs. limit=8.82
+2024-08-25 02:44:13,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.85 vs. limit=4.704
+2024-08-25 02:44:17,994 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=216.45 vs. limit=8.16
+2024-08-25 02:44:24,737 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=8.71 vs. limit=5.453333333333333
+2024-08-25 02:44:24,824 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=107.41 vs. limit=8.18
+2024-08-25 02:44:31,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=1813.3333333333333, ans=0.8365333333333334
+2024-08-25 02:44:37,982 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=12.11 vs. limit=4.746666666666667
+2024-08-25 02:44:38,850 INFO [train.py:1114] (0/4) Epoch 1, batch 350, loss[loss=1.085, simple_loss=0.7166, pruned_loss=0.8414, ctc_loss=1.073, over 19768.00 frames. ], tot_loss[loss=1.367, simple_loss=0.9644, pruned_loss=1.095, ctc_loss=1.206, over 3189785.81 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 02:44:40,581 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=1866.6666666666667, ans=0.14500000000000002
+2024-08-25 02:44:43,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=1866.6666666666667, ans=0.7686666666666666
+2024-08-25 02:44:45,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=1866.6666666666667, ans=0.13
+2024-08-25 02:44:48,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=1866.6666666666667, ans=0.5
+2024-08-25 02:44:55,586 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.44 vs. limit=8.94
+2024-08-25 02:44:58,237 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.25 vs. limit=5.96
+2024-08-25 02:45:03,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=1920.0, ans=6.2
+2024-08-25 02:45:23,917 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=74.49 vs. limit=8.26
+2024-08-25 02:45:25,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 02:45:25,618 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=16.06 vs. limit=8.26
+2024-08-25 02:45:26,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=2026.6666666666667, ans=0.08733333333333333
+2024-08-25 02:45:26,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=2026.6666666666667, ans=0.0544
+2024-08-25 02:45:31,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=2026.6666666666667, ans=0.5
+2024-08-25 02:46:57,347 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=18.94 vs. limit=8.28
+2024-08-25 02:47:09,119 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=1.61 vs. limit=3.32
+2024-08-25 02:47:09,644 INFO [train.py:1114] (0/4) Epoch 1, batch 400, loss[loss=1.209, simple_loss=0.7953, pruned_loss=0.9198, ctc_loss=1.177, over 19500.00 frames. ], tot_loss[loss=1.318, simple_loss=0.9159, pruned_loss=1.046, ctc_loss=1.192, over 3341471.66 frames. ], batch size: 54, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 02:47:13,858 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 9.873e+01 1.501e+02 1.913e+02 2.464e+02 6.763e+02, threshold=3.826e+02, percent-clipped=7.0
+2024-08-25 02:47:21,438 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=106.99 vs. limit=8.3
+2024-08-25 02:47:28,299 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=52.39 vs. limit=8.32
+2024-08-25 02:47:32,709 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.23 vs. limit=9.14
+2024-08-25 02:47:32,817 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.55 vs. limit=9.14
+2024-08-25 02:47:36,756 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=70.76 vs. limit=8.32
+2024-08-25 02:47:38,197 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.68 vs. limit=9.14
+2024-08-25 02:47:42,087 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=36.92 vs. limit=8.34
+2024-08-25 02:47:42,934 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=2240.0, ans=0.395
+2024-08-25 02:47:42,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=2240.0, ans=0.2336
+2024-08-25 02:47:47,358 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=65.20 vs. limit=8.34
+2024-08-25 02:47:56,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=2293.3333333333335, ans=0.8197333333333333
+2024-08-25 02:47:58,218 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=20.07 vs. limit=8.36
+2024-08-25 02:48:03,887 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=11.71 vs. limit=8.36
+2024-08-25 02:48:04,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=2293.3333333333335, ans=0.11399999999999999
+2024-08-25 02:48:05,193 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.79 vs. limit=9.22
+2024-08-25 02:48:13,554 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=52.23 vs. limit=8.38
+2024-08-25 02:48:13,563 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=198.25 vs. limit=8.38
+2024-08-25 02:48:14,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=2346.6666666666665, ans=0.2765333333333333
+2024-08-25 02:48:21,729 INFO [train.py:1114] (0/4) Epoch 1, batch 450, loss[loss=1.191, simple_loss=0.7788, pruned_loss=0.8944, ctc_loss=1.146, over 19617.00 frames. ], tot_loss[loss=1.284, simple_loss=0.8804, pruned_loss=1.007, ctc_loss=1.18, over 3449265.00 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 02:48:24,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=2400.0, ans=0.774
+2024-08-25 02:48:24,760 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.64 vs. limit=9.3
+2024-08-25 02:48:25,030 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.16 vs. limit=8.4
+2024-08-25 02:48:38,460 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=15.57 vs. limit=8.42
+2024-08-25 02:48:43,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=2453.3333333333335, ans=0.108
+2024-08-25 02:48:44,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=2453.3333333333335, ans=0.8141333333333334
+2024-08-25 02:48:47,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=2506.6666666666665, ans=0.3825
+2024-08-25 02:48:49,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=39.32 vs. limit=8.44
+2024-08-25 02:48:53,136 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=27.10 vs. limit=8.44
+2024-08-25 02:48:53,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=2506.6666666666665, ans=0.3825
+2024-08-25 02:48:54,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=2506.6666666666665, ans=0.3825
+2024-08-25 02:49:03,817 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.97 vs. limit=5.64
+2024-08-25 02:49:11,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=2560.0, ans=0.38
+2024-08-25 02:49:13,400 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=18.19 vs. limit=8.46
+2024-08-25 02:49:18,811 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.64 vs. limit=8.48
+2024-08-25 02:49:18,893 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.69 vs. limit=9.46
+2024-08-25 02:49:20,074 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.40 vs. limit=5.653333333333333
+2024-08-25 02:49:21,250 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=33.19 vs. limit=8.48
+2024-08-25 02:49:28,647 INFO [train.py:1114] (0/4) Epoch 1, batch 500, loss[loss=1.248, simple_loss=0.8246, pruned_loss=0.8885, ctc_loss=1.19, over 19666.00 frames. ], tot_loss[loss=1.252, simple_loss=0.8503, pruned_loss=0.9674, ctc_loss=1.162, over 3545091.46 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:49:30,855 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=11.90 vs. limit=8.5
+2024-08-25 02:49:31,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=2666.6666666666665, ans=0.375
+2024-08-25 02:49:32,580 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.834e+02 2.411e+02 2.968e+02 6.409e+02, threshold=4.822e+02, percent-clipped=7.0
+2024-08-25 02:49:35,948 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.02 vs. limit=9.5
+2024-08-25 02:49:37,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=2666.6666666666665, ans=0.16666666666666669
+2024-08-25 02:49:37,352 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.55 vs. limit=9.5
+2024-08-25 02:49:41,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=2720.0, ans=0.7772
+2024-08-25 02:49:48,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.whiten.whitening_limit, batch_count=2720.0, ans=5.088
+2024-08-25 02:49:53,911 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.02 vs. limit=8.52
+2024-08-25 02:49:59,227 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.18 vs. limit=9.58
+2024-08-25 02:50:00,695 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=10.25 vs. limit=8.54
+2024-08-25 02:50:04,985 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.08 vs. limit=9.58
+2024-08-25 02:50:06,159 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.62 vs. limit=8.54
+2024-08-25 02:50:18,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=2826.6666666666665, ans=0.7782666666666667
+2024-08-25 02:50:22,386 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=20.19 vs. limit=8.56
+2024-08-25 02:50:24,114 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.65 vs. limit=5.706666666666667
+2024-08-25 02:50:24,265 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=18.45 vs. limit=8.56
+2024-08-25 02:50:31,759 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.76 vs. limit=8.58
+2024-08-25 02:50:33,192 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=11.69 vs. limit=8.58
+2024-08-25 02:50:34,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=2880.0, ans=0.365
+2024-08-25 02:50:39,247 INFO [train.py:1114] (0/4) Epoch 1, batch 550, loss[loss=1.18, simple_loss=0.8011, pruned_loss=0.7633, ctc_loss=1.121, over 19288.00 frames. ], tot_loss[loss=1.22, simple_loss=0.8261, pruned_loss=0.9177, ctc_loss=1.138, over 3607779.82 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:50:45,480 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=23.75 vs. limit=8.6
+2024-08-25 02:50:47,252 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=18.43 vs. limit=8.6
+2024-08-25 02:50:47,656 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=17.64 vs. limit=8.6
+2024-08-25 02:50:54,620 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.39 vs. limit=5.733333333333333
+2024-08-25 02:51:00,658 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=21.75 vs. limit=8.620000000000001
+2024-08-25 02:51:02,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=2986.6666666666665, ans=0.7954666666666667
+2024-08-25 02:51:04,701 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.13 vs. limit=9.74
+2024-08-25 02:51:05,980 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.12 vs. limit=9.74
+2024-08-25 02:51:07,619 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.58 vs. limit=9.74
+2024-08-25 02:51:15,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=3040.0, ans=0.17188
+2024-08-25 02:51:15,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3040.0, ans=0.2696
+2024-08-25 02:51:20,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=3040.0, ans=0.086
+2024-08-25 02:51:23,669 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.00 vs. limit=8.64
+2024-08-25 02:51:25,166 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.35 vs. limit=8.66
+2024-08-25 02:51:31,466 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.65 vs. limit=9.82
+2024-08-25 02:51:31,731 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.68 vs. limit=5.773333333333333
+2024-08-25 02:51:35,077 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=2.100e+02
+2024-08-25 02:51:41,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=3146.6666666666665, ans=0.35250000000000004
+2024-08-25 02:51:44,873 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.56 vs. limit=8.68
+2024-08-25 02:51:48,675 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.51 vs. limit=9.86
+2024-08-25 02:51:54,368 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=3200.0, ans=0.35
+2024-08-25 02:51:55,125 INFO [train.py:1114] (0/4) Epoch 1, batch 600, loss[loss=0.9636, simple_loss=0.6644, pruned_loss=0.5763, ctc_loss=0.928, over 19369.00 frames. ], tot_loss[loss=1.173, simple_loss=0.7961, pruned_loss=0.8503, ctc_loss=1.1, over 3664209.30 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:51:59,177 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.677e+02 3.553e+02 4.456e+02 9.241e+02, threshold=7.106e+02, percent-clipped=18.0
+2024-08-25 02:52:09,519 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=10.25 vs. limit=5.8133333333333335
+2024-08-25 02:52:33,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.83 vs. limit=9.98
+2024-08-25 02:52:51,119 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.78 vs. limit=8.78
+2024-08-25 02:53:01,034 INFO [train.py:1114] (0/4) Epoch 1, batch 650, loss[loss=0.9319, simple_loss=0.6536, pruned_loss=0.5221, ctc_loss=0.8974, over 19771.00 frames. ], tot_loss[loss=1.111, simple_loss=0.758, pruned_loss=0.7741, ctc_loss=1.047, over 3714614.08 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 4.0
+2024-08-25 02:53:04,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.24 vs. limit=10.1
+2024-08-25 02:53:14,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=3520.0, ans=0.0208
+2024-08-25 02:53:19,900 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.80 vs. limit=5.88
+2024-08-25 02:53:24,742 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_na.min_abs, batch_count=3520.0, ans=0.01808
+2024-08-25 02:53:49,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3626.6666666666665, ans=0.32999999999999996
+2024-08-25 02:53:54,115 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.83 vs. limit=10.26
+2024-08-25 02:53:57,087 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.63 vs. limit=8.879999999999999
+2024-08-25 02:53:57,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=3680.0, ans=0.017199999999999993
+2024-08-25 02:53:59,458 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.45 vs. limit=8.879999999999999
+2024-08-25 02:54:00,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=3680.0, ans=0.017199999999999993
+2024-08-25 02:54:00,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.68 vs. limit=8.879999999999999
+2024-08-25 02:54:09,008 INFO [train.py:1114] (0/4) Epoch 1, batch 700, loss[loss=0.7998, simple_loss=0.5628, pruned_loss=0.4396, ctc_loss=0.7653, over 19712.00 frames. ], tot_loss[loss=1.05, simple_loss=0.7216, pruned_loss=0.7016, ctc_loss=0.9919, over 3746967.74 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:54:12,434 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.63 vs. limit=8.9
+2024-08-25 02:54:14,213 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.975e+02 3.878e+02 5.385e+02 1.936e+03, threshold=7.756e+02, percent-clipped=10.0
+2024-08-25 02:54:27,031 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.31 vs. limit=8.92
+2024-08-25 02:54:30,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=3786.6666666666665, ans=0.3225
+2024-08-25 02:54:36,605 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.28 vs. limit=5.96
+2024-08-25 02:54:42,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1.whitening_limit, batch_count=3840.0, ans=5.96
+2024-08-25 02:54:44,278 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.29 vs. limit=8.94
+2024-08-25 02:55:00,353 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.64 vs. limit=10.42
+2024-08-25 02:55:01,275 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.21 vs. limit=5.973333333333334
+2024-08-25 02:55:02,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=3893.3333333333335, ans=0.3175
+2024-08-25 02:55:12,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3946.6666666666665, ans=0.26053333333333334
+2024-08-25 02:55:15,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 02:55:16,802 INFO [train.py:1114] (0/4) Epoch 1, batch 750, loss[loss=0.8004, simple_loss=0.5798, pruned_loss=0.4093, ctc_loss=0.7508, over 19497.00 frames. ], tot_loss[loss=0.9861, simple_loss=0.6839, pruned_loss=0.6323, ctc_loss=0.9314, over 3772518.90 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:55:17,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 02:55:36,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=4053.3333333333335, ans=0.31
+2024-08-25 02:56:10,083 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=4213.333333333333, ans=0.2632
+2024-08-25 02:56:11,854 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.16 vs. limit=10.66
+2024-08-25 02:56:22,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4213.333333333333, ans=0.2578666666666667
+2024-08-25 02:56:24,841 INFO [train.py:1114] (0/4) Epoch 1, batch 800, loss[loss=0.6259, simple_loss=0.4699, pruned_loss=0.3012, ctc_loss=0.5569, over 19798.00 frames. ], tot_loss[loss=0.9258, simple_loss=0.6493, pruned_loss=0.5697, ctc_loss=0.8703, over 3793423.11 frames. ], batch size: 49, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:56:29,875 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.945e+02 3.956e+02 5.210e+02 9.107e+02, threshold=7.913e+02, percent-clipped=4.0
+2024-08-25 02:57:15,330 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=5.158e-02
+2024-08-25 02:57:18,296 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.67 vs. limit=10.86
+2024-08-25 02:57:22,950 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=4480.0, ans=0.29000000000000004
+2024-08-25 02:57:30,597 INFO [train.py:1114] (0/4) Epoch 1, batch 850, loss[loss=0.7145, simple_loss=0.5405, pruned_loss=0.3382, ctc_loss=0.6286, over 19652.00 frames. ], tot_loss[loss=0.8712, simple_loss=0.6186, pruned_loss=0.5148, ctc_loss=0.8122, over 3813331.40 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:57:38,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=4533.333333333333, ans=0.7413333333333334
+2024-08-25 02:57:55,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=4586.666666666667, ans=0.28500000000000003
+2024-08-25 02:57:56,753 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.11 vs. limit=10.94
+2024-08-25 02:57:58,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=4640.0, ans=0.04733333333333334
+2024-08-25 02:58:04,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=4640.0, ans=0.2825
+2024-08-25 02:58:17,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=4693.333333333333, ans=7.933333333333334
+2024-08-25 02:58:42,819 INFO [train.py:1114] (0/4) Epoch 1, batch 900, loss[loss=0.6303, simple_loss=0.479, pruned_loss=0.2974, ctc_loss=0.5441, over 19815.00 frames. ], tot_loss[loss=0.8267, simple_loss=0.5941, pruned_loss=0.4705, ctc_loss=0.7628, over 3817847.63 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 02:58:48,910 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.783e+02 3.682e+02 4.971e+02 1.764e+03, threshold=7.364e+02, percent-clipped=6.0
+2024-08-25 02:59:04,247 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=2.512e-03
+2024-08-25 02:59:05,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=4853.333333333333, ans=0.27249999999999996
+2024-08-25 02:59:05,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=4853.333333333333, ans=0.2728
+2024-08-25 02:59:33,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=4960.0, ans=0.7264
+2024-08-25 02:59:41,611 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=5013.333333333333, ans=0.265
+2024-08-25 02:59:50,575 INFO [train.py:1114] (0/4) Epoch 1, batch 950, loss[loss=0.5972, simple_loss=0.4646, pruned_loss=0.2652, ctc_loss=0.5148, over 19497.00 frames. ], tot_loss[loss=0.7866, simple_loss=0.5722, pruned_loss=0.4319, ctc_loss=0.7177, over 3819682.03 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:00:02,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=5120.0, ans=0.26
+2024-08-25 03:00:12,841 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.42 vs. limit=7.5600000000000005
+2024-08-25 03:00:15,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=5173.333333333333, ans=0.009744927536231884
+2024-08-25 03:00:54,654 INFO [train.py:1114] (0/4) Epoch 1, batch 1000, loss[loss=0.5859, simple_loss=0.4622, pruned_loss=0.2541, ctc_loss=0.4972, over 19848.00 frames. ], tot_loss[loss=0.7513, simple_loss=0.5535, pruned_loss=0.3986, ctc_loss=0.6767, over 3816190.86 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:01:01,310 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.847e+02 3.463e+02 4.611e+02 9.717e+02, threshold=6.926e+02, percent-clipped=4.0
+2024-08-25 03:01:01,763 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=5333.333333333333, ans=0.044444444444444446
+2024-08-25 03:01:04,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 03:01:34,128 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.98 vs. limit=9.54
+2024-08-25 03:01:51,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=5546.666666666667, ans=0.24
+2024-08-25 03:02:07,694 INFO [train.py:1114] (0/4) Epoch 1, batch 1050, loss[loss=0.6187, simple_loss=0.492, pruned_loss=0.2646, ctc_loss=0.5219, over 19842.00 frames. ], tot_loss[loss=0.7164, simple_loss=0.5346, pruned_loss=0.368, ctc_loss=0.6368, over 3821745.37 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:02:10,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=5600.0, ans=8.5
+2024-08-25 03:02:15,577 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.49 vs. limit=11.7
+2024-08-25 03:02:16,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=5600.0, ans=0.043333333333333335
+2024-08-25 03:02:18,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=5653.333333333333, ans=0.0
+2024-08-25 03:02:36,987 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.62 vs. limit=9.64
+2024-08-25 03:03:13,747 INFO [train.py:1114] (0/4) Epoch 1, batch 1100, loss[loss=0.5333, simple_loss=0.4344, pruned_loss=0.2207, ctc_loss=0.4354, over 19601.00 frames. ], tot_loss[loss=0.6862, simple_loss=0.5186, pruned_loss=0.342, ctc_loss=0.6017, over 3828941.86 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:03:20,120 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.626e+02 3.754e+02 4.559e+02 6.965e+02, threshold=7.509e+02, percent-clipped=1.0
+2024-08-25 03:03:23,168 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=5866.666666666667, ans=0.22499999999999998
+2024-08-25 03:03:33,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=5920.0, ans=0.2888
+2024-08-25 03:03:33,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=5920.0, ans=0.22249999999999998
+2024-08-25 03:03:36,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5920.0, ans=0.2408
+2024-08-25 03:03:39,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=5973.333333333333, ans=0.21999999999999997
+2024-08-25 03:04:13,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=6080.0, ans=0.21500000000000002
+2024-08-25 03:04:15,104 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.49 vs. limit=9.78
+2024-08-25 03:04:18,531 INFO [train.py:1114] (0/4) Epoch 1, batch 1150, loss[loss=0.5629, simple_loss=0.4536, pruned_loss=0.2398, ctc_loss=0.4584, over 19591.00 frames. ], tot_loss[loss=0.6605, simple_loss=0.5051, pruned_loss=0.3202, ctc_loss=0.5719, over 3828275.11 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:04:24,853 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.86 vs. limit=9.8
+2024-08-25 03:04:28,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=6133.333333333333, ans=0.21250000000000002
+2024-08-25 03:04:29,947 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.17 vs. limit=9.8
+2024-08-25 03:04:38,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=6186.666666666667, ans=0.00952463768115942
+2024-08-25 03:05:10,918 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:05:19,577 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=6346.666666666667, ans=0.2025
+2024-08-25 03:05:19,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=6346.666666666667, ans=0.6778666666666666
+2024-08-25 03:05:24,509 INFO [train.py:1114] (0/4) Epoch 1, batch 1200, loss[loss=0.525, simple_loss=0.4368, pruned_loss=0.2125, ctc_loss=0.4163, over 19841.00 frames. ], tot_loss[loss=0.6398, simple_loss=0.4949, pruned_loss=0.3025, ctc_loss=0.547, over 3823615.31 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 03:05:30,709 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.702e+02 3.344e+02 4.028e+02 1.038e+03, threshold=6.687e+02, percent-clipped=4.0
+2024-08-25 03:05:45,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=6453.333333333333, ans=0.07
+2024-08-25 03:05:46,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=6453.333333333333, ans=0.23546666666666666
+2024-08-25 03:05:56,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=6506.666666666667, ans=0.6722666666666667
+2024-08-25 03:06:02,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=6506.666666666667, ans=0.195
+2024-08-25 03:06:04,274 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.20 vs. limit=9.94
+2024-08-25 03:06:05,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=6560.0, ans=0.03933333333333334
+2024-08-25 03:06:07,918 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.50 vs. limit=6.64
+2024-08-25 03:06:07,964 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=13.95 vs. limit=12.42
+2024-08-25 03:06:18,154 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=5.52 vs. limit=5.322666666666667
+2024-08-25 03:06:33,195 INFO [train.py:1114] (0/4) Epoch 1, batch 1250, loss[loss=0.5577, simple_loss=0.4624, pruned_loss=0.2294, ctc_loss=0.439, over 19507.00 frames. ], tot_loss[loss=0.6188, simple_loss=0.4847, pruned_loss=0.2854, ctc_loss=0.522, over 3841835.80 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:06:38,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=6666.666666666667, ans=0.03888888888888889
+2024-08-25 03:06:51,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=6720.0, ans=0.185
+2024-08-25 03:06:54,358 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.91 vs. limit=12.54
+2024-08-25 03:06:54,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=6720.0, ans=12.54
+2024-08-25 03:06:54,499 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.59 vs. limit=6.68
+2024-08-25 03:07:18,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=6826.666666666667, ans=0.18
+2024-08-25 03:07:19,455 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:07:32,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=6880.0, ans=0.07
+2024-08-25 03:07:53,439 INFO [train.py:1114] (0/4) Epoch 1, batch 1300, loss[loss=0.5651, simple_loss=0.4631, pruned_loss=0.2361, ctc_loss=0.4542, over 18813.00 frames. ], tot_loss[loss=0.5974, simple_loss=0.4733, pruned_loss=0.2697, ctc_loss=0.4979, over 3845162.98 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:07:56,771 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.21 vs. limit=12.7
+2024-08-25 03:08:00,983 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.595e+02 3.171e+02 4.007e+02 5.829e+02, threshold=6.342e+02, percent-clipped=0.0
+2024-08-25 03:08:01,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=6933.333333333333, ans=0.175
+2024-08-25 03:08:29,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=7040.0, ans=0.05600000000000001
+2024-08-25 03:08:38,038 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.42 vs. limit=12.82
+2024-08-25 03:08:47,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.79 vs. limit=6.786666666666667
+2024-08-25 03:08:50,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=7146.666666666667, ans=0.16499999999999998
+2024-08-25 03:09:00,206 INFO [train.py:1114] (0/4) Epoch 1, batch 1350, loss[loss=0.5349, simple_loss=0.4416, pruned_loss=0.2219, ctc_loss=0.4269, over 19757.00 frames. ], tot_loss[loss=0.5798, simple_loss=0.4645, pruned_loss=0.2566, ctc_loss=0.4774, over 3855296.72 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:09:11,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=7253.333333333333, ans=0.036444444444444446
+2024-08-25 03:10:19,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=7306.666666666667, ans=0.15749999999999997
+2024-08-25 03:10:35,773 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:10:44,282 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=7360.0, ans=0.036000000000000004
+2024-08-25 03:12:10,378 INFO [train.py:1114] (0/4) Epoch 1, batch 1400, loss[loss=0.39, simple_loss=0.3473, pruned_loss=0.145, ctc_loss=0.292, over 19686.00 frames. ], tot_loss[loss=0.5646, simple_loss=0.4571, pruned_loss=0.2456, ctc_loss=0.4597, over 3862562.08 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:12:15,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=7466.666666666667, ans=0.15000000000000002
+2024-08-25 03:12:32,377 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.490e+02 2.974e+02 4.034e+02 6.918e+02, threshold=5.948e+02, percent-clipped=1.0
+2024-08-25 03:13:10,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=7626.666666666667, ans=0.14250000000000002
+2024-08-25 03:13:25,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=7680.0, ans=0.14
+2024-08-25 03:13:28,394 INFO [train.py:1114] (0/4) Epoch 1, batch 1450, loss[loss=0.4807, simple_loss=0.4271, pruned_loss=0.1826, ctc_loss=0.3549, over 19680.00 frames. ], tot_loss[loss=0.5521, simple_loss=0.4515, pruned_loss=0.2364, ctc_loss=0.4454, over 3860643.92 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:13:28,571 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=7733.333333333333, ans=0.1375
+2024-08-25 03:13:42,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=7786.666666666667, ans=0.6274666666666666
+2024-08-25 03:14:23,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=7946.666666666667, ans=0.050333333333333334
+2024-08-25 03:14:24,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=7946.666666666667, ans=0.025
+2024-08-25 03:14:24,715 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.83 vs. limit=10.48
+2024-08-25 03:14:30,704 INFO [train.py:1114] (0/4) Epoch 1, batch 1500, loss[loss=0.4825, simple_loss=0.4233, pruned_loss=0.1861, ctc_loss=0.3682, over 19588.00 frames. ], tot_loss[loss=0.5398, simple_loss=0.4459, pruned_loss=0.2278, ctc_loss=0.4314, over 3860583.50 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:14:38,506 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.576e+02 3.382e+02 4.091e+02 7.597e+02, threshold=6.763e+02, percent-clipped=6.0
+2024-08-25 03:14:42,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=8053.333333333333, ans=0.125
+2024-08-25 03:14:56,472 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.04 vs. limit=13.54
+2024-08-25 03:15:22,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=8213.333333333334, ans=0.125
+2024-08-25 03:15:40,011 INFO [train.py:1114] (0/4) Epoch 1, batch 1550, loss[loss=0.5507, simple_loss=0.4626, pruned_loss=0.225, ctc_loss=0.4395, over 19602.00 frames. ], tot_loss[loss=0.531, simple_loss=0.4421, pruned_loss=0.2218, ctc_loss=0.4213, over 3844875.38 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 8.0
+2024-08-25 03:16:05,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=8373.333333333334, ans=0.125
+2024-08-25 03:16:07,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=8373.333333333334, ans=0.8337333333333333
+2024-08-25 03:16:23,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=8426.666666666666, ans=0.03155555555555556
+2024-08-25 03:16:30,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=8480.0, ans=0.00902608695652174
+2024-08-25 03:16:35,771 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.07 vs. limit=7.12
+2024-08-25 03:16:45,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=8480.0, ans=0.025
+2024-08-25 03:16:49,352 INFO [train.py:1114] (0/4) Epoch 1, batch 1600, loss[loss=0.4763, simple_loss=0.4216, pruned_loss=0.1842, ctc_loss=0.3611, over 19842.00 frames. ], tot_loss[loss=0.5216, simple_loss=0.4378, pruned_loss=0.2159, ctc_loss=0.4107, over 3835647.09 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:16:59,532 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.604e+02 3.125e+02 4.170e+02 2.617e+03, threshold=6.251e+02, percent-clipped=7.0
+2024-08-25 03:16:59,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=8533.333333333334, ans=0.125
+2024-08-25 03:17:06,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=8586.666666666666, ans=0.21413333333333334
+2024-08-25 03:17:36,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=8693.333333333334, ans=0.030444444444444444
+2024-08-25 03:17:40,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=8693.333333333334, ans=0.025
+2024-08-25 03:18:56,611 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.78 vs. limit=14.059999999999999
+2024-08-25 03:19:05,109 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.95 vs. limit=10.78
+2024-08-25 03:19:09,294 INFO [train.py:1114] (0/4) Epoch 1, batch 1650, loss[loss=0.5122, simple_loss=0.4395, pruned_loss=0.2038, ctc_loss=0.4115, over 19671.00 frames. ], tot_loss[loss=0.5123, simple_loss=0.4336, pruned_loss=0.2101, ctc_loss=0.4007, over 3832259.73 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:19:15,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=8800.0, ans=0.008956521739130436
+2024-08-25 03:19:23,051 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:19:26,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=8853.333333333334, ans=0.008944927536231884
+2024-08-25 03:19:41,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=8906.666666666666, ans=0.125
+2024-08-25 03:20:12,486 INFO [train.py:1114] (0/4) Epoch 1, batch 1700, loss[loss=0.468, simple_loss=0.3963, pruned_loss=0.191, ctc_loss=0.3745, over 19670.00 frames. ], tot_loss[loss=0.5031, simple_loss=0.4294, pruned_loss=0.2046, ctc_loss=0.3909, over 3846772.86 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:20:19,831 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.828e+02 2.395e+02 2.888e+02 3.702e+02 8.491e+02, threshold=5.776e+02, percent-clipped=2.0
+2024-08-25 03:20:30,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=9120.0, ans=0.125
+2024-08-25 03:20:35,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=9120.0, ans=10.92
+2024-08-25 03:20:54,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 03:22:16,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 03:22:23,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=9280.0, ans=0.008852173913043479
+2024-08-25 03:22:23,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=9280.0, ans=0.5752
+2024-08-25 03:22:25,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=9280.0, ans=0.5752
+2024-08-25 03:22:25,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.04 vs. limit=14.46
+2024-08-25 03:22:29,579 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=9280.0, ans=0.0
+2024-08-25 03:22:33,855 INFO [train.py:1114] (0/4) Epoch 1, batch 1750, loss[loss=0.3843, simple_loss=0.3631, pruned_loss=0.1421, ctc_loss=0.2698, over 19689.00 frames. ], tot_loss[loss=0.4939, simple_loss=0.4254, pruned_loss=0.1994, ctc_loss=0.3812, over 3851413.56 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:22:50,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=9386.666666666666, ans=0.125
+2024-08-25 03:22:53,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=9386.666666666666, ans=0.025
+2024-08-25 03:22:54,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=9386.666666666666, ans=0.125
+2024-08-25 03:23:01,283 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:23:25,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=9546.666666666666, ans=0.125
+2024-08-25 03:23:31,438 INFO [train.py:1114] (0/4) Epoch 1, batch 1800, loss[loss=0.4395, simple_loss=0.4101, pruned_loss=0.1635, ctc_loss=0.3256, over 19621.00 frames. ], tot_loss[loss=0.487, simple_loss=0.4228, pruned_loss=0.1953, ctc_loss=0.374, over 3852102.84 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 8.0
+2024-08-25 03:23:32,985 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.50 vs. limit=11.1
+2024-08-25 03:23:39,418 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.646e+02 3.473e+02 4.220e+02 8.344e+02, threshold=6.945e+02, percent-clipped=3.0
+2024-08-25 03:23:40,834 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=9600.0, ans=0.125
+2024-08-25 03:23:44,844 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.58 vs. limit=14.74
+2024-08-25 03:24:08,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=9760.0, ans=0.125
+2024-08-25 03:24:16,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=9760.0, ans=0.125
+2024-08-25 03:24:17,032 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.18 vs. limit=14.82
+2024-08-25 03:24:28,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=9813.333333333334, ans=0.125
+2024-08-25 03:24:35,868 INFO [train.py:1114] (0/4) Epoch 1, batch 1850, loss[loss=0.4632, simple_loss=0.4237, pruned_loss=0.1806, ctc_loss=0.3355, over 19592.00 frames. ], tot_loss[loss=0.4784, simple_loss=0.4194, pruned_loss=0.1905, ctc_loss=0.3654, over 3856242.32 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:24:57,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=9920.0, ans=0.2008
+2024-08-25 03:25:20,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=10026.666666666666, ans=0.025
+2024-08-25 03:25:25,692 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=6.16 vs. limit=8.010666666666665
+2024-08-25 03:25:40,395 INFO [train.py:1114] (0/4) Epoch 1, batch 1900, loss[loss=0.5035, simple_loss=0.4569, pruned_loss=0.1982, ctc_loss=0.3722, over 19637.00 frames. ], tot_loss[loss=0.4741, simple_loss=0.4187, pruned_loss=0.1881, ctc_loss=0.3605, over 3862203.38 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:25:48,468 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.873e+02 2.554e+02 2.990e+02 4.033e+02 8.041e+02, threshold=5.979e+02, percent-clipped=3.0
+2024-08-25 03:25:52,383 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.02 vs. limit=11.32
+2024-08-25 03:26:10,962 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.00 vs. limit=11.34
+2024-08-25 03:26:11,716 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=10240.0, ans=0.024000000000000004
+2024-08-25 03:26:16,571 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=10293.333333333334, ans=0.125
+2024-08-25 03:26:18,522 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=10293.333333333334, ans=0.19706666666666667
+2024-08-25 03:26:25,002 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=9.39 vs. limit=10.146666666666668
+2024-08-25 03:26:26,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10346.666666666666, ans=0.19653333333333334
+2024-08-25 03:26:34,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=10346.666666666666, ans=0.0
+2024-08-25 03:26:38,016 INFO [train.py:1114] (0/4) Epoch 1, batch 1950, loss[loss=0.4437, simple_loss=0.4095, pruned_loss=0.172, ctc_loss=0.3288, over 19597.00 frames. ], tot_loss[loss=0.4691, simple_loss=0.4176, pruned_loss=0.1852, ctc_loss=0.3558, over 3870430.16 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:27:13,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=10560.0, ans=0.02266666666666667
+2024-08-25 03:27:16,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=10560.0, ans=0.09899494936611666
+2024-08-25 03:27:20,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=10560.0, ans=0.5304000000000001
+2024-08-25 03:27:36,518 INFO [train.py:1114] (0/4) Epoch 1, batch 2000, loss[loss=0.3685, simple_loss=0.3452, pruned_loss=0.1399, ctc_loss=0.2798, over 19630.00 frames. ], tot_loss[loss=0.4652, simple_loss=0.4167, pruned_loss=0.1832, ctc_loss=0.3521, over 3853410.72 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:27:44,894 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.508e+02 3.011e+02 3.695e+02 6.472e+02, threshold=6.022e+02, percent-clipped=1.0
+2024-08-25 03:27:51,056 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.47 vs. limit=15.54
+2024-08-25 03:27:56,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2.whitening_limit, batch_count=10720.0, ans=10.36
+2024-08-25 03:28:22,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=10880.0, ans=0.025
+2024-08-25 03:28:25,648 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.06 vs. limit=11.58
+2024-08-25 03:28:41,985 INFO [train.py:1114] (0/4) Epoch 1, batch 2050, loss[loss=0.3956, simple_loss=0.3742, pruned_loss=0.1495, ctc_loss=0.2951, over 19705.00 frames. ], tot_loss[loss=0.4597, simple_loss=0.4142, pruned_loss=0.1808, ctc_loss=0.3466, over 3850116.75 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:29:13,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=10986.666666666666, ans=0.05
+2024-08-25 03:30:30,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11093.333333333334, ans=0.18906666666666666
+2024-08-25 03:31:02,649 INFO [train.py:1114] (0/4) Epoch 1, batch 2100, loss[loss=0.4473, simple_loss=0.4137, pruned_loss=0.175, ctc_loss=0.3274, over 19773.00 frames. ], tot_loss[loss=0.4534, simple_loss=0.4114, pruned_loss=0.1776, ctc_loss=0.3406, over 3857796.26 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:31:14,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=11200.0, ans=0.020000000000000004
+2024-08-25 03:31:19,370 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 2.443e+02 2.901e+02 4.101e+02 7.108e+02, threshold=5.802e+02, percent-clipped=5.0
+2024-08-25 03:31:19,721 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=11200.0, ans=0.368
+2024-08-25 03:32:16,418 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.28 vs. limit=16.060000000000002
+2024-08-25 03:32:32,802 INFO [train.py:1114] (0/4) Epoch 1, batch 2150, loss[loss=0.3694, simple_loss=0.3604, pruned_loss=0.1361, ctc_loss=0.2658, over 19587.00 frames. ], tot_loss[loss=0.4452, simple_loss=0.4074, pruned_loss=0.1734, ctc_loss=0.333, over 3868506.34 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:32:44,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=11466.666666666666, ans=0.018888888888888893
+2024-08-25 03:32:50,952 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=11520.0, ans=0.008365217391304348
+2024-08-25 03:32:54,831 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=11520.0, ans=0.008365217391304348
+2024-08-25 03:33:14,600 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.88 vs. limit=11.84
+2024-08-25 03:33:18,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11573.333333333334, ans=0.18426666666666666
+2024-08-25 03:33:40,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=11626.666666666666, ans=0.125
+2024-08-25 03:33:40,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=11626.666666666666, ans=0.4930666666666667
+2024-08-25 03:33:40,294 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=11626.666666666666, ans=0.125
+2024-08-25 03:33:57,375 INFO [train.py:1114] (0/4) Epoch 1, batch 2200, loss[loss=0.4024, simple_loss=0.3908, pruned_loss=0.1477, ctc_loss=0.2965, over 19580.00 frames. ], tot_loss[loss=0.4402, simple_loss=0.4053, pruned_loss=0.1708, ctc_loss=0.3281, over 3867344.24 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:33:57,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11733.333333333334, ans=0.18266666666666664
+2024-08-25 03:34:05,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11733.333333333334, ans=0.18266666666666664
+2024-08-25 03:34:07,529 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=11733.333333333334, ans=0.125
+2024-08-25 03:34:08,398 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.628e+02 3.380e+02 4.438e+02 7.655e+02, threshold=6.760e+02, percent-clipped=12.0
+2024-08-25 03:34:16,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=11786.666666666666, ans=0.01755555555555556
+2024-08-25 03:34:17,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=11786.666666666666, ans=0.07
+2024-08-25 03:34:29,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=11840.0, ans=0.025
+2024-08-25 03:34:45,011 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.61 vs. limit=4.784
+2024-08-25 03:34:47,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=11946.666666666666, ans=0.4818666666666667
+2024-08-25 03:35:01,289 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.20 vs. limit=16.46
+2024-08-25 03:35:03,311 INFO [train.py:1114] (0/4) Epoch 1, batch 2250, loss[loss=0.4709, simple_loss=0.4257, pruned_loss=0.1849, ctc_loss=0.366, over 19620.00 frames. ], tot_loss[loss=0.437, simple_loss=0.4039, pruned_loss=0.1691, ctc_loss=0.3251, over 3868369.72 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:35:05,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=12000.0, ans=0.125
+2024-08-25 03:35:20,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=12053.333333333334, ans=0.125
+2024-08-25 03:35:24,264 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.70 vs. limit=16.54
+2024-08-25 03:35:53,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=12213.333333333334, ans=0.015777777777777773
+2024-08-25 03:36:01,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=12213.333333333334, ans=0.015777777777777773
+2024-08-25 03:36:03,067 INFO [train.py:1114] (0/4) Epoch 1, batch 2300, loss[loss=0.3946, simple_loss=0.3789, pruned_loss=0.1464, ctc_loss=0.2937, over 19478.00 frames. ], tot_loss[loss=0.4333, simple_loss=0.402, pruned_loss=0.1673, ctc_loss=0.3214, over 3862515.72 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:36:03,472 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.63 vs. limit=16.7
+2024-08-25 03:36:12,293 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.546e+02 3.099e+02 3.956e+02 8.242e+02, threshold=6.199e+02, percent-clipped=6.0
+2024-08-25 03:36:26,027 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.35 vs. limit=16.78
+2024-08-25 03:36:39,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=12426.666666666666, ans=0.125
+2024-08-25 03:36:59,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=12533.333333333334, ans=0.125
+2024-08-25 03:37:00,690 INFO [train.py:1114] (0/4) Epoch 1, batch 2350, loss[loss=0.4363, simple_loss=0.4172, pruned_loss=0.1645, ctc_loss=0.316, over 19657.00 frames. ], tot_loss[loss=0.4289, simple_loss=0.3998, pruned_loss=0.1651, ctc_loss=0.3169, over 3864591.13 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:37:24,676 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.80 vs. limit=9.056000000000001
+2024-08-25 03:37:30,150 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=12640.0, ans=0.008121739130434782
+2024-08-25 03:37:38,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=12693.333333333334, ans=0.17306666666666667
+2024-08-25 03:37:48,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=12746.666666666666, ans=0.125
+2024-08-25 03:37:53,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=12746.666666666666, ans=0.125
+2024-08-25 03:37:59,409 INFO [train.py:1114] (0/4) Epoch 1, batch 2400, loss[loss=0.4701, simple_loss=0.433, pruned_loss=0.1842, ctc_loss=0.3469, over 19375.00 frames. ], tot_loss[loss=0.4297, simple_loss=0.4016, pruned_loss=0.1652, ctc_loss=0.3165, over 3860114.29 frames. ], batch size: 67, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:38:00,732 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=12800.0, ans=0.125
+2024-08-25 03:38:08,242 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.522e+02 3.053e+02 3.990e+02 1.210e+03, threshold=6.106e+02, percent-clipped=3.0
+2024-08-25 03:38:16,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=12853.333333333334, ans=0.00807536231884058
+2024-08-25 03:38:43,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=12960.0, ans=0.125
+2024-08-25 03:39:03,886 INFO [train.py:1114] (0/4) Epoch 1, batch 2450, loss[loss=0.5474, simple_loss=0.4643, pruned_loss=0.2311, ctc_loss=0.4204, over 13585.00 frames. ], tot_loss[loss=0.4378, simple_loss=0.4064, pruned_loss=0.1694, ctc_loss=0.3242, over 3739887.67 frames. ], batch size: 140, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:39:09,339 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.01 vs. limit=17.3
+2024-08-25 03:39:13,040 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.64 vs. limit=12.4
+2024-08-25 03:39:16,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=13120.0, ans=0.125
+2024-08-25 03:39:19,252 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=15.80 vs. limit=17.34
+2024-08-25 03:39:32,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=13173.333333333334, ans=0.125
+2024-08-25 03:39:39,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=13226.666666666666, ans=0.125
+2024-08-25 03:39:48,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=13226.666666666666, ans=0.125
+2024-08-25 03:39:50,244 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-1.pt
+2024-08-25 03:40:42,880 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=2.510e-03
+2024-08-25 03:40:43,722 INFO [train.py:1114] (0/4) Epoch 2, batch 0, loss[loss=0.4069, simple_loss=0.3795, pruned_loss=0.1585, ctc_loss=0.2935, over 19411.00 frames. ], tot_loss[loss=0.4069, simple_loss=0.3795, pruned_loss=0.1585, ctc_loss=0.2935, over 19411.00 frames. ], batch size: 48, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 03:40:43,722 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 03:40:55,171 INFO [train.py:1146] (0/4) Epoch 2, validation: loss=0.3317, simple_loss=0.3718, pruned_loss=0.1058, ctc_loss=0.2, over 944034.00 frames.
+2024-08-25 03:40:55,172 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 03:41:11,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=13333.333333333334, ans=0.125
+2024-08-25 03:41:17,118 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.388e+02 2.818e+02 3.444e+02 6.577e+02, threshold=5.636e+02, percent-clipped=3.0
+2024-08-25 03:41:20,780 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13386.666666666666, ans=0.16613333333333333
+2024-08-25 03:41:21,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 03:41:25,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=13386.666666666666, ans=0.4314666666666667
+2024-08-25 03:41:57,951 INFO [train.py:1114] (0/4) Epoch 2, batch 50, loss[loss=0.3601, simple_loss=0.3509, pruned_loss=0.1328, ctc_loss=0.2593, over 19712.00 frames. ], tot_loss[loss=0.428, simple_loss=0.4029, pruned_loss=0.164, ctc_loss=0.3128, over 843639.75 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:42:07,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=13546.666666666666, ans=0.125
+2024-08-25 03:42:13,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=13600.0, ans=0.125
+2024-08-25 03:42:30,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=13653.333333333334, ans=0.00790144927536232
+2024-08-25 03:43:26,691 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.47 vs. limit=5.064
+2024-08-25 03:43:33,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=13760.0, ans=0.125
+2024-08-25 03:43:36,879 INFO [train.py:1114] (0/4) Epoch 2, batch 100, loss[loss=0.3628, simple_loss=0.3692, pruned_loss=0.1291, ctc_loss=0.2456, over 19726.00 frames. ], tot_loss[loss=0.4224, simple_loss=0.4011, pruned_loss=0.1605, ctc_loss=0.3069, over 1499110.32 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:43:44,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=13813.333333333334, ans=0.16186666666666666
+2024-08-25 03:43:51,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=13866.666666666666, ans=0.125
+2024-08-25 03:43:54,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=13866.666666666666, ans=0.125
+2024-08-25 03:44:02,828 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.500e+02 2.916e+02 3.893e+02 6.295e+02, threshold=5.832e+02, percent-clipped=2.0
+2024-08-25 03:44:06,937 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=13920.0, ans=0.125
+2024-08-25 03:44:12,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13920.0, ans=0.1608
+2024-08-25 03:44:13,017 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.48 vs. limit=9.568
+2024-08-25 03:44:33,792 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.67 vs. limit=12.76
+2024-08-25 03:44:38,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=14026.666666666666, ans=0.025
+2024-08-25 03:44:42,785 INFO [train.py:1114] (0/4) Epoch 2, batch 150, loss[loss=0.3602, simple_loss=0.358, pruned_loss=0.1297, ctc_loss=0.2577, over 19727.00 frames. ], tot_loss[loss=0.4141, simple_loss=0.3957, pruned_loss=0.1562, ctc_loss=0.3001, over 2027714.20 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:44:42,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=14080.0, ans=0.4072
+2024-08-25 03:45:12,540 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=14186.666666666666, ans=0.125
+2024-08-25 03:45:19,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=14240.0, ans=0.125
+2024-08-25 03:45:24,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=14240.0, ans=0.125
+2024-08-25 03:45:34,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=14293.333333333334, ans=0.007762318840579711
+2024-08-25 03:45:34,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=14293.333333333334, ans=0.10706666666666664
+2024-08-25 03:45:35,687 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.72 vs. limit=18.22
+2024-08-25 03:45:42,121 INFO [train.py:1114] (0/4) Epoch 2, batch 200, loss[loss=0.4582, simple_loss=0.4264, pruned_loss=0.1789, ctc_loss=0.3307, over 18298.00 frames. ], tot_loss[loss=0.4074, simple_loss=0.3919, pruned_loss=0.1527, ctc_loss=0.294, over 2435029.92 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:45:59,985 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:46:06,457 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.951e+02 2.445e+02 2.940e+02 3.728e+02 6.995e+02, threshold=5.880e+02, percent-clipped=3.0
+2024-08-25 03:46:44,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.82 vs. limit=18.42
+2024-08-25 03:46:44,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=14613.333333333334, ans=0.125
+2024-08-25 03:46:45,925 INFO [train.py:1114] (0/4) Epoch 2, batch 250, loss[loss=0.4519, simple_loss=0.418, pruned_loss=0.174, ctc_loss=0.3443, over 19414.00 frames. ], tot_loss[loss=0.4036, simple_loss=0.3895, pruned_loss=0.1509, ctc_loss=0.2896, over 2754771.79 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:47:04,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=14666.666666666666, ans=0.3866666666666667
+2024-08-25 03:47:08,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=14720.0, ans=0.125
+2024-08-25 03:47:11,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=14720.0, ans=0.125
+2024-08-25 03:47:26,842 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.73 vs. limit=13.04
+2024-08-25 03:47:39,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=14826.666666666666, ans=0.125
+2024-08-25 03:47:41,883 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.82 vs. limit=18.619999999999997
+2024-08-25 03:47:42,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14826.666666666666, ans=0.15173333333333333
+2024-08-25 03:47:50,864 INFO [train.py:1114] (0/4) Epoch 2, batch 300, loss[loss=0.4161, simple_loss=0.4035, pruned_loss=0.1558, ctc_loss=0.293, over 19540.00 frames. ], tot_loss[loss=0.4003, simple_loss=0.3878, pruned_loss=0.1491, ctc_loss=0.2863, over 2999480.96 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:47:51,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=14880.0, ans=0.3792
+2024-08-25 03:47:53,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=14880.0, ans=0.3792
+2024-08-25 03:48:02,065 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.57 vs. limit=5.24
+2024-08-25 03:48:13,149 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.396e+02 2.818e+02 3.488e+02 8.647e+02, threshold=5.636e+02, percent-clipped=6.0
+2024-08-25 03:48:32,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=15040.0, ans=0.0040000000000000036
+2024-08-25 03:48:48,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_na.min_abs, batch_count=15093.333333333334, ans=0.02
+2024-08-25 03:48:50,387 INFO [train.py:1114] (0/4) Epoch 2, batch 350, loss[loss=0.3351, simple_loss=0.3468, pruned_loss=0.1165, ctc_loss=0.2264, over 19747.00 frames. ], tot_loss[loss=0.3997, simple_loss=0.388, pruned_loss=0.1487, ctc_loss=0.2848, over 3189626.37 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:48:59,191 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.63 vs. limit=13.18
+2024-08-25 03:49:02,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=15200.0, ans=0.003333333333333334
+2024-08-25 03:49:38,861 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15253.333333333334, ans=0.125
+2024-08-25 03:50:10,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.21 vs. limit=13.26
+2024-08-25 03:50:17,378 INFO [train.py:1114] (0/4) Epoch 2, batch 400, loss[loss=0.383, simple_loss=0.3789, pruned_loss=0.1393, ctc_loss=0.2714, over 19505.00 frames. ], tot_loss[loss=0.3958, simple_loss=0.3856, pruned_loss=0.1467, ctc_loss=0.2813, over 3341816.65 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:50:37,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=15466.666666666666, ans=0.0075072463768115945
+2024-08-25 03:50:39,707 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.407e+02 2.984e+02 3.456e+02 5.488e+02, threshold=5.968e+02, percent-clipped=0.0
+2024-08-25 03:50:43,634 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:51:02,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=15573.333333333334, ans=0.125
+2024-08-25 03:51:07,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=15626.666666666666, ans=0.125
+2024-08-25 03:51:18,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=15680.0, ans=0.125
+2024-08-25 03:51:18,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15680.0, ans=0.125
+2024-08-25 03:51:19,335 INFO [train.py:1114] (0/4) Epoch 2, batch 450, loss[loss=0.3733, simple_loss=0.3832, pruned_loss=0.1303, ctc_loss=0.257, over 19618.00 frames. ], tot_loss[loss=0.3963, simple_loss=0.3857, pruned_loss=0.1472, ctc_loss=0.2811, over 3449617.60 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:51:29,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=15680.0, ans=0.125
+2024-08-25 03:51:55,949 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.19 vs. limit=13.42
+2024-08-25 03:51:57,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=15840.0, ans=0.125
+2024-08-25 03:52:02,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=15840.0, ans=0.125
+2024-08-25 03:52:21,861 INFO [train.py:1114] (0/4) Epoch 2, batch 500, loss[loss=0.3874, simple_loss=0.3946, pruned_loss=0.1375, ctc_loss=0.2627, over 19670.00 frames. ], tot_loss[loss=0.3932, simple_loss=0.3837, pruned_loss=0.1457, ctc_loss=0.2785, over 3545631.43 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:52:25,782 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=15946.666666666666, ans=0.125
+2024-08-25 03:53:11,995 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.425e+02 3.079e+02 3.995e+02 1.154e+03, threshold=6.159e+02, percent-clipped=13.0
+2024-08-25 03:53:22,641 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.06 vs. limit=13.52
+2024-08-25 03:53:23,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=16106.666666666666, ans=0.0
+2024-08-25 03:53:29,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=16106.666666666666, ans=0.3362666666666667
+2024-08-25 03:53:32,906 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.10 vs. limit=10.442666666666668
+2024-08-25 03:53:41,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=16160.0, ans=0.125
+2024-08-25 03:53:49,958 INFO [train.py:1114] (0/4) Epoch 2, batch 550, loss[loss=0.4051, simple_loss=0.3967, pruned_loss=0.1489, ctc_loss=0.2895, over 19306.00 frames. ], tot_loss[loss=0.3915, simple_loss=0.3831, pruned_loss=0.1447, ctc_loss=0.2763, over 3608461.94 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:54:01,878 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.98 vs. limit=13.58
+2024-08-25 03:54:23,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=16320.0, ans=0.0
+2024-08-25 03:54:26,060 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.57 vs. limit=19.740000000000002
+2024-08-25 03:54:29,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=16373.333333333334, ans=0.0
+2024-08-25 03:54:36,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=16373.333333333334, ans=0.125
+2024-08-25 03:54:39,004 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.85 vs. limit=13.66
+2024-08-25 03:54:51,530 INFO [train.py:1114] (0/4) Epoch 2, batch 600, loss[loss=0.4379, simple_loss=0.4132, pruned_loss=0.1698, ctc_loss=0.3074, over 19382.00 frames. ], tot_loss[loss=0.3901, simple_loss=0.3829, pruned_loss=0.1438, ctc_loss=0.2745, over 3666275.06 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:54:54,177 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=16480.0, ans=0.0
+2024-08-25 03:55:14,972 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.336e+02 2.753e+02 3.494e+02 8.105e+02, threshold=5.507e+02, percent-clipped=1.0
+2024-08-25 03:55:15,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=16586.666666666668, ans=0.125
+2024-08-25 03:55:33,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys.whitening_limit, batch_count=16640.0, ans=5.496
+2024-08-25 03:55:38,920 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.04 vs. limit=13.74
+2024-08-25 03:55:45,937 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.66 vs. limit=13.759999999999998
+2024-08-25 03:55:56,157 INFO [train.py:1114] (0/4) Epoch 2, batch 650, loss[loss=0.3786, simple_loss=0.3846, pruned_loss=0.1341, ctc_loss=0.2608, over 19784.00 frames. ], tot_loss[loss=0.3876, simple_loss=0.3812, pruned_loss=0.1426, ctc_loss=0.2719, over 3716432.68 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:56:23,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16853.333333333332, ans=0.13146666666666668
+2024-08-25 03:56:56,409 INFO [train.py:1114] (0/4) Epoch 2, batch 700, loss[loss=0.385, simple_loss=0.3786, pruned_loss=0.1419, ctc_loss=0.2688, over 19706.00 frames. ], tot_loss[loss=0.3895, simple_loss=0.3823, pruned_loss=0.1436, ctc_loss=0.2736, over 3747743.13 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:57:00,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=17013.333333333332, ans=0.025
+2024-08-25 03:57:03,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=17013.333333333332, ans=0.125
+2024-08-25 03:57:23,235 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.519e+02 2.895e+02 3.628e+02 6.087e+02, threshold=5.790e+02, percent-clipped=2.0
+2024-08-25 03:57:41,536 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.32 vs. limit=9.293333333333333
+2024-08-25 03:57:54,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=17226.666666666668, ans=0.2970666666666667
+2024-08-25 03:58:01,086 INFO [train.py:1114] (0/4) Epoch 2, batch 750, loss[loss=0.4175, simple_loss=0.4001, pruned_loss=0.1578, ctc_loss=0.2985, over 19521.00 frames. ], tot_loss[loss=0.3875, simple_loss=0.3813, pruned_loss=0.1425, ctc_loss=0.2715, over 3774462.36 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:58:06,379 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.61 vs. limit=13.98
+2024-08-25 03:58:14,061 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=17333.333333333332, ans=0.125
+2024-08-25 03:58:28,688 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.31 vs. limit=10.954666666666668
+2024-08-25 03:58:40,163 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=17440.0, ans=0.007078260869565218
+2024-08-25 03:58:40,867 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.96 vs. limit=20.58
+2024-08-25 04:00:12,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=17546.666666666668, ans=0.125
+2024-08-25 04:00:16,095 INFO [train.py:1114] (0/4) Epoch 2, batch 800, loss[loss=0.3199, simple_loss=0.3308, pruned_loss=0.1114, ctc_loss=0.2158, over 19396.00 frames. ], tot_loss[loss=0.3846, simple_loss=0.3797, pruned_loss=0.141, ctc_loss=0.2688, over 3795728.73 frames. ], batch size: 48, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:00:17,754 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.37 vs. limit=14.08
+2024-08-25 04:00:36,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=17600.0, ans=0.007043478260869565
+2024-08-25 04:00:39,334 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.611e+02 3.088e+02 3.881e+02 9.768e+02, threshold=6.176e+02, percent-clipped=6.0
+2024-08-25 04:01:03,140 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.04 vs. limit=11.104
+2024-08-25 04:01:15,038 INFO [train.py:1114] (0/4) Epoch 2, batch 850, loss[loss=0.4252, simple_loss=0.4128, pruned_loss=0.1595, ctc_loss=0.2964, over 19654.00 frames. ], tot_loss[loss=0.3825, simple_loss=0.3787, pruned_loss=0.1398, ctc_loss=0.2667, over 3814410.93 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:01:15,306 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=17813.333333333332, ans=0.12186666666666668
+2024-08-25 04:01:18,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=17813.333333333332, ans=0.0
+2024-08-25 04:01:46,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=17920.0, ans=0.0708
+2024-08-25 04:02:12,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff2.min_abs, batch_count=18026.666666666668, ans=0.1
+2024-08-25 04:02:18,991 INFO [train.py:1114] (0/4) Epoch 2, batch 900, loss[loss=0.3214, simple_loss=0.3366, pruned_loss=0.1107, ctc_loss=0.2122, over 19409.00 frames. ], tot_loss[loss=0.3835, simple_loss=0.3791, pruned_loss=0.1405, ctc_loss=0.2672, over 3817568.43 frames. ], batch size: 48, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:02:40,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=18080.0, ans=0.125
+2024-08-25 04:02:41,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=18080.0, ans=0.025
+2024-08-25 04:02:53,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=18133.333333333332, ans=0.025
+2024-08-25 04:03:03,823 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.814e+02 2.530e+02 3.033e+02 3.602e+02 3.379e+03, threshold=6.066e+02, percent-clipped=6.0
+2024-08-25 04:03:04,442 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.99 vs. limit=14.32
+2024-08-25 04:03:10,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=18186.666666666668, ans=0.0
+2024-08-25 04:03:16,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.45 vs. limit=14.34
+2024-08-25 04:03:35,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=18346.666666666668, ans=0.05
+2024-08-25 04:03:36,933 INFO [train.py:1114] (0/4) Epoch 2, batch 950, loss[loss=0.3467, simple_loss=0.3445, pruned_loss=0.1261, ctc_loss=0.2421, over 19504.00 frames. ], tot_loss[loss=0.3821, simple_loss=0.3783, pruned_loss=0.1398, ctc_loss=0.2656, over 3819324.98 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:03:57,597 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=18400.0, ans=0.125
+2024-08-25 04:04:06,496 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.78 vs. limit=21.34
+2024-08-25 04:04:14,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=18506.666666666668, ans=0.0
+2024-08-25 04:04:14,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=18506.666666666668, ans=0.25226666666666675
+2024-08-25 04:04:15,477 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.10 vs. limit=14.253333333333334
+2024-08-25 04:04:21,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=18506.666666666668, ans=0.0
+2024-08-25 04:04:22,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=18506.666666666668, ans=0.125
+2024-08-25 04:04:27,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.94 vs. limit=14.46
+2024-08-25 04:04:34,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18560.0, ans=0.1144
+2024-08-25 04:04:39,326 INFO [train.py:1114] (0/4) Epoch 2, batch 1000, loss[loss=0.3341, simple_loss=0.347, pruned_loss=0.1161, ctc_loss=0.2227, over 19845.00 frames. ], tot_loss[loss=0.3819, simple_loss=0.3785, pruned_loss=0.1396, ctc_loss=0.2652, over 3815946.82 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:04:45,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=18613.333333333332, ans=0.24853333333333338
+2024-08-25 04:04:51,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=18666.666666666668, ans=0.125
+2024-08-25 04:04:58,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=18666.666666666668, ans=0.11333333333333331
+2024-08-25 04:05:01,184 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=18666.666666666668, ans=0.125
+2024-08-25 04:05:05,789 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.321e+02 2.743e+02 3.485e+02 6.350e+02, threshold=5.486e+02, percent-clipped=2.0
+2024-08-25 04:05:41,818 INFO [train.py:1114] (0/4) Epoch 2, batch 1050, loss[loss=0.3451, simple_loss=0.3612, pruned_loss=0.1188, ctc_loss=0.2284, over 19838.00 frames. ], tot_loss[loss=0.3803, simple_loss=0.3774, pruned_loss=0.1389, ctc_loss=0.2637, over 3823718.17 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:05:45,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=18880.0, ans=0.125
+2024-08-25 04:06:04,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=18933.333333333332, ans=0.0
+2024-08-25 04:06:27,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=19040.0, ans=0.0
+2024-08-25 04:06:36,259 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=19093.333333333332, ans=0.23173333333333346
+2024-08-25 04:06:44,167 INFO [train.py:1114] (0/4) Epoch 2, batch 1100, loss[loss=0.3337, simple_loss=0.3465, pruned_loss=0.1146, ctc_loss=0.2293, over 19565.00 frames. ], tot_loss[loss=0.38, simple_loss=0.377, pruned_loss=0.1388, ctc_loss=0.2632, over 3831878.62 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:07:11,086 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.465e+02 2.960e+02 4.039e+02 7.406e+02, threshold=5.919e+02, percent-clipped=11.0
+2024-08-25 04:07:14,246 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.50 vs. limit=9.813333333333333
+2024-08-25 04:07:49,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=19306.666666666668, ans=0.125
+2024-08-25 04:07:55,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19360.0, ans=0.10640000000000002
+2024-08-25 04:08:08,069 INFO [train.py:1114] (0/4) Epoch 2, batch 1150, loss[loss=0.3876, simple_loss=0.3776, pruned_loss=0.1434, ctc_loss=0.2767, over 19587.00 frames. ], tot_loss[loss=0.3785, simple_loss=0.3764, pruned_loss=0.138, ctc_loss=0.2615, over 3831567.54 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:08:09,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=19413.333333333332, ans=0.125
+2024-08-25 04:08:32,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=19520.0, ans=0.125
+2024-08-25 04:08:46,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=19573.333333333332, ans=0.0
+2024-08-25 04:08:49,037 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.82 vs. limit=14.84
+2024-08-25 04:08:56,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=19626.666666666668, ans=0.125
+2024-08-25 04:09:08,085 INFO [train.py:1114] (0/4) Epoch 2, batch 1200, loss[loss=0.3908, simple_loss=0.3923, pruned_loss=0.1407, ctc_loss=0.2696, over 19835.00 frames. ], tot_loss[loss=0.3791, simple_loss=0.3771, pruned_loss=0.1382, ctc_loss=0.2618, over 3827106.49 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 04:09:29,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=19733.333333333332, ans=0.125
+2024-08-25 04:09:34,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=19786.666666666668, ans=0.2074666666666667
+2024-08-25 04:09:35,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=19786.666666666668, ans=0.125
+2024-08-25 04:09:36,232 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.637e+02 3.065e+02 4.000e+02 6.600e+02, threshold=6.130e+02, percent-clipped=2.0
+2024-08-25 04:09:41,767 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=19786.666666666668, ans=0.10213333333333333
+2024-08-25 04:10:02,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=19893.333333333332, ans=0.125
+2024-08-25 04:10:11,986 INFO [train.py:1114] (0/4) Epoch 2, batch 1250, loss[loss=0.3997, simple_loss=0.3953, pruned_loss=0.145, ctc_loss=0.2855, over 19537.00 frames. ], tot_loss[loss=0.3763, simple_loss=0.376, pruned_loss=0.1365, ctc_loss=0.2586, over 3844123.46 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:10:29,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=20000.0, ans=0.95
+2024-08-25 04:11:09,335 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=26.98 vs. limit=22.5
+2024-08-25 04:11:15,941 INFO [train.py:1114] (0/4) Epoch 2, batch 1300, loss[loss=0.4363, simple_loss=0.4174, pruned_loss=0.1657, ctc_loss=0.3098, over 18852.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.375, pruned_loss=0.1355, ctc_loss=0.2571, over 3845391.14 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:11:35,566 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.59 vs. limit=15.0
+2024-08-25 04:11:39,417 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=15.40 vs. limit=22.5
+2024-08-25 04:11:41,991 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.187e+02 2.429e+02 2.931e+02 4.736e+02, threshold=4.858e+02, percent-clipped=0.0
+2024-08-25 04:11:45,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=20320.0, ans=0.125
+2024-08-25 04:11:58,537 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.06 vs. limit=10.0
+2024-08-25 04:12:03,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=20426.666666666668, ans=0.006428985507246377
+2024-08-25 04:12:07,858 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.09 vs. limit=22.5
+2024-08-25 04:12:15,265 INFO [train.py:1114] (0/4) Epoch 2, batch 1350, loss[loss=0.3335, simple_loss=0.359, pruned_loss=0.112, ctc_loss=0.2099, over 19753.00 frames. ], tot_loss[loss=0.3717, simple_loss=0.3735, pruned_loss=0.1341, ctc_loss=0.2543, over 3856086.37 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:12:15,811 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.43 vs. limit=22.5
+2024-08-25 04:12:21,261 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=20480.0, ans=0.0
+2024-08-25 04:12:24,228 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.97 vs. limit=15.0
+2024-08-25 04:12:25,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=20480.0, ans=0.0
+2024-08-25 04:12:30,183 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:12:37,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=20533.333333333332, ans=0.1
+2024-08-25 04:12:48,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=20586.666666666668, ans=0.006394202898550725
+2024-08-25 04:12:49,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20586.666666666668, ans=0.1
+2024-08-25 04:12:58,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.43 vs. limit=22.5
+2024-08-25 04:13:03,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=20640.0, ans=0.2
+2024-08-25 04:13:10,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=20693.333333333332, ans=0.125
+2024-08-25 04:13:10,351 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=20693.333333333332, ans=0.0
+2024-08-25 04:13:18,518 INFO [train.py:1114] (0/4) Epoch 2, batch 1400, loss[loss=0.2968, simple_loss=0.3085, pruned_loss=0.1043, ctc_loss=0.1911, over 19657.00 frames. ], tot_loss[loss=0.3697, simple_loss=0.3725, pruned_loss=0.133, ctc_loss=0.2522, over 3863322.05 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:13:36,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=20800.0, ans=0.0
+2024-08-25 04:14:03,163 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.385e+02 2.674e+02 3.744e+02 6.684e+02, threshold=5.347e+02, percent-clipped=6.0
+2024-08-25 04:14:12,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=20906.666666666668, ans=0.0
+2024-08-25 04:14:18,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=20906.666666666668, ans=0.04949747468305833
+2024-08-25 04:14:18,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20906.666666666668, ans=0.1
+2024-08-25 04:14:37,932 INFO [train.py:1114] (0/4) Epoch 2, batch 1450, loss[loss=0.3556, simple_loss=0.375, pruned_loss=0.1231, ctc_loss=0.225, over 19700.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.3727, pruned_loss=0.1329, ctc_loss=0.2515, over 3861891.40 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:15:59,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=21120.0, ans=0.2
+2024-08-25 04:16:15,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=21173.333333333332, ans=0.025
+2024-08-25 04:16:16,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=21173.333333333332, ans=0.125
+2024-08-25 04:16:23,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=21226.666666666668, ans=0.125
+2024-08-25 04:16:26,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21226.666666666668, ans=0.1
+2024-08-25 04:16:28,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=21226.666666666668, ans=0.0
+2024-08-25 04:16:28,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=21226.666666666668, ans=0.125
+2024-08-25 04:16:33,105 INFO [train.py:1114] (0/4) Epoch 2, batch 1500, loss[loss=0.3554, simple_loss=0.377, pruned_loss=0.1195, ctc_loss=0.237, over 19567.00 frames. ], tot_loss[loss=0.3687, simple_loss=0.3726, pruned_loss=0.1324, ctc_loss=0.2501, over 3860597.06 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:16:42,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=21280.0, ans=0.0
+2024-08-25 04:16:44,294 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-4000.pt
+2024-08-25 04:17:08,008 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.509e+02 2.906e+02 4.274e+02 8.598e+02, threshold=5.813e+02, percent-clipped=13.0
+2024-08-25 04:17:13,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=21386.666666666668, ans=0.125
+2024-08-25 04:17:18,150 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=21440.0, ans=0.125
+2024-08-25 04:17:19,575 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.00 vs. limit=15.0
+2024-08-25 04:17:25,476 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.99 vs. limit=12.0
+2024-08-25 04:17:31,487 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.54 vs. limit=15.0
+2024-08-25 04:17:41,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=21546.666666666668, ans=0.006185507246376811
+2024-08-25 04:17:42,731 INFO [train.py:1114] (0/4) Epoch 2, batch 1550, loss[loss=0.3915, simple_loss=0.3955, pruned_loss=0.1401, ctc_loss=0.268, over 19590.00 frames. ], tot_loss[loss=0.3692, simple_loss=0.3727, pruned_loss=0.1328, ctc_loss=0.2505, over 3846009.12 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 04:18:10,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=21653.333333333332, ans=10.0
+2024-08-25 04:18:44,965 INFO [train.py:1114] (0/4) Epoch 2, batch 1600, loss[loss=0.3852, simple_loss=0.3881, pruned_loss=0.1374, ctc_loss=0.2684, over 19845.00 frames. ], tot_loss[loss=0.3687, simple_loss=0.3721, pruned_loss=0.1325, ctc_loss=0.2505, over 3836526.19 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:19:07,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21866.666666666668, ans=0.1
+2024-08-25 04:19:09,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=21866.666666666668, ans=0.0
+2024-08-25 04:19:13,747 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.370e+02 2.902e+02 3.664e+02 6.938e+02, threshold=5.803e+02, percent-clipped=2.0
+2024-08-25 04:19:25,935 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=21973.333333333332, ans=0.2
+2024-08-25 04:19:27,442 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.40 vs. limit=15.0
+2024-08-25 04:19:28,199 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21973.333333333332, ans=0.1
+2024-08-25 04:19:30,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=21973.333333333332, ans=0.0
+2024-08-25 04:19:41,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=22026.666666666668, ans=0.125
+2024-08-25 04:19:47,378 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=22026.666666666668, ans=0.0
+2024-08-25 04:19:49,387 INFO [train.py:1114] (0/4) Epoch 2, batch 1650, loss[loss=0.4057, simple_loss=0.4005, pruned_loss=0.1481, ctc_loss=0.2867, over 19663.00 frames. ], tot_loss[loss=0.3686, simple_loss=0.3717, pruned_loss=0.1326, ctc_loss=0.2505, over 3833014.04 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:19:50,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=22080.0, ans=0.125
+2024-08-25 04:19:54,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=22080.0, ans=0.125
+2024-08-25 04:19:58,208 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:20:07,874 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.68 vs. limit=12.0
+2024-08-25 04:20:17,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=22186.666666666668, ans=0.5
+2024-08-25 04:20:26,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=22240.0, ans=0.2
+2024-08-25 04:20:41,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=22293.333333333332, ans=0.2
+2024-08-25 04:20:48,548 INFO [train.py:1114] (0/4) Epoch 2, batch 1700, loss[loss=0.3328, simple_loss=0.3327, pruned_loss=0.121, ctc_loss=0.2273, over 19663.00 frames. ], tot_loss[loss=0.3656, simple_loss=0.3703, pruned_loss=0.1309, ctc_loss=0.2474, over 3847569.20 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:21:16,630 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.264e+02 2.715e+02 3.253e+02 5.462e+02, threshold=5.430e+02, percent-clipped=0.0
+2024-08-25 04:21:19,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=22453.333333333332, ans=0.125
+2024-08-25 04:21:48,270 INFO [train.py:1114] (0/4) Epoch 2, batch 1750, loss[loss=0.3342, simple_loss=0.3361, pruned_loss=0.1211, ctc_loss=0.2255, over 19718.00 frames. ], tot_loss[loss=0.363, simple_loss=0.3684, pruned_loss=0.1298, ctc_loss=0.245, over 3852085.59 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:21:48,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=22613.333333333332, ans=0.025
+2024-08-25 04:21:55,544 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.08 vs. limit=15.0
+2024-08-25 04:22:11,583 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=22720.0, ans=0.025
+2024-08-25 04:22:21,132 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.94 vs. limit=15.0
+2024-08-25 04:22:24,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=22773.333333333332, ans=0.125
+2024-08-25 04:22:33,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22773.333333333332, ans=0.1
+2024-08-25 04:23:01,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=22880.0, ans=0.125
+2024-08-25 04:23:02,448 INFO [train.py:1114] (0/4) Epoch 2, batch 1800, loss[loss=0.3738, simple_loss=0.3766, pruned_loss=0.1348, ctc_loss=0.2536, over 19612.00 frames. ], tot_loss[loss=0.3632, simple_loss=0.3686, pruned_loss=0.1299, ctc_loss=0.2452, over 3853623.10 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:23:08,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_na.min_abs, batch_count=22880.0, ans=0.02
+2024-08-25 04:23:11,536 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=22880.0, ans=0.0
+2024-08-25 04:23:17,515 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.90 vs. limit=15.0
+2024-08-25 04:23:28,018 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.473e+02 2.913e+02 3.585e+02 6.262e+02, threshold=5.825e+02, percent-clipped=5.0
+2024-08-25 04:23:29,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=22986.666666666668, ans=0.2
+2024-08-25 04:23:31,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=22986.666666666668, ans=0.0
+2024-08-25 04:23:32,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22986.666666666668, ans=0.1
+2024-08-25 04:23:35,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=23040.0, ans=0.125
+2024-08-25 04:23:42,967 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.25 vs. limit=22.5
+2024-08-25 04:23:55,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=23093.333333333332, ans=0.04949747468305833
+2024-08-25 04:23:58,972 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.08 vs. limit=22.5
+2024-08-25 04:23:59,520 INFO [train.py:1114] (0/4) Epoch 2, batch 1850, loss[loss=0.3558, simple_loss=0.3728, pruned_loss=0.1235, ctc_loss=0.2297, over 19567.00 frames. ], tot_loss[loss=0.3606, simple_loss=0.367, pruned_loss=0.1286, ctc_loss=0.2428, over 3856613.79 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:24:02,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=23146.666666666668, ans=0.04949747468305833
+2024-08-25 04:24:19,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=23200.0, ans=0.125
+2024-08-25 04:24:37,214 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=23306.666666666668, ans=0.125
+2024-08-25 04:24:52,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=23360.0, ans=0.125
+2024-08-25 04:24:54,816 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.98 vs. limit=6.0
+2024-08-25 04:24:56,426 INFO [train.py:1114] (0/4) Epoch 2, batch 1900, loss[loss=0.3601, simple_loss=0.3764, pruned_loss=0.1237, ctc_loss=0.2414, over 19660.00 frames. ], tot_loss[loss=0.3611, simple_loss=0.3676, pruned_loss=0.1287, ctc_loss=0.243, over 3861488.72 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 32.0
+2024-08-25 04:24:56,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=23413.333333333332, ans=0.125
+2024-08-25 04:25:21,307 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.247e+02 2.781e+02 3.399e+02 7.136e+02, threshold=5.561e+02, percent-clipped=3.0
+2024-08-25 04:25:36,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=23573.333333333332, ans=0.0
+2024-08-25 04:25:55,301 INFO [train.py:1114] (0/4) Epoch 2, batch 1950, loss[loss=0.3441, simple_loss=0.3516, pruned_loss=0.1221, ctc_loss=0.2308, over 19592.00 frames. ], tot_loss[loss=0.3616, simple_loss=0.3684, pruned_loss=0.1288, ctc_loss=0.243, over 3870074.09 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:25:57,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=23680.0, ans=0.2
+2024-08-25 04:26:05,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=23680.0, ans=0.0
+2024-08-25 04:26:17,905 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=23786.666666666668, ans=0.2
+2024-08-25 04:26:25,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=23786.666666666668, ans=0.125
+2024-08-25 04:26:53,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23946.666666666668, ans=0.1
+2024-08-25 04:26:54,474 INFO [train.py:1114] (0/4) Epoch 2, batch 2000, loss[loss=0.3721, simple_loss=0.3574, pruned_loss=0.1402, ctc_loss=0.2662, over 19656.00 frames. ], tot_loss[loss=0.3639, simple_loss=0.3699, pruned_loss=0.13, ctc_loss=0.245, over 3855218.47 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:26:56,025 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.51 vs. limit=15.0
+2024-08-25 04:27:08,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=24000.0, ans=0.125
+2024-08-25 04:27:20,441 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.625e+02 3.128e+02 3.968e+02 6.078e+02, threshold=6.255e+02, percent-clipped=2.0
+2024-08-25 04:27:32,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=24106.666666666668, ans=0.2
+2024-08-25 04:27:36,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=24106.666666666668, ans=0.1
+2024-08-25 04:27:49,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=24213.333333333332, ans=0.1
+2024-08-25 04:27:51,124 INFO [train.py:1114] (0/4) Epoch 2, batch 2050, loss[loss=0.2713, simple_loss=0.3073, pruned_loss=0.0837, ctc_loss=0.17, over 19714.00 frames. ], tot_loss[loss=0.3637, simple_loss=0.3693, pruned_loss=0.13, ctc_loss=0.2454, over 3852140.17 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:27:55,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=24213.333333333332, ans=0.0
+2024-08-25 04:27:55,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=24213.333333333332, ans=0.125
+2024-08-25 04:27:55,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=24213.333333333332, ans=0.0
+2024-08-25 04:28:00,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=24213.333333333332, ans=0.125
+2024-08-25 04:28:08,419 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.12 vs. limit=10.0
+2024-08-25 04:28:12,624 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.57 vs. limit=8.0
+2024-08-25 04:28:15,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=24320.0, ans=0.125
+2024-08-25 04:28:29,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=24373.333333333332, ans=0.95
+2024-08-25 04:28:29,236 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=24373.333333333332, ans=0.1
+2024-08-25 04:28:47,780 INFO [train.py:1114] (0/4) Epoch 2, batch 2100, loss[loss=0.3298, simple_loss=0.3476, pruned_loss=0.1137, ctc_loss=0.212, over 19792.00 frames. ], tot_loss[loss=0.3601, simple_loss=0.3673, pruned_loss=0.1281, ctc_loss=0.242, over 3859095.84 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:28:53,656 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=24480.0, ans=0.0
+2024-08-25 04:28:57,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=24480.0, ans=0.1
+2024-08-25 04:28:58,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=24533.333333333332, ans=0.0
+2024-08-25 04:29:04,624 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.81 vs. limit=22.5
+2024-08-25 04:29:07,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=24533.333333333332, ans=0.005536231884057972
+2024-08-25 04:29:11,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=24586.666666666668, ans=0.125
+2024-08-25 04:29:14,124 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.311e+02 2.619e+02 3.137e+02 5.086e+02, threshold=5.238e+02, percent-clipped=0.0
+2024-08-25 04:29:24,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=24640.0, ans=0.0
+2024-08-25 04:29:35,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=24693.333333333332, ans=0.2
+2024-08-25 04:29:44,340 INFO [train.py:1114] (0/4) Epoch 2, batch 2150, loss[loss=0.326, simple_loss=0.3462, pruned_loss=0.1107, ctc_loss=0.211, over 19582.00 frames. ], tot_loss[loss=0.3569, simple_loss=0.3653, pruned_loss=0.1265, ctc_loss=0.2387, over 3869528.29 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 04:29:51,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=24746.666666666668, ans=0.125
+2024-08-25 04:30:01,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=24800.0, ans=0.0
+2024-08-25 04:30:08,599 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.63 vs. limit=15.0
+2024-08-25 04:30:17,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 04:30:20,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=24906.666666666668, ans=0.2
+2024-08-25 04:30:24,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=24906.666666666668, ans=0.025
+2024-08-25 04:30:39,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=25013.333333333332, ans=0.125
+2024-08-25 04:30:40,040 INFO [train.py:1114] (0/4) Epoch 2, batch 2200, loss[loss=0.3349, simple_loss=0.3534, pruned_loss=0.1149, ctc_loss=0.2165, over 19607.00 frames. ], tot_loss[loss=0.3571, simple_loss=0.3656, pruned_loss=0.1265, ctc_loss=0.2388, over 3867725.96 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:30:44,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25013.333333333332, ans=0.1
+2024-08-25 04:31:01,285 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.59 vs. limit=15.0
+2024-08-25 04:31:06,350 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.398e+02 2.814e+02 3.505e+02 8.042e+02, threshold=5.628e+02, percent-clipped=3.0
+2024-08-25 04:31:15,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=25173.333333333332, ans=0.0
+2024-08-25 04:31:27,238 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=25226.666666666668, ans=0.125
+2024-08-25 04:31:35,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=25226.666666666668, ans=0.125
+2024-08-25 04:31:37,496 INFO [train.py:1114] (0/4) Epoch 2, batch 2250, loss[loss=0.3802, simple_loss=0.3787, pruned_loss=0.1382, ctc_loss=0.263, over 19629.00 frames. ], tot_loss[loss=0.3565, simple_loss=0.3651, pruned_loss=0.1263, ctc_loss=0.2382, over 3866861.28 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:31:45,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=25280.0, ans=0.005373913043478261
+2024-08-25 04:31:56,472 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25333.333333333332, ans=0.1
+2024-08-25 04:32:06,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 04:32:08,039 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 04:32:21,373 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25493.333333333332, ans=0.1
+2024-08-25 04:32:33,191 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.64 vs. limit=15.0
+2024-08-25 04:32:33,459 INFO [train.py:1114] (0/4) Epoch 2, batch 2300, loss[loss=0.3605, simple_loss=0.3629, pruned_loss=0.131, ctc_loss=0.2404, over 19494.00 frames. ], tot_loss[loss=0.3563, simple_loss=0.3644, pruned_loss=0.1264, ctc_loss=0.2383, over 3861320.93 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 16.0
+2024-08-25 04:32:37,327 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.64 vs. limit=22.5
+2024-08-25 04:32:40,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=25546.666666666668, ans=0.125
+2024-08-25 04:32:41,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=25546.666666666668, ans=0.2
+2024-08-25 04:32:45,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=25600.0, ans=0.2
+2024-08-25 04:32:51,864 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.85 vs. limit=15.0
+2024-08-25 04:33:00,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=25653.333333333332, ans=0.125
+2024-08-25 04:33:02,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=25653.333333333332, ans=0.0
+2024-08-25 04:33:03,049 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.317e+02 2.709e+02 3.466e+02 6.027e+02, threshold=5.417e+02, percent-clipped=4.0
+2024-08-25 04:33:07,669 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.55 vs. limit=22.5
+2024-08-25 04:33:14,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=25706.666666666668, ans=0.125
+2024-08-25 04:33:19,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=25706.666666666668, ans=0.125
+2024-08-25 04:33:32,456 INFO [train.py:1114] (0/4) Epoch 2, batch 2350, loss[loss=0.3885, simple_loss=0.3859, pruned_loss=0.1429, ctc_loss=0.2632, over 19671.00 frames. ], tot_loss[loss=0.3565, simple_loss=0.3646, pruned_loss=0.1266, ctc_loss=0.2379, over 3864359.88 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 16.0
+2024-08-25 04:33:45,189 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:33:47,696 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.37 vs. limit=15.0
+2024-08-25 04:33:49,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=25866.666666666668, ans=0.125
+2024-08-25 04:34:00,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=25920.0, ans=0.125
+2024-08-25 04:34:01,278 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:34:10,384 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=23.17 vs. limit=15.0
+2024-08-25 04:34:13,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=25973.333333333332, ans=0.125
+2024-08-25 04:34:26,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=26026.666666666668, ans=0.025
+2024-08-25 04:34:30,672 INFO [train.py:1114] (0/4) Epoch 2, batch 2400, loss[loss=0.3307, simple_loss=0.3618, pruned_loss=0.1073, ctc_loss=0.2124, over 19337.00 frames. ], tot_loss[loss=0.3588, simple_loss=0.3669, pruned_loss=0.1274, ctc_loss=0.2397, over 3858931.03 frames. ], batch size: 67, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 04:34:57,145 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.184e+02 2.505e+02 3.102e+02 8.045e+02, threshold=5.010e+02, percent-clipped=5.0
+2024-08-25 04:35:04,044 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:35:27,217 INFO [train.py:1114] (0/4) Epoch 2, batch 2450, loss[loss=0.4359, simple_loss=0.4027, pruned_loss=0.1718, ctc_loss=0.3139, over 13904.00 frames. ], tot_loss[loss=0.3694, simple_loss=0.3728, pruned_loss=0.1331, ctc_loss=0.2494, over 3730699.37 frames. ], batch size: 140, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 04:35:34,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=26346.666666666668, ans=0.0
+2024-08-25 04:35:35,006 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.61 vs. limit=22.5
+2024-08-25 04:35:42,934 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.92 vs. limit=15.0
+2024-08-25 04:36:11,219 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-2.pt
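+
+The checkpoint line above marks the end of epoch 2. A minimal sketch of what such a per-epoch save typically bundles (assumption: icefall's checkpoint.py also stores sampler and grad-scaler state):
+
+import torch
+
+def save_epoch_checkpoint(path, model, optimizer, scheduler, epoch):
+    torch.save(
+        {
+            "model": model.state_dict(),
+            "optimizer": optimizer.state_dict(),
+            "scheduler": scheduler.state_dict(),
+            "epoch": epoch,
+        },
+        path,   # e.g. ".../exp/epoch-2.pt" as in the log
+    )
+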
+2024-08-25 04:36:55,764 INFO [train.py:1114] (0/4) Epoch 3, batch 0, loss[loss=0.3357, simple_loss=0.3463, pruned_loss=0.1184, ctc_loss=0.2208, over 19791.00 frames. ], tot_loss[loss=0.3357, simple_loss=0.3463, pruned_loss=0.1184, ctc_loss=0.2208, over 19791.00 frames. ], batch size: 49, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 04:36:55,765 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 04:37:08,018 INFO [train.py:1146] (0/4) Epoch 3, validation: loss=0.2847, simple_loss=0.3461, pruned_loss=0.08168, ctc_loss=0.1499, over 944034.00 frames.
+2024-08-25 04:37:08,018 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
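+
+At the first batch of each epoch the trainer computes a full validation loss and reports peak GPU memory, as in the four lines above. A simplified sketch of that step (the per-batch model(batch) API here is hypothetical; the real train.py tracks each loss component frame-weighted, the same way as tot_loss):
+
+import torch
+
+@torch.no_grad()
+def compute_validation_loss(model, valid_loader, device):
+    model.eval()
+    loss_sum, frames = 0.0, 0.0
+    for batch in valid_loader:
+        loss, num_frames = model(batch)          # hypothetical API
+        loss_sum += loss.item() * num_frames
+        frames += num_frames
+    model.train()
+    mem_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
+    return loss_sum / frames, mem_mb             # cf. "Maximum memory allocated"
+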
+2024-08-25 04:37:16,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=26554.666666666668, ans=0.04949747468305833
+2024-08-25 04:37:27,666 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=26608.0, ans=0.125
+2024-08-25 04:37:36,177 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:37:43,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26714.666666666668, ans=0.1
+2024-08-25 04:37:46,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=26714.666666666668, ans=0.09899494936611666
+2024-08-25 04:37:50,801 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.252e+02 2.580e+02 3.143e+02 6.401e+02, threshold=5.159e+02, percent-clipped=2.0
+2024-08-25 04:37:53,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26714.666666666668, ans=0.1
+2024-08-25 04:38:10,080 INFO [train.py:1114] (0/4) Epoch 3, batch 50, loss[loss=0.2937, simple_loss=0.3174, pruned_loss=0.09674, ctc_loss=0.1914, over 19698.00 frames. ], tot_loss[loss=0.3658, simple_loss=0.3722, pruned_loss=0.1305, ctc_loss=0.2463, over 845269.70 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:38:18,472 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.87 vs. limit=15.0
+2024-08-25 04:38:35,083 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=26821.333333333332, ans=0.005038840579710145
+2024-08-25 04:38:40,977 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:38:42,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=26874.666666666668, ans=0.09899494936611666
+2024-08-25 04:38:52,951 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.46 vs. limit=10.0
+2024-08-25 04:38:53,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=26928.0, ans=0.025
+2024-08-25 04:38:55,451 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.78 vs. limit=6.0
+2024-08-25 04:39:12,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=26981.333333333332, ans=0.0
+2024-08-25 04:39:12,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=26981.333333333332, ans=0.0
+2024-08-25 04:39:14,216 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=26981.333333333332, ans=0.0
+2024-08-25 04:39:26,801 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.93 vs. limit=15.0
+2024-08-25 04:39:28,431 INFO [train.py:1114] (0/4) Epoch 3, batch 100, loss[loss=0.3251, simple_loss=0.3424, pruned_loss=0.1114, ctc_loss=0.2124, over 19727.00 frames. ], tot_loss[loss=0.3601, simple_loss=0.369, pruned_loss=0.1275, ctc_loss=0.2406, over 1500423.89 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:39:33,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27088.0, ans=0.1
+2024-08-25 04:39:52,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=27194.666666666668, ans=0.125
+2024-08-25 04:40:03,041 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.98 vs. limit=15.0
+2024-08-25 04:40:11,089 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.221e+02 2.583e+02 3.158e+02 4.904e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-08-25 04:40:27,491 INFO [train.py:1114] (0/4) Epoch 3, batch 150, loss[loss=0.298, simple_loss=0.3216, pruned_loss=0.1009, ctc_loss=0.1818, over 19745.00 frames. ], tot_loss[loss=0.3534, simple_loss=0.3641, pruned_loss=0.1244, ctc_loss=0.2346, over 2028606.56 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 04:40:34,672 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 04:40:35,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27354.666666666668, ans=0.1
+2024-08-25 04:40:54,121 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:40:56,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=27461.333333333332, ans=0.125
+2024-08-25 04:41:01,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27461.333333333332, ans=0.1
+2024-08-25 04:41:07,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=27514.666666666668, ans=0.0
+2024-08-25 04:41:14,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=27514.666666666668, ans=0.125
+2024-08-25 04:41:21,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=27568.0, ans=0.125
+2024-08-25 04:41:26,627 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.62 vs. limit=10.0
+2024-08-25 04:41:29,357 INFO [train.py:1114] (0/4) Epoch 3, batch 200, loss[loss=0.3843, simple_loss=0.3738, pruned_loss=0.1448, ctc_loss=0.2626, over 18269.00 frames. ], tot_loss[loss=0.351, simple_loss=0.362, pruned_loss=0.1235, ctc_loss=0.2328, over 2435586.34 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:41:31,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=27621.333333333332, ans=0.004864927536231884
+2024-08-25 04:41:56,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=27728.0, ans=0.0
+2024-08-25 04:42:03,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=27728.0, ans=0.125
+2024-08-25 04:42:14,181 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.192e+02 2.550e+02 3.125e+02 5.269e+02, threshold=5.099e+02, percent-clipped=1.0
+2024-08-25 04:42:28,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=27834.666666666668, ans=0.025
+2024-08-25 04:42:35,057 INFO [train.py:1114] (0/4) Epoch 3, batch 250, loss[loss=0.4041, simple_loss=0.3973, pruned_loss=0.149, ctc_loss=0.2822, over 19362.00 frames. ], tot_loss[loss=0.3512, simple_loss=0.3622, pruned_loss=0.1236, ctc_loss=0.2325, over 2754938.96 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:42:49,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=27941.333333333332, ans=0.09899494936611666
+2024-08-25 04:42:56,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 04:43:15,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.28 vs. limit=15.0
+2024-08-25 04:43:25,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=28101.333333333332, ans=0.0
+2024-08-25 04:43:33,551 INFO [train.py:1114] (0/4) Epoch 3, batch 300, loss[loss=0.3772, simple_loss=0.3851, pruned_loss=0.1348, ctc_loss=0.2494, over 19553.00 frames. ], tot_loss[loss=0.3491, simple_loss=0.3606, pruned_loss=0.1226, ctc_loss=0.2309, over 2999555.88 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:43:33,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 04:43:34,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=28154.666666666668, ans=0.004748985507246377
+2024-08-25 04:43:38,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 04:43:41,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=28154.666666666668, ans=0.1
+2024-08-25 04:43:42,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=28154.666666666668, ans=0.004748985507246377
+2024-08-25 04:43:43,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 04:43:48,472 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=28208.0, ans=0.0
+2024-08-25 04:43:51,552 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.47 vs. limit=22.5
+2024-08-25 04:43:52,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=28208.0, ans=10.0
+2024-08-25 04:44:10,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=28314.666666666668, ans=0.0
+2024-08-25 04:44:18,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 04:44:18,931 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.242e+02 2.624e+02 3.299e+02 5.169e+02, threshold=5.248e+02, percent-clipped=1.0
+2024-08-25 04:44:24,585 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.98 vs. limit=15.0
+2024-08-25 04:44:36,137 INFO [train.py:1114] (0/4) Epoch 3, batch 350, loss[loss=0.325, simple_loss=0.3361, pruned_loss=0.1147, ctc_loss=0.2113, over 19749.00 frames. ], tot_loss[loss=0.3501, simple_loss=0.3614, pruned_loss=0.123, ctc_loss=0.2318, over 3189804.67 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:44:39,466 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:44:44,243 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:45:09,210 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.66 vs. limit=6.0
+2024-08-25 04:45:59,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=28634.666666666668, ans=0.5
+2024-08-25 04:46:52,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28634.666666666668, ans=0.1
+2024-08-25 04:46:55,908 INFO [train.py:1114] (0/4) Epoch 3, batch 400, loss[loss=0.3213, simple_loss=0.3501, pruned_loss=0.1066, ctc_loss=0.198, over 19501.00 frames. ], tot_loss[loss=0.3483, simple_loss=0.3605, pruned_loss=0.122, ctc_loss=0.2302, over 3342807.24 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 04:47:29,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=28741.333333333332, ans=0.125
+2024-08-25 04:47:37,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28741.333333333332, ans=0.125
+2024-08-25 04:47:39,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=28794.666666666668, ans=0.125
+2024-08-25 04:48:22,804 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.232e+02 2.568e+02 3.025e+02 1.134e+03, threshold=5.136e+02, percent-clipped=4.0
+2024-08-25 04:48:36,685 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=9.27 vs. limit=12.0
+2024-08-25 04:48:45,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=28901.333333333332, ans=0.025
+2024-08-25 04:48:48,319 INFO [train.py:1114] (0/4) Epoch 3, batch 450, loss[loss=0.3477, simple_loss=0.3688, pruned_loss=0.1185, ctc_loss=0.2244, over 19610.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.3595, pruned_loss=0.121, ctc_loss=0.2283, over 3452268.07 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:48:55,236 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=28954.666666666668, ans=0.0
+2024-08-25 04:49:03,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=29008.0, ans=0.125
+2024-08-25 04:49:28,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=29114.666666666668, ans=0.09899494936611666
+2024-08-25 04:49:31,337 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.92 vs. limit=15.0
+2024-08-25 04:50:09,360 INFO [train.py:1114] (0/4) Epoch 3, batch 500, loss[loss=0.3209, simple_loss=0.3535, pruned_loss=0.1059, ctc_loss=0.1916, over 19636.00 frames. ], tot_loss[loss=0.3453, simple_loss=0.3585, pruned_loss=0.1206, ctc_loss=0.2275, over 3547304.73 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:50:54,717 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.47 vs. limit=15.0
+2024-08-25 04:51:09,146 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.370e+02 2.734e+02 3.745e+02 5.336e+02, threshold=5.469e+02, percent-clipped=1.0
+2024-08-25 04:51:28,444 INFO [train.py:1114] (0/4) Epoch 3, batch 550, loss[loss=0.348, simple_loss=0.3649, pruned_loss=0.1193, ctc_loss=0.2316, over 19227.00 frames. ], tot_loss[loss=0.3451, simple_loss=0.3586, pruned_loss=0.1204, ctc_loss=0.2268, over 3608971.33 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:51:54,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=29594.666666666668, ans=0.0
+2024-08-25 04:52:03,825 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.37 vs. limit=15.0
+2024-08-25 04:52:07,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=29648.0, ans=0.1
+2024-08-25 04:52:50,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29648.0, ans=0.1
+2024-08-25 04:52:56,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=29701.333333333332, ans=0.0
+2024-08-25 04:53:06,044 INFO [train.py:1114] (0/4) Epoch 3, batch 600, loss[loss=0.3857, simple_loss=0.3881, pruned_loss=0.1406, ctc_loss=0.2549, over 19344.00 frames. ], tot_loss[loss=0.3429, simple_loss=0.3575, pruned_loss=0.1192, ctc_loss=0.2246, over 3665528.88 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:53:06,316 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29754.666666666668, ans=0.1
+2024-08-25 04:53:09,065 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.77 vs. limit=15.0
+2024-08-25 04:53:21,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29808.0, ans=0.1
+2024-08-25 04:53:49,297 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.141e+02 2.536e+02 3.031e+02 6.622e+02, threshold=5.071e+02, percent-clipped=2.0
+2024-08-25 04:54:00,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.83 vs. limit=15.0
+2024-08-25 04:54:04,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29968.0, ans=0.1
+2024-08-25 04:54:06,081 INFO [train.py:1114] (0/4) Epoch 3, batch 650, loss[loss=0.3153, simple_loss=0.3485, pruned_loss=0.1026, ctc_loss=0.1921, over 19787.00 frames. ], tot_loss[loss=0.3421, simple_loss=0.3568, pruned_loss=0.1188, ctc_loss=0.2241, over 3715665.21 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 32.0
+2024-08-25 04:54:14,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 04:54:20,760 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:22,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:37,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:55:06,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.56 vs. limit=15.0
+2024-08-25 04:55:10,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30234.666666666668, ans=0.1
+2024-08-25 04:55:16,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=30234.666666666668, ans=0.0
+2024-08-25 04:55:19,007 INFO [train.py:1114] (0/4) Epoch 3, batch 700, loss[loss=0.3472, simple_loss=0.3544, pruned_loss=0.1228, ctc_loss=0.2362, over 19730.00 frames. ], tot_loss[loss=0.3423, simple_loss=0.357, pruned_loss=0.119, ctc_loss=0.2243, over 3746817.77 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:55:22,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=30288.0, ans=0.125
+2024-08-25 04:55:44,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=30394.666666666668, ans=0.2
+2024-08-25 04:56:38,918 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.292e+02 2.520e+02 3.192e+02 5.203e+02, threshold=5.040e+02, percent-clipped=1.0
+2024-08-25 04:56:41,164 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.83 vs. limit=15.0
+2024-08-25 04:56:44,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30501.333333333332, ans=0.1
+2024-08-25 04:56:56,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30554.666666666668, ans=0.1
+2024-08-25 04:56:57,175 INFO [train.py:1114] (0/4) Epoch 3, batch 750, loss[loss=0.3245, simple_loss=0.3482, pruned_loss=0.1088, ctc_loss=0.2078, over 19487.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3564, pruned_loss=0.1188, ctc_loss=0.2234, over 3772901.44 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:57:04,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=30554.666666666668, ans=0.125
+2024-08-25 04:57:07,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=30554.666666666668, ans=15.0
+2024-08-25 04:57:09,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30554.666666666668, ans=0.1
+2024-08-25 04:57:19,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30608.0, ans=0.125
+2024-08-25 04:57:37,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=30714.666666666668, ans=0.0
+2024-08-25 04:57:38,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=30714.666666666668, ans=0.004192463768115941
+2024-08-25 04:57:45,445 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=30714.666666666668, ans=0.0
+2024-08-25 04:57:59,168 INFO [train.py:1114] (0/4) Epoch 3, batch 800, loss[loss=0.3067, simple_loss=0.3264, pruned_loss=0.1048, ctc_loss=0.1935, over 19821.00 frames. ], tot_loss[loss=0.3408, simple_loss=0.3558, pruned_loss=0.1184, ctc_loss=0.2225, over 3794536.37 frames. ], batch size: 49, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:58:26,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=30928.0, ans=0.2
+2024-08-25 04:58:27,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=30928.0, ans=0.125
+2024-08-25 04:58:32,136 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:58:42,765 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.211e+02 2.622e+02 3.205e+02 5.257e+02, threshold=5.244e+02, percent-clipped=1.0
+2024-08-25 04:58:46,183 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.79 vs. limit=15.0
+2024-08-25 04:58:47,213 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.26 vs. limit=10.0
+2024-08-25 04:58:49,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=31034.666666666668, ans=0.125
+2024-08-25 04:58:51,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=31034.666666666668, ans=0.125
+2024-08-25 04:59:01,970 INFO [train.py:1114] (0/4) Epoch 3, batch 850, loss[loss=0.3465, simple_loss=0.3672, pruned_loss=0.1175, ctc_loss=0.2272, over 19628.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3559, pruned_loss=0.1182, ctc_loss=0.2222, over 3815463.35 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:59:23,801 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:59:54,019 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=5.499e-03
+2024-08-25 05:00:04,411 INFO [train.py:1114] (0/4) Epoch 3, batch 900, loss[loss=0.3238, simple_loss=0.3303, pruned_loss=0.1146, ctc_loss=0.22, over 19827.00 frames. ], tot_loss[loss=0.3427, simple_loss=0.357, pruned_loss=0.1194, ctc_loss=0.224, over 3819825.72 frames. ], batch size: 49, lr: 3.72e-02, grad_scale: 8.0
+2024-08-25 05:00:06,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=31354.666666666668, ans=0.004053333333333333
+2024-08-25 05:00:08,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=31354.666666666668, ans=0.125
+2024-08-25 05:00:09,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.86 vs. limit=15.0
+2024-08-25 05:00:17,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=31408.0, ans=0.125
+2024-08-25 05:00:19,373 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=31408.0, ans=0.2
+2024-08-25 05:00:54,424 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.296e+02 2.736e+02 3.525e+02 1.528e+03, threshold=5.472e+02, percent-clipped=4.0
+2024-08-25 05:01:02,171 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.09 vs. limit=15.0
+2024-08-25 05:01:08,295 INFO [train.py:1114] (0/4) Epoch 3, batch 950, loss[loss=0.3109, simple_loss=0.3273, pruned_loss=0.1051, ctc_loss=0.2106, over 19499.00 frames. ], tot_loss[loss=0.3423, simple_loss=0.3569, pruned_loss=0.1191, ctc_loss=0.2239, over 3821606.58 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:01:14,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=31621.333333333332, ans=0.125
+2024-08-25 05:01:21,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=31674.666666666668, ans=0.125
+2024-08-25 05:01:27,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=31674.666666666668, ans=0.5
+2024-08-25 05:01:53,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31781.333333333332, ans=0.1
+2024-08-25 05:02:08,124 INFO [train.py:1114] (0/4) Epoch 3, batch 1000, loss[loss=0.2884, simple_loss=0.3227, pruned_loss=0.0923, ctc_loss=0.1738, over 19845.00 frames. ], tot_loss[loss=0.3433, simple_loss=0.3577, pruned_loss=0.1196, ctc_loss=0.2243, over 3817304.87 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:02:08,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=31888.0, ans=0.1
+2024-08-25 05:02:37,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=31994.666666666668, ans=0.2
+2024-08-25 05:02:51,015 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.38 vs. limit=15.0
+2024-08-25 05:02:56,467 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.163e+02 2.492e+02 3.027e+02 5.724e+02, threshold=4.983e+02, percent-clipped=1.0
+2024-08-25 05:02:58,474 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.57 vs. limit=22.5
+2024-08-25 05:03:13,729 INFO [train.py:1114] (0/4) Epoch 3, batch 1050, loss[loss=0.3936, simple_loss=0.3971, pruned_loss=0.1432, ctc_loss=0.2594, over 19840.00 frames. ], tot_loss[loss=0.3425, simple_loss=0.3569, pruned_loss=0.1193, ctc_loss=0.2238, over 3823854.97 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:03:14,107 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=32154.666666666668, ans=0.2
+2024-08-25 05:03:18,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=32154.666666666668, ans=0.1
+2024-08-25 05:04:30,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=32314.666666666668, ans=0.025
+2024-08-25 05:04:30,424 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.30 vs. limit=12.0
+2024-08-25 05:04:54,484 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.82 vs. limit=6.0
+2024-08-25 05:05:04,405 INFO [train.py:1114] (0/4) Epoch 3, batch 1100, loss[loss=0.3226, simple_loss=0.344, pruned_loss=0.1103, ctc_loss=0.2016, over 19578.00 frames. ], tot_loss[loss=0.3412, simple_loss=0.3563, pruned_loss=0.1185, ctc_loss=0.2225, over 3830357.79 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:05:04,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=32421.333333333332, ans=0.2
+2024-08-25 05:05:08,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=32421.333333333332, ans=0.125
+2024-08-25 05:06:00,571 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.355e+02 2.517e+02 3.019e+02 4.945e+02, threshold=5.033e+02, percent-clipped=0.0
+2024-08-25 05:06:15,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=32634.666666666668, ans=0.125
+2024-08-25 05:06:23,033 INFO [train.py:1114] (0/4) Epoch 3, batch 1150, loss[loss=0.2892, simple_loss=0.3198, pruned_loss=0.09385, ctc_loss=0.1773, over 19566.00 frames. ], tot_loss[loss=0.3414, simple_loss=0.356, pruned_loss=0.1188, ctc_loss=0.2229, over 3828597.53 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 8.0
+2024-08-25 05:06:40,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=32741.333333333332, ans=0.125
+2024-08-25 05:06:41,261 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=32741.333333333332, ans=0.125
+2024-08-25 05:07:07,693 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.34 vs. limit=15.0
+2024-08-25 05:07:08,984 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.95 vs. limit=6.0
+2024-08-25 05:07:14,780 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=32848.0, ans=0.0
+2024-08-25 05:07:29,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=32901.333333333336, ans=0.125
+2024-08-25 05:07:32,070 INFO [train.py:1114] (0/4) Epoch 3, batch 1200, loss[loss=0.3156, simple_loss=0.3536, pruned_loss=0.1009, ctc_loss=0.1894, over 19832.00 frames. ], tot_loss[loss=0.3425, simple_loss=0.3571, pruned_loss=0.1192, ctc_loss=0.2236, over 3824323.82 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:07:34,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=32954.666666666664, ans=0.0
+2024-08-25 05:07:54,987 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.89 vs. limit=22.5
+2024-08-25 05:07:57,500 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.02 vs. limit=15.0
+2024-08-25 05:08:16,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=33114.666666666664, ans=0.00367072463768116
+2024-08-25 05:08:19,680 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.128e+02 2.359e+02 2.757e+02 6.653e+02, threshold=4.718e+02, percent-clipped=2.0
+2024-08-25 05:08:31,443 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.21 vs. limit=10.0
+2024-08-25 05:08:38,023 INFO [train.py:1114] (0/4) Epoch 3, batch 1250, loss[loss=0.3799, simple_loss=0.3859, pruned_loss=0.1342, ctc_loss=0.2639, over 19515.00 frames. ], tot_loss[loss=0.3419, simple_loss=0.3572, pruned_loss=0.1187, ctc_loss=0.2229, over 3843084.15 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:08:42,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33221.333333333336, ans=0.1
+2024-08-25 05:08:58,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=33274.666666666664, ans=0.125
+2024-08-25 05:09:03,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=33328.0, ans=0.0
+2024-08-25 05:09:18,708 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.66 vs. limit=15.0
+2024-08-25 05:09:19,990 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.37 vs. limit=15.0
+2024-08-25 05:09:42,118 INFO [train.py:1114] (0/4) Epoch 3, batch 1300, loss[loss=0.3645, simple_loss=0.3718, pruned_loss=0.1287, ctc_loss=0.2495, over 18759.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3561, pruned_loss=0.1182, ctc_loss=0.222, over 3847195.98 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:09:44,051 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.10 vs. limit=15.0
+2024-08-25 05:09:50,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=33488.0, ans=0.125
+2024-08-25 05:09:51,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=33488.0, ans=0.125
+2024-08-25 05:09:56,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33541.333333333336, ans=0.1
+2024-08-25 05:10:04,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=33594.666666666664, ans=0.125
+2024-08-25 05:10:05,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=33594.666666666664, ans=0.2
+2024-08-25 05:10:48,159 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.161e+02 2.525e+02 2.896e+02 5.464e+02, threshold=5.050e+02, percent-clipped=3.0
+2024-08-25 05:11:02,302 INFO [train.py:1114] (0/4) Epoch 3, batch 1350, loss[loss=0.328, simple_loss=0.3485, pruned_loss=0.1119, ctc_loss=0.2094, over 19761.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.356, pruned_loss=0.1183, ctc_loss=0.2216, over 3857821.88 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:11:07,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=33754.666666666664, ans=0.2
+2024-08-25 05:11:15,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=33754.666666666664, ans=0.125
+2024-08-25 05:11:49,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=33861.333333333336, ans=0.003508405797101449
+2024-08-25 05:11:55,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33861.333333333336, ans=0.1
+2024-08-25 05:12:05,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=33914.666666666664, ans=0.0034968115942028994
+2024-08-25 05:12:05,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=33914.666666666664, ans=0.0034968115942028994
+2024-08-25 05:12:06,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=33914.666666666664, ans=0.125
+2024-08-25 05:12:26,329 INFO [train.py:1114] (0/4) Epoch 3, batch 1400, loss[loss=0.3036, simple_loss=0.3214, pruned_loss=0.104, ctc_loss=0.1949, over 19694.00 frames. ], tot_loss[loss=0.3384, simple_loss=0.3547, pruned_loss=0.1172, ctc_loss=0.2196, over 3864952.25 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 16.0
+2024-08-25 05:12:35,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=34021.333333333336, ans=0.125
+2024-08-25 05:13:29,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=34181.333333333336, ans=0.125
+2024-08-25 05:13:31,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=34181.333333333336, ans=0.07
+2024-08-25 05:13:31,982 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.210e+02 2.531e+02 3.096e+02 9.067e+02, threshold=5.062e+02, percent-clipped=2.0
+2024-08-25 05:14:24,484 INFO [train.py:1114] (0/4) Epoch 3, batch 1450, loss[loss=0.3541, simple_loss=0.3764, pruned_loss=0.12, ctc_loss=0.2295, over 19649.00 frames. ], tot_loss[loss=0.3397, simple_loss=0.3555, pruned_loss=0.1178, ctc_loss=0.2209, over 3863352.84 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:14:27,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=34288.0, ans=0.125
+2024-08-25 05:14:27,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.15 vs. limit=22.5
+2024-08-25 05:14:46,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=34341.333333333336, ans=0.0
+2024-08-25 05:14:51,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34341.333333333336, ans=0.1
+2024-08-25 05:14:55,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=34394.666666666664, ans=0.125
+2024-08-25 05:15:09,650 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.89 vs. limit=15.0
+2024-08-25 05:15:30,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=34501.333333333336, ans=0.0
+2024-08-25 05:15:32,930 INFO [train.py:1114] (0/4) Epoch 3, batch 1500, loss[loss=0.3387, simple_loss=0.3693, pruned_loss=0.1107, ctc_loss=0.2164, over 19595.00 frames. ], tot_loss[loss=0.3401, simple_loss=0.356, pruned_loss=0.1179, ctc_loss=0.2211, over 3863083.24 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:15:53,591 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=34608.0, ans=0.0
+2024-08-25 05:15:59,166 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.83 vs. limit=15.0
+2024-08-25 05:16:43,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=34714.666666666664, ans=0.125
+2024-08-25 05:16:51,229 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.151e+02 2.498e+02 3.151e+02 6.810e+02, threshold=4.996e+02, percent-clipped=2.0
+2024-08-25 05:19:30,677 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.69 vs. limit=15.0
+2024-08-25 05:19:36,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=34768.0, ans=0.125
+2024-08-25 05:19:40,302 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.75 vs. limit=15.0
+2024-08-25 05:20:00,634 INFO [train.py:1114] (0/4) Epoch 3, batch 1550, loss[loss=0.3799, simple_loss=0.3922, pruned_loss=0.1352, ctc_loss=0.2427, over 19626.00 frames. ], tot_loss[loss=0.34, simple_loss=0.3559, pruned_loss=0.1179, ctc_loss=0.2208, over 3846218.17 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 16.0
+2024-08-25 05:21:06,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=34981.333333333336, ans=0.125
+2024-08-25 05:22:04,996 INFO [train.py:1114] (0/4) Epoch 3, batch 1600, loss[loss=0.3178, simple_loss=0.3489, pruned_loss=0.1051, ctc_loss=0.1911, over 19836.00 frames. ], tot_loss[loss=0.3403, simple_loss=0.3557, pruned_loss=0.1182, ctc_loss=0.2212, over 3835008.45 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 05:22:11,520 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=35088.0, ans=0.0
+2024-08-25 05:22:16,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=35141.333333333336, ans=0.95
+2024-08-25 05:22:17,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=35141.333333333336, ans=0.125
+2024-08-25 05:23:22,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=35248.0, ans=0.0
+2024-08-25 05:23:25,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=35248.0, ans=0.125
+2024-08-25 05:23:42,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=35248.0, ans=0.125
+2024-08-25 05:23:43,086 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.193e+02 2.529e+02 3.233e+02 6.645e+02, threshold=5.059e+02, percent-clipped=2.0
+2024-08-25 05:23:45,101 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.39 vs. limit=12.0
+2024-08-25 05:23:49,279 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=35301.333333333336, ans=0.2
+2024-08-25 05:24:23,000 INFO [train.py:1114] (0/4) Epoch 3, batch 1650, loss[loss=0.3575, simple_loss=0.3793, pruned_loss=0.1228, ctc_loss=0.2252, over 19661.00 frames. ], tot_loss[loss=0.3403, simple_loss=0.3556, pruned_loss=0.1182, ctc_loss=0.2214, over 3831787.34 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 05:24:31,373 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=35354.666666666664, ans=0.125
+2024-08-25 05:25:28,147 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.22 vs. limit=6.0
+2024-08-25 05:25:56,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35514.666666666664, ans=0.1
+2024-08-25 05:26:16,061 INFO [train.py:1114] (0/4) Epoch 3, batch 1700, loss[loss=0.3002, simple_loss=0.3128, pruned_loss=0.1026, ctc_loss=0.2061, over 19671.00 frames. ], tot_loss[loss=0.338, simple_loss=0.3544, pruned_loss=0.1169, ctc_loss=0.2194, over 3846193.75 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:26:16,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=35621.333333333336, ans=0.0
+2024-08-25 05:26:31,278 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.21 vs. limit=15.0
+2024-08-25 05:26:40,001 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.83 vs. limit=15.0
+2024-08-25 05:26:48,319 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.69 vs. limit=10.0
+2024-08-25 05:26:49,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=35728.0, ans=0.0
+2024-08-25 05:26:58,248 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.36 vs. limit=15.0
+2024-08-25 05:26:59,413 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.60 vs. limit=15.0
+2024-08-25 05:27:08,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=35781.333333333336, ans=0.04949747468305833
+2024-08-25 05:27:10,189 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.342e+02 2.819e+02 3.429e+02 5.215e+02, threshold=5.637e+02, percent-clipped=1.0
+2024-08-25 05:27:19,614 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.06 vs. limit=6.0
+2024-08-25 05:27:19,697 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.95 vs. limit=15.0
+2024-08-25 05:27:23,548 INFO [train.py:1114] (0/4) Epoch 3, batch 1750, loss[loss=0.2892, simple_loss=0.3037, pruned_loss=0.1002, ctc_loss=0.1857, over 19628.00 frames. ], tot_loss[loss=0.3366, simple_loss=0.3535, pruned_loss=0.1163, ctc_loss=0.2179, over 3851012.17 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:27:35,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=35941.333333333336, ans=0.125
+2024-08-25 05:27:36,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=35941.333333333336, ans=0.0
+2024-08-25 05:29:19,805 INFO [train.py:1114] (0/4) Epoch 3, batch 1800, loss[loss=0.3328, simple_loss=0.3619, pruned_loss=0.1095, ctc_loss=0.2117, over 19624.00 frames. ], tot_loss[loss=0.3364, simple_loss=0.3536, pruned_loss=0.1161, ctc_loss=0.2178, over 3852942.62 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:29:45,910 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=36154.666666666664, ans=0.0
+2024-08-25 05:31:58,636 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.106e+02 2.466e+02 3.299e+02 1.077e+03, threshold=4.933e+02, percent-clipped=1.0
+2024-08-25 05:32:11,706 INFO [train.py:1114] (0/4) Epoch 3, batch 1850, loss[loss=0.3065, simple_loss=0.3454, pruned_loss=0.09589, ctc_loss=0.1895, over 19590.00 frames. ], tot_loss[loss=0.3343, simple_loss=0.3521, pruned_loss=0.1151, ctc_loss=0.216, over 3857160.26 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:32:40,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=36528.0, ans=0.125
+2024-08-25 05:33:00,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=36634.666666666664, ans=0.2
+2024-08-25 05:33:11,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=36688.0, ans=0.2
+2024-08-25 05:33:12,868 INFO [train.py:1114] (0/4) Epoch 3, batch 1900, loss[loss=0.3451, simple_loss=0.3701, pruned_loss=0.116, ctc_loss=0.2204, over 19648.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3523, pruned_loss=0.1149, ctc_loss=0.2156, over 3860676.35 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 16.0
+2024-08-25 05:34:05,256 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.260e+02 2.560e+02 3.105e+02 5.689e+02, threshold=5.120e+02, percent-clipped=2.0
+2024-08-25 05:34:06,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36901.333333333336, ans=0.1
+2024-08-25 05:34:14,626 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=36901.333333333336, ans=0.05
+2024-08-25 05:34:49,883 INFO [train.py:1114] (0/4) Epoch 3, batch 1950, loss[loss=0.3078, simple_loss=0.3327, pruned_loss=0.1021, ctc_loss=0.197, over 19579.00 frames. ], tot_loss[loss=0.3361, simple_loss=0.3542, pruned_loss=0.1156, ctc_loss=0.217, over 3869319.91 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 16.0
+2024-08-25 05:34:54,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=36954.666666666664, ans=0.2
+2024-08-25 05:35:54,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=36954.666666666664, ans=0.0028359420289855086
+2024-08-25 05:36:28,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=37061.333333333336, ans=0.0
+2024-08-25 05:36:34,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=37061.333333333336, ans=0.125
+2024-08-25 05:36:37,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=37061.333333333336, ans=0.125
+2024-08-25 05:36:47,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=37114.666666666664, ans=0.125
+2024-08-25 05:36:50,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=37114.666666666664, ans=10.0
+2024-08-25 05:36:51,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=37114.666666666664, ans=0.2
+2024-08-25 05:37:05,778 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:37:09,008 INFO [train.py:1114] (0/4) Epoch 3, batch 2000, loss[loss=0.2815, simple_loss=0.3095, pruned_loss=0.09152, ctc_loss=0.176, over 19645.00 frames. ], tot_loss[loss=0.3367, simple_loss=0.3546, pruned_loss=0.1159, ctc_loss=0.2175, over 3854838.15 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 05:37:36,327 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=26.04 vs. limit=22.5
+2024-08-25 05:38:02,421 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.243e+02 2.650e+02 3.292e+02 1.299e+03, threshold=5.300e+02, percent-clipped=6.0
+2024-08-25 05:38:08,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=37434.666666666664, ans=0.95
+2024-08-25 05:38:10,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=37434.666666666664, ans=0.0027315942028985516
+2024-08-25 05:38:13,916 INFO [train.py:1114] (0/4) Epoch 3, batch 2050, loss[loss=0.3178, simple_loss=0.3309, pruned_loss=0.1109, ctc_loss=0.2069, over 19718.00 frames. ], tot_loss[loss=0.3355, simple_loss=0.3532, pruned_loss=0.1155, ctc_loss=0.2169, over 3852097.82 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:38:15,231 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=37488.0, ans=0.125
+2024-08-25 05:38:15,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=37488.0, ans=0.125
+2024-08-25 05:38:16,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=37488.0, ans=0.125
+2024-08-25 05:38:30,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=37541.333333333336, ans=0.125
+2024-08-25 05:38:33,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37541.333333333336, ans=0.1
+2024-08-25 05:38:33,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=37541.333333333336, ans=0.1
+2024-08-25 05:38:42,807 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.06 vs. limit=15.0
+2024-08-25 05:38:44,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=37594.666666666664, ans=0.125
+2024-08-25 05:38:44,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=37594.666666666664, ans=0.04949747468305833
+2024-08-25 05:38:56,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=37648.0, ans=0.95
+2024-08-25 05:39:04,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=37701.333333333336, ans=0.05
+2024-08-25 05:39:13,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=37701.333333333336, ans=0.05
+2024-08-25 05:39:40,810 INFO [train.py:1114] (0/4) Epoch 3, batch 2100, loss[loss=0.3207, simple_loss=0.3416, pruned_loss=0.1085, ctc_loss=0.2071, over 19755.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.3527, pruned_loss=0.115, ctc_loss=0.2158, over 3858773.03 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:39:43,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=37754.666666666664, ans=0.125
+2024-08-25 05:40:12,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=37808.0, ans=0.2
+2024-08-25 05:40:15,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=37808.0, ans=0.025
+2024-08-25 05:40:18,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=37861.333333333336, ans=0.125
+2024-08-25 05:40:40,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=37861.333333333336, ans=0.04949747468305833
+2024-08-25 05:40:53,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=37914.666666666664, ans=0.125
+2024-08-25 05:40:58,553 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.072e+02 2.352e+02 2.718e+02 4.903e+02, threshold=4.703e+02, percent-clipped=0.0
+2024-08-25 05:41:02,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=37968.0, ans=0.2
+2024-08-25 05:41:10,087 INFO [train.py:1114] (0/4) Epoch 3, batch 2150, loss[loss=0.3094, simple_loss=0.3313, pruned_loss=0.104, ctc_loss=0.199, over 19588.00 frames. ], tot_loss[loss=0.3329, simple_loss=0.3514, pruned_loss=0.1143, ctc_loss=0.2143, over 3869168.56 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 05:41:45,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=38128.0, ans=0.0
+2024-08-25 05:41:51,252 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.71 vs. limit=22.5
+2024-08-25 05:41:54,671 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.14 vs. limit=15.0
+2024-08-25 05:42:42,872 INFO [train.py:1114] (0/4) Epoch 3, batch 2200, loss[loss=0.363, simple_loss=0.3704, pruned_loss=0.1294, ctc_loss=0.2416, over 19591.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3511, pruned_loss=0.1142, ctc_loss=0.214, over 3867395.74 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:42:52,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=38288.0, ans=0.025
+2024-08-25 05:42:52,721 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.26 vs. limit=15.0
+2024-08-25 05:42:59,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=38341.333333333336, ans=0.125
+2024-08-25 05:43:05,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=38341.333333333336, ans=0.07
+2024-08-25 05:43:15,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=38394.666666666664, ans=0.2
+2024-08-25 05:43:34,315 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.197e+02 2.629e+02 2.994e+02 6.107e+02, threshold=5.259e+02, percent-clipped=1.0
+2024-08-25 05:43:50,322 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=38554.666666666664, ans=0.2
+2024-08-25 05:43:51,063 INFO [train.py:1114] (0/4) Epoch 3, batch 2250, loss[loss=0.3456, simple_loss=0.3663, pruned_loss=0.1184, ctc_loss=0.2203, over 19631.00 frames. ], tot_loss[loss=0.3327, simple_loss=0.3513, pruned_loss=0.1143, ctc_loss=0.2136, over 3866860.08 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:44:29,398 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.33 vs. limit=15.0
+2024-08-25 05:44:33,121 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.90 vs. limit=15.0
+2024-08-25 05:44:44,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=38661.333333333336, ans=0.2
+2024-08-25 05:44:53,461 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.62 vs. limit=6.0
+2024-08-25 05:45:15,056 INFO [train.py:1114] (0/4) Epoch 3, batch 2300, loss[loss=0.2673, simple_loss=0.304, pruned_loss=0.08335, ctc_loss=0.1596, over 19515.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3507, pruned_loss=0.1145, ctc_loss=0.2141, over 3860470.99 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:45:22,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=38821.333333333336, ans=0.5
+2024-08-25 05:45:26,884 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.88 vs. limit=15.0
+2024-08-25 05:46:11,640 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.07 vs. limit=15.0
+2024-08-25 05:46:11,678 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.54 vs. limit=15.0
+2024-08-25 05:46:22,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=38981.333333333336, ans=0.07
+2024-08-25 05:46:22,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.whiten.whitening_limit, batch_count=38981.333333333336, ans=12.0
+2024-08-25 05:47:15,674 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.233e+02 2.542e+02 3.133e+02 7.552e+02, threshold=5.083e+02, percent-clipped=3.0
+2024-08-25 05:47:19,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=39034.666666666664, ans=0.1
+2024-08-25 05:47:27,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=39088.0, ans=0.125
+2024-08-25 05:47:27,937 INFO [train.py:1114] (0/4) Epoch 3, batch 2350, loss[loss=0.3395, simple_loss=0.3531, pruned_loss=0.1193, ctc_loss=0.2183, over 19684.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3501, pruned_loss=0.1139, ctc_loss=0.213, over 3863948.62 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:47:30,279 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:47:30,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=39088.0, ans=0.125
+2024-08-25 05:47:31,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=39088.0, ans=0.05
+2024-08-25 05:47:36,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=39088.0, ans=15.0
+2024-08-25 05:47:40,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=39141.333333333336, ans=0.125
+2024-08-25 05:47:55,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=39194.666666666664, ans=0.09899494936611666
+2024-08-25 05:48:24,945 INFO [train.py:1114] (0/4) Epoch 3, batch 2400, loss[loss=0.3639, simple_loss=0.3774, pruned_loss=0.127, ctc_loss=0.2412, over 19526.00 frames. ], tot_loss[loss=0.3352, simple_loss=0.353, pruned_loss=0.1155, ctc_loss=0.2157, over 3858826.04 frames. ], batch size: 67, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 05:48:26,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=39354.666666666664, ans=0.125
+2024-08-25 05:48:42,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=39408.0, ans=0.125
+2024-08-25 05:49:03,897 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.03 vs. limit=6.0
+2024-08-25 05:49:04,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=39514.666666666664, ans=0.125
+2024-08-25 05:49:09,959 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.31 vs. limit=15.0
+2024-08-25 05:49:10,299 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.241e+02 2.672e+02 3.161e+02 5.607e+02, threshold=5.344e+02, percent-clipped=4.0
+2024-08-25 05:49:26,460 INFO [train.py:1114] (0/4) Epoch 3, batch 2450, loss[loss=0.4103, simple_loss=0.3838, pruned_loss=0.1599, ctc_loss=0.2929, over 13134.00 frames. ], tot_loss[loss=0.3433, simple_loss=0.3577, pruned_loss=0.1197, ctc_loss=0.224, over 3733231.49 frames. ], batch size: 141, lr: 3.53e-02, grad_scale: 32.0
+2024-08-25 05:49:30,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=39621.333333333336, ans=0.2
+2024-08-25 05:49:30,127 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.31 vs. limit=15.0
+2024-08-25 05:49:40,614 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.24 vs. limit=22.5
+2024-08-25 05:49:48,833 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.88 vs. limit=22.5
+2024-08-25 05:49:57,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.48 vs. limit=15.0
+2024-08-25 05:50:05,714 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.16 vs. limit=15.0
+2024-08-25 05:50:10,757 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-3.pt
+2024-08-25 05:51:05,709 INFO [train.py:1114] (0/4) Epoch 4, batch 0, loss[loss=0.3345, simple_loss=0.3462, pruned_loss=0.1166, ctc_loss=0.2243, over 19412.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.3462, pruned_loss=0.1166, ctc_loss=0.2243, over 19412.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:51:05,710 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 05:51:35,403 INFO [train.py:1146] (0/4) Epoch 4, validation: loss=0.2629, simple_loss=0.3337, pruned_loss=0.07032, ctc_loss=0.1284, over 944034.00 frames.
+2024-08-25 05:51:35,404 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 05:51:48,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=39882.666666666664, ans=0.125
+2024-08-25 05:52:06,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=39936.0, ans=0.125
+2024-08-25 05:52:12,135 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.40 vs. limit=15.0
+2024-08-25 05:52:17,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=39989.333333333336, ans=0.1
+2024-08-25 05:52:41,492 INFO [train.py:1114] (0/4) Epoch 4, batch 50, loss[loss=0.2894, simple_loss=0.3173, pruned_loss=0.09519, ctc_loss=0.1776, over 19690.00 frames. ], tot_loss[loss=0.3432, simple_loss=0.3575, pruned_loss=0.1194, ctc_loss=0.2252, over 844645.13 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:52:47,062 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.147e+02 2.483e+02 2.920e+02 4.932e+02, threshold=4.967e+02, percent-clipped=0.0
+2024-08-25 05:52:56,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=40096.0, ans=0.125
+2024-08-25 05:53:06,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=40149.333333333336, ans=0.125
+2024-08-25 05:53:09,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.74 vs. limit=15.0
+2024-08-25 05:53:34,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=40202.666666666664, ans=0.0
+2024-08-25 05:53:56,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40309.333333333336, ans=0.1
+2024-08-25 05:54:02,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=40309.333333333336, ans=0.125
+2024-08-25 05:54:03,008 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.02 vs. limit=10.0
+2024-08-25 05:54:08,134 INFO [train.py:1114] (0/4) Epoch 4, batch 100, loss[loss=0.3455, simple_loss=0.3609, pruned_loss=0.1196, ctc_loss=0.2277, over 19708.00 frames. ], tot_loss[loss=0.3364, simple_loss=0.355, pruned_loss=0.1154, ctc_loss=0.2178, over 1498983.17 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-25 05:54:31,237 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.78 vs. limit=6.0
+2024-08-25 05:54:35,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=40416.0, ans=0.0020834782608695653
+2024-08-25 05:55:03,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=40469.333333333336, ans=0.1
+2024-08-25 05:55:40,420 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.30 vs. limit=12.0
+2024-08-25 05:55:55,480 INFO [train.py:1114] (0/4) Epoch 4, batch 150, loss[loss=0.292, simple_loss=0.3145, pruned_loss=0.09688, ctc_loss=0.1895, over 19733.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.35, pruned_loss=0.1119, ctc_loss=0.2117, over 2028351.17 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:55:56,487 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.033e+02 2.286e+02 2.661e+02 4.118e+02, threshold=4.571e+02, percent-clipped=0.0
+2024-08-25 05:56:09,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=40682.666666666664, ans=0.125
+2024-08-25 05:56:13,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=40682.666666666664, ans=0.125
+2024-08-25 05:56:21,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=40736.0, ans=0.07
+2024-08-25 05:56:24,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=40736.0, ans=0.125
+2024-08-25 05:56:42,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.94 vs. limit=15.0
+2024-08-25 05:57:00,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=40842.666666666664, ans=0.125
+2024-08-25 05:57:04,756 INFO [train.py:1114] (0/4) Epoch 4, batch 200, loss[loss=0.3678, simple_loss=0.3749, pruned_loss=0.1304, ctc_loss=0.2499, over 18390.00 frames. ], tot_loss[loss=0.3265, simple_loss=0.348, pruned_loss=0.1107, ctc_loss=0.2091, over 2436007.39 frames. ], batch size: 85, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:57:19,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=40896.0, ans=0.125
+2024-08-25 05:57:38,399 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:57:45,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=41002.666666666664, ans=0.125
+2024-08-25 05:57:52,025 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.80 vs. limit=15.0
+2024-08-25 05:58:08,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=41056.0, ans=0.125
+2024-08-25 05:58:28,524 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.02 vs. limit=15.0
+2024-08-25 05:58:44,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=41109.333333333336, ans=0.1
+2024-08-25 05:59:03,059 INFO [train.py:1114] (0/4) Epoch 4, batch 250, loss[loss=0.3759, simple_loss=0.3858, pruned_loss=0.1349, ctc_loss=0.2405, over 19399.00 frames. ], tot_loss[loss=0.326, simple_loss=0.3478, pruned_loss=0.1105, ctc_loss=0.2081, over 2755963.59 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 05:59:04,092 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.098e+02 2.387e+02 2.939e+02 4.251e+02, threshold=4.774e+02, percent-clipped=0.0
+2024-08-25 05:59:11,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=41162.666666666664, ans=0.125
+2024-08-25 05:59:31,470 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.05 vs. limit=15.0
+2024-08-25 05:59:59,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=41322.666666666664, ans=0.125
+2024-08-25 06:00:14,175 INFO [train.py:1114] (0/4) Epoch 4, batch 300, loss[loss=0.37, simple_loss=0.3778, pruned_loss=0.1319, ctc_loss=0.2461, over 19505.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3474, pruned_loss=0.1103, ctc_loss=0.2077, over 3001044.60 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 06:00:29,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=41482.666666666664, ans=0.125
+2024-08-25 06:00:39,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=41536.0, ans=0.125
+2024-08-25 06:00:53,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=41536.0, ans=0.1
+2024-08-25 06:00:55,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-25 06:01:02,045 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.21 vs. limit=10.0
+2024-08-25 06:01:05,318 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.78 vs. limit=22.5
+2024-08-25 06:01:16,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=41642.666666666664, ans=0.0018168115942028985
+2024-08-25 06:01:20,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=41642.666666666664, ans=0.125
+2024-08-25 06:01:21,995 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=29.05 vs. limit=22.5
+2024-08-25 06:01:24,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=41642.666666666664, ans=0.07
+2024-08-25 06:01:36,629 INFO [train.py:1114] (0/4) Epoch 4, batch 350, loss[loss=0.2835, simple_loss=0.3148, pruned_loss=0.09125, ctc_loss=0.1743, over 19766.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3476, pruned_loss=0.1102, ctc_loss=0.2075, over 3191455.99 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:01:37,794 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.143e+02 2.517e+02 2.887e+02 6.595e+02, threshold=5.034e+02, percent-clipped=1.0
+2024-08-25 06:01:46,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=41696.0, ans=0.0
+2024-08-25 06:02:38,784 INFO [train.py:1114] (0/4) Epoch 4, batch 400, loss[loss=0.329, simple_loss=0.3516, pruned_loss=0.113, ctc_loss=0.2008, over 19502.00 frames. ], tot_loss[loss=0.3249, simple_loss=0.347, pruned_loss=0.11, ctc_loss=0.2068, over 3343576.12 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:02:42,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=41962.666666666664, ans=0.125
+2024-08-25 06:03:04,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=42016.0, ans=0.0
+2024-08-25 06:03:06,690 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.09 vs. limit=15.0
+2024-08-25 06:03:25,891 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.92 vs. limit=15.0
+2024-08-25 06:03:26,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=42122.666666666664, ans=0.125
+2024-08-25 06:04:04,050 INFO [train.py:1114] (0/4) Epoch 4, batch 450, loss[loss=0.3205, simple_loss=0.3538, pruned_loss=0.1041, ctc_loss=0.1975, over 19616.00 frames. ], tot_loss[loss=0.324, simple_loss=0.3465, pruned_loss=0.1096, ctc_loss=0.2056, over 3452205.27 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:04:06,526 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.107e+02 2.479e+02 2.897e+02 5.564e+02, threshold=4.958e+02, percent-clipped=2.0
+2024-08-25 06:04:26,312 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.63 vs. limit=15.0
+2024-08-25 06:04:34,938 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:04:34,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=42336.0, ans=0.125
+2024-08-25 06:04:56,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=42389.333333333336, ans=0.0
+2024-08-25 06:05:10,923 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.01 vs. limit=6.0
+2024-08-25 06:05:29,497 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.02 vs. limit=22.5
+2024-08-25 06:05:32,257 INFO [train.py:1114] (0/4) Epoch 4, batch 500, loss[loss=0.2587, simple_loss=0.3151, pruned_loss=0.0738, ctc_loss=0.1365, over 19707.00 frames. ], tot_loss[loss=0.3221, simple_loss=0.345, pruned_loss=0.1088, ctc_loss=0.204, over 3547717.56 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:05:33,858 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.86 vs. limit=15.0
+2024-08-25 06:06:03,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=42602.666666666664, ans=0.1
+2024-08-25 06:06:08,757 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-8000.pt
+2024-08-25 06:06:34,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=42709.333333333336, ans=0.125
+2024-08-25 06:06:41,087 INFO [train.py:1114] (0/4) Epoch 4, batch 550, loss[loss=0.3416, simple_loss=0.3581, pruned_loss=0.1181, ctc_loss=0.2225, over 19276.00 frames. ], tot_loss[loss=0.3225, simple_loss=0.3454, pruned_loss=0.109, ctc_loss=0.2042, over 3609344.55 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:06:44,788 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.027e+02 2.416e+02 2.881e+02 5.051e+02, threshold=4.833e+02, percent-clipped=1.0
+2024-08-25 06:06:45,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten.whitening_limit, batch_count=42762.666666666664, ans=15.0
+2024-08-25 06:06:57,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=42816.0, ans=0.125
+2024-08-25 06:07:15,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=42869.333333333336, ans=0.0
+2024-08-25 06:07:20,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=42922.666666666664, ans=0.125
+2024-08-25 06:07:49,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=43029.333333333336, ans=0.1
+2024-08-25 06:07:50,622 INFO [train.py:1114] (0/4) Epoch 4, batch 600, loss[loss=0.3331, simple_loss=0.3518, pruned_loss=0.1154, ctc_loss=0.2091, over 19390.00 frames. ], tot_loss[loss=0.3206, simple_loss=0.3444, pruned_loss=0.1079, ctc_loss=0.2024, over 3666017.55 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:07:55,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=43029.333333333336, ans=0.025
+2024-08-25 06:08:27,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=43189.333333333336, ans=0.1
+2024-08-25 06:08:29,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=43189.333333333336, ans=0.1
+2024-08-25 06:08:34,775 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.00 vs. limit=15.0
+2024-08-25 06:08:39,501 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=10.64 vs. limit=15.0
+2024-08-25 06:08:58,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=43242.666666666664, ans=0.125
+2024-08-25 06:09:00,685 INFO [train.py:1114] (0/4) Epoch 4, batch 650, loss[loss=0.287, simple_loss=0.3291, pruned_loss=0.08963, ctc_loss=0.1643, over 19764.00 frames. ], tot_loss[loss=0.3183, simple_loss=0.3425, pruned_loss=0.107, ctc_loss=0.2006, over 3716027.46 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 16.0
+2024-08-25 06:09:15,855 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.140e+02 2.544e+02 3.023e+02 7.017e+02, threshold=5.088e+02, percent-clipped=9.0
+2024-08-25 06:09:32,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=43349.333333333336, ans=0.125
+2024-08-25 06:09:48,397 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.79 vs. limit=15.0
+2024-08-25 06:09:59,311 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=43456.0, ans=0.0
+2024-08-25 06:10:08,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=43509.333333333336, ans=0.2
+2024-08-25 06:10:13,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.whiten.whitening_limit, batch_count=43509.333333333336, ans=15.0
+2024-08-25 06:10:18,930 INFO [train.py:1114] (0/4) Epoch 4, batch 700, loss[loss=0.2821, simple_loss=0.3135, pruned_loss=0.09129, ctc_loss=0.1703, over 19712.00 frames. ], tot_loss[loss=0.3186, simple_loss=0.3431, pruned_loss=0.1069, ctc_loss=0.2005, over 3748433.07 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:10:25,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=43562.666666666664, ans=0.2
+2024-08-25 06:10:27,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=43562.666666666664, ans=0.125
+2024-08-25 06:10:50,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=43669.333333333336, ans=0.125
+2024-08-25 06:10:53,028 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.75 vs. limit=22.5
+2024-08-25 06:11:09,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=43776.0, ans=0.00135304347826087
+2024-08-25 06:11:14,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=43776.0, ans=0.00135304347826087
+2024-08-25 06:11:22,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=43829.333333333336, ans=0.125
+2024-08-25 06:11:23,852 INFO [train.py:1114] (0/4) Epoch 4, batch 750, loss[loss=0.3647, simple_loss=0.3833, pruned_loss=0.1258, ctc_loss=0.236, over 19497.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3426, pruned_loss=0.1062, ctc_loss=0.1993, over 3775360.00 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:11:28,686 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.141e+02 2.481e+02 2.931e+02 4.472e+02, threshold=4.962e+02, percent-clipped=0.0
+2024-08-25 06:11:36,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=43829.333333333336, ans=10.0
+2024-08-25 06:11:46,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=43882.666666666664, ans=0.125
+2024-08-25 06:11:47,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=43882.666666666664, ans=0.025
+2024-08-25 06:11:47,670 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.85 vs. limit=6.0
+2024-08-25 06:11:53,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=43936.0, ans=0.0
+2024-08-25 06:12:00,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=43936.0, ans=0.125
+2024-08-25 06:12:21,962 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.75 vs. limit=15.0
+2024-08-25 06:12:22,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=44042.666666666664, ans=0.5
+2024-08-25 06:12:24,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=44042.666666666664, ans=0.125
+2024-08-25 06:12:29,307 INFO [train.py:1114] (0/4) Epoch 4, batch 800, loss[loss=0.3216, simple_loss=0.3356, pruned_loss=0.1138, ctc_loss=0.2004, over 19439.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3424, pruned_loss=0.106, ctc_loss=0.1988, over 3795552.55 frames. ], batch size: 48, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:12:57,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.21 vs. limit=10.0
+2024-08-25 06:13:02,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=44202.666666666664, ans=0.1
+2024-08-25 06:13:13,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=44256.0, ans=0.0
+2024-08-25 06:13:28,041 INFO [train.py:1114] (0/4) Epoch 4, batch 850, loss[loss=0.3406, simple_loss=0.3707, pruned_loss=0.1135, ctc_loss=0.2089, over 19643.00 frames. ], tot_loss[loss=0.3166, simple_loss=0.342, pruned_loss=0.1059, ctc_loss=0.1983, over 3815313.62 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:13:29,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_na.min_abs, batch_count=44362.666666666664, ans=0.02
+2024-08-25 06:13:31,253 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.074e+02 2.402e+02 2.888e+02 5.555e+02, threshold=4.804e+02, percent-clipped=1.0
+2024-08-25 06:13:33,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=44362.666666666664, ans=0.09899494936611666
+2024-08-25 06:13:38,529 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=44416.0, ans=0.125
+2024-08-25 06:13:54,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=44469.333333333336, ans=0.125
+2024-08-25 06:14:32,268 INFO [train.py:1114] (0/4) Epoch 4, batch 900, loss[loss=0.3076, simple_loss=0.3247, pruned_loss=0.1042, ctc_loss=0.2056, over 19384.00 frames. ], tot_loss[loss=0.3184, simple_loss=0.3428, pruned_loss=0.1069, ctc_loss=0.2002, over 3818849.62 frames. ], batch size: 48, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:14:38,488 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:14:38,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=44629.333333333336, ans=0.0
+2024-08-25 06:14:49,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=44682.666666666664, ans=0.0011559420289855085
+2024-08-25 06:14:53,589 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.93 vs. limit=15.0
+2024-08-25 06:15:06,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=44736.0, ans=0.1
+2024-08-25 06:15:07,658 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.67 vs. limit=15.0
+2024-08-25 06:15:08,888 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.77 vs. limit=15.0
+2024-08-25 06:15:20,866 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.90 vs. limit=10.0
+2024-08-25 06:15:38,567 INFO [train.py:1114] (0/4) Epoch 4, batch 950, loss[loss=0.2611, simple_loss=0.3014, pruned_loss=0.07938, ctc_loss=0.1554, over 19495.00 frames. ], tot_loss[loss=0.3197, simple_loss=0.3434, pruned_loss=0.1076, ctc_loss=0.2018, over 3820486.04 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:15:42,144 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.101e+02 2.364e+02 2.735e+02 6.196e+02, threshold=4.728e+02, percent-clipped=2.0
+2024-08-25 06:16:06,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=45002.666666666664, ans=0.125
+2024-08-25 06:16:14,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=45002.666666666664, ans=0.125
+2024-08-25 06:16:42,343 INFO [train.py:1114] (0/4) Epoch 4, batch 1000, loss[loss=0.2972, simple_loss=0.3235, pruned_loss=0.09899, ctc_loss=0.1822, over 19848.00 frames. ], tot_loss[loss=0.32, simple_loss=0.3438, pruned_loss=0.1077, ctc_loss=0.2019, over 3816355.45 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 32.0
+2024-08-25 06:16:48,685 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.10 vs. limit=6.0
+2024-08-25 06:17:06,818 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_na.min_abs, batch_count=45216.0, ans=0.02
+2024-08-25 06:17:13,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45216.0, ans=0.1
+2024-08-25 06:17:31,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=45269.333333333336, ans=0.125
+2024-08-25 06:18:04,339 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.63 vs. limit=15.0
+2024-08-25 06:18:10,601 INFO [train.py:1114] (0/4) Epoch 4, batch 1050, loss[loss=0.3683, simple_loss=0.3843, pruned_loss=0.1294, ctc_loss=0.2343, over 19838.00 frames. ], tot_loss[loss=0.3174, simple_loss=0.3423, pruned_loss=0.1064, ctc_loss=0.1993, over 3823183.60 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-25 06:18:23,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=45429.333333333336, ans=0.025
+2024-08-25 06:18:24,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=45429.333333333336, ans=0.125
+2024-08-25 06:18:26,180 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.982e+02 2.200e+02 2.634e+02 5.388e+02, threshold=4.401e+02, percent-clipped=1.0
+2024-08-25 06:18:35,091 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.46 vs. limit=15.0
+2024-08-25 06:18:36,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=45482.666666666664, ans=0.0009820289855072464
+2024-08-25 06:18:46,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=45536.0, ans=0.125
+2024-08-25 06:19:01,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=45589.333333333336, ans=0.125
+2024-08-25 06:19:10,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=45589.333333333336, ans=0.0009588405797101435
+2024-08-25 06:19:18,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=45589.333333333336, ans=0.125
+2024-08-25 06:19:26,167 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.04 vs. limit=22.5
+2024-08-25 06:19:36,328 INFO [train.py:1114] (0/4) Epoch 4, batch 1100, loss[loss=0.2892, simple_loss=0.3259, pruned_loss=0.09189, ctc_loss=0.1717, over 19598.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3418, pruned_loss=0.106, ctc_loss=0.1986, over 3830731.93 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:19:55,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=45749.333333333336, ans=0.1
+2024-08-25 06:20:12,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=45802.666666666664, ans=0.125
+2024-08-25 06:20:52,238 INFO [train.py:1114] (0/4) Epoch 4, batch 1150, loss[loss=0.3182, simple_loss=0.3348, pruned_loss=0.1096, ctc_loss=0.2056, over 19591.00 frames. ], tot_loss[loss=0.3176, simple_loss=0.342, pruned_loss=0.1066, ctc_loss=0.1999, over 3829434.88 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:20:57,030 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.122e+02 2.390e+02 2.706e+02 4.199e+02, threshold=4.779e+02, percent-clipped=0.0
+2024-08-25 06:21:04,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=45962.666666666664, ans=0.0008776811594202894
+2024-08-25 06:21:15,262 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=5.630e-03
+2024-08-25 06:21:28,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=46069.333333333336, ans=0.025
+2024-08-25 06:21:28,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=46069.333333333336, ans=0.1
+2024-08-25 06:21:35,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=46122.666666666664, ans=0.05
+2024-08-25 06:22:00,000 INFO [train.py:1114] (0/4) Epoch 4, batch 1200, loss[loss=0.3466, simple_loss=0.3591, pruned_loss=0.1217, ctc_loss=0.2269, over 19839.00 frames. ], tot_loss[loss=0.3188, simple_loss=0.3429, pruned_loss=0.1071, ctc_loss=0.201, over 3825559.34 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:22:07,419 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=2.539e-03
+2024-08-25 06:22:14,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=46229.333333333336, ans=0.125
+2024-08-25 06:22:22,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=46282.666666666664, ans=0.125
+2024-08-25 06:22:44,719 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=46389.333333333336, ans=0.000784927536231883
+2024-08-25 06:23:10,731 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=8.78 vs. limit=12.0
+2024-08-25 06:23:19,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=46442.666666666664, ans=0.0
+2024-08-25 06:23:21,337 INFO [train.py:1114] (0/4) Epoch 4, batch 1250, loss[loss=0.3314, simple_loss=0.3564, pruned_loss=0.1129, ctc_loss=0.2013, over 19526.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.343, pruned_loss=0.1067, ctc_loss=0.2, over 3843759.16 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:23:26,216 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.962e+02 2.225e+02 2.468e+02 3.508e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 06:23:40,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=46549.333333333336, ans=0.125
+2024-08-25 06:23:48,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=46549.333333333336, ans=0.125
+2024-08-25 06:23:55,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=46549.333333333336, ans=0.2
+2024-08-25 06:24:04,031 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.10 vs. limit=15.0
+2024-08-25 06:24:24,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=46656.0, ans=0.125
+2024-08-25 06:24:25,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=46656.0, ans=0.025
+2024-08-25 06:24:34,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=46709.333333333336, ans=0.0
+2024-08-25 06:24:37,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=46709.333333333336, ans=0.125
+2024-08-25 06:24:48,933 INFO [train.py:1114] (0/4) Epoch 4, batch 1300, loss[loss=0.366, simple_loss=0.3856, pruned_loss=0.1264, ctc_loss=0.234, over 19010.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3421, pruned_loss=0.1063, ctc_loss=0.1992, over 3847381.00 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:24:52,821 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.44 vs. limit=12.0
+2024-08-25 06:25:03,324 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.10 vs. limit=10.0
+2024-08-25 06:25:11,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=46869.333333333336, ans=0.125
+2024-08-25 06:25:22,955 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=25.95 vs. limit=22.5
+2024-08-25 06:25:28,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=46922.666666666664, ans=0.2
+2024-08-25 06:25:52,879 INFO [train.py:1114] (0/4) Epoch 4, batch 1350, loss[loss=0.3269, simple_loss=0.3471, pruned_loss=0.1117, ctc_loss=0.208, over 19764.00 frames. ], tot_loss[loss=0.3157, simple_loss=0.3413, pruned_loss=0.1055, ctc_loss=0.1975, over 3856211.67 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:25:55,407 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:26:07,748 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.269e+02 2.560e+02 3.229e+02 4.886e+02, threshold=5.120e+02, percent-clipped=5.0
+2024-08-25 06:26:28,377 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=47029.333333333336, ans=0.125
+2024-08-25 06:26:41,530 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.40 vs. limit=15.0
+2024-08-25 06:27:04,163 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=47189.333333333336, ans=0.0006110144927536226
+2024-08-25 06:27:16,677 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.50 vs. limit=12.0
+2024-08-25 06:27:20,738 INFO [train.py:1114] (0/4) Epoch 4, batch 1400, loss[loss=0.2757, simple_loss=0.3057, pruned_loss=0.08884, ctc_loss=0.1704, over 19662.00 frames. ], tot_loss[loss=0.3154, simple_loss=0.3411, pruned_loss=0.1053, ctc_loss=0.1977, over 3862713.70 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:27:50,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.06 vs. limit=5.0
+2024-08-25 06:28:13,325 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=47456.0, ans=0.125
+2024-08-25 06:28:19,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=47456.0, ans=0.125
+2024-08-25 06:28:25,607 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.57 vs. limit=15.0
+2024-08-25 06:28:32,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=47509.333333333336, ans=0.0
+2024-08-25 06:28:39,597 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.21 vs. limit=22.5
+2024-08-25 06:28:43,704 INFO [train.py:1114] (0/4) Epoch 4, batch 1450, loss[loss=0.318, simple_loss=0.3457, pruned_loss=0.1061, ctc_loss=0.1954, over 19662.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3423, pruned_loss=0.106, ctc_loss=0.1991, over 3860652.50 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:28:48,586 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.026e+02 2.327e+02 2.659e+02 4.329e+02, threshold=4.654e+02, percent-clipped=0.0
+2024-08-25 06:29:44,346 INFO [train.py:1114] (0/4) Epoch 4, batch 1500, loss[loss=0.3245, simple_loss=0.3583, pruned_loss=0.1055, ctc_loss=0.1991, over 19581.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3425, pruned_loss=0.1057, ctc_loss=0.1987, over 3859580.36 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:30:11,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.18 vs. limit=6.0
+2024-08-25 06:30:26,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=47989.333333333336, ans=0.0
+2024-08-25 06:30:29,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=47989.333333333336, ans=0.125
+2024-08-25 06:30:38,473 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.61 vs. limit=15.0
+2024-08-25 06:31:38,007 INFO [train.py:1114] (0/4) Epoch 4, batch 1550, loss[loss=0.3502, simple_loss=0.3633, pruned_loss=0.1258, ctc_loss=0.2139, over 19593.00 frames. ], tot_loss[loss=0.3171, simple_loss=0.3425, pruned_loss=0.106, ctc_loss=0.199, over 3846078.25 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:31:49,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=48096.0, ans=15.0
+2024-08-25 06:31:49,984 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.013e+02 2.262e+02 2.770e+02 1.090e+03, threshold=4.525e+02, percent-clipped=1.0
+2024-08-25 06:32:09,347 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.66 vs. limit=15.0
+2024-08-25 06:32:11,338 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=48149.333333333336, ans=0.125
+2024-08-25 06:32:20,106 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.58 vs. limit=15.0
+2024-08-25 06:32:22,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=48202.666666666664, ans=0.125
+2024-08-25 06:32:31,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=48256.0, ans=0.125
+2024-08-25 06:32:39,436 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=19.07 vs. limit=15.0
+2024-08-25 06:32:44,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=48309.333333333336, ans=0.0
+2024-08-25 06:33:08,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=48309.333333333336, ans=0.025
+2024-08-25 06:33:09,317 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48309.333333333336, ans=0.1
+2024-08-25 06:33:18,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=48309.333333333336, ans=0.0
+2024-08-25 06:33:26,250 INFO [train.py:1114] (0/4) Epoch 4, batch 1600, loss[loss=0.3325, simple_loss=0.3598, pruned_loss=0.1109, ctc_loss=0.209, over 19838.00 frames. ], tot_loss[loss=0.3164, simple_loss=0.342, pruned_loss=0.1057, ctc_loss=0.1985, over 3836386.44 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:33:29,971 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=48362.666666666664, ans=0.2
+2024-08-25 06:33:36,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=48362.666666666664, ans=0.95
+2024-08-25 06:33:36,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=48362.666666666664, ans=0.125
+2024-08-25 06:33:50,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=48416.0, ans=0.125
+2024-08-25 06:34:05,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=48469.333333333336, ans=0.125
+2024-08-25 06:35:15,016 INFO [train.py:1114] (0/4) Epoch 4, batch 1650, loss[loss=0.3374, simple_loss=0.3616, pruned_loss=0.1122, ctc_loss=0.2221, over 19659.00 frames. ], tot_loss[loss=0.316, simple_loss=0.3416, pruned_loss=0.1055, ctc_loss=0.198, over 3833121.08 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:35:21,186 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.079e+02 2.506e+02 2.996e+02 5.422e+02, threshold=5.011e+02, percent-clipped=2.0
+2024-08-25 06:36:11,937 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_na.min_abs, batch_count=48736.0, ans=0.02
+2024-08-25 06:36:37,433 INFO [train.py:1114] (0/4) Epoch 4, batch 1700, loss[loss=0.272, simple_loss=0.3037, pruned_loss=0.08725, ctc_loss=0.1646, over 19658.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.3411, pruned_loss=0.1051, ctc_loss=0.1974, over 3847142.73 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-25 06:36:42,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=48896.0, ans=0.125
+2024-08-25 06:36:43,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=48896.0, ans=0.125
+2024-08-25 06:37:26,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=49002.666666666664, ans=0.1
+2024-08-25 06:38:07,133 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.10 vs. limit=22.5
+2024-08-25 06:38:12,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=49109.333333333336, ans=0.0
+2024-08-25 06:38:13,367 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.86 vs. limit=10.0
+2024-08-25 06:38:25,915 INFO [train.py:1114] (0/4) Epoch 4, batch 1750, loss[loss=0.2993, simple_loss=0.3182, pruned_loss=0.1016, ctc_loss=0.1931, over 19657.00 frames. ], tot_loss[loss=0.3135, simple_loss=0.3401, pruned_loss=0.1043, ctc_loss=0.196, over 3852663.18 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:38:33,085 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 1.987e+02 2.278e+02 2.713e+02 5.908e+02, threshold=4.555e+02, percent-clipped=1.0
+2024-08-25 06:38:41,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=49216.0, ans=0.0
+2024-08-25 06:38:50,098 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.66 vs. limit=15.0
+2024-08-25 06:39:13,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=49322.666666666664, ans=0.125
+2024-08-25 06:39:31,718 INFO [train.py:1114] (0/4) Epoch 4, batch 1800, loss[loss=0.3016, simple_loss=0.341, pruned_loss=0.09538, ctc_loss=0.1787, over 19615.00 frames. ], tot_loss[loss=0.3156, simple_loss=0.3411, pruned_loss=0.1055, ctc_loss=0.1979, over 3852526.50 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:39:45,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=49482.666666666664, ans=0.1
+2024-08-25 06:40:21,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=49536.0, ans=0.00010086956521739195
+2024-08-25 06:40:26,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=49536.0, ans=0.0
+2024-08-25 06:40:54,709 INFO [train.py:1114] (0/4) Epoch 4, batch 1850, loss[loss=0.3068, simple_loss=0.344, pruned_loss=0.09848, ctc_loss=0.1817, over 19599.00 frames. ], tot_loss[loss=0.3146, simple_loss=0.3405, pruned_loss=0.1049, ctc_loss=0.197, over 3856120.50 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:41:01,659 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.149e+02 2.307e+02 2.574e+02 4.619e+02, threshold=4.614e+02, percent-clipped=1.0
+2024-08-25 06:41:03,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=49696.0, ans=0.035
+2024-08-25 06:41:20,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=49802.666666666664, ans=0.1
+2024-08-25 06:41:23,567 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=49802.666666666664, ans=0.0
+2024-08-25 06:41:48,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=49909.333333333336, ans=0.125
+2024-08-25 06:41:49,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=49909.333333333336, ans=0.125
+2024-08-25 06:41:50,824 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.84 vs. limit=15.0
+2024-08-25 06:41:59,173 INFO [train.py:1114] (0/4) Epoch 4, batch 1900, loss[loss=0.2977, simple_loss=0.3454, pruned_loss=0.09086, ctc_loss=0.1706, over 19647.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3411, pruned_loss=0.105, ctc_loss=0.197, over 3860972.37 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:42:47,444 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:42:57,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=50069.333333333336, ans=0.125
+2024-08-25 06:43:18,269 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.77 vs. limit=15.0
+2024-08-25 06:43:21,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=50176.0, ans=0.07
+2024-08-25 06:43:38,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=50229.333333333336, ans=0.125
+2024-08-25 06:43:39,886 INFO [train.py:1114] (0/4) Epoch 4, batch 1950, loss[loss=0.313, simple_loss=0.3389, pruned_loss=0.1047, ctc_loss=0.1945, over 19588.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3417, pruned_loss=0.1049, ctc_loss=0.1965, over 3869830.13 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:43:45,592 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.065e+02 2.259e+02 2.635e+02 4.732e+02, threshold=4.517e+02, percent-clipped=1.0
+2024-08-25 06:44:02,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=50336.0, ans=0.035
+2024-08-25 06:44:24,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=50442.666666666664, ans=0.025
+2024-08-25 06:44:29,322 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=50442.666666666664, ans=0.125
+2024-08-25 06:44:31,103 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.96 vs. limit=6.0
+2024-08-25 06:44:41,913 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=50496.0, ans=0.0
+2024-08-25 06:44:48,825 INFO [train.py:1114] (0/4) Epoch 4, batch 2000, loss[loss=0.2959, simple_loss=0.3113, pruned_loss=0.1029, ctc_loss=0.187, over 19654.00 frames. ], tot_loss[loss=0.3163, simple_loss=0.3424, pruned_loss=0.1056, ctc_loss=0.1975, over 3855683.96 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:44:51,734 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.82 vs. limit=15.0
+2024-08-25 06:45:06,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.67 vs. limit=15.0
+2024-08-25 06:45:06,286 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.43 vs. limit=6.0
+2024-08-25 06:45:09,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=50549.333333333336, ans=0.025
+2024-08-25 06:45:18,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=50549.333333333336, ans=0.1
+2024-08-25 06:45:40,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.16 vs. limit=15.0
+2024-08-25 06:45:41,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.28 vs. limit=15.0
+2024-08-25 06:45:46,505 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.41 vs. limit=15.0
+2024-08-25 06:45:51,744 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.15 vs. limit=22.5
+2024-08-25 06:46:34,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=50762.666666666664, ans=0.125
+2024-08-25 06:46:35,029 INFO [train.py:1114] (0/4) Epoch 4, batch 2050, loss[loss=0.2866, simple_loss=0.3019, pruned_loss=0.09892, ctc_loss=0.1836, over 19719.00 frames. ], tot_loss[loss=0.3153, simple_loss=0.3409, pruned_loss=0.1054, ctc_loss=0.1972, over 3851342.85 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:46:38,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=50762.666666666664, ans=0.0
+2024-08-25 06:46:45,630 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.046e+02 2.338e+02 2.720e+02 4.537e+02, threshold=4.675e+02, percent-clipped=1.0
+2024-08-25 06:46:52,519 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.76 vs. limit=15.0
+2024-08-25 06:47:14,271 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.54 vs. limit=15.0
+2024-08-25 06:47:19,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=50922.666666666664, ans=0.0
+2024-08-25 06:47:24,966 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=50922.666666666664, ans=0.0
+2024-08-25 06:47:31,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=50976.0, ans=10.0
+2024-08-25 06:47:47,560 INFO [train.py:1114] (0/4) Epoch 4, batch 2100, loss[loss=0.314, simple_loss=0.3391, pruned_loss=0.1063, ctc_loss=0.1907, over 19770.00 frames. ], tot_loss[loss=0.3131, simple_loss=0.3397, pruned_loss=0.1042, ctc_loss=0.195, over 3857773.24 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:48:22,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=51082.666666666664, ans=0.0
+2024-08-25 06:48:22,577 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.00 vs. limit=15.0
+2024-08-25 06:48:35,456 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.17 vs. limit=22.5
+2024-08-25 06:48:44,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=51136.0, ans=0.1
+2024-08-25 06:49:45,534 INFO [train.py:1114] (0/4) Epoch 4, batch 2150, loss[loss=0.3037, simple_loss=0.331, pruned_loss=0.1009, ctc_loss=0.1866, over 19590.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3384, pruned_loss=0.1033, ctc_loss=0.1936, over 3868264.41 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:49:54,449 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.035e+02 2.305e+02 2.639e+02 4.596e+02, threshold=4.610e+02, percent-clipped=0.0
+2024-08-25 06:50:16,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=51349.333333333336, ans=0.2
+2024-08-25 06:50:45,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=51402.666666666664, ans=0.0
+2024-08-25 06:50:50,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=51456.0, ans=0.125
+2024-08-25 06:50:52,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=51456.0, ans=0.07
+2024-08-25 06:50:58,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=51456.0, ans=0.2
+2024-08-25 06:51:15,390 INFO [train.py:1114] (0/4) Epoch 4, batch 2200, loss[loss=0.2837, simple_loss=0.3281, pruned_loss=0.08671, ctc_loss=0.1644, over 19592.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3383, pruned_loss=0.1035, ctc_loss=0.1938, over 3867008.68 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:51:30,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=51616.0, ans=0.015
+2024-08-25 06:52:05,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=51722.666666666664, ans=0.1
+2024-08-25 06:52:07,869 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=51722.666666666664, ans=0.125
+2024-08-25 06:52:18,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=51776.0, ans=0.2
+2024-08-25 06:52:21,036 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.08 vs. limit=22.5
+2024-08-25 06:52:24,901 INFO [train.py:1114] (0/4) Epoch 4, batch 2250, loss[loss=0.3172, simple_loss=0.3483, pruned_loss=0.1024, ctc_loss=0.2028, over 19623.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3389, pruned_loss=0.1037, ctc_loss=0.1944, over 3867747.12 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:52:27,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=51829.333333333336, ans=0.0
+2024-08-25 06:52:31,993 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.164e+02 2.622e+02 3.263e+02 6.940e+02, threshold=5.245e+02, percent-clipped=2.0
+2024-08-25 06:52:35,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=51829.333333333336, ans=0.2
+2024-08-25 06:52:41,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=51882.666666666664, ans=0.125
+2024-08-25 06:52:48,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=51936.0, ans=0.2
+2024-08-25 06:53:04,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=51989.333333333336, ans=0.125
+2024-08-25 06:53:04,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=51989.333333333336, ans=0.07
+2024-08-25 06:53:04,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=51989.333333333336, ans=0.0
+2024-08-25 06:53:27,234 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=18.54 vs. limit=15.0
+2024-08-25 06:53:30,941 INFO [train.py:1114] (0/4) Epoch 4, batch 2300, loss[loss=0.2924, simple_loss=0.3252, pruned_loss=0.09512, ctc_loss=0.173, over 19504.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3378, pruned_loss=0.1035, ctc_loss=0.1942, over 3860995.81 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:53:55,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=52149.333333333336, ans=0.0
+2024-08-25 06:53:57,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=52202.666666666664, ans=0.025
+2024-08-25 06:54:06,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=52202.666666666664, ans=0.125
+2024-08-25 06:54:06,913 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=52202.666666666664, ans=0.0
+2024-08-25 06:54:32,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=52309.333333333336, ans=0.05
+2024-08-25 06:54:43,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=52362.666666666664, ans=0.2
+2024-08-25 06:54:53,358 INFO [train.py:1114] (0/4) Epoch 4, batch 2350, loss[loss=0.3269, simple_loss=0.3567, pruned_loss=0.1093, ctc_loss=0.1961, over 19660.00 frames. ], tot_loss[loss=0.3107, simple_loss=0.3377, pruned_loss=0.1032, ctc_loss=0.1933, over 3864022.99 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 06:54:58,708 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.121e+02 2.497e+02 3.048e+02 4.745e+02, threshold=4.995e+02, percent-clipped=0.0
+2024-08-25 06:55:01,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=52362.666666666664, ans=0.0
+2024-08-25 06:55:04,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=52362.666666666664, ans=0.125
+2024-08-25 06:55:20,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=52416.0, ans=0.0
+2024-08-25 06:55:21,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=52416.0, ans=0.0
+2024-08-25 06:55:38,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=52469.333333333336, ans=0.0
+2024-08-25 07:07:21,821 INFO [train.py:1114] (0/4) Epoch 4, batch 2400, loss[loss=0.3588, simple_loss=0.3738, pruned_loss=0.1235, ctc_loss=0.2417, over 19401.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3399, pruned_loss=0.1041, ctc_loss=0.1948, over 3858940.05 frames. ], batch size: 67, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 07:10:22,335 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.59 vs. limit=12.0
+2024-08-25 07:18:32,377 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_ff3.min_abs, batch_count=52736.0, ans=0.2
+2024-08-25 07:18:32,481 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=52736.0, ans=10.0
+2024-08-25 07:20:53,364 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.30 vs. limit=15.0
+2024-08-25 07:34:58,794 INFO [train.py:1114] (0/4) Epoch 4, batch 2450, loss[loss=0.4387, simple_loss=0.3985, pruned_loss=0.1719, ctc_loss=0.338, over 13118.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.3455, pruned_loss=0.109, ctc_loss=0.2042, over 3734912.68 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-25 07:36:27,110 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.096e+02 2.355e+02 2.735e+02 5.246e+02, threshold=4.710e+02, percent-clipped=1.0
+2024-08-25 07:39:43,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=52949.333333333336, ans=0.125
+2024-08-25 07:42:30,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=53002.666666666664, ans=0.0
+2024-08-25 07:42:30,602 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.31 vs. limit=22.5
+2024-08-25 07:44:03,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=53056.0, ans=0.025
+2024-08-25 07:44:48,701 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-4.pt
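+
+Note: this checkpoint save closes epoch 4. On the next lines, at the start of epoch 5, the learning rate steps down from 3.05e-02 to 2.83e-02 on top of its slow per-batch decay, which is consistent with a schedule that decays in both batch and epoch, such as icefall's Eden. A sketch of that form, with illustrative `lr_batches`/`lr_epochs` values that are assumptions, not read from this run:
+
+```python
+def eden_lr(base_lr: float, batch: int, epoch: int,
+            lr_batches: float = 7500.0, lr_epochs: float = 3.5) -> float:
+    """Eden-style learning rate: smooth decay in batch count times a
+    step-down at each epoch boundary."""
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+```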
+2024-08-25 07:46:30,883 INFO [train.py:1114] (0/4) Epoch 5, batch 0, loss[loss=0.2965, simple_loss=0.3197, pruned_loss=0.1001, ctc_loss=0.1823, over 19423.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3197, pruned_loss=0.1001, ctc_loss=0.1823, over 19423.00 frames. ], batch size: 48, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 07:46:30,884 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 07:49:02,120 INFO [train.py:1146] (0/4) Epoch 5, validation: loss=0.2543, simple_loss=0.3259, pruned_loss=0.06691, ctc_loss=0.1221, over 944034.00 frames.
+2024-08-25 07:49:02,120 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
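+
+Note: at each epoch start the script evaluates a frame-weighted validation loss before resuming training, and logs the peak GPU memory (14058MB here, presumably via torch.cuda.max_memory_allocated()). A minimal sketch of the validation pass, where `loss_fn` and the batch layout are placeholders rather than the actual icefall API:
+
+```python
+import torch
+
+@torch.no_grad()
+def validation_loss(model, valid_loader, loss_fn) -> float:
+    """Average the loss over the validation set, weighting by frame count."""
+    model.eval()
+    tot_loss, tot_frames = 0.0, 0.0
+    for batch in valid_loader:
+        loss, num_frames = loss_fn(model, batch)  # placeholder signature
+        tot_loss += loss.item() * num_frames
+        tot_frames += num_frames
+    model.train()
+    return tot_loss / tot_frames
+```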
+2024-08-25 07:51:02,359 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.94 vs. limit=15.0
+2024-08-25 07:54:37,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=53157.333333333336, ans=0.125
+2024-08-25 07:55:21,793 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.48 vs. limit=15.0
+2024-08-25 07:58:52,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=53210.666666666664, ans=0.0
+2024-08-25 07:59:15,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=53264.0, ans=10.0
+2024-08-25 07:59:20,935 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=7.676e-02
+2024-08-25 07:59:21,214 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.80 vs. limit=22.5
+2024-08-25 07:59:25,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53264.0, ans=0.0
+2024-08-25 08:01:56,938 INFO [train.py:1114] (0/4) Epoch 5, batch 50, loss[loss=0.2475, simple_loss=0.3007, pruned_loss=0.07098, ctc_loss=0.1308, over 19753.00 frames. ], tot_loss[loss=0.3214, simple_loss=0.3454, pruned_loss=0.1078, ctc_loss=0.2042, over 844155.08 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 08:03:41,030 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.95 vs. limit=15.0
+2024-08-25 08:03:51,548 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 1.984e+02 2.202e+02 2.522e+02 4.045e+02, threshold=4.404e+02, percent-clipped=0.0
+2024-08-25 08:04:16,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=53424.0, ans=0.125
+2024-08-25 08:05:13,664 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.47 vs. limit=15.0
+2024-08-25 08:06:45,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=53530.666666666664, ans=0.0
+2024-08-25 08:07:20,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=53584.0, ans=0.0
+2024-08-25 08:07:22,865 INFO [train.py:1114] (0/4) Epoch 5, batch 100, loss[loss=0.2778, simple_loss=0.3138, pruned_loss=0.0884, ctc_loss=0.1625, over 19724.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3432, pruned_loss=0.1054, ctc_loss=0.1992, over 1498403.14 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:08:21,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=53690.666666666664, ans=0.5
+2024-08-25 08:08:31,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=53744.0, ans=0.125
+2024-08-25 08:08:44,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=53744.0, ans=0.125
+2024-08-25 08:10:03,747 INFO [train.py:1114] (0/4) Epoch 5, batch 150, loss[loss=0.3018, simple_loss=0.3147, pruned_loss=0.1055, ctc_loss=0.1951, over 19741.00 frames. ], tot_loss[loss=0.3097, simple_loss=0.3384, pruned_loss=0.102, ctc_loss=0.1926, over 2028143.16 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:10:19,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=53904.0, ans=0.125
+2024-08-25 08:10:40,327 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.115e+02 2.389e+02 2.764e+02 4.531e+02, threshold=4.777e+02, percent-clipped=1.0
+2024-08-25 08:11:08,120 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.35 vs. limit=15.0
+2024-08-25 08:12:01,138 INFO [train.py:1114] (0/4) Epoch 5, batch 200, loss[loss=0.3452, simple_loss=0.3616, pruned_loss=0.1207, ctc_loss=0.2181, over 18194.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3366, pruned_loss=0.1013, ctc_loss=0.191, over 2436360.97 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:12:04,231 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.53 vs. limit=15.0
+2024-08-25 08:13:53,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=54170.666666666664, ans=0.125
+2024-08-25 08:13:53,585 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:13:53,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=54170.666666666664, ans=0.2
+2024-08-25 08:13:54,899 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:13:57,327 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=54170.666666666664, ans=0.125
+2024-08-25 08:14:32,723 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=54224.0, ans=0.125
+2024-08-25 08:14:40,154 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.78 vs. limit=15.0
+2024-08-25 08:14:43,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=54277.333333333336, ans=0.0
+2024-08-25 08:14:56,878 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.00 vs. limit=15.0
+2024-08-25 08:15:40,996 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.11 vs. limit=22.5
+2024-08-25 08:15:58,970 INFO [train.py:1114] (0/4) Epoch 5, batch 250, loss[loss=0.3369, simple_loss=0.3571, pruned_loss=0.1151, ctc_loss=0.2165, over 19428.00 frames. ], tot_loss[loss=0.3065, simple_loss=0.3356, pruned_loss=0.1007, ctc_loss=0.1901, over 2757087.79 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:16:00,110 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.57 vs. limit=22.5
+2024-08-25 08:16:47,932 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 1.969e+02 2.164e+02 2.373e+02 3.326e+02, threshold=4.328e+02, percent-clipped=0.0
+2024-08-25 08:16:59,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=54544.0, ans=0.025
+2024-08-25 08:17:30,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=54650.666666666664, ans=0.125
+2024-08-25 08:17:32,590 INFO [train.py:1114] (0/4) Epoch 5, batch 300, loss[loss=0.3326, simple_loss=0.36, pruned_loss=0.1104, ctc_loss=0.211, over 19541.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3349, pruned_loss=0.09997, ctc_loss=0.1887, over 3002184.15 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:17:49,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=54757.333333333336, ans=0.125
+2024-08-25 08:17:52,065 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.79 vs. limit=12.0
+2024-08-25 08:18:13,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=54864.0, ans=0.09899494936611666
+2024-08-25 08:18:32,274 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.33 vs. limit=22.5
+2024-08-25 08:18:37,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=54970.666666666664, ans=0.0
+2024-08-25 08:18:38,549 INFO [train.py:1114] (0/4) Epoch 5, batch 350, loss[loss=0.2965, simple_loss=0.3236, pruned_loss=0.09874, ctc_loss=0.1798, over 19769.00 frames. ], tot_loss[loss=0.3048, simple_loss=0.3348, pruned_loss=0.09978, ctc_loss=0.1881, over 3191174.97 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 16.0
+2024-08-25 08:18:42,867 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:18:44,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=54970.666666666664, ans=0.125
+2024-08-25 08:18:44,876 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=15.19 vs. limit=15.0
+2024-08-25 08:18:49,609 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.67 vs. limit=15.0
+2024-08-25 08:18:51,945 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=54970.666666666664, ans=0.125
+2024-08-25 08:18:57,690 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.09 vs. limit=15.0
+2024-08-25 08:18:57,782 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.90 vs. limit=15.0
+2024-08-25 08:19:10,791 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.967e+02 2.265e+02 2.794e+02 4.039e+02, threshold=4.529e+02, percent-clipped=0.0
+2024-08-25 08:19:20,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=55077.333333333336, ans=0.2
+2024-08-25 08:19:24,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=55077.333333333336, ans=0.125
+2024-08-25 08:19:51,989 INFO [train.py:1114] (0/4) Epoch 5, batch 400, loss[loss=0.2838, simple_loss=0.3314, pruned_loss=0.08493, ctc_loss=0.1658, over 19499.00 frames. ], tot_loss[loss=0.3034, simple_loss=0.3339, pruned_loss=0.09913, ctc_loss=0.1867, over 3343119.31 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-25 08:19:58,968 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.08 vs. limit=22.5
+2024-08-25 08:20:04,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=55290.666666666664, ans=0.125
+2024-08-25 08:20:12,472 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=55290.666666666664, ans=0.125
+2024-08-25 08:20:13,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=55290.666666666664, ans=0.0
+2024-08-25 08:20:25,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=55344.0, ans=0.125
+2024-08-25 08:21:27,043 INFO [train.py:1114] (0/4) Epoch 5, batch 450, loss[loss=0.2916, simple_loss=0.3299, pruned_loss=0.09039, ctc_loss=0.1811, over 19599.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3337, pruned_loss=0.09897, ctc_loss=0.1865, over 3450619.45 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:21:27,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=55504.0, ans=0.0
+2024-08-25 08:21:28,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=55504.0, ans=0.125
+2024-08-25 08:21:47,867 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.008e+02 2.249e+02 2.774e+02 4.428e+02, threshold=4.498e+02, percent-clipped=0.0
+2024-08-25 08:21:51,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=55610.666666666664, ans=0.2
+2024-08-25 08:21:56,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=55610.666666666664, ans=0.2
+2024-08-25 08:22:06,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=55664.0, ans=0.04949747468305833
+2024-08-25 08:22:53,987 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.46 vs. limit=15.0
+2024-08-25 08:22:58,212 INFO [train.py:1114] (0/4) Epoch 5, batch 500, loss[loss=0.3046, simple_loss=0.3382, pruned_loss=0.09933, ctc_loss=0.181, over 19665.00 frames. ], tot_loss[loss=0.302, simple_loss=0.333, pruned_loss=0.09846, ctc_loss=0.1854, over 3546264.51 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:23:40,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=55824.0, ans=0.0
+2024-08-25 08:24:06,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=55930.666666666664, ans=0.025
+2024-08-25 08:24:11,306 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=55984.0, ans=0.125
+2024-08-25 08:24:21,996 INFO [train.py:1114] (0/4) Epoch 5, batch 550, loss[loss=0.3305, simple_loss=0.359, pruned_loss=0.1101, ctc_loss=0.2046, over 19337.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3334, pruned_loss=0.09888, ctc_loss=0.1862, over 3609162.31 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:24:32,248 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.30 vs. limit=15.0
+2024-08-25 08:24:36,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=56090.666666666664, ans=0.035
+2024-08-25 08:24:37,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=56090.666666666664, ans=0.2
+2024-08-25 08:24:47,068 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 1.991e+02 2.247e+02 2.867e+02 6.260e+02, threshold=4.494e+02, percent-clipped=1.0
+2024-08-25 08:24:55,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=56144.0, ans=0.125
+2024-08-25 08:25:37,707 INFO [train.py:1114] (0/4) Epoch 5, batch 600, loss[loss=0.342, simple_loss=0.364, pruned_loss=0.1155, ctc_loss=0.2226, over 19446.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3335, pruned_loss=0.09878, ctc_loss=0.1858, over 3667103.29 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:25:38,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=56304.0, ans=0.125
+2024-08-25 08:25:40,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=56304.0, ans=0.125
+2024-08-25 08:26:14,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.85 vs. limit=22.5
+2024-08-25 08:26:24,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=56464.0, ans=0.05
+2024-08-25 08:26:34,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=56517.333333333336, ans=0.0
+2024-08-25 08:26:37,476 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.49 vs. limit=22.5
+2024-08-25 08:26:44,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=56570.666666666664, ans=0.2
+2024-08-25 08:26:47,456 INFO [train.py:1114] (0/4) Epoch 5, batch 650, loss[loss=0.3045, simple_loss=0.3423, pruned_loss=0.09768, ctc_loss=0.1782, over 19760.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.3323, pruned_loss=0.09785, ctc_loss=0.1842, over 3717918.01 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:26:47,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=56570.666666666664, ans=0.125
+2024-08-25 08:26:50,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=56570.666666666664, ans=0.07
+2024-08-25 08:26:55,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=56570.666666666664, ans=0.0
+2024-08-25 08:27:13,355 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.957e+02 2.352e+02 2.685e+02 4.359e+02, threshold=4.704e+02, percent-clipped=0.0
+2024-08-25 08:27:39,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=56730.666666666664, ans=0.2
+2024-08-25 08:28:04,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=56784.0, ans=0.0
+2024-08-25 08:28:10,109 INFO [train.py:1114] (0/4) Epoch 5, batch 700, loss[loss=0.2598, simple_loss=0.3032, pruned_loss=0.07727, ctc_loss=0.1545, over 19720.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3328, pruned_loss=0.09825, ctc_loss=0.1848, over 3749766.63 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:28:44,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=56890.666666666664, ans=0.0
+2024-08-25 08:28:59,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=56944.0, ans=0.125
+2024-08-25 08:29:21,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=57050.666666666664, ans=0.0
+2024-08-25 08:29:41,353 INFO [train.py:1114] (0/4) Epoch 5, batch 750, loss[loss=0.3353, simple_loss=0.362, pruned_loss=0.1109, ctc_loss=0.217, over 19841.00 frames. ], tot_loss[loss=0.3012, simple_loss=0.3324, pruned_loss=0.09806, ctc_loss=0.1848, over 3776345.09 frames. ], batch size: 55, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:29:43,384 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.65 vs. limit=15.0
+2024-08-25 08:30:18,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=57157.333333333336, ans=0.025
+2024-08-25 08:30:28,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=57157.333333333336, ans=0.125
+2024-08-25 08:30:40,377 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.099e+02 2.472e+02 3.181e+02 5.803e+02, threshold=4.945e+02, percent-clipped=2.0
+2024-08-25 08:30:46,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=57157.333333333336, ans=0.2
+2024-08-25 08:30:58,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=57210.666666666664, ans=0.0
+2024-08-25 08:31:03,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=57210.666666666664, ans=0.125
+2024-08-25 08:31:38,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=57317.333333333336, ans=0.0
+2024-08-25 08:32:02,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=57317.333333333336, ans=0.2
+2024-08-25 08:32:05,739 INFO [train.py:1114] (0/4) Epoch 5, batch 800, loss[loss=0.2481, simple_loss=0.2995, pruned_loss=0.07084, ctc_loss=0.1374, over 19430.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.332, pruned_loss=0.09762, ctc_loss=0.1839, over 3797007.90 frames. ], batch size: 48, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:32:23,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=57424.0, ans=0.2
+2024-08-25 08:32:56,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=57477.333333333336, ans=0.125
+2024-08-25 08:33:28,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=57584.0, ans=0.125
+2024-08-25 08:33:28,965 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=57584.0, ans=0.1
+2024-08-25 08:33:33,529 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:33:37,763 INFO [train.py:1114] (0/4) Epoch 5, batch 850, loss[loss=0.3167, simple_loss=0.3542, pruned_loss=0.1006, ctc_loss=0.1948, over 19660.00 frames. ], tot_loss[loss=0.2988, simple_loss=0.331, pruned_loss=0.09688, ctc_loss=0.1822, over 3815725.18 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:34:03,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=57637.333333333336, ans=0.04949747468305833
+2024-08-25 08:34:13,043 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.90 vs. limit=6.0
+2024-08-25 08:34:26,550 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.963e+02 2.197e+02 2.544e+02 4.330e+02, threshold=4.395e+02, percent-clipped=0.0
+2024-08-25 08:35:09,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.58 vs. limit=5.0
+2024-08-25 08:35:17,374 INFO [train.py:1114] (0/4) Epoch 5, batch 900, loss[loss=0.2455, simple_loss=0.2965, pruned_loss=0.0709, ctc_loss=0.1319, over 19403.00 frames. ], tot_loss[loss=0.2996, simple_loss=0.3314, pruned_loss=0.09728, ctc_loss=0.1828, over 3818513.59 frames. ], batch size: 48, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:35:22,072 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=18.48 vs. limit=15.0
+2024-08-25 08:35:30,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=57904.0, ans=0.125
+2024-08-25 08:35:35,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=57904.0, ans=0.1
+2024-08-25 08:35:40,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=57957.333333333336, ans=0.025
+2024-08-25 08:35:55,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=58010.666666666664, ans=0.0
+2024-08-25 08:36:07,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=58064.0, ans=0.125
+2024-08-25 08:36:12,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=58064.0, ans=0.125
+2024-08-25 08:36:18,577 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=58064.0, ans=22.5
+2024-08-25 08:36:41,307 INFO [train.py:1114] (0/4) Epoch 5, batch 950, loss[loss=0.2484, simple_loss=0.2949, pruned_loss=0.07297, ctc_loss=0.1399, over 19477.00 frames. ], tot_loss[loss=0.3005, simple_loss=0.3319, pruned_loss=0.09784, ctc_loss=0.1836, over 3820323.62 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:36:52,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=58224.0, ans=0.0
+2024-08-25 08:36:56,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=58224.0, ans=0.0
+2024-08-25 08:36:59,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=58224.0, ans=0.0
+2024-08-25 08:37:00,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=58224.0, ans=0.125
+2024-08-25 08:37:02,453 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.021e+02 2.236e+02 2.607e+02 6.234e+02, threshold=4.471e+02, percent-clipped=1.0
+2024-08-25 08:37:04,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=58277.333333333336, ans=0.125
+2024-08-25 08:37:20,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=12.67 vs. limit=15.0
+2024-08-25 08:37:26,372 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=58330.666666666664, ans=0.5
+2024-08-25 08:37:49,064 INFO [train.py:1114] (0/4) Epoch 5, batch 1000, loss[loss=0.2897, simple_loss=0.3264, pruned_loss=0.09177, ctc_loss=0.1738, over 19842.00 frames. ], tot_loss[loss=0.3007, simple_loss=0.3321, pruned_loss=0.09787, ctc_loss=0.1837, over 3816207.50 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:38:30,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=58490.666666666664, ans=0.2
+2024-08-25 08:38:41,189 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.06 vs. limit=22.5
+2024-08-25 08:38:51,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=58544.0, ans=0.0
+2024-08-25 08:39:20,336 INFO [train.py:1114] (0/4) Epoch 5, batch 1050, loss[loss=0.2969, simple_loss=0.3361, pruned_loss=0.09342, ctc_loss=0.1769, over 19842.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3316, pruned_loss=0.09778, ctc_loss=0.1838, over 3823196.06 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:39:41,242 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.929e+02 2.228e+02 2.594e+02 4.447e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 08:40:30,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=58917.333333333336, ans=0.0
+2024-08-25 08:40:42,231 INFO [train.py:1114] (0/4) Epoch 5, batch 1100, loss[loss=0.2905, simple_loss=0.3206, pruned_loss=0.09472, ctc_loss=0.1772, over 19566.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3305, pruned_loss=0.09702, ctc_loss=0.1823, over 3831654.08 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:41:13,172 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.67 vs. limit=15.0
+2024-08-25 08:41:16,258 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:41:34,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=59130.666666666664, ans=0.1
+2024-08-25 08:42:01,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59184.0, ans=0.1
+2024-08-25 08:42:06,715 INFO [train.py:1114] (0/4) Epoch 5, batch 1150, loss[loss=0.2673, simple_loss=0.3054, pruned_loss=0.08354, ctc_loss=0.155, over 19606.00 frames. ], tot_loss[loss=0.2994, simple_loss=0.331, pruned_loss=0.09739, ctc_loss=0.1827, over 3829563.53 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:42:19,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=59237.333333333336, ans=0.1
+2024-08-25 08:42:24,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=59290.666666666664, ans=0.125
+2024-08-25 08:42:26,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=59290.666666666664, ans=0.2
+2024-08-25 08:42:26,930 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59290.666666666664, ans=0.1
+2024-08-25 08:42:38,157 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.022e+02 2.244e+02 2.636e+02 4.087e+02, threshold=4.489e+02, percent-clipped=0.0
+2024-08-25 08:42:44,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=59344.0, ans=0.125
+2024-08-25 08:42:50,818 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.54 vs. limit=22.5
+2024-08-25 08:42:59,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=59344.0, ans=0.0
+2024-08-25 08:43:22,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=59450.666666666664, ans=0.1
+2024-08-25 08:43:33,345 INFO [train.py:1114] (0/4) Epoch 5, batch 1200, loss[loss=0.2815, simple_loss=0.3259, pruned_loss=0.08644, ctc_loss=0.1606, over 19839.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.3318, pruned_loss=0.09764, ctc_loss=0.1832, over 3824947.14 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:44:01,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=59557.333333333336, ans=0.125
+2024-08-25 08:44:07,469 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.75 vs. limit=22.5
+2024-08-25 08:44:19,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=59610.666666666664, ans=0.125
+2024-08-25 08:44:28,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=59664.0, ans=0.125
+2024-08-25 08:44:30,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=59664.0, ans=0.0
+2024-08-25 08:44:34,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=59664.0, ans=0.1
+2024-08-25 08:44:40,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=59717.333333333336, ans=0.0
+2024-08-25 08:44:55,340 INFO [train.py:1114] (0/4) Epoch 5, batch 1250, loss[loss=0.3171, simple_loss=0.3508, pruned_loss=0.1045, ctc_loss=0.1858, over 19557.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3314, pruned_loss=0.09674, ctc_loss=0.1813, over 3842931.87 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:45:15,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59824.0, ans=0.1
+2024-08-25 08:45:21,216 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 1.906e+02 2.098e+02 2.362e+02 4.005e+02, threshold=4.196e+02, percent-clipped=0.0
+2024-08-25 08:45:21,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=59824.0, ans=0.1
+2024-08-25 08:45:28,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=59877.333333333336, ans=0.125
+2024-08-25 08:45:29,428 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=7.545e-01
+2024-08-25 08:45:46,803 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=59930.666666666664, ans=0.0
+2024-08-25 08:46:00,951 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.41 vs. limit=15.0
+2024-08-25 08:46:03,589 INFO [train.py:1114] (0/4) Epoch 5, batch 1300, loss[loss=0.3204, simple_loss=0.348, pruned_loss=0.1078, ctc_loss=0.1935, over 18839.00 frames. ], tot_loss[loss=0.2974, simple_loss=0.3304, pruned_loss=0.09621, ctc_loss=0.1802, over 3846267.59 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:46:21,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=60037.333333333336, ans=0.125
+2024-08-25 08:46:34,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=60090.666666666664, ans=0.1
+2024-08-25 08:46:37,601 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.20 vs. limit=15.0
+2024-08-25 08:47:24,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=60250.666666666664, ans=0.025
+2024-08-25 08:47:27,101 INFO [train.py:1114] (0/4) Epoch 5, batch 1350, loss[loss=0.2741, simple_loss=0.3172, pruned_loss=0.08401, ctc_loss=0.1573, over 19751.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.33, pruned_loss=0.09577, ctc_loss=0.1796, over 3857861.13 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:47:52,697 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.95 vs. limit=15.0
+2024-08-25 08:47:59,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=60357.333333333336, ans=0.125
+2024-08-25 08:48:06,341 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.950e+02 2.204e+02 2.621e+02 4.331e+02, threshold=4.409e+02, percent-clipped=1.0
+2024-08-25 08:48:32,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=60464.0, ans=0.2
+2024-08-25 08:48:43,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=60464.0, ans=0.2
+2024-08-25 08:49:14,311 INFO [train.py:1114] (0/4) Epoch 5, batch 1400, loss[loss=0.2479, simple_loss=0.2892, pruned_loss=0.07476, ctc_loss=0.1429, over 19661.00 frames. ], tot_loss[loss=0.296, simple_loss=0.3294, pruned_loss=0.09544, ctc_loss=0.1792, over 3864258.34 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:49:14,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=60570.666666666664, ans=0.125
+2024-08-25 08:49:21,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60570.666666666664, ans=0.1
+2024-08-25 08:49:26,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=60570.666666666664, ans=0.125
+2024-08-25 08:49:27,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=60624.0, ans=0.0
+2024-08-25 08:49:33,063 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.13 vs. limit=6.0
+2024-08-25 08:49:54,716 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=60730.666666666664, ans=0.125
+2024-08-25 08:49:54,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=60730.666666666664, ans=0.0
+2024-08-25 08:49:55,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=60730.666666666664, ans=0.07
+2024-08-25 09:01:57,444 INFO [train.py:1114] (0/4) Epoch 5, batch 1450, loss[loss=0.2943, simple_loss=0.3342, pruned_loss=0.09308, ctc_loss=0.1708, over 19665.00 frames. ], tot_loss[loss=0.2972, simple_loss=0.3306, pruned_loss=0.09584, ctc_loss=0.1801, over 3862292.07 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 09:08:28,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=60837.333333333336, ans=0.1
+2024-08-25 09:12:43,107 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=60890.666666666664, ans=0.125
+2024-08-25 09:14:29,255 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 1.942e+02 2.164e+02 2.480e+02 4.633e+02, threshold=4.329e+02, percent-clipped=1.0
+2024-08-25 09:36:13,493 INFO [train.py:1114] (0/4) Epoch 5, batch 1500, loss[loss=0.3, simple_loss=0.3421, pruned_loss=0.09317, ctc_loss=0.179, over 19582.00 frames. ], tot_loss[loss=0.2969, simple_loss=0.3304, pruned_loss=0.09569, ctc_loss=0.1799, over 3862535.21 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-25 09:44:49,270 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=61157.333333333336, ans=0.09899494936611666
+2024-08-25 09:52:32,910 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=61210.666666666664, ans=0.125
+2024-08-25 09:52:39,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=61210.666666666664, ans=0.2
+2024-08-25 09:56:04,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=61264.0, ans=0.1
+2024-08-25 10:03:52,737 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=61317.333333333336, ans=0.1
+2024-08-25 10:06:52,325 INFO [train.py:1114] (0/4) Epoch 5, batch 1550, loss[loss=0.3197, simple_loss=0.3468, pruned_loss=0.1063, ctc_loss=0.2005, over 19603.00 frames. ], tot_loss[loss=0.298, simple_loss=0.331, pruned_loss=0.09629, ctc_loss=0.1811, over 3847063.11 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
+2024-08-25 10:09:18,016 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=61370.666666666664, ans=0.1
+2024-08-25 10:14:47,428 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.971e+02 2.260e+02 2.611e+02 5.554e+02, threshold=4.519e+02, percent-clipped=3.0
+2024-08-25 10:16:54,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=61477.333333333336, ans=0.025
+2024-08-25 10:21:24,744 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=61530.666666666664, ans=0.125
+2024-08-25 10:28:13,746 INFO [train.py:1114] (0/4) Epoch 5, batch 1600, loss[loss=0.3018, simple_loss=0.3393, pruned_loss=0.09604, ctc_loss=0.1804, over 19839.00 frames. ], tot_loss[loss=0.2988, simple_loss=0.3313, pruned_loss=0.09675, ctc_loss=0.1821, over 3836525.65 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:28:14,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=61637.333333333336, ans=0.125
+2024-08-25 10:31:58,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=61690.666666666664, ans=0.125
+2024-08-25 10:31:58,760 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.63 vs. limit=10.0
+2024-08-25 10:33:18,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=61744.0, ans=0.0
+2024-08-25 10:36:29,729 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.22 vs. limit=12.0
+2024-08-25 10:39:04,900 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.62 vs. limit=10.0
+2024-08-25 10:40:17,104 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:40:45,714 INFO [train.py:1114] (0/4) Epoch 5, batch 1650, loss[loss=0.3018, simple_loss=0.3418, pruned_loss=0.09497, ctc_loss=0.1795, over 19640.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3318, pruned_loss=0.09727, ctc_loss=0.1831, over 3833447.05 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:42:09,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=61957.333333333336, ans=0.125
+2024-08-25 10:42:51,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff2.min_abs, batch_count=61957.333333333336, ans=0.1
+2024-08-25 10:43:04,118 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.985e+02 2.336e+02 2.616e+02 4.728e+02, threshold=4.672e+02, percent-clipped=1.0
+2024-08-25 10:43:08,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=62010.666666666664, ans=0.125
+2024-08-25 10:43:12,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=62010.666666666664, ans=0.125
+2024-08-25 10:44:08,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=62064.0, ans=0.0
+2024-08-25 10:45:13,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=62064.0, ans=0.0
+2024-08-25 10:46:25,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=62117.333333333336, ans=0.0
+2024-08-25 10:46:42,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=62170.666666666664, ans=0.0
+2024-08-25 10:46:43,734 INFO [train.py:1114] (0/4) Epoch 5, batch 1700, loss[loss=0.2456, simple_loss=0.2846, pruned_loss=0.07489, ctc_loss=0.1419, over 19665.00 frames. ], tot_loss[loss=0.2977, simple_loss=0.3304, pruned_loss=0.09622, ctc_loss=0.1811, over 3847846.32 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:48:00,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=62224.0, ans=0.125
+2024-08-25 10:48:32,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=62277.333333333336, ans=0.0
+2024-08-25 10:49:28,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=62330.666666666664, ans=0.125
+2024-08-25 10:50:15,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=62384.0, ans=0.05
+2024-08-25 10:50:54,998 INFO [train.py:1114] (0/4) Epoch 5, batch 1750, loss[loss=0.2631, simple_loss=0.293, pruned_loss=0.08487, ctc_loss=0.1586, over 19655.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.3296, pruned_loss=0.09541, ctc_loss=0.1796, over 3852781.90 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:51:05,855 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=62437.333333333336, ans=0.125
+2024-08-25 10:53:52,988 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.010e+02 2.326e+02 2.972e+02 6.446e+02, threshold=4.653e+02, percent-clipped=3.0
+2024-08-25 10:54:44,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=62544.0, ans=0.125
+2024-08-25 10:55:12,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=62544.0, ans=0.125
+2024-08-25 10:56:56,150 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.14 vs. limit=15.0
+2024-08-25 10:57:11,534 INFO [train.py:1114] (0/4) Epoch 5, batch 1800, loss[loss=0.3092, simple_loss=0.3348, pruned_loss=0.1028, ctc_loss=0.1948, over 19613.00 frames. ], tot_loss[loss=0.2976, simple_loss=0.3305, pruned_loss=0.09617, ctc_loss=0.1811, over 3853809.76 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:58:13,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=62810.666666666664, ans=0.0
+2024-08-25 10:58:58,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=62917.333333333336, ans=0.125
+2024-08-25 10:59:01,577 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.41 vs. limit=15.0
+2024-08-25 10:59:06,183 INFO [train.py:1114] (0/4) Epoch 5, batch 1850, loss[loss=0.2571, simple_loss=0.3144, pruned_loss=0.07291, ctc_loss=0.1349, over 19569.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.33, pruned_loss=0.09571, ctc_loss=0.1799, over 3856524.31 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 10:59:12,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=62970.666666666664, ans=0.0
+2024-08-25 10:59:16,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=62970.666666666664, ans=0.125
+2024-08-25 10:59:27,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63024.0, ans=0.1
+2024-08-25 10:59:32,444 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.044e+02 2.314e+02 2.820e+02 4.474e+02, threshold=4.628e+02, percent-clipped=0.0
+2024-08-25 11:00:05,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=63184.0, ans=0.125
+2024-08-25 11:00:20,616 INFO [train.py:1114] (0/4) Epoch 5, batch 1900, loss[loss=0.2902, simple_loss=0.3338, pruned_loss=0.08839, ctc_loss=0.1744, over 19652.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.3302, pruned_loss=0.09555, ctc_loss=0.1795, over 3860781.12 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:00:29,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=63237.333333333336, ans=0.5
+2024-08-25 11:00:33,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63237.333333333336, ans=0.1
+2024-08-25 11:00:59,406 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63344.0, ans=0.1
+2024-08-25 11:01:18,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.99 vs. limit=6.0
+2024-08-25 11:01:35,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=63397.333333333336, ans=0.125
+2024-08-25 11:01:57,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=63397.333333333336, ans=0.0
+2024-08-25 11:02:34,474 INFO [train.py:1114] (0/4) Epoch 5, batch 1950, loss[loss=0.272, simple_loss=0.3178, pruned_loss=0.08182, ctc_loss=0.1566, over 19569.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.331, pruned_loss=0.09566, ctc_loss=0.1797, over 3869409.36 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:02:46,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=63504.0, ans=0.125
+2024-08-25 11:02:51,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=63557.333333333336, ans=0.125
+2024-08-25 11:02:56,184 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63557.333333333336, ans=0.1
+2024-08-25 11:03:15,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=63557.333333333336, ans=0.1
+2024-08-25 11:03:16,688 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.932e+02 2.130e+02 2.461e+02 4.838e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 11:03:35,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=63610.666666666664, ans=0.0
+2024-08-25 11:04:21,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=63717.333333333336, ans=0.125
+2024-08-25 11:04:37,799 INFO [train.py:1114] (0/4) Epoch 5, batch 2000, loss[loss=0.2422, simple_loss=0.2792, pruned_loss=0.07365, ctc_loss=0.1448, over 19636.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.332, pruned_loss=0.09654, ctc_loss=0.1811, over 3853618.52 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:04:41,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=63770.666666666664, ans=0.0
+2024-08-25 11:05:12,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=63877.333333333336, ans=0.125
+2024-08-25 11:05:13,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=63877.333333333336, ans=0.125
+2024-08-25 11:05:18,697 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=63877.333333333336, ans=0.125
+2024-08-25 11:05:29,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=63930.666666666664, ans=0.125
+2024-08-25 11:05:35,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=63930.666666666664, ans=0.2
+2024-08-25 11:05:37,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=63930.666666666664, ans=0.05
+2024-08-25 11:05:39,336 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.67 vs. limit=6.0
+2024-08-25 11:05:46,651 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:05:49,747 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-12000.pt
+2024-08-25 11:06:03,487 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=63984.0, ans=0.2
+2024-08-25 11:06:04,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=63984.0, ans=0.125
+2024-08-25 11:06:08,376 INFO [train.py:1114] (0/4) Epoch 5, batch 2050, loss[loss=0.2528, simple_loss=0.2909, pruned_loss=0.07765, ctc_loss=0.1485, over 19706.00 frames. ], tot_loss[loss=0.2972, simple_loss=0.3303, pruned_loss=0.09601, ctc_loss=0.1803, over 3850977.62 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:06:13,924 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=64037.333333333336, ans=0.09899494936611666
+2024-08-25 11:06:19,819 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.69 vs. limit=15.0
+2024-08-25 11:06:29,153 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.037e+02 2.272e+02 2.892e+02 6.343e+02, threshold=4.544e+02, percent-clipped=1.0
+2024-08-25 11:06:35,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=64144.0, ans=0.125
+2024-08-25 11:06:46,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=64144.0, ans=0.125
+2024-08-25 11:07:13,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=64197.333333333336, ans=0.125
+2024-08-25 11:07:25,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=64250.666666666664, ans=0.125
+2024-08-25 11:07:48,385 INFO [train.py:1114] (0/4) Epoch 5, batch 2100, loss[loss=0.3122, simple_loss=0.3408, pruned_loss=0.1018, ctc_loss=0.2003, over 19789.00 frames. ], tot_loss[loss=0.2957, simple_loss=0.3293, pruned_loss=0.09526, ctc_loss=0.1789, over 3858618.66 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:08:07,063 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.56 vs. limit=15.0
+2024-08-25 11:08:12,561 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.83 vs. limit=6.0
+2024-08-25 11:08:14,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64357.333333333336, ans=0.1
+2024-08-25 11:08:53,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=64464.0, ans=0.125
+2024-08-25 11:08:58,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64517.333333333336, ans=0.1
+2024-08-25 11:09:00,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=64517.333333333336, ans=0.0
+2024-08-25 11:09:15,723 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=64517.333333333336, ans=0.125
+2024-08-25 11:09:21,097 INFO [train.py:1114] (0/4) Epoch 5, batch 2150, loss[loss=0.2832, simple_loss=0.3214, pruned_loss=0.08801, ctc_loss=0.1727, over 19597.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3281, pruned_loss=0.0945, ctc_loss=0.1773, over 3869218.50 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:09:38,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=64624.0, ans=10.0
+2024-08-25 11:09:40,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=64624.0, ans=0.125
+2024-08-25 11:09:44,524 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.041e+02 2.279e+02 2.689e+02 3.624e+02, threshold=4.557e+02, percent-clipped=0.0
+2024-08-25 11:09:57,761 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.36 vs. limit=15.0
+2024-08-25 11:09:59,759 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.91 vs. limit=6.0
+2024-08-25 11:10:21,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64784.0, ans=0.1
+2024-08-25 11:10:21,469 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.52 vs. limit=22.5
+2024-08-25 11:10:29,159 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.89 vs. limit=6.0
+2024-08-25 11:10:34,038 INFO [train.py:1114] (0/4) Epoch 5, batch 2200, loss[loss=0.3087, simple_loss=0.3419, pruned_loss=0.09827, ctc_loss=0.1971, over 19592.00 frames. ], tot_loss[loss=0.2934, simple_loss=0.3279, pruned_loss=0.09407, ctc_loss=0.177, over 3867821.49 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:10:36,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=64837.333333333336, ans=0.125
+2024-08-25 11:10:36,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=64837.333333333336, ans=0.07
+2024-08-25 11:10:58,805 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=64944.0, ans=10.0
+2024-08-25 11:10:58,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=64944.0, ans=0.2
+2024-08-25 11:11:29,245 INFO [train.py:1114] (0/4) Epoch 5, batch 2250, loss[loss=0.2711, simple_loss=0.3211, pruned_loss=0.08013, ctc_loss=0.1521, over 19606.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.3285, pruned_loss=0.09454, ctc_loss=0.1777, over 3867541.43 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:11:29,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=65104.0, ans=0.125
+2024-08-25 11:11:43,161 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=9.95 vs. limit=15.0
+2024-08-25 11:11:51,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=65157.333333333336, ans=0.125
+2024-08-25 11:11:51,991 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.180e+02 2.514e+02 3.003e+02 5.559e+02, threshold=5.029e+02, percent-clipped=2.0
+2024-08-25 11:11:56,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=65210.666666666664, ans=0.0
+2024-08-25 11:11:57,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=65210.666666666664, ans=0.125
+2024-08-25 11:12:16,079 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.31 vs. limit=22.5
+2024-08-25 11:12:24,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=65264.0, ans=0.0
+2024-08-25 11:12:38,218 INFO [train.py:1114] (0/4) Epoch 5, batch 2300, loss[loss=0.2787, simple_loss=0.3152, pruned_loss=0.08885, ctc_loss=0.1613, over 19504.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.327, pruned_loss=0.09421, ctc_loss=0.1769, over 3861826.33 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:12:46,309 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.21 vs. limit=15.0
+2024-08-25 11:12:55,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=65424.0, ans=0.0
+2024-08-25 11:13:11,457 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.91 vs. limit=15.0
+2024-08-25 11:13:28,008 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.97 vs. limit=22.5
+2024-08-25 11:13:50,076 INFO [train.py:1114] (0/4) Epoch 5, batch 2350, loss[loss=0.2735, simple_loss=0.3263, pruned_loss=0.08036, ctc_loss=0.1502, over 19684.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3277, pruned_loss=0.09461, ctc_loss=0.1775, over 3864672.00 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:14:00,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=65637.33333333333, ans=0.1
+2024-08-25 11:14:31,470 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.936e+02 2.303e+02 2.820e+02 4.151e+02, threshold=4.606e+02, percent-clipped=0.0
+2024-08-25 11:14:33,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=65744.0, ans=0.0
+2024-08-25 11:14:36,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=65744.0, ans=0.125
+2024-08-25 11:14:55,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-25 11:15:08,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-25 11:15:10,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-25 11:15:16,244 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.62 vs. limit=5.0
+2024-08-25 11:15:23,124 INFO [train.py:1114] (0/4) Epoch 5, batch 2400, loss[loss=0.3112, simple_loss=0.3507, pruned_loss=0.09777, ctc_loss=0.1906, over 19429.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.33, pruned_loss=0.09536, ctc_loss=0.1791, over 3858442.69 frames. ], batch size: 67, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:15:26,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=65904.0, ans=0.125
+2024-08-25 11:15:27,372 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.57 vs. limit=22.5
+2024-08-25 11:15:59,581 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=66010.66666666667, ans=0.1
+2024-08-25 11:16:19,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=66064.0, ans=0.05
+2024-08-25 11:16:21,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=66064.0, ans=0.125
+2024-08-25 11:16:32,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66064.0, ans=0.1
+2024-08-25 11:16:54,515 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.62 vs. limit=15.0
+2024-08-25 11:16:56,805 INFO [train.py:1114] (0/4) Epoch 5, batch 2450, loss[loss=0.3655, simple_loss=0.3663, pruned_loss=0.1322, ctc_loss=0.2509, over 13486.00 frames. ], tot_loss[loss=0.3049, simple_loss=0.335, pruned_loss=0.0999, ctc_loss=0.1873, over 3734260.93 frames. ], batch size: 141, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:17:21,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=66224.0, ans=0.2
+2024-08-25 11:17:43,156 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.021e+02 2.221e+02 2.524e+02 3.558e+02, threshold=4.443e+02, percent-clipped=0.0
+2024-08-25 11:17:56,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=66277.33333333333, ans=0.125
+2024-08-25 11:18:11,480 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-5.pt
+2024-08-25 11:19:28,343 INFO [train.py:1114] (0/4) Epoch 6, batch 0, loss[loss=0.2835, simple_loss=0.3088, pruned_loss=0.09401, ctc_loss=0.1755, over 19827.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3088, pruned_loss=0.09401, ctc_loss=0.1755, over 19827.00 frames. ], batch size: 49, lr: 2.45e-02, grad_scale: 32.0
+2024-08-25 11:19:28,344 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 11:20:29,258 INFO [train.py:1146] (0/4) Epoch 6, validation: loss=0.2388, simple_loss=0.3147, pruned_loss=0.05993, ctc_loss=0.1076, over 944034.00 frames.
+2024-08-25 11:20:29,258 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 11:21:00,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=66485.33333333333, ans=0.1
+2024-08-25 11:21:00,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=66485.33333333333, ans=0.125
+2024-08-25 11:21:18,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=66592.0, ans=0.0
+2024-08-25 11:21:26,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=66592.0, ans=0.2
+2024-08-25 11:21:52,009 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.60 vs. limit=22.5
+2024-08-25 11:21:56,953 INFO [train.py:1114] (0/4) Epoch 6, batch 50, loss[loss=0.2588, simple_loss=0.2907, pruned_loss=0.08219, ctc_loss=0.156, over 19728.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3329, pruned_loss=0.09875, ctc_loss=0.1874, over 844928.81 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:22:17,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=66698.66666666667, ans=0.125
+2024-08-25 11:22:45,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=66752.0, ans=0.0
+2024-08-25 11:22:50,719 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 1.959e+02 2.174e+02 2.569e+02 5.460e+02, threshold=4.347e+02, percent-clipped=1.0
+2024-08-25 11:23:07,883 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=66858.66666666667, ans=0.125
+2024-08-25 11:23:11,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=66858.66666666667, ans=0.125
+2024-08-25 11:23:18,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=66912.0, ans=0.2
+2024-08-25 11:23:18,892 INFO [train.py:1114] (0/4) Epoch 6, batch 100, loss[loss=0.2625, simple_loss=0.3078, pruned_loss=0.07885, ctc_loss=0.1486, over 19711.00 frames. ], tot_loss[loss=0.2992, simple_loss=0.3327, pruned_loss=0.09637, ctc_loss=0.1824, over 1499585.12 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:23:29,965 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.53 vs. limit=15.0
+2024-08-25 11:23:34,151 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.30 vs. limit=12.0
+2024-08-25 11:23:46,820 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.15 vs. limit=10.0
+2024-08-25 11:24:21,816 INFO [train.py:1114] (0/4) Epoch 6, batch 150, loss[loss=0.2673, simple_loss=0.2992, pruned_loss=0.08509, ctc_loss=0.163, over 19714.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.3275, pruned_loss=0.09357, ctc_loss=0.1766, over 2027782.93 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:24:51,938 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=6.224e+00
+2024-08-25 11:24:53,575 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.64 vs. limit=12.0
+2024-08-25 11:25:04,952 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.947e+02 2.172e+02 2.650e+02 4.091e+02, threshold=4.343e+02, percent-clipped=0.0
+2024-08-25 11:25:14,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=67338.66666666667, ans=0.0
+2024-08-25 11:25:21,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=67392.0, ans=0.125
+2024-08-25 11:25:25,298 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.60 vs. limit=15.0
+2024-08-25 11:25:27,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.32 vs. limit=15.0
+2024-08-25 11:25:35,922 INFO [train.py:1114] (0/4) Epoch 6, batch 200, loss[loss=0.3444, simple_loss=0.3528, pruned_loss=0.1225, ctc_loss=0.2276, over 18434.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3243, pruned_loss=0.09134, ctc_loss=0.172, over 2435777.26 frames. ], batch size: 85, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:26:42,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=67605.33333333333, ans=0.0
+2024-08-25 11:26:52,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=67605.33333333333, ans=0.0
+2024-08-25 11:27:02,727 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.58 vs. limit=15.0
+2024-08-25 11:27:17,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=67712.0, ans=0.1
+2024-08-25 11:27:19,502 INFO [train.py:1114] (0/4) Epoch 6, batch 250, loss[loss=0.3237, simple_loss=0.35, pruned_loss=0.1081, ctc_loss=0.2026, over 19444.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3254, pruned_loss=0.0919, ctc_loss=0.1733, over 2756020.36 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:27:53,808 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.98 vs. limit=15.0
+2024-08-25 11:27:55,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=67765.33333333333, ans=0.035
+2024-08-25 11:28:32,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=67818.66666666667, ans=0.125
+2024-08-25 11:28:34,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=67872.0, ans=0.2
+2024-08-25 11:28:36,846 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 1.900e+02 2.111e+02 2.483e+02 4.707e+02, threshold=4.222e+02, percent-clipped=1.0
+2024-08-25 11:29:38,945 INFO [train.py:1114] (0/4) Epoch 6, batch 300, loss[loss=0.3019, simple_loss=0.3427, pruned_loss=0.09583, ctc_loss=0.1735, over 19503.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3244, pruned_loss=0.09132, ctc_loss=0.172, over 3000636.50 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:29:46,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=67978.66666666667, ans=0.025
+2024-08-25 11:29:53,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=68032.0, ans=0.025
+2024-08-25 11:30:20,653 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.38 vs. limit=10.0
+2024-08-25 11:30:52,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=68085.33333333333, ans=0.125
+2024-08-25 11:31:07,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=68138.66666666667, ans=0.0
+2024-08-25 11:31:08,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=68138.66666666667, ans=0.04949747468305833
+2024-08-25 11:31:11,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=68192.0, ans=0.125
+2024-08-25 11:31:37,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=68192.0, ans=0.0
+2024-08-25 11:31:39,925 INFO [train.py:1114] (0/4) Epoch 6, batch 350, loss[loss=0.2515, simple_loss=0.2959, pruned_loss=0.07608, ctc_loss=0.1374, over 19759.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.325, pruned_loss=0.09123, ctc_loss=0.1718, over 3190927.67 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:31:47,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=68245.33333333333, ans=0.125
+2024-08-25 11:32:13,187 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.56 vs. limit=15.0
+2024-08-25 11:32:19,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=68298.66666666667, ans=0.125
+2024-08-25 11:32:21,315 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.06 vs. limit=22.5
+2024-08-25 11:32:35,305 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.039e+02 2.360e+02 2.872e+02 5.301e+02, threshold=4.720e+02, percent-clipped=2.0
+2024-08-25 11:32:48,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=68458.66666666667, ans=0.1
+2024-08-25 11:32:52,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=68458.66666666667, ans=0.0
+2024-08-25 11:33:02,564 INFO [train.py:1114] (0/4) Epoch 6, batch 400, loss[loss=0.2452, simple_loss=0.2957, pruned_loss=0.071, ctc_loss=0.1317, over 19521.00 frames. ], tot_loss[loss=0.2866, simple_loss=0.3239, pruned_loss=0.09058, ctc_loss=0.1705, over 3343391.92 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:33:07,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=68512.0, ans=0.5
+2024-08-25 11:33:08,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=68512.0, ans=0.0
+2024-08-25 11:33:15,730 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:33:25,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=68618.66666666667, ans=0.1
+2024-08-25 11:33:58,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=68672.0, ans=0.0
+2024-08-25 11:34:13,405 INFO [train.py:1114] (0/4) Epoch 6, batch 450, loss[loss=0.325, simple_loss=0.3498, pruned_loss=0.1093, ctc_loss=0.204, over 19629.00 frames. ], tot_loss[loss=0.2876, simple_loss=0.3244, pruned_loss=0.09112, ctc_loss=0.1715, over 3451979.55 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:34:13,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=68778.66666666667, ans=0.2
+2024-08-25 11:34:30,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=68832.0, ans=0.0
+2024-08-25 11:34:40,716 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.57 vs. limit=15.0
+2024-08-25 11:34:49,657 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.969e+02 2.191e+02 2.793e+02 4.218e+02, threshold=4.382e+02, percent-clipped=0.0
+2024-08-25 11:34:54,477 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=68938.66666666667, ans=0.2
+2024-08-25 11:35:05,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=68992.0, ans=0.2
+2024-08-25 11:35:10,579 INFO [train.py:1114] (0/4) Epoch 6, batch 500, loss[loss=0.2976, simple_loss=0.3426, pruned_loss=0.09282, ctc_loss=0.1676, over 19635.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3226, pruned_loss=0.08998, ctc_loss=0.1693, over 3547779.06 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:35:20,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=69045.33333333333, ans=0.125
+2024-08-25 11:35:25,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=69098.66666666667, ans=0.125
+2024-08-25 11:35:27,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=69098.66666666667, ans=0.025
+2024-08-25 11:35:39,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=69152.0, ans=0.125
+2024-08-25 11:35:49,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=69205.33333333333, ans=0.125
+2024-08-25 11:35:49,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.97 vs. limit=15.0
+2024-08-25 11:35:50,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=69205.33333333333, ans=0.0
+2024-08-25 11:36:10,421 INFO [train.py:1114] (0/4) Epoch 6, batch 550, loss[loss=0.3145, simple_loss=0.344, pruned_loss=0.1045, ctc_loss=0.1904, over 19282.00 frames. ], tot_loss[loss=0.2862, simple_loss=0.3234, pruned_loss=0.09042, ctc_loss=0.1704, over 3609600.28 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:36:21,490 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.00 vs. limit=15.0
+2024-08-25 11:36:38,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=69418.66666666667, ans=0.07
+2024-08-25 11:36:43,277 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.29 vs. limit=15.0
+2024-08-25 11:36:46,543 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.100e+02 2.439e+02 2.966e+02 5.259e+02, threshold=4.878e+02, percent-clipped=1.0
+2024-08-25 11:37:20,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=69525.33333333333, ans=0.0
+2024-08-25 11:37:21,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=69525.33333333333, ans=0.1
+2024-08-25 11:37:28,779 INFO [train.py:1114] (0/4) Epoch 6, batch 600, loss[loss=0.3136, simple_loss=0.3477, pruned_loss=0.1024, ctc_loss=0.1863, over 19348.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3238, pruned_loss=0.09083, ctc_loss=0.171, over 3666363.11 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:37:40,460 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=69578.66666666667, ans=0.125
+2024-08-25 11:38:00,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=69685.33333333333, ans=0.0
+2024-08-25 11:38:19,959 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=5.424e-03
+2024-08-25 11:38:24,711 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.32 vs. limit=15.0
+2024-08-25 11:38:32,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=69792.0, ans=0.0
+2024-08-25 11:38:40,020 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.29 vs. limit=15.0
+2024-08-25 11:38:46,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=69792.0, ans=0.125
+2024-08-25 11:38:58,906 INFO [train.py:1114] (0/4) Epoch 6, batch 650, loss[loss=0.2548, simple_loss=0.3077, pruned_loss=0.07385, ctc_loss=0.1352, over 19766.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3231, pruned_loss=0.09031, ctc_loss=0.17, over 3716488.73 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:39:25,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=69898.66666666667, ans=0.025
+2024-08-25 11:39:50,478 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.931e+02 2.137e+02 2.425e+02 3.711e+02, threshold=4.274e+02, percent-clipped=0.0
+2024-08-25 11:40:16,199 INFO [train.py:1114] (0/4) Epoch 6, batch 700, loss[loss=0.241, simple_loss=0.2889, pruned_loss=0.07063, ctc_loss=0.1294, over 19724.00 frames. ], tot_loss[loss=0.2863, simple_loss=0.3235, pruned_loss=0.09054, ctc_loss=0.1702, over 3748518.01 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:40:24,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=70112.0, ans=0.0
+2024-08-25 11:40:40,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=70165.33333333333, ans=0.0
+2024-08-25 11:40:48,553 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:41:10,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=70218.66666666667, ans=0.125
+2024-08-25 11:41:57,512 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=70325.33333333333, ans=0.0
+2024-08-25 11:42:06,504 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.40 vs. limit=12.0
+2024-08-25 11:42:12,746 INFO [train.py:1114] (0/4) Epoch 6, batch 750, loss[loss=0.2955, simple_loss=0.3354, pruned_loss=0.09312, ctc_loss=0.1732, over 19502.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3228, pruned_loss=0.09003, ctc_loss=0.1691, over 3774484.37 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:42:28,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=70432.0, ans=0.125
+2024-08-25 11:42:53,007 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.09 vs. limit=15.0
+2024-08-25 11:42:59,521 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.30 vs. limit=15.0
+2024-08-25 11:43:09,525 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.022e+02 2.297e+02 2.693e+02 4.652e+02, threshold=4.594e+02, percent-clipped=2.0
+2024-08-25 11:43:16,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=70538.66666666667, ans=0.125
+2024-08-25 11:43:20,610 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.50 vs. limit=22.5
+2024-08-25 11:43:22,600 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=70592.0, ans=0.2
+2024-08-25 11:43:34,918 INFO [train.py:1114] (0/4) Epoch 6, batch 800, loss[loss=0.245, simple_loss=0.2861, pruned_loss=0.07458, ctc_loss=0.1368, over 19810.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.322, pruned_loss=0.08955, ctc_loss=0.1683, over 3794875.81 frames. ], batch size: 49, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:43:44,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=70645.33333333333, ans=0.125
+2024-08-25 11:43:45,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=70645.33333333333, ans=0.2
+2024-08-25 11:44:02,045 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.20 vs. limit=15.0
+2024-08-25 11:44:08,849 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=16.32 vs. limit=15.0
+2024-08-25 11:44:18,087 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.88 vs. limit=10.0
+2024-08-25 11:44:27,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=70805.33333333333, ans=0.125
+2024-08-25 11:44:46,463 INFO [train.py:1114] (0/4) Epoch 6, batch 850, loss[loss=0.2822, simple_loss=0.329, pruned_loss=0.08536, ctc_loss=0.1616, over 19681.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3212, pruned_loss=0.08893, ctc_loss=0.167, over 3813603.32 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:44:51,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=70912.0, ans=0.125
+2024-08-25 11:45:08,718 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=70912.0, ans=0.125
+2024-08-25 11:45:37,128 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.06 vs. limit=15.0
+2024-08-25 11:45:46,245 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.893e+02 2.077e+02 2.374e+02 4.075e+02, threshold=4.154e+02, percent-clipped=0.0
+2024-08-25 11:46:07,495 INFO [train.py:1114] (0/4) Epoch 6, batch 900, loss[loss=0.2528, simple_loss=0.2907, pruned_loss=0.0781, ctc_loss=0.1466, over 19439.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3217, pruned_loss=0.08964, ctc_loss=0.1682, over 3817682.59 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 16.0
+2024-08-25 11:46:16,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=71178.66666666667, ans=0.0
+2024-08-25 11:46:35,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=71232.0, ans=0.0
+2024-08-25 11:46:38,464 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.82 vs. limit=22.5
+2024-08-25 11:46:41,493 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.43 vs. limit=6.0
+2024-08-25 11:46:45,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.76 vs. limit=22.5
+2024-08-25 11:47:21,574 INFO [train.py:1114] (0/4) Epoch 6, batch 950, loss[loss=0.2729, simple_loss=0.3121, pruned_loss=0.08289, ctc_loss=0.1699, over 19522.00 frames. ], tot_loss[loss=0.2837, simple_loss=0.3216, pruned_loss=0.08928, ctc_loss=0.1679, over 3819604.40 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:48:00,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=71498.66666666667, ans=0.125
+2024-08-25 11:48:00,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=71498.66666666667, ans=0.125
+2024-08-25 11:48:05,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=71552.0, ans=0.125
+2024-08-25 11:48:21,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=71605.33333333333, ans=0.125
+2024-08-25 11:48:23,513 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.900e+02 2.167e+02 2.553e+02 4.088e+02, threshold=4.334e+02, percent-clipped=0.0
+2024-08-25 11:49:03,396 INFO [train.py:1114] (0/4) Epoch 6, batch 1000, loss[loss=0.2646, simple_loss=0.31, pruned_loss=0.08, ctc_loss=0.1481, over 19845.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3226, pruned_loss=0.08985, ctc_loss=0.1686, over 3815694.02 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:49:07,809 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.53 vs. limit=15.0
+2024-08-25 11:49:23,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=71765.33333333333, ans=0.0
+2024-08-25 11:49:30,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=71765.33333333333, ans=0.0
+2024-08-25 11:50:03,424 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:50:04,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=71872.0, ans=0.0
+2024-08-25 11:50:10,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=71872.0, ans=0.125
+2024-08-25 11:50:48,056 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:50:57,810 INFO [train.py:1114] (0/4) Epoch 6, batch 1050, loss[loss=0.2681, simple_loss=0.3189, pruned_loss=0.07921, ctc_loss=0.1472, over 19845.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3219, pruned_loss=0.08958, ctc_loss=0.1681, over 3821802.79 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:51:35,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=72085.33333333333, ans=0.0
+2024-08-25 11:51:55,861 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=72085.33333333333, ans=0.025
+2024-08-25 11:52:00,139 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.944e+02 2.201e+02 2.550e+02 3.957e+02, threshold=4.403e+02, percent-clipped=0.0
+2024-08-25 11:52:23,477 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.64 vs. limit=10.0
+2024-08-25 11:52:25,600 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=72138.66666666667, ans=0.125
+2024-08-25 11:52:26,736 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=72138.66666666667, ans=0.05
+2024-08-25 11:52:27,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=72138.66666666667, ans=0.0
+2024-08-25 11:52:39,928 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.93 vs. limit=15.0
+2024-08-25 11:52:48,890 INFO [train.py:1114] (0/4) Epoch 6, batch 1100, loss[loss=0.2569, simple_loss=0.3107, pruned_loss=0.0755, ctc_loss=0.1301, over 19586.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3213, pruned_loss=0.08897, ctc_loss=0.1668, over 3829564.03 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:52:51,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=72245.33333333333, ans=0.125
+2024-08-25 11:53:03,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=72245.33333333333, ans=0.2
+2024-08-25 11:53:14,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=72298.66666666667, ans=0.125
+2024-08-25 11:53:28,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=72352.0, ans=0.2
+2024-08-25 11:53:28,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72352.0, ans=0.1
+2024-08-25 11:53:31,456 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.08 vs. limit=15.0
+2024-08-25 11:53:39,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=72405.33333333333, ans=0.125
+2024-08-25 11:53:48,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=72458.66666666667, ans=0.0
+2024-08-25 11:53:58,642 INFO [train.py:1114] (0/4) Epoch 6, batch 1150, loss[loss=0.261, simple_loss=0.3031, pruned_loss=0.07922, ctc_loss=0.151, over 19581.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3207, pruned_loss=0.08875, ctc_loss=0.1666, over 3828050.86 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:54:19,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=72565.33333333333, ans=0.1
+2024-08-25 11:54:19,978 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.77 vs. limit=15.0
+2024-08-25 11:54:35,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=72618.66666666667, ans=0.0
+2024-08-25 11:54:43,441 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.952e+02 2.194e+02 2.505e+02 4.680e+02, threshold=4.387e+02, percent-clipped=1.0
+2024-08-25 11:54:47,554 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.62 vs. limit=15.0
+2024-08-25 11:54:48,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=72672.0, ans=0.125
+2024-08-25 11:55:11,879 INFO [train.py:1114] (0/4) Epoch 6, batch 1200, loss[loss=0.335, simple_loss=0.3603, pruned_loss=0.1134, ctc_loss=0.2069, over 19845.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3225, pruned_loss=0.08988, ctc_loss=0.1688, over 3823637.56 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:55:18,534 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=72778.66666666667, ans=0.125
+2024-08-25 11:55:22,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=72778.66666666667, ans=0.125
+2024-08-25 11:55:34,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=72832.0, ans=0.2
+2024-08-25 11:55:50,393 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.25 vs. limit=12.0
+2024-08-25 11:56:44,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=72992.0, ans=0.125
+2024-08-25 11:56:47,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=72992.0, ans=0.125
+2024-08-25 11:56:55,096 INFO [train.py:1114] (0/4) Epoch 6, batch 1250, loss[loss=0.2976, simple_loss=0.3321, pruned_loss=0.09737, ctc_loss=0.1708, over 19526.00 frames. ], tot_loss[loss=0.2854, simple_loss=0.3231, pruned_loss=0.08999, ctc_loss=0.169, over 3842300.87 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:57:06,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=73098.66666666667, ans=0.0
+2024-08-25 11:57:59,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=73152.0, ans=0.125
+2024-08-25 11:58:12,279 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=73205.33333333333, ans=0.125
+2024-08-25 11:58:13,321 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.073e+02 2.305e+02 2.660e+02 4.224e+02, threshold=4.609e+02, percent-clipped=0.0
+2024-08-25 11:58:25,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=73258.66666666667, ans=0.0
+2024-08-25 11:58:28,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=73258.66666666667, ans=0.125
+2024-08-25 11:58:45,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=73312.0, ans=0.125
+2024-08-25 11:58:46,690 INFO [train.py:1114] (0/4) Epoch 6, batch 1300, loss[loss=0.3193, simple_loss=0.3575, pruned_loss=0.1029, ctc_loss=0.1885, over 18835.00 frames. ], tot_loss[loss=0.2845, simple_loss=0.3225, pruned_loss=0.08963, ctc_loss=0.1683, over 3846607.15 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:59:04,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=73312.0, ans=0.125
+2024-08-25 11:59:34,457 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:59:34,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=73365.33333333333, ans=0.0
+2024-08-25 11:59:42,706 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=73418.66666666667, ans=0.2
+2024-08-25 11:59:45,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=73418.66666666667, ans=0.1
+2024-08-25 11:59:47,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=73418.66666666667, ans=0.0
+2024-08-25 11:59:59,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=73472.0, ans=0.125
+2024-08-25 12:00:04,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=73525.33333333333, ans=0.125
+2024-08-25 12:00:10,270 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=73525.33333333333, ans=0.125
+2024-08-25 12:00:19,976 INFO [train.py:1114] (0/4) Epoch 6, batch 1350, loss[loss=0.2802, simple_loss=0.3262, pruned_loss=0.08491, ctc_loss=0.161, over 19765.00 frames. ], tot_loss[loss=0.2821, simple_loss=0.3211, pruned_loss=0.08836, ctc_loss=0.166, over 3857129.69 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 12:00:34,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=73578.66666666667, ans=0.125
+2024-08-25 12:00:34,834 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=73578.66666666667, ans=0.125
+2024-08-25 12:00:48,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=73632.0, ans=0.125
+2024-08-25 12:00:59,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=73685.33333333333, ans=0.2
+2024-08-25 12:01:02,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73738.66666666667, ans=0.1
+2024-08-25 12:01:05,005 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.025e+02 2.295e+02 2.579e+02 4.133e+02, threshold=4.590e+02, percent-clipped=0.0
+2024-08-25 12:01:30,917 INFO [train.py:1114] (0/4) Epoch 6, batch 1400, loss[loss=0.2849, simple_loss=0.3086, pruned_loss=0.09603, ctc_loss=0.173, over 19670.00 frames. ], tot_loss[loss=0.2823, simple_loss=0.3211, pruned_loss=0.08856, ctc_loss=0.1661, over 3864354.35 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:01:39,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=73845.33333333333, ans=0.09899494936611666
+2024-08-25 12:01:50,806 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.51 vs. limit=22.5
+2024-08-25 12:01:52,276 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.38 vs. limit=22.5
+2024-08-25 12:02:20,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73952.0, ans=0.1
+2024-08-25 12:02:23,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74005.33333333333, ans=0.1
+2024-08-25 12:02:52,963 INFO [train.py:1114] (0/4) Epoch 6, batch 1450, loss[loss=0.2998, simple_loss=0.3387, pruned_loss=0.09361, ctc_loss=0.1842, over 19641.00 frames. ], tot_loss[loss=0.2825, simple_loss=0.3214, pruned_loss=0.08856, ctc_loss=0.1663, over 3863037.37 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:02:53,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=74112.0, ans=0.0
+2024-08-25 12:03:32,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=74165.33333333333, ans=0.07
+2024-08-25 12:03:34,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=74165.33333333333, ans=0.125
+2024-08-25 12:03:40,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=74218.66666666667, ans=0.125
+2024-08-25 12:03:48,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=74218.66666666667, ans=0.2
+2024-08-25 12:03:53,271 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 1.998e+02 2.330e+02 2.811e+02 4.670e+02, threshold=4.661e+02, percent-clipped=1.0
+2024-08-25 12:04:08,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=74325.33333333333, ans=0.0
+2024-08-25 12:04:25,548 INFO [train.py:1114] (0/4) Epoch 6, batch 1500, loss[loss=0.3002, simple_loss=0.3334, pruned_loss=0.09718, ctc_loss=0.1817, over 19578.00 frames. ], tot_loss[loss=0.2827, simple_loss=0.3217, pruned_loss=0.08856, ctc_loss=0.1663, over 3862765.27 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:04:30,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=74378.66666666667, ans=0.125
+2024-08-25 12:05:02,097 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.88 vs. limit=15.0
+2024-08-25 12:05:52,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74592.0, ans=0.1
+2024-08-25 12:06:01,373 INFO [train.py:1114] (0/4) Epoch 6, batch 1550, loss[loss=0.2908, simple_loss=0.3277, pruned_loss=0.09234, ctc_loss=0.1727, over 19610.00 frames. ], tot_loss[loss=0.2832, simple_loss=0.3218, pruned_loss=0.08892, ctc_loss=0.167, over 3846999.82 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:06:21,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=74698.66666666667, ans=0.125
+2024-08-25 12:06:22,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=74698.66666666667, ans=0.125
+2024-08-25 12:06:37,901 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.061e+02 2.512e+02 3.027e+02 4.789e+02, threshold=5.024e+02, percent-clipped=1.0
+2024-08-25 12:07:01,770 INFO [train.py:1114] (0/4) Epoch 6, batch 1600, loss[loss=0.3033, simple_loss=0.3415, pruned_loss=0.09644, ctc_loss=0.1806, over 19841.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3222, pruned_loss=0.08944, ctc_loss=0.1679, over 3836462.91 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:07:31,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75018.66666666667, ans=0.1
+2024-08-25 12:07:41,955 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.88 vs. limit=15.0
+2024-08-25 12:07:42,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=75072.0, ans=0.2
+2024-08-25 12:08:00,982 INFO [train.py:1114] (0/4) Epoch 6, batch 1650, loss[loss=0.2796, simple_loss=0.3299, pruned_loss=0.0833, ctc_loss=0.1565, over 19643.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3222, pruned_loss=0.08954, ctc_loss=0.168, over 3832786.98 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:08:26,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=75285.33333333333, ans=0.125
+2024-08-25 12:08:34,522 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=75338.66666666667, ans=0.125
+2024-08-25 12:08:37,753 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.893e+02 2.381e+02 2.784e+02 7.281e+02, threshold=4.762e+02, percent-clipped=1.0
+2024-08-25 12:08:44,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=75338.66666666667, ans=0.125
+2024-08-25 12:08:50,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=75392.0, ans=0.0
+2024-08-25 12:08:51,688 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.58 vs. limit=10.0
+2024-08-25 12:09:00,105 INFO [train.py:1114] (0/4) Epoch 6, batch 1700, loss[loss=0.242, simple_loss=0.2806, pruned_loss=0.07463, ctc_loss=0.1356, over 19668.00 frames. ], tot_loss[loss=0.2818, simple_loss=0.3208, pruned_loss=0.0883, ctc_loss=0.1657, over 3846878.92 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-25 12:09:02,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=75445.33333333333, ans=0.125
+2024-08-25 12:09:08,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=75445.33333333333, ans=0.125
+2024-08-25 12:09:18,581 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=75498.66666666667, ans=0.2
+2024-08-25 12:09:26,372 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=75552.0, ans=0.125
+2024-08-25 12:09:42,137 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.68 vs. limit=15.0
+2024-08-25 12:09:43,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75658.66666666667, ans=0.1
+2024-08-25 12:09:50,480 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=3.906e-02
+2024-08-25 12:09:55,904 INFO [train.py:1114] (0/4) Epoch 6, batch 1750, loss[loss=0.2379, simple_loss=0.2805, pruned_loss=0.07065, ctc_loss=0.1349, over 19645.00 frames. ], tot_loss[loss=0.2809, simple_loss=0.3202, pruned_loss=0.08783, ctc_loss=0.165, over 3852018.78 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 16.0
+2024-08-25 12:10:01,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=75712.0, ans=0.0
+2024-08-25 12:10:03,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=75712.0, ans=0.125
+2024-08-25 12:10:04,130 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.94 vs. limit=15.0
+2024-08-25 12:10:05,848 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=75765.33333333333, ans=0.025
+2024-08-25 12:10:11,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=75765.33333333333, ans=0.125
+2024-08-25 12:10:12,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=75765.33333333333, ans=0.1
+2024-08-25 12:10:17,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=75818.66666666667, ans=0.0
+2024-08-25 12:10:31,315 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.92 vs. limit=10.0
+2024-08-25 12:10:32,752 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.890e+02 2.130e+02 2.587e+02 4.262e+02, threshold=4.260e+02, percent-clipped=0.0
+2024-08-25 12:10:35,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=75872.0, ans=0.1
+2024-08-25 12:10:39,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75925.33333333333, ans=0.1
+2024-08-25 12:10:47,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=75925.33333333333, ans=0.1
+2024-08-25 12:10:50,354 INFO [train.py:1114] (0/4) Epoch 6, batch 1800, loss[loss=0.2981, simple_loss=0.3377, pruned_loss=0.09346, ctc_loss=0.1787, over 19623.00 frames. ], tot_loss[loss=0.2806, simple_loss=0.3201, pruned_loss=0.08759, ctc_loss=0.1648, over 3853824.76 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 8.0
+2024-08-25 12:10:54,207 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.08 vs. limit=15.0
+2024-08-25 12:11:05,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=76032.0, ans=0.125
+2024-08-25 12:11:12,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff3.min_abs, batch_count=76085.33333333333, ans=0.2
+2024-08-25 12:11:14,804 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.71 vs. limit=6.0
+2024-08-25 12:11:19,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=76085.33333333333, ans=0.0
+2024-08-25 12:11:23,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=76138.66666666667, ans=0.0
+2024-08-25 12:11:44,879 INFO [train.py:1114] (0/4) Epoch 6, batch 1850, loss[loss=0.3182, simple_loss=0.3512, pruned_loss=0.1049, ctc_loss=0.1887, over 19579.00 frames. ], tot_loss[loss=0.2811, simple_loss=0.3202, pruned_loss=0.08796, ctc_loss=0.1651, over 3858141.23 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:11:46,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=76245.33333333333, ans=0.125
+2024-08-25 12:11:50,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=76245.33333333333, ans=0.125
+2024-08-25 12:12:06,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=76352.0, ans=0.125
+2024-08-25 12:12:11,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=76352.0, ans=0.125
+2024-08-25 12:12:22,230 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.994e+02 2.285e+02 2.712e+02 4.413e+02, threshold=4.569e+02, percent-clipped=2.0
+2024-08-25 12:12:43,393 INFO [train.py:1114] (0/4) Epoch 6, batch 1900, loss[loss=0.2632, simple_loss=0.3236, pruned_loss=0.07421, ctc_loss=0.1357, over 19656.00 frames. ], tot_loss[loss=0.2809, simple_loss=0.3207, pruned_loss=0.08767, ctc_loss=0.1646, over 3863486.84 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:12:59,005 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.82 vs. limit=15.0
+2024-08-25 12:13:21,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=76672.0, ans=0.125
+2024-08-25 12:13:40,519 INFO [train.py:1114] (0/4) Epoch 6, batch 1950, loss[loss=0.2712, simple_loss=0.3124, pruned_loss=0.08334, ctc_loss=0.1583, over 19571.00 frames. ], tot_loss[loss=0.2809, simple_loss=0.3214, pruned_loss=0.08742, ctc_loss=0.1641, over 3871849.41 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:13:42,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=76778.66666666667, ans=0.07
+2024-08-25 12:14:17,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=76938.66666666667, ans=0.0
+2024-08-25 12:14:18,621 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 1.890e+02 2.137e+02 2.349e+02 3.743e+02, threshold=4.275e+02, percent-clipped=0.0
+2024-08-25 12:14:34,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=77045.33333333333, ans=0.0
+2024-08-25 12:14:36,004 INFO [train.py:1114] (0/4) Epoch 6, batch 2000, loss[loss=0.2717, simple_loss=0.2997, pruned_loss=0.08873, ctc_loss=0.1658, over 19691.00 frames. ], tot_loss[loss=0.2821, simple_loss=0.3222, pruned_loss=0.08791, ctc_loss=0.1653, over 3855417.73 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:14:40,614 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:14:42,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=77045.33333333333, ans=0.025
+2024-08-25 12:14:44,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=77045.33333333333, ans=0.07
+2024-08-25 12:15:22,398 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.58 vs. limit=15.0
+2024-08-25 12:15:30,077 INFO [train.py:1114] (0/4) Epoch 6, batch 2050, loss[loss=0.2439, simple_loss=0.2798, pruned_loss=0.07446, ctc_loss=0.1479, over 19725.00 frames. ], tot_loss[loss=0.2802, simple_loss=0.3202, pruned_loss=0.08734, ctc_loss=0.164, over 3852827.87 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:16:07,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=77418.66666666667, ans=0.0
+2024-08-25 12:16:11,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=77472.0, ans=0.0
+2024-08-25 12:16:14,698 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.955e+02 2.380e+02 2.986e+02 1.021e+03, threshold=4.760e+02, percent-clipped=7.0
+2024-08-25 12:16:32,219 INFO [train.py:1114] (0/4) Epoch 6, batch 2100, loss[loss=0.2945, simple_loss=0.3339, pruned_loss=0.09206, ctc_loss=0.1775, over 19770.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3196, pruned_loss=0.08686, ctc_loss=0.1633, over 3859819.95 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:16:34,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=77578.66666666667, ans=0.125
+2024-08-25 12:16:39,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=77578.66666666667, ans=0.0
+2024-08-25 12:16:41,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=77578.66666666667, ans=0.0
+2024-08-25 12:17:05,706 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:17:16,414 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.47 vs. limit=15.0
+2024-08-25 12:17:18,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=77792.0, ans=0.0
+2024-08-25 12:17:21,044 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.65 vs. limit=22.5
+2024-08-25 12:17:26,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=77792.0, ans=0.125
+2024-08-25 12:17:28,081 INFO [train.py:1114] (0/4) Epoch 6, batch 2150, loss[loss=0.2405, simple_loss=0.2979, pruned_loss=0.06641, ctc_loss=0.1258, over 19586.00 frames. ], tot_loss[loss=0.2785, simple_loss=0.319, pruned_loss=0.08648, ctc_loss=0.1625, over 3870116.27 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:17:28,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=77845.33333333333, ans=0.0
+2024-08-25 12:17:38,877 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=77898.66666666667, ans=0.2
+2024-08-25 12:17:44,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=77898.66666666667, ans=0.0
+2024-08-25 12:17:44,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=77898.66666666667, ans=0.1
+2024-08-25 12:17:45,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=77898.66666666667, ans=0.1
+2024-08-25 12:18:04,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=77952.0, ans=15.0
+2024-08-25 12:18:19,492 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.877e+02 2.258e+02 2.799e+02 6.726e+02, threshold=4.515e+02, percent-clipped=2.0
+2024-08-25 12:18:20,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=78005.33333333333, ans=0.0
+2024-08-25 12:18:22,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=78005.33333333333, ans=0.2
+2024-08-25 12:19:06,984 INFO [train.py:1114] (0/4) Epoch 6, batch 2200, loss[loss=0.301, simple_loss=0.3343, pruned_loss=0.09707, ctc_loss=0.1841, over 19602.00 frames. ], tot_loss[loss=0.2791, simple_loss=0.3193, pruned_loss=0.08687, ctc_loss=0.163, over 3868804.12 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:19:08,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=78112.0, ans=0.0
+2024-08-25 12:19:11,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=78112.0, ans=0.125
+2024-08-25 12:19:14,767 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=78112.0, ans=0.125
+2024-08-25 12:19:18,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=78165.33333333333, ans=0.0
+2024-08-25 12:19:20,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=78165.33333333333, ans=0.0
+2024-08-25 12:19:28,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=78218.66666666667, ans=0.125
+2024-08-25 12:19:46,287 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.83 vs. limit=15.0
+2024-08-25 12:19:48,386 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.74 vs. limit=15.0
+2024-08-25 12:20:02,388 INFO [train.py:1114] (0/4) Epoch 6, batch 2250, loss[loss=0.2582, simple_loss=0.3092, pruned_loss=0.07551, ctc_loss=0.1404, over 19609.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3188, pruned_loss=0.08642, ctc_loss=0.1622, over 3868380.80 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:20:09,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=78378.66666666667, ans=0.125
+2024-08-25 12:20:15,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=78432.0, ans=0.07
+2024-08-25 12:20:34,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=78538.66666666667, ans=0.125
+2024-08-25 12:20:38,634 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.005e+02 2.234e+02 2.581e+02 4.325e+02, threshold=4.468e+02, percent-clipped=0.0
+2024-08-25 12:20:45,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=78592.0, ans=0.125
+2024-08-25 12:20:53,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=78592.0, ans=0.125
+2024-08-25 12:20:56,329 INFO [train.py:1114] (0/4) Epoch 6, batch 2300, loss[loss=0.264, simple_loss=0.301, pruned_loss=0.08165, ctc_loss=0.1592, over 19514.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3183, pruned_loss=0.08662, ctc_loss=0.1628, over 3861729.87 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:21:14,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=78698.66666666667, ans=0.125
+2024-08-25 12:21:39,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=78805.33333333333, ans=0.0
+2024-08-25 12:21:42,458 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.99 vs. limit=15.0
+2024-08-25 12:21:52,656 INFO [train.py:1114] (0/4) Epoch 6, batch 2350, loss[loss=0.2569, simple_loss=0.3115, pruned_loss=0.07224, ctc_loss=0.1444, over 19666.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3184, pruned_loss=0.08661, ctc_loss=0.1626, over 3864765.58 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:21:53,955 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=78912.0, ans=0.125
+2024-08-25 12:22:09,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=78965.33333333333, ans=0.1
+2024-08-25 12:22:11,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=78965.33333333333, ans=0.1
+2024-08-25 12:22:18,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=79018.66666666667, ans=0.125
+2024-08-25 12:22:27,238 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=4.224e-02
+2024-08-25 12:22:30,295 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 2.097e+02 2.553e+02 3.084e+02 6.792e+02, threshold=5.106e+02, percent-clipped=2.0
+2024-08-25 12:22:47,954 INFO [train.py:1114] (0/4) Epoch 6, batch 2400, loss[loss=0.3017, simple_loss=0.3359, pruned_loss=0.09635, ctc_loss=0.1869, over 19346.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.321, pruned_loss=0.08796, ctc_loss=0.1652, over 3859314.25 frames. ], batch size: 67, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:22:50,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=79178.66666666667, ans=0.125
+2024-08-25 12:22:52,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=79178.66666666667, ans=10.0
+2024-08-25 12:23:02,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=79232.0, ans=0.125
+2024-08-25 12:23:06,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=79232.0, ans=0.125
+2024-08-25 12:23:26,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=79338.66666666667, ans=0.1
+2024-08-25 12:23:32,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=79338.66666666667, ans=0.125
+2024-08-25 12:23:43,089 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.75 vs. limit=15.0
+2024-08-25 12:23:43,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=79392.0, ans=0.125
+2024-08-25 12:23:45,704 INFO [train.py:1114] (0/4) Epoch 6, batch 2450, loss[loss=0.3622, simple_loss=0.3604, pruned_loss=0.1332, ctc_loss=0.244, over 13494.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3262, pruned_loss=0.09226, ctc_loss=0.1733, over 3735937.20 frames. ], batch size: 140, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:23:50,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=79445.33333333333, ans=0.125
+2024-08-25 12:24:16,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.47 vs. limit=22.5
+2024-08-25 12:25:48,655 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.60 vs. limit=15.0
+2024-08-25 12:25:48,696 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.72 vs. limit=22.5
+2024-08-25 12:27:22,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=79605.33333333333, ans=0.09899494936611666
+2024-08-25 12:28:01,634 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.056e+02 2.291e+02 2.526e+02 5.572e+02, threshold=4.582e+02, percent-clipped=1.0
+2024-08-25 12:28:19,343 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-6.pt
+2024-08-25 12:29:27,601 INFO [train.py:1114] (0/4) Epoch 7, batch 0, loss[loss=0.2917, simple_loss=0.3199, pruned_loss=0.09673, ctc_loss=0.1754, over 19812.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3199, pruned_loss=0.09673, ctc_loss=0.1754, over 19812.00 frames. ], batch size: 49, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:29:27,602 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 12:29:44,290 INFO [train.py:1146] (0/4) Epoch 7, validation: loss=0.2269, simple_loss=0.307, pruned_loss=0.05393, ctc_loss=0.0975, over 944034.00 frames.
+2024-08-25 12:29:44,291 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 12:29:44,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=79658.66666666667, ans=0.0
+2024-08-25 12:29:44,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=79658.66666666667, ans=0.0
+2024-08-25 12:30:04,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=79658.66666666667, ans=0.0
+2024-08-25 12:31:09,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=79765.33333333333, ans=0.125
+2024-08-25 12:31:14,406 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.65 vs. limit=6.0
+2024-08-25 12:31:23,347 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.45 vs. limit=15.0
+2024-08-25 12:33:04,695 INFO [train.py:1114] (0/4) Epoch 7, batch 50, loss[loss=0.2463, simple_loss=0.2939, pruned_loss=0.07189, ctc_loss=0.1371, over 19723.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.325, pruned_loss=0.0917, ctc_loss=0.1725, over 844189.99 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:33:56,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=80085.33333333333, ans=0.1
+2024-08-25 12:34:00,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=80085.33333333333, ans=0.125
+2024-08-25 12:34:08,368 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.82 vs. limit=15.0
+2024-08-25 12:34:17,271 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.999e+02 2.246e+02 2.808e+02 5.514e+02, threshold=4.492e+02, percent-clipped=3.0
+2024-08-25 12:34:24,309 INFO [train.py:1114] (0/4) Epoch 7, batch 100, loss[loss=0.2556, simple_loss=0.3054, pruned_loss=0.07375, ctc_loss=0.1457, over 19727.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3235, pruned_loss=0.08879, ctc_loss=0.1679, over 1500395.15 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:34:40,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=80245.33333333333, ans=0.0
+2024-08-25 12:34:53,444 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.32 vs. limit=22.5
+2024-08-25 12:35:05,195 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.06 vs. limit=22.5
+2024-08-25 12:35:07,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=80352.0, ans=0.2
+2024-08-25 12:35:11,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=80405.33333333333, ans=0.125
+2024-08-25 12:35:23,291 INFO [train.py:1114] (0/4) Epoch 7, batch 150, loss[loss=0.2578, simple_loss=0.2881, pruned_loss=0.08287, ctc_loss=0.1544, over 19726.00 frames. ], tot_loss[loss=0.2798, simple_loss=0.3203, pruned_loss=0.08685, ctc_loss=0.1642, over 2029556.49 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:35:36,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=80512.0, ans=0.09899494936611666
+2024-08-25 12:35:40,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=80512.0, ans=0.0
+2024-08-25 12:35:59,347 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.92 vs. limit=15.0
+2024-08-25 12:36:12,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=80618.66666666667, ans=0.025
+2024-08-25 12:36:13,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=80672.0, ans=0.1
+2024-08-25 12:36:18,826 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.959e+02 2.217e+02 2.953e+02 5.735e+02, threshold=4.434e+02, percent-clipped=2.0
+2024-08-25 12:36:20,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=80672.0, ans=0.125
+2024-08-25 12:36:21,894 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.67 vs. limit=15.0
+2024-08-25 12:36:22,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=80672.0, ans=0.125
+2024-08-25 12:36:26,001 INFO [train.py:1114] (0/4) Epoch 7, batch 200, loss[loss=0.3271, simple_loss=0.3511, pruned_loss=0.1102, ctc_loss=0.207, over 18351.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3184, pruned_loss=0.08634, ctc_loss=0.163, over 2436499.74 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:36:48,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=80832.0, ans=0.0
+2024-08-25 12:37:04,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=80885.33333333333, ans=0.2
+2024-08-25 12:37:07,680 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=80885.33333333333, ans=0.125
+2024-08-25 12:37:17,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=80938.66666666667, ans=0.1
+2024-08-25 12:37:22,886 INFO [train.py:1114] (0/4) Epoch 7, batch 250, loss[loss=0.2912, simple_loss=0.3349, pruned_loss=0.09043, ctc_loss=0.1665, over 19400.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3172, pruned_loss=0.08523, ctc_loss=0.1607, over 2756000.58 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:37:34,553 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.09 vs. limit=12.0
+2024-08-25 12:37:41,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=81045.33333333333, ans=0.0
+2024-08-25 12:37:56,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=81098.66666666667, ans=22.5
+2024-08-25 12:37:59,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=81152.0, ans=0.125
+2024-08-25 12:38:06,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=81152.0, ans=0.1
+2024-08-25 12:38:16,687 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.901e+02 2.294e+02 2.833e+02 4.254e+02, threshold=4.587e+02, percent-clipped=0.0
+2024-08-25 12:38:17,097 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:38:17,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=81205.33333333333, ans=0.125
+2024-08-25 12:38:23,341 INFO [train.py:1114] (0/4) Epoch 7, batch 300, loss[loss=0.2938, simple_loss=0.339, pruned_loss=0.08917, ctc_loss=0.1757, over 19538.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3161, pruned_loss=0.08429, ctc_loss=0.1591, over 2999935.01 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:38:27,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=81258.66666666667, ans=0.125
+2024-08-25 12:38:30,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=81258.66666666667, ans=0.2
+2024-08-25 12:38:57,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=81365.33333333333, ans=0.125
+2024-08-25 12:39:11,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81418.66666666667, ans=0.1
+2024-08-25 12:39:46,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=81472.0, ans=0.0
+2024-08-25 12:39:52,664 INFO [train.py:1114] (0/4) Epoch 7, batch 350, loss[loss=0.2385, simple_loss=0.281, pruned_loss=0.07171, ctc_loss=0.1312, over 19755.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3162, pruned_loss=0.08442, ctc_loss=0.1593, over 3189143.82 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:39:55,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=81525.33333333333, ans=0.07
+2024-08-25 12:40:03,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=81578.66666666667, ans=0.0
+2024-08-25 12:40:14,085 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.73 vs. limit=22.5
+2024-08-25 12:40:16,178 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.63 vs. limit=15.0
+2024-08-25 12:40:19,119 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=81632.0, ans=0.125
+2024-08-25 12:40:34,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=81685.33333333333, ans=0.2
+2024-08-25 12:40:38,527 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=81738.66666666667, ans=0.0
+2024-08-25 12:40:43,966 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.980e+02 2.268e+02 2.810e+02 5.782e+02, threshold=4.535e+02, percent-clipped=1.0
+2024-08-25 12:40:44,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=81738.66666666667, ans=0.1
+2024-08-25 12:40:50,665 INFO [train.py:1114] (0/4) Epoch 7, batch 400, loss[loss=0.2633, simple_loss=0.3156, pruned_loss=0.07597, ctc_loss=0.1478, over 19482.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3158, pruned_loss=0.08396, ctc_loss=0.1584, over 3340491.50 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:40:59,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=81792.0, ans=0.025
+2024-08-25 12:41:01,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=81845.33333333333, ans=0.1
+2024-08-25 12:41:04,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-25 12:41:07,552 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.75 vs. limit=15.0
+2024-08-25 12:41:08,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=81845.33333333333, ans=0.0
+2024-08-25 12:41:11,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-25 12:41:23,306 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.60 vs. limit=6.0
+2024-08-25 12:41:23,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=81898.66666666667, ans=0.2
+2024-08-25 12:41:28,565 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.73 vs. limit=10.0
+2024-08-25 12:41:50,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=82005.33333333333, ans=0.2
+2024-08-25 12:41:52,336 INFO [train.py:1114] (0/4) Epoch 7, batch 450, loss[loss=0.2491, simple_loss=0.3095, pruned_loss=0.06735, ctc_loss=0.1351, over 19601.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.316, pruned_loss=0.08409, ctc_loss=0.1586, over 3447901.07 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:42:11,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=82112.0, ans=0.05
+2024-08-25 12:42:13,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82112.0, ans=0.1
+2024-08-25 12:42:26,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=82218.66666666667, ans=15.0
+2024-08-25 12:42:32,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=82218.66666666667, ans=0.2
+2024-08-25 12:42:33,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82218.66666666667, ans=0.1
+2024-08-25 12:42:38,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=82272.0, ans=10.0
+2024-08-25 12:42:45,062 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 1.947e+02 2.448e+02 2.960e+02 4.262e+02, threshold=4.896e+02, percent-clipped=0.0
+2024-08-25 12:42:45,231 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:42:52,068 INFO [train.py:1114] (0/4) Epoch 7, batch 500, loss[loss=0.2594, simple_loss=0.316, pruned_loss=0.07345, ctc_loss=0.1397, over 19655.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3146, pruned_loss=0.08342, ctc_loss=0.1572, over 3543511.16 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:43:00,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=82325.33333333333, ans=10.0
+2024-08-25 12:43:07,126 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.31 vs. limit=15.0
+2024-08-25 12:43:09,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=82378.66666666667, ans=0.5
+2024-08-25 12:43:16,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=82432.0, ans=0.0
+2024-08-25 12:43:39,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=82538.66666666667, ans=0.125
+2024-08-25 12:43:51,852 INFO [train.py:1114] (0/4) Epoch 7, batch 550, loss[loss=0.282, simple_loss=0.3231, pruned_loss=0.08812, ctc_loss=0.1614, over 19167.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.315, pruned_loss=0.08361, ctc_loss=0.1574, over 3604774.38 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:44:00,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=82592.0, ans=0.125
+2024-08-25 12:44:32,742 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.74 vs. limit=6.0
+2024-08-25 12:44:38,193 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.23 vs. limit=15.0
+2024-08-25 12:44:42,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=82805.33333333333, ans=0.07
+2024-08-25 12:44:44,967 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.000e+02 2.364e+02 2.910e+02 5.356e+02, threshold=4.728e+02, percent-clipped=1.0
+2024-08-25 12:44:52,589 INFO [train.py:1114] (0/4) Epoch 7, batch 600, loss[loss=0.2949, simple_loss=0.338, pruned_loss=0.09179, ctc_loss=0.1705, over 19322.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.315, pruned_loss=0.083, ctc_loss=0.1565, over 3663939.57 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-25 12:44:56,836 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.48 vs. limit=15.0
+2024-08-25 12:45:03,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=82912.0, ans=0.125
+2024-08-25 12:45:05,140 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:45:25,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=82965.33333333333, ans=0.125
+2024-08-25 12:45:33,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=83018.66666666667, ans=0.1
+2024-08-25 12:45:38,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=83072.0, ans=0.0
+2024-08-25 12:45:51,130 INFO [train.py:1114] (0/4) Epoch 7, batch 650, loss[loss=0.2963, simple_loss=0.3361, pruned_loss=0.09319, ctc_loss=0.1753, over 19746.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3141, pruned_loss=0.08281, ctc_loss=0.1559, over 3714209.88 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:45:51,217 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=83125.33333333333, ans=0.125
+2024-08-25 12:45:54,274 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.17 vs. limit=15.0
+2024-08-25 12:45:54,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=83125.33333333333, ans=0.0
+2024-08-25 12:46:47,169 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.844e+02 2.004e+02 2.285e+02 4.065e+02, threshold=4.009e+02, percent-clipped=0.0
+2024-08-25 12:46:49,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=83338.66666666667, ans=0.1
+2024-08-25 12:46:52,907 INFO [train.py:1114] (0/4) Epoch 7, batch 700, loss[loss=0.2139, simple_loss=0.2774, pruned_loss=0.05413, ctc_loss=0.1054, over 19715.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3146, pruned_loss=0.08304, ctc_loss=0.1565, over 3746801.58 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:46:54,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=83392.0, ans=0.125
+2024-08-25 12:47:01,404 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.07 vs. limit=12.0
+2024-08-25 12:47:01,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=83392.0, ans=0.125
+2024-08-25 12:47:17,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=83498.66666666667, ans=0.2
+2024-08-25 12:47:21,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=83498.66666666667, ans=0.025
+2024-08-25 12:47:29,343 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:47:38,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=83605.33333333333, ans=0.0
+2024-08-25 12:47:49,556 INFO [train.py:1114] (0/4) Epoch 7, batch 750, loss[loss=0.2743, simple_loss=0.3193, pruned_loss=0.08299, ctc_loss=0.1583, over 19505.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3136, pruned_loss=0.0823, ctc_loss=0.1549, over 3773207.45 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:48:28,123 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=83818.66666666667, ans=0.0
+2024-08-25 12:48:45,005 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 1.885e+02 2.166e+02 2.690e+02 4.534e+02, threshold=4.331e+02, percent-clipped=3.0
+2024-08-25 12:48:50,718 INFO [train.py:1114] (0/4) Epoch 7, batch 800, loss[loss=0.2407, simple_loss=0.2918, pruned_loss=0.06895, ctc_loss=0.1296, over 19801.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3138, pruned_loss=0.0824, ctc_loss=0.1549, over 3794941.03 frames. ], batch size: 49, lr: 2.10e-02, grad_scale: 32.0
+2024-08-25 12:48:50,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=83925.33333333333, ans=0.2
+2024-08-25 12:48:53,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=83925.33333333333, ans=0.125
+2024-08-25 12:49:10,123 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=83978.66666666667, ans=0.125
+2024-08-25 12:49:18,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=84032.0, ans=0.2
+2024-08-25 12:49:19,788 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.87 vs. limit=22.5
+2024-08-25 12:49:21,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=84032.0, ans=0.125
+2024-08-25 12:49:23,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=84032.0, ans=0.125
+2024-08-25 12:49:26,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-25 12:49:51,393 INFO [train.py:1114] (0/4) Epoch 7, batch 850, loss[loss=0.2648, simple_loss=0.3236, pruned_loss=0.07508, ctc_loss=0.1396, over 19655.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.314, pruned_loss=0.0829, ctc_loss=0.1558, over 3814006.77 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:49:53,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=84192.0, ans=0.1
+2024-08-25 12:50:12,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=84245.33333333333, ans=0.125
+2024-08-25 12:50:24,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=84298.66666666667, ans=0.025
+2024-08-25 12:50:30,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=84352.0, ans=0.035
+2024-08-25 12:50:38,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=84405.33333333333, ans=0.035
+2024-08-25 12:50:43,498 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 1.946e+02 2.270e+02 2.825e+02 4.143e+02, threshold=4.540e+02, percent-clipped=0.0
+2024-08-25 12:50:49,148 INFO [train.py:1114] (0/4) Epoch 7, batch 900, loss[loss=0.2454, simple_loss=0.2869, pruned_loss=0.07502, ctc_loss=0.1345, over 19440.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3144, pruned_loss=0.0834, ctc_loss=0.1565, over 3817543.60 frames. ], batch size: 48, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:51:24,078 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.30 vs. limit=15.0
+2024-08-25 12:51:26,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=84565.33333333333, ans=0.125
+2024-08-25 12:51:57,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=84672.0, ans=0.2
+2024-08-25 12:51:58,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=84672.0, ans=0.07
+2024-08-25 12:52:05,338 INFO [train.py:1114] (0/4) Epoch 7, batch 950, loss[loss=0.258, simple_loss=0.3002, pruned_loss=0.07915, ctc_loss=0.1439, over 19510.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3146, pruned_loss=0.08351, ctc_loss=0.157, over 3819914.51 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 16.0
+2024-08-25 12:52:06,826 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=84725.33333333333, ans=10.0
+2024-08-25 12:52:24,933 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.38 vs. limit=22.5
+2024-08-25 12:52:34,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=84832.0, ans=0.125
+2024-08-25 12:52:58,083 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.05 vs. limit=15.0
+2024-08-25 12:52:59,184 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.065e+02 2.373e+02 2.949e+02 1.128e+03, threshold=4.746e+02, percent-clipped=6.0
+2024-08-25 12:53:05,274 INFO [train.py:1114] (0/4) Epoch 7, batch 1000, loss[loss=0.2316, simple_loss=0.2876, pruned_loss=0.06288, ctc_loss=0.1245, over 19858.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.3157, pruned_loss=0.0842, ctc_loss=0.1582, over 3816644.91 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:53:05,512 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=84992.0, ans=0.05
+2024-08-25 12:53:14,635 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.98 vs. limit=15.0
+2024-08-25 12:53:15,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=84992.0, ans=0.1
+2024-08-25 12:53:22,753 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=85045.33333333333, ans=0.125
+2024-08-25 12:53:28,042 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.48 vs. limit=15.0
+2024-08-25 12:53:28,309 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.20 vs. limit=6.0
+2024-08-25 12:53:47,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=85152.0, ans=0.025
+2024-08-25 12:53:51,767 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.95 vs. limit=15.0
+2024-08-25 12:53:57,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=85205.33333333333, ans=0.0
+2024-08-25 12:54:01,124 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.07 vs. limit=15.0
+2024-08-25 12:54:05,085 INFO [train.py:1114] (0/4) Epoch 7, batch 1050, loss[loss=0.2693, simple_loss=0.3247, pruned_loss=0.07707, ctc_loss=0.1496, over 19864.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3151, pruned_loss=0.08383, ctc_loss=0.1573, over 3823808.53 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:54:15,079 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.86 vs. limit=15.0
+2024-08-25 12:54:16,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=85312.0, ans=0.1
+2024-08-25 12:54:20,411 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-16000.pt
+2024-08-25 12:54:22,207 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=85312.0, ans=0.125
+2024-08-25 12:54:25,591 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=85312.0, ans=0.5
+2024-08-25 12:54:37,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=85365.33333333333, ans=0.125
+2024-08-25 12:54:42,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-25 12:54:50,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=85418.66666666667, ans=0.0
+2024-08-25 12:54:51,796 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=85472.0, ans=0.125
+2024-08-25 12:54:55,799 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.98 vs. limit=22.5
+2024-08-25 12:54:56,469 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=85472.0, ans=0.07
+2024-08-25 12:55:01,663 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 1.918e+02 2.325e+02 2.776e+02 4.591e+02, threshold=4.650e+02, percent-clipped=1.0
+2024-08-25 12:55:03,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=85472.0, ans=0.125
+2024-08-25 12:55:06,550 INFO [train.py:1114] (0/4) Epoch 7, batch 1100, loss[loss=0.2567, simple_loss=0.3039, pruned_loss=0.07575, ctc_loss=0.1448, over 19601.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3139, pruned_loss=0.08283, ctc_loss=0.1558, over 3831458.50 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:55:06,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85525.33333333333, ans=0.1
+2024-08-25 12:55:22,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=85578.66666666667, ans=0.0
+2024-08-25 12:55:22,422 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.57 vs. limit=15.0
+2024-08-25 12:55:45,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=85685.33333333333, ans=0.0
+2024-08-25 12:56:01,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85738.66666666667, ans=0.1
+2024-08-25 12:56:01,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=85738.66666666667, ans=0.125
+2024-08-25 12:56:05,772 INFO [train.py:1114] (0/4) Epoch 7, batch 1150, loss[loss=0.2738, simple_loss=0.3086, pruned_loss=0.08745, ctc_loss=0.1604, over 19588.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3141, pruned_loss=0.08323, ctc_loss=0.1564, over 3830327.90 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:56:05,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=85792.0, ans=0.125
+2024-08-25 12:56:08,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=85792.0, ans=0.0
+2024-08-25 12:56:24,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=85845.33333333333, ans=0.125
+2024-08-25 12:56:27,581 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=85845.33333333333, ans=0.125
+2024-08-25 12:56:35,354 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.40 vs. limit=22.5
+2024-08-25 12:56:44,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=85952.0, ans=0.0
+2024-08-25 12:56:51,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=85952.0, ans=0.0
+2024-08-25 12:57:02,980 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.959e+02 2.167e+02 2.666e+02 4.946e+02, threshold=4.335e+02, percent-clipped=2.0
+2024-08-25 12:57:07,697 INFO [train.py:1114] (0/4) Epoch 7, batch 1200, loss[loss=0.26, simple_loss=0.3188, pruned_loss=0.07313, ctc_loss=0.1372, over 19850.00 frames. ], tot_loss[loss=0.2731, simple_loss=0.3154, pruned_loss=0.08389, ctc_loss=0.1576, over 3826209.33 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:57:27,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=86112.0, ans=0.0
+2024-08-25 12:57:52,159 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.65 vs. limit=15.0
+2024-08-25 12:58:05,929 INFO [train.py:1114] (0/4) Epoch 7, batch 1250, loss[loss=0.2737, simple_loss=0.3181, pruned_loss=0.08357, ctc_loss=0.1555, over 19523.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3152, pruned_loss=0.08324, ctc_loss=0.1563, over 3844367.75 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:58:07,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=86325.33333333333, ans=0.1
+2024-08-25 12:58:23,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=86378.66666666667, ans=0.2
+2024-08-25 12:58:24,779 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.40 vs. limit=15.0
+2024-08-25 12:58:26,344 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.56 vs. limit=15.0
+2024-08-25 12:58:47,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=86485.33333333333, ans=0.125
+2024-08-25 12:58:50,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.72 vs. limit=10.0
+2024-08-25 12:58:57,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=86538.66666666667, ans=0.025
+2024-08-25 12:59:02,864 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.964e+02 2.304e+02 2.729e+02 5.465e+02, threshold=4.608e+02, percent-clipped=2.0
+2024-08-25 12:59:07,509 INFO [train.py:1114] (0/4) Epoch 7, batch 1300, loss[loss=0.2903, simple_loss=0.3306, pruned_loss=0.09217, ctc_loss=0.1643, over 18932.00 frames. ], tot_loss[loss=0.2706, simple_loss=0.314, pruned_loss=0.08255, ctc_loss=0.1551, over 3847421.45 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:59:12,720 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.70 vs. limit=6.0
+2024-08-25 12:59:18,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=86645.33333333333, ans=0.025
+2024-08-25 12:59:34,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=86698.66666666667, ans=0.04949747468305833
+2024-08-25 12:59:47,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=86752.0, ans=0.125
+2024-08-25 12:59:50,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten.whitening_limit, batch_count=86752.0, ans=15.0
+2024-08-25 12:59:53,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=86805.33333333333, ans=0.125
+2024-08-25 13:00:07,956 INFO [train.py:1114] (0/4) Epoch 7, batch 1350, loss[loss=0.2608, simple_loss=0.3113, pruned_loss=0.07692, ctc_loss=0.1411, over 19771.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3125, pruned_loss=0.08148, ctc_loss=0.1534, over 3858190.88 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 13:00:18,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=86912.0, ans=0.125
+2024-08-25 13:00:30,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=86965.33333333333, ans=0.125
+2024-08-25 13:00:46,455 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.86 vs. limit=15.0
+2024-08-25 13:00:47,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=87018.66666666667, ans=0.125
+2024-08-25 13:01:52,686 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:01:55,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=87072.0, ans=0.125
+2024-08-25 13:01:59,609 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.935e+02 2.309e+02 3.009e+02 4.449e+02, threshold=4.618e+02, percent-clipped=0.0
+2024-08-25 13:02:02,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=87072.0, ans=0.025
+2024-08-25 13:02:04,190 INFO [train.py:1114] (0/4) Epoch 7, batch 1400, loss[loss=0.2105, simple_loss=0.2614, pruned_loss=0.05829, ctc_loss=0.1073, over 19698.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3113, pruned_loss=0.08075, ctc_loss=0.1518, over 3866044.91 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-25 13:02:08,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-25 13:02:21,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=87178.66666666667, ans=0.0
+2024-08-25 13:02:28,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=87232.0, ans=0.125
+2024-08-25 13:02:28,956 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=87232.0, ans=0.125
+2024-08-25 13:02:46,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=87285.33333333333, ans=0.125
+2024-08-25 13:02:57,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=87338.66666666667, ans=0.0
+2024-08-25 13:03:03,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=87338.66666666667, ans=0.0
+2024-08-25 13:03:05,423 INFO [train.py:1114] (0/4) Epoch 7, batch 1450, loss[loss=0.2903, simple_loss=0.3306, pruned_loss=0.09105, ctc_loss=0.1698, over 19693.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.313, pruned_loss=0.08176, ctc_loss=0.1539, over 3862692.13 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:03:34,077 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.02 vs. limit=15.0
+2024-08-25 13:04:22,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=87552.0, ans=0.125
+2024-08-25 13:04:37,116 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.77 vs. limit=15.0
+2024-08-25 13:04:46,566 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.015e+02 2.285e+02 2.716e+02 4.465e+02, threshold=4.569e+02, percent-clipped=0.0
+2024-08-25 13:04:50,192 INFO [train.py:1114] (0/4) Epoch 7, batch 1500, loss[loss=0.2774, simple_loss=0.3229, pruned_loss=0.08512, ctc_loss=0.1539, over 19570.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3131, pruned_loss=0.08158, ctc_loss=0.1537, over 3861815.85 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:04:52,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=87658.66666666667, ans=0.125
+2024-08-25 13:05:02,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=87712.0, ans=0.0
+2024-08-25 13:05:05,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=87712.0, ans=0.125
+2024-08-25 13:05:21,566 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.28 vs. limit=15.0
+2024-08-25 13:05:48,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=87872.0, ans=0.1
+2024-08-25 13:05:57,452 INFO [train.py:1114] (0/4) Epoch 7, batch 1550, loss[loss=0.2836, simple_loss=0.3285, pruned_loss=0.08524, ctc_loss=0.1706, over 19600.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3137, pruned_loss=0.08241, ctc_loss=0.1551, over 3847484.92 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:06:07,016 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=87925.33333333333, ans=0.025
+2024-08-25 13:06:48,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=88138.66666666667, ans=0.2
+2024-08-25 13:06:55,900 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.880e+02 2.225e+02 2.757e+02 4.141e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 13:07:00,952 INFO [train.py:1114] (0/4) Epoch 7, batch 1600, loss[loss=0.2633, simple_loss=0.3169, pruned_loss=0.07723, ctc_loss=0.1381, over 19840.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.314, pruned_loss=0.08268, ctc_loss=0.1557, over 3836479.69 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:07:08,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=88192.0, ans=0.125
+2024-08-25 13:07:35,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=88352.0, ans=0.0
+2024-08-25 13:07:36,930 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:07:41,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=88352.0, ans=0.125
+2024-08-25 13:07:47,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=88405.33333333333, ans=0.125
+2024-08-25 13:07:58,851 INFO [train.py:1114] (0/4) Epoch 7, batch 1650, loss[loss=0.2839, simple_loss=0.3246, pruned_loss=0.08711, ctc_loss=0.1727, over 19659.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3137, pruned_loss=0.0828, ctc_loss=0.1559, over 3832847.28 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:08:11,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=88512.0, ans=0.125
+2024-08-25 13:08:13,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=88512.0, ans=10.0
+2024-08-25 13:08:20,665 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.92 vs. limit=5.0
+2024-08-25 13:08:23,311 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=88565.33333333333, ans=0.125
+2024-08-25 13:08:29,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=88565.33333333333, ans=0.0
+2024-08-25 13:08:44,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=88618.66666666667, ans=15.0
+2024-08-25 13:08:54,965 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.917e+02 2.131e+02 2.729e+02 4.248e+02, threshold=4.261e+02, percent-clipped=0.0
+2024-08-25 13:08:58,388 INFO [train.py:1114] (0/4) Epoch 7, batch 1700, loss[loss=0.2408, simple_loss=0.2808, pruned_loss=0.07218, ctc_loss=0.1414, over 19668.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3131, pruned_loss=0.08185, ctc_loss=0.1542, over 3847638.34 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:09:17,215 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_ff3.min_abs, batch_count=88778.66666666667, ans=0.2
+2024-08-25 13:09:21,044 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.67 vs. limit=15.0
+2024-08-25 13:09:28,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=88832.0, ans=0.2
+2024-08-25 13:09:37,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=88885.33333333333, ans=0.125
+2024-08-25 13:09:42,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=88885.33333333333, ans=0.2
+2024-08-25 13:09:54,518 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.95 vs. limit=15.0
+2024-08-25 13:09:55,078 INFO [train.py:1114] (0/4) Epoch 7, batch 1750, loss[loss=0.2146, simple_loss=0.2593, pruned_loss=0.06182, ctc_loss=0.1159, over 19664.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3131, pruned_loss=0.08226, ctc_loss=0.1549, over 3851910.98 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:10:00,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=88992.0, ans=0.2
+2024-08-25 13:11:06,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=89045.33333333333, ans=0.0
+2024-08-25 13:16:07,771 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.27 vs. limit=22.5
+2024-08-25 13:16:10,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=89045.33333333333, ans=0.2
+2024-08-25 13:17:36,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.10 vs. limit=6.0
+2024-08-25 13:25:11,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=89205.33333333333, ans=0.125
+2024-08-25 13:29:44,214 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.972e+02 2.344e+02 2.828e+02 4.449e+02, threshold=4.688e+02, percent-clipped=1.0
+2024-08-25 13:29:47,701 INFO [train.py:1114] (0/4) Epoch 7, batch 1800, loss[loss=0.2784, simple_loss=0.3218, pruned_loss=0.08497, ctc_loss=0.1629, over 19609.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3131, pruned_loss=0.08212, ctc_loss=0.1545, over 3853707.10 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:38:42,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=89418.66666666667, ans=0.125
+2024-08-25 13:40:18,458 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.27 vs. limit=10.0
+2024-08-25 13:40:34,852 INFO [train.py:1114] (0/4) Epoch 7, batch 1850, loss[loss=0.2608, simple_loss=0.3166, pruned_loss=0.07464, ctc_loss=0.1392, over 19566.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3122, pruned_loss=0.08148, ctc_loss=0.1532, over 3856339.26 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:41:16,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=89525.33333333333, ans=0.125
+2024-08-25 13:41:51,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=89578.66666666667, ans=0.125
+2024-08-25 13:41:51,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=89578.66666666667, ans=0.07
+2024-08-25 13:43:05,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=89632.0, ans=0.0
+2024-08-25 13:43:22,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89685.33333333333, ans=0.1
+2024-08-25 13:43:46,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=89738.66666666667, ans=0.2
+2024-08-25 13:43:46,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=89738.66666666667, ans=0.0
+2024-08-25 13:44:01,303 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.852e+02 2.070e+02 2.397e+02 4.608e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-25 13:44:07,949 INFO [train.py:1114] (0/4) Epoch 7, batch 1900, loss[loss=0.2696, simple_loss=0.3181, pruned_loss=0.08036, ctc_loss=0.1509, over 19679.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.313, pruned_loss=0.08166, ctc_loss=0.1535, over 3861764.73 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:44:16,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89792.0, ans=0.1
+2024-08-25 13:44:27,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.74 vs. limit=15.0
+2024-08-25 13:44:33,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=89845.33333333333, ans=0.0
+2024-08-25 13:44:33,455 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:45:00,267 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.95 vs. limit=15.0
+2024-08-25 13:45:36,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=90005.33333333333, ans=0.025
+2024-08-25 13:45:41,212 INFO [train.py:1114] (0/4) Epoch 7, batch 1950, loss[loss=0.2592, simple_loss=0.3131, pruned_loss=0.07461, ctc_loss=0.1399, over 19594.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3135, pruned_loss=0.08135, ctc_loss=0.153, over 3871132.26 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 16.0
+2024-08-25 13:45:41,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=90058.66666666667, ans=0.2
+2024-08-25 13:45:55,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=90112.0, ans=0.025
+2024-08-25 13:45:56,217 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=90112.0, ans=0.025
+2024-08-25 13:46:42,768 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 1.896e+02 2.177e+02 2.703e+02 3.964e+02, threshold=4.354e+02, percent-clipped=0.0
+2024-08-25 13:46:45,048 INFO [train.py:1114] (0/4) Epoch 7, batch 2000, loss[loss=0.2469, simple_loss=0.2853, pruned_loss=0.07439, ctc_loss=0.1491, over 19665.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.314, pruned_loss=0.08184, ctc_loss=0.1539, over 3856701.82 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:46:46,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=90325.33333333333, ans=0.125
+2024-08-25 13:46:58,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=90378.66666666667, ans=0.0
+2024-08-25 13:46:59,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=90378.66666666667, ans=0.05
+2024-08-25 13:47:00,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=90378.66666666667, ans=0.125
+2024-08-25 13:47:04,088 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:47:11,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.06 vs. limit=15.0
+2024-08-25 13:47:12,610 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.74 vs. limit=22.5
+2024-08-25 13:47:39,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=90538.66666666667, ans=0.1
+2024-08-25 13:47:41,004 INFO [train.py:1114] (0/4) Epoch 7, batch 2050, loss[loss=0.2526, simple_loss=0.2923, pruned_loss=0.07794, ctc_loss=0.1427, over 19720.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3123, pruned_loss=0.08111, ctc_loss=0.1525, over 3852774.23 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:47:47,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=90592.0, ans=0.125
+2024-08-25 13:47:49,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=90592.0, ans=0.0
+2024-08-25 13:47:49,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=90592.0, ans=0.025
+2024-08-25 13:47:57,119 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.78 vs. limit=15.0
+2024-08-25 13:48:07,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.51 vs. limit=15.0
+2024-08-25 13:48:16,866 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=90752.0, ans=0.0
+2024-08-25 13:48:16,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=90752.0, ans=0.1
+2024-08-25 13:48:22,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=90752.0, ans=0.0
+2024-08-25 13:48:30,274 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.18 vs. limit=15.0
+2024-08-25 13:48:32,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=90805.33333333333, ans=0.2
+2024-08-25 13:48:36,344 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.053e+02 2.413e+02 3.017e+02 5.203e+02, threshold=4.827e+02, percent-clipped=2.0
+2024-08-25 13:48:38,583 INFO [train.py:1114] (0/4) Epoch 7, batch 2100, loss[loss=0.2702, simple_loss=0.3172, pruned_loss=0.08129, ctc_loss=0.1513, over 19765.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3116, pruned_loss=0.08034, ctc_loss=0.1514, over 3859772.11 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:48:42,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=90858.66666666667, ans=0.0
+2024-08-25 13:48:43,603 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=16.98 vs. limit=22.5
+2024-08-25 13:48:58,914 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.68 vs. limit=15.0
+2024-08-25 13:48:59,029 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.10 vs. limit=12.0
+2024-08-25 13:48:59,306 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.21 vs. limit=12.0
+2024-08-25 13:49:12,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=90965.33333333333, ans=0.125
+2024-08-25 13:49:20,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-25 13:49:23,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=91018.66666666667, ans=0.0
+2024-08-25 13:49:24,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=91018.66666666667, ans=0.2
+2024-08-25 13:49:29,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-25 13:49:43,250 INFO [train.py:1114] (0/4) Epoch 7, batch 2150, loss[loss=0.2382, simple_loss=0.2926, pruned_loss=0.06673, ctc_loss=0.126, over 19586.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.3106, pruned_loss=0.07981, ctc_loss=0.1502, over 3870090.82 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:49:47,972 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.96 vs. limit=6.0
+2024-08-25 13:49:58,269 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.48 vs. limit=15.0
+2024-08-25 13:50:04,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=91232.0, ans=0.125
+2024-08-25 13:50:09,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=91232.0, ans=0.0
+2024-08-25 13:50:13,305 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=14.74 vs. limit=15.0
+2024-08-25 13:50:14,247 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=12.0
+2024-08-25 13:50:14,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=91232.0, ans=0.125
+2024-08-25 13:50:17,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=91285.33333333333, ans=0.125
+2024-08-25 13:50:20,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=91285.33333333333, ans=0.2
+2024-08-25 13:50:33,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=91338.66666666667, ans=0.0
+2024-08-25 13:50:36,454 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 1.920e+02 2.200e+02 2.924e+02 5.090e+02, threshold=4.400e+02, percent-clipped=1.0
+2024-08-25 13:50:39,149 INFO [train.py:1114] (0/4) Epoch 7, batch 2200, loss[loss=0.2755, simple_loss=0.3197, pruned_loss=0.08384, ctc_loss=0.1592, over 19566.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.3104, pruned_loss=0.07953, ctc_loss=0.1498, over 3867872.96 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-25 13:51:01,106 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.40 vs. limit=6.0
+2024-08-25 13:51:01,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=91498.66666666667, ans=0.0
+2024-08-25 13:51:34,995 INFO [train.py:1114] (0/4) Epoch 7, batch 2250, loss[loss=0.2767, simple_loss=0.3224, pruned_loss=0.08377, ctc_loss=0.1587, over 19606.00 frames. ], tot_loss[loss=0.2658, simple_loss=0.311, pruned_loss=0.08012, ctc_loss=0.151, over 3867908.29 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:51:43,778 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=91658.66666666667, ans=0.125
+2024-08-25 13:52:03,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=91765.33333333333, ans=0.0
+2024-08-25 13:52:26,372 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=91872.0, ans=0.125
+2024-08-25 13:52:28,412 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.146e+02 2.677e+02 3.204e+02 4.930e+02, threshold=5.354e+02, percent-clipped=3.0
+2024-08-25 13:52:29,565 INFO [train.py:1114] (0/4) Epoch 7, batch 2300, loss[loss=0.2607, simple_loss=0.2975, pruned_loss=0.08062, ctc_loss=0.1565, over 19477.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.3105, pruned_loss=0.08033, ctc_loss=0.1514, over 3861624.74 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:52:31,979 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91925.33333333333, ans=0.1
+2024-08-25 13:52:39,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=91925.33333333333, ans=6.0
+2024-08-25 13:53:02,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff2.min_abs, batch_count=92085.33333333333, ans=0.1
+2024-08-25 13:53:03,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=92085.33333333333, ans=0.0
+2024-08-25 13:53:18,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=92138.66666666667, ans=0.125
+2024-08-25 13:53:23,181 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.70 vs. limit=15.0
+2024-08-25 13:53:25,154 INFO [train.py:1114] (0/4) Epoch 7, batch 2350, loss[loss=0.2519, simple_loss=0.3092, pruned_loss=0.07141, ctc_loss=0.1296, over 19667.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.3104, pruned_loss=0.08039, ctc_loss=0.1508, over 3864539.95 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:53:32,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_na.min_abs, batch_count=92192.0, ans=0.02
+2024-08-25 13:54:02,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=92352.0, ans=0.2
+2024-08-25 13:54:12,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=92405.33333333333, ans=0.125
+2024-08-25 13:54:18,229 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.985e+02 2.336e+02 2.802e+02 4.974e+02, threshold=4.671e+02, percent-clipped=0.0
+2024-08-25 13:54:18,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=92458.66666666667, ans=0.125
+2024-08-25 13:54:19,292 INFO [train.py:1114] (0/4) Epoch 7, batch 2400, loss[loss=0.2661, simple_loss=0.318, pruned_loss=0.07833, ctc_loss=0.1439, over 19393.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3123, pruned_loss=0.08096, ctc_loss=0.1517, over 3858854.77 frames. ], batch size: 67, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:54:32,288 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.89 vs. limit=15.0
+2024-08-25 13:54:45,316 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.43 vs. limit=22.5
+2024-08-25 13:55:46,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=92565.33333333333, ans=0.0
+2024-08-25 13:55:58,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=92618.66666666667, ans=0.0
+2024-08-25 13:56:12,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=92725.33333333333, ans=0.125
+2024-08-25 13:56:13,546 INFO [train.py:1114] (0/4) Epoch 7, batch 2450, loss[loss=0.3284, simple_loss=0.3446, pruned_loss=0.114, ctc_loss=0.2103, over 14230.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3184, pruned_loss=0.08592, ctc_loss=0.1611, over 3733452.56 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:56:27,277 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.93 vs. limit=10.0
+2024-08-25 13:56:28,378 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=92778.66666666667, ans=10.0
+2024-08-25 13:56:29,519 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:56:55,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=92832.0, ans=0.125
+2024-08-25 13:56:57,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=92885.33333333333, ans=0.125
+2024-08-25 13:57:06,705 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-7.pt
+2024-08-25 13:57:54,269 INFO [train.py:1114] (0/4) Epoch 8, batch 0, loss[loss=0.2463, simple_loss=0.2936, pruned_loss=0.07354, ctc_loss=0.1299, over 19412.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.2936, pruned_loss=0.07354, ctc_loss=0.1299, over 19412.00 frames. ], batch size: 48, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 13:57:54,271 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 13:59:56,300 INFO [train.py:1146] (0/4) Epoch 8, validation: loss=0.2171, simple_loss=0.2997, pruned_loss=0.04948, ctc_loss=0.08904, over 944034.00 frames.
+2024-08-25 13:59:56,301 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 13:59:56,770 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.17 vs. limit=15.0
+2024-08-25 14:01:03,642 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.158e+02 2.483e+02 2.902e+02 5.180e+02, threshold=4.965e+02, percent-clipped=2.0
+2024-08-25 14:01:11,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=92986.66666666667, ans=0.0
+2024-08-25 14:02:02,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=93093.33333333333, ans=0.0
+2024-08-25 14:02:12,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=93146.66666666667, ans=0.2
+2024-08-25 14:02:17,079 INFO [train.py:1114] (0/4) Epoch 8, batch 50, loss[loss=0.2746, simple_loss=0.3056, pruned_loss=0.08734, ctc_loss=0.1724, over 19734.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3179, pruned_loss=0.08434, ctc_loss=0.1606, over 844483.73 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:02:30,500 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.00 vs. limit=15.0
+2024-08-25 14:02:57,388 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.70 vs. limit=15.0
+2024-08-25 14:05:03,229 INFO [train.py:1114] (0/4) Epoch 8, batch 100, loss[loss=0.2297, simple_loss=0.2863, pruned_loss=0.0629, ctc_loss=0.1181, over 19716.00 frames. ], tot_loss[loss=0.2705, simple_loss=0.315, pruned_loss=0.08195, ctc_loss=0.1552, over 1499106.47 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:05:09,977 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.53 vs. limit=15.0
+2024-08-25 14:05:14,929 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.910e+02 2.219e+02 2.660e+02 5.043e+02, threshold=4.439e+02, percent-clipped=1.0
+2024-08-25 14:05:22,265 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.27 vs. limit=15.0
+2024-08-25 14:05:24,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=93520.0, ans=0.1
+2024-08-25 14:05:32,384 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:05:49,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=93626.66666666667, ans=0.125
+2024-08-25 14:07:16,380 INFO [train.py:1114] (0/4) Epoch 8, batch 150, loss[loss=0.2105, simple_loss=0.2682, pruned_loss=0.05567, ctc_loss=0.1033, over 19697.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3119, pruned_loss=0.08035, ctc_loss=0.1523, over 2026666.89 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:07:17,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=93733.33333333333, ans=0.125
+2024-08-25 14:07:26,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=93786.66666666667, ans=0.125
+2024-08-25 14:08:13,931 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.95 vs. limit=6.0
+2024-08-25 14:09:06,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=93840.0, ans=0.0
+2024-08-25 14:09:12,471 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=19.11 vs. limit=22.5
+2024-08-25 14:09:17,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.21 vs. limit=15.0
+2024-08-25 14:10:16,267 INFO [train.py:1114] (0/4) Epoch 8, batch 200, loss[loss=0.2821, simple_loss=0.327, pruned_loss=0.08547, ctc_loss=0.1659, over 18453.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3087, pruned_loss=0.07834, ctc_loss=0.1483, over 2434139.91 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:10:16,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=94000.0, ans=0.1
+2024-08-25 14:10:29,233 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.854e+02 2.093e+02 2.544e+02 5.078e+02, threshold=4.187e+02, percent-clipped=1.0
+2024-08-25 14:10:32,049 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.59 vs. limit=6.0
+2024-08-25 14:10:40,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=94106.66666666667, ans=0.0
+2024-08-25 14:10:45,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=94106.66666666667, ans=0.2
+2024-08-25 14:10:45,388 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=94106.66666666667, ans=0.2
+2024-08-25 14:10:49,779 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.78 vs. limit=22.5
+2024-08-25 14:11:16,475 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.76 vs. limit=10.0
+2024-08-25 14:11:17,845 INFO [train.py:1114] (0/4) Epoch 8, batch 250, loss[loss=0.2882, simple_loss=0.3273, pruned_loss=0.09055, ctc_loss=0.17, over 19396.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3086, pruned_loss=0.07842, ctc_loss=0.1482, over 2754429.15 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:11:28,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=94266.66666666667, ans=0.0
+2024-08-25 14:12:37,743 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.62 vs. limit=15.0
+2024-08-25 14:12:49,962 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.69 vs. limit=10.0
+2024-08-25 14:13:15,487 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=94480.0, ans=0.125
+2024-08-25 14:13:21,909 INFO [train.py:1114] (0/4) Epoch 8, batch 300, loss[loss=0.2922, simple_loss=0.3346, pruned_loss=0.09057, ctc_loss=0.1716, over 19547.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3083, pruned_loss=0.07814, ctc_loss=0.1476, over 2999392.35 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:13:33,355 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 1.987e+02 2.340e+02 3.022e+02 6.047e+02, threshold=4.681e+02, percent-clipped=9.0
+2024-08-25 14:13:37,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=94586.66666666667, ans=0.125
+2024-08-25 14:13:40,957 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.17 vs. limit=15.0
+2024-08-25 14:13:44,163 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=94640.0, ans=0.125
+2024-08-25 14:14:20,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=94640.0, ans=0.125
+2024-08-25 14:14:35,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=94693.33333333333, ans=0.125
+2024-08-25 14:14:52,113 INFO [train.py:1114] (0/4) Epoch 8, batch 350, loss[loss=0.2479, simple_loss=0.2877, pruned_loss=0.07635, ctc_loss=0.1387, over 19784.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3084, pruned_loss=0.07787, ctc_loss=0.147, over 3189369.02 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:14:59,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_ff2.min_abs, batch_count=94800.0, ans=0.1
+2024-08-25 14:15:03,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-25 14:15:52,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=94906.66666666667, ans=0.2
+2024-08-25 14:15:59,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=94906.66666666667, ans=0.125
+2024-08-25 14:16:35,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=94960.0, ans=0.0
+2024-08-25 14:16:35,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=94960.0, ans=15.0
+2024-08-25 14:16:39,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=95013.33333333333, ans=0.025
+2024-08-25 14:16:44,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=95013.33333333333, ans=0.125
+2024-08-25 14:16:50,836 INFO [train.py:1114] (0/4) Epoch 8, batch 400, loss[loss=0.2641, simple_loss=0.3158, pruned_loss=0.07684, ctc_loss=0.1469, over 19501.00 frames. ], tot_loss[loss=0.261, simple_loss=0.308, pruned_loss=0.07768, ctc_loss=0.1465, over 3341730.71 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:16:58,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.98 vs. limit=15.0
+2024-08-25 14:17:03,875 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.019e+02 2.528e+02 3.132e+02 5.852e+02, threshold=5.056e+02, percent-clipped=7.0
+2024-08-25 14:17:11,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=95120.0, ans=0.07
+2024-08-25 14:17:35,627 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.49 vs. limit=15.0
+2024-08-25 14:18:38,391 INFO [train.py:1114] (0/4) Epoch 8, batch 450, loss[loss=0.2345, simple_loss=0.2966, pruned_loss=0.06307, ctc_loss=0.1155, over 19589.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3082, pruned_loss=0.07784, ctc_loss=0.1466, over 3449127.76 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:19:15,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=95493.33333333333, ans=0.1
+2024-08-25 14:19:22,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=95493.33333333333, ans=0.125
+2024-08-25 14:19:23,992 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.00 vs. limit=15.0
+2024-08-25 14:19:39,041 INFO [train.py:1114] (0/4) Epoch 8, batch 500, loss[loss=0.2956, simple_loss=0.3363, pruned_loss=0.09344, ctc_loss=0.1699, over 19623.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3078, pruned_loss=0.07801, ctc_loss=0.1469, over 3545194.71 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:21:37,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=95600.0, ans=0.125
+2024-08-25 14:21:42,075 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.925e+02 2.242e+02 2.655e+02 4.786e+02, threshold=4.483e+02, percent-clipped=0.0
+2024-08-25 14:21:46,232 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.37 vs. limit=12.0
+2024-08-25 14:21:48,448 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.61 vs. limit=6.0
+2024-08-25 14:21:52,079 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.27 vs. limit=22.5
+2024-08-25 14:22:16,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=95760.0, ans=0.125
+2024-08-25 14:22:32,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=95813.33333333333, ans=0.025
+2024-08-25 14:22:36,090 INFO [train.py:1114] (0/4) Epoch 8, batch 550, loss[loss=0.2597, simple_loss=0.3127, pruned_loss=0.07395, ctc_loss=0.147, over 19305.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3072, pruned_loss=0.07761, ctc_loss=0.1462, over 3607776.32 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:22:39,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95866.66666666667, ans=0.1
+2024-08-25 14:22:43,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=95866.66666666667, ans=0.125
+2024-08-25 14:25:33,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=96080.0, ans=0.125
+2024-08-25 14:25:43,160 INFO [train.py:1114] (0/4) Epoch 8, batch 600, loss[loss=0.2569, simple_loss=0.3155, pruned_loss=0.07205, ctc_loss=0.1354, over 19373.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3078, pruned_loss=0.07758, ctc_loss=0.1462, over 3665938.20 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:25:54,320 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 1.975e+02 2.461e+02 2.998e+02 6.685e+02, threshold=4.922e+02, percent-clipped=2.0
+2024-08-25 14:26:00,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=96186.66666666667, ans=0.0
+2024-08-25 14:26:31,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=96293.33333333333, ans=0.0
+2024-08-25 14:27:47,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96346.66666666667, ans=0.1
+2024-08-25 14:27:49,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=96346.66666666667, ans=0.0
+2024-08-25 14:29:19,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=96346.66666666667, ans=0.1
+2024-08-25 14:29:21,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=96400.0, ans=0.5
+2024-08-25 14:29:23,566 INFO [train.py:1114] (0/4) Epoch 8, batch 650, loss[loss=0.2501, simple_loss=0.3006, pruned_loss=0.07265, ctc_loss=0.1359, over 19765.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3071, pruned_loss=0.07749, ctc_loss=0.1458, over 3715955.42 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:29:25,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=96400.0, ans=0.025
+2024-08-25 14:29:35,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=96453.33333333333, ans=0.125
+2024-08-25 14:29:45,577 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=2.595e+00
+2024-08-25 14:29:45,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=96453.33333333333, ans=0.125
+2024-08-25 14:29:52,669 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.06 vs. limit=12.0
+2024-08-25 14:30:53,907 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.56 vs. limit=15.0
+2024-08-25 14:31:05,188 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=96613.33333333333, ans=0.125
+2024-08-25 14:31:23,487 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=96666.66666666667, ans=0.125
+2024-08-25 14:31:24,401 INFO [train.py:1114] (0/4) Epoch 8, batch 700, loss[loss=0.2615, simple_loss=0.3072, pruned_loss=0.07793, ctc_loss=0.1499, over 19717.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3074, pruned_loss=0.07763, ctc_loss=0.1461, over 3747733.37 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:31:36,080 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 1.952e+02 2.228e+02 2.907e+02 4.140e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 14:31:47,337 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.77 vs. limit=15.0
+2024-08-25 14:32:15,147 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.95 vs. limit=6.0
+2024-08-25 14:32:35,090 INFO [train.py:1114] (0/4) Epoch 8, batch 750, loss[loss=0.2483, simple_loss=0.307, pruned_loss=0.06917, ctc_loss=0.1282, over 19505.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3067, pruned_loss=0.07677, ctc_loss=0.1447, over 3773504.01 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:32:43,475 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.42 vs. limit=15.0
+2024-08-25 14:32:49,424 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.21 vs. limit=22.5
+2024-08-25 14:33:00,285 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.80 vs. limit=22.5
+2024-08-25 14:33:10,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=97040.0, ans=0.125
+2024-08-25 14:33:15,375 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:33:16,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97040.0, ans=0.1
+2024-08-25 14:33:41,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97146.66666666667, ans=0.1
+2024-08-25 14:33:45,414 INFO [train.py:1114] (0/4) Epoch 8, batch 800, loss[loss=0.206, simple_loss=0.2654, pruned_loss=0.05211, ctc_loss=0.1057, over 19402.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3061, pruned_loss=0.07637, ctc_loss=0.1439, over 3795054.21 frames. ], batch size: 48, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:34:35,081 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 1.855e+02 2.176e+02 2.933e+02 4.905e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-25 14:34:40,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=97253.33333333333, ans=0.125
+2024-08-25 14:35:14,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=97413.33333333333, ans=10.0
+2024-08-25 14:35:22,274 INFO [train.py:1114] (0/4) Epoch 8, batch 850, loss[loss=0.3085, simple_loss=0.3432, pruned_loss=0.09822, ctc_loss=0.1932, over 19658.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3062, pruned_loss=0.07683, ctc_loss=0.1446, over 3814413.34 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:35:30,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=97466.66666666667, ans=0.0
+2024-08-25 14:35:32,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=97520.0, ans=0.0
+2024-08-25 14:35:39,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=97520.0, ans=0.125
+2024-08-25 14:35:39,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=97520.0, ans=0.125
+2024-08-25 14:35:39,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=97520.0, ans=0.1
+2024-08-25 14:35:40,755 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=97520.0, ans=0.125
+2024-08-25 14:36:02,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=97626.66666666667, ans=0.0
+2024-08-25 14:36:16,719 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=97680.0, ans=0.125
+2024-08-25 14:36:19,790 INFO [train.py:1114] (0/4) Epoch 8, batch 900, loss[loss=0.2492, simple_loss=0.292, pruned_loss=0.07527, ctc_loss=0.1395, over 19397.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.307, pruned_loss=0.07731, ctc_loss=0.1453, over 3817837.83 frames. ], batch size: 48, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:38:25,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=97733.33333333333, ans=0.125
+2024-08-25 14:38:27,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=97733.33333333333, ans=0.5
+2024-08-25 14:38:30,487 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 1.935e+02 2.327e+02 2.780e+02 5.034e+02, threshold=4.654e+02, percent-clipped=2.0
+2024-08-25 14:38:34,364 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.49 vs. limit=12.0
+2024-08-25 14:39:07,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=97893.33333333333, ans=0.025
+2024-08-25 14:39:56,385 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.46 vs. limit=15.0
+2024-08-25 14:39:58,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97946.66666666667, ans=0.1
+2024-08-25 14:40:01,367 INFO [train.py:1114] (0/4) Epoch 8, batch 950, loss[loss=0.2638, simple_loss=0.309, pruned_loss=0.08057, ctc_loss=0.1439, over 19485.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3074, pruned_loss=0.07755, ctc_loss=0.1458, over 3818995.90 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:40:20,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=98053.33333333333, ans=0.125
+2024-08-25 14:41:23,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=98106.66666666667, ans=0.125
+2024-08-25 14:43:24,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=98213.33333333333, ans=0.125
+2024-08-25 14:43:29,291 INFO [train.py:1114] (0/4) Epoch 8, batch 1000, loss[loss=0.2315, simple_loss=0.2851, pruned_loss=0.06381, ctc_loss=0.1257, over 19854.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.308, pruned_loss=0.07794, ctc_loss=0.1464, over 3815625.27 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:43:29,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=98266.66666666667, ans=0.1
+2024-08-25 14:43:47,364 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.014e+02 2.465e+02 3.304e+02 4.205e+02, threshold=4.930e+02, percent-clipped=0.0
+2024-08-25 14:46:06,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=98320.0, ans=0.125
+2024-08-25 14:46:26,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=98426.66666666667, ans=0.125
+2024-08-25 14:46:39,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=98480.0, ans=0.05
+2024-08-25 14:46:44,412 INFO [train.py:1114] (0/4) Epoch 8, batch 1050, loss[loss=0.2549, simple_loss=0.3075, pruned_loss=0.07376, ctc_loss=0.1368, over 19837.00 frames. ], tot_loss[loss=0.2598, simple_loss=0.3067, pruned_loss=0.07734, ctc_loss=0.1453, over 3820525.10 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:46:56,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=98586.66666666667, ans=0.025
+2024-08-25 14:47:26,305 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.75 vs. limit=15.0
+2024-08-25 14:47:30,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=98746.66666666667, ans=0.125
+2024-08-25 14:47:44,583 INFO [train.py:1114] (0/4) Epoch 8, batch 1100, loss[loss=0.2567, simple_loss=0.3089, pruned_loss=0.07486, ctc_loss=0.1371, over 19597.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.3065, pruned_loss=0.07697, ctc_loss=0.145, over 3829314.28 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:47:45,117 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.37 vs. limit=15.0
+2024-08-25 14:48:13,773 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.814e+02 2.071e+02 2.620e+02 3.682e+02, threshold=4.142e+02, percent-clipped=0.0
+2024-08-25 14:48:32,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=98853.33333333333, ans=0.04949747468305833
+2024-08-25 14:49:04,252 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.61 vs. limit=15.0
+2024-08-25 14:49:55,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=99013.33333333333, ans=0.025
+2024-08-25 14:50:00,880 INFO [train.py:1114] (0/4) Epoch 8, batch 1150, loss[loss=0.2687, simple_loss=0.3098, pruned_loss=0.08282, ctc_loss=0.1549, over 19589.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3066, pruned_loss=0.07715, ctc_loss=0.1454, over 3827681.85 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:51:01,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=99066.66666666667, ans=0.125
+2024-08-25 14:51:03,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=99066.66666666667, ans=0.125
+2024-08-25 14:52:00,222 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.57 vs. limit=15.0
+2024-08-25 14:52:03,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten.whitening_limit, batch_count=99226.66666666667, ans=15.0
+2024-08-25 14:52:51,800 INFO [train.py:1114] (0/4) Epoch 8, batch 1200, loss[loss=0.2551, simple_loss=0.3099, pruned_loss=0.07424, ctc_loss=0.1296, over 19848.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3073, pruned_loss=0.07723, ctc_loss=0.1454, over 3824393.63 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:52:55,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=99333.33333333333, ans=0.2
+2024-08-25 14:53:05,860 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.44 vs. limit=15.0
+2024-08-25 14:53:06,263 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.879e+02 2.149e+02 2.634e+02 4.011e+02, threshold=4.298e+02, percent-clipped=0.0
+2024-08-25 14:53:51,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=99600.0, ans=0.025
+2024-08-25 14:53:52,347 INFO [train.py:1114] (0/4) Epoch 8, batch 1250, loss[loss=0.2977, simple_loss=0.3389, pruned_loss=0.09371, ctc_loss=0.173, over 19533.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.308, pruned_loss=0.07739, ctc_loss=0.1454, over 3842788.18 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:55:32,516 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.25 vs. limit=15.0
+2024-08-25 14:56:01,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=99813.33333333333, ans=0.025
+2024-08-25 14:56:05,572 INFO [train.py:1114] (0/4) Epoch 8, batch 1300, loss[loss=0.3116, simple_loss=0.3446, pruned_loss=0.1007, ctc_loss=0.1929, over 18859.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3072, pruned_loss=0.07694, ctc_loss=0.1446, over 3845901.22 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:56:12,112 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.29 vs. limit=12.0
+2024-08-25 14:56:15,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=99866.66666666667, ans=0.025
+2024-08-25 14:56:17,017 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.809e+02 2.147e+02 2.747e+02 4.726e+02, threshold=4.293e+02, percent-clipped=4.0
+2024-08-25 14:56:22,341 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.48 vs. limit=15.0
+2024-08-25 14:58:02,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=100026.66666666667, ans=0.07
+2024-08-25 14:58:50,536 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:58:53,770 INFO [train.py:1114] (0/4) Epoch 8, batch 1350, loss[loss=0.2561, simple_loss=0.3016, pruned_loss=0.07603, ctc_loss=0.1464, over 19769.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.306, pruned_loss=0.07598, ctc_loss=0.1427, over 3857801.95 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:58:58,235 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.42 vs. limit=15.0
+2024-08-25 14:59:06,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=100186.66666666667, ans=0.125
+2024-08-25 14:59:13,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=100186.66666666667, ans=0.09899494936611666
+2024-08-25 14:59:22,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=100240.0, ans=0.2
+2024-08-25 14:59:30,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100293.33333333333, ans=0.125
+2024-08-25 14:59:51,910 INFO [train.py:1114] (0/4) Epoch 8, batch 1400, loss[loss=0.2286, simple_loss=0.2755, pruned_loss=0.06546, ctc_loss=0.1269, over 19669.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3059, pruned_loss=0.07625, ctc_loss=0.143, over 3864552.82 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:00:03,304 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.018e+02 2.600e+02 3.300e+02 7.375e+02, threshold=5.199e+02, percent-clipped=11.0
+2024-08-25 15:00:03,906 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.25 vs. limit=10.0
+2024-08-25 15:00:06,923 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:00:15,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100506.66666666667, ans=0.1
+2024-08-25 15:00:31,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=100560.0, ans=0.2
+2024-08-25 15:00:38,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=100613.33333333333, ans=0.0
+2024-08-25 15:00:56,744 INFO [train.py:1114] (0/4) Epoch 8, batch 1450, loss[loss=0.2791, simple_loss=0.3262, pruned_loss=0.08453, ctc_loss=0.1574, over 19703.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3064, pruned_loss=0.07627, ctc_loss=0.1431, over 3862989.12 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:01:03,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100666.66666666667, ans=0.0
+2024-08-25 15:01:08,347 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=100720.0, ans=0.125
+2024-08-25 15:01:20,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=100773.33333333333, ans=0.0
+2024-08-25 15:03:17,705 INFO [train.py:1114] (0/4) Epoch 8, batch 1500, loss[loss=0.249, simple_loss=0.3066, pruned_loss=0.06902, ctc_loss=0.1332, over 19578.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3073, pruned_loss=0.07678, ctc_loss=0.1445, over 3862350.18 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:05:15,236 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100933.33333333333, ans=0.125
+2024-08-25 15:05:24,436 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.972e+02 2.271e+02 2.845e+02 5.404e+02, threshold=4.542e+02, percent-clipped=1.0
+2024-08-25 15:05:26,805 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=100986.66666666667, ans=0.125
+2024-08-25 15:05:32,519 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.05 vs. limit=22.5
+2024-08-25 15:07:43,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=100986.66666666667, ans=0.125
+2024-08-25 15:09:55,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=101146.66666666667, ans=0.0
+2024-08-25 15:10:02,863 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.77 vs. limit=15.0
+2024-08-25 15:10:18,890 INFO [train.py:1114] (0/4) Epoch 8, batch 1550, loss[loss=0.2504, simple_loss=0.3083, pruned_loss=0.06967, ctc_loss=0.1327, over 19607.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3069, pruned_loss=0.07672, ctc_loss=0.1444, over 3846891.21 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:10:45,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=101253.33333333333, ans=0.025
+2024-08-25 15:13:33,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=101360.0, ans=0.0
+2024-08-25 15:13:57,985 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.35 vs. limit=15.0
+2024-08-25 15:14:11,863 INFO [train.py:1114] (0/4) Epoch 8, batch 1600, loss[loss=0.2537, simple_loss=0.3075, pruned_loss=0.07241, ctc_loss=0.1379, over 19829.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3064, pruned_loss=0.07649, ctc_loss=0.1438, over 3836792.40 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:14:12,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=101466.66666666667, ans=0.125
+2024-08-25 15:14:12,703 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.01 vs. limit=22.5
+2024-08-25 15:14:21,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=101466.66666666667, ans=0.125
+2024-08-25 15:14:31,967 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.915e+02 2.222e+02 2.696e+02 4.640e+02, threshold=4.444e+02, percent-clipped=1.0
+2024-08-25 15:14:42,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=101520.0, ans=0.125
+2024-08-25 15:14:53,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=101573.33333333333, ans=0.0
+2024-08-25 15:15:06,429 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=101626.66666666667, ans=0.2
+2024-08-25 15:15:30,465 INFO [train.py:1114] (0/4) Epoch 8, batch 1650, loss[loss=0.2362, simple_loss=0.3003, pruned_loss=0.06183, ctc_loss=0.1212, over 19644.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3066, pruned_loss=0.07681, ctc_loss=0.1443, over 3834185.61 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:15:30,870 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.86 vs. limit=15.0
+2024-08-25 15:15:58,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101840.0, ans=0.1
+2024-08-25 15:16:27,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=102000.0, ans=0.0
+2024-08-25 15:16:28,208 INFO [train.py:1114] (0/4) Epoch 8, batch 1700, loss[loss=0.2282, simple_loss=0.2758, pruned_loss=0.06457, ctc_loss=0.1287, over 19678.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3058, pruned_loss=0.07607, ctc_loss=0.143, over 3848314.27 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:16:40,738 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.920e+02 2.237e+02 2.711e+02 4.644e+02, threshold=4.474e+02, percent-clipped=2.0
+2024-08-25 15:16:53,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.43 vs. limit=6.0
+2024-08-25 15:17:20,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=102160.0, ans=0.125
+2024-08-25 15:17:28,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.08 vs. limit=15.0
+2024-08-25 15:17:39,506 INFO [train.py:1114] (0/4) Epoch 8, batch 1750, loss[loss=0.2635, simple_loss=0.2951, pruned_loss=0.08475, ctc_loss=0.156, over 19684.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3053, pruned_loss=0.07601, ctc_loss=0.1426, over 3851911.08 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:17:50,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=102320.0, ans=0.0
+2024-08-25 15:19:54,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=102373.33333333333, ans=0.2
+2024-08-25 15:20:05,591 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.96 vs. limit=15.0
+2024-08-25 15:20:09,317 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=102426.66666666667, ans=0.125
+2024-08-25 15:20:10,867 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.73 vs. limit=15.0
+2024-08-25 15:20:14,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=102480.0, ans=0.125
+2024-08-25 15:20:17,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=102480.0, ans=0.05
+2024-08-25 15:20:25,849 INFO [train.py:1114] (0/4) Epoch 8, batch 1800, loss[loss=0.2588, simple_loss=0.3152, pruned_loss=0.07384, ctc_loss=0.1371, over 19612.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3055, pruned_loss=0.07603, ctc_loss=0.1424, over 3854379.38 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:20:37,815 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 1.874e+02 2.230e+02 2.859e+02 4.439e+02, threshold=4.460e+02, percent-clipped=0.0
+2024-08-25 15:24:49,984 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=102693.33333333333, ans=0.125
+2024-08-25 15:26:47,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-25 15:28:59,115 INFO [train.py:1114] (0/4) Epoch 8, batch 1850, loss[loss=0.2922, simple_loss=0.3338, pruned_loss=0.09085, ctc_loss=0.1724, over 19594.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.3054, pruned_loss=0.07585, ctc_loss=0.1425, over 3857135.20 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:28:59,210 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=102800.0, ans=0.125
+2024-08-25 15:29:02,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102800.0, ans=0.1
+2024-08-25 15:29:14,411 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.38 vs. limit=10.0
+2024-08-25 15:29:18,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=102853.33333333333, ans=10.0
+2024-08-25 15:29:26,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=102906.66666666667, ans=0.09899494936611666
+2024-08-25 15:29:44,453 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.70 vs. limit=22.5
+2024-08-25 15:32:37,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=103066.66666666667, ans=0.0
+2024-08-25 15:32:38,670 INFO [train.py:1114] (0/4) Epoch 8, batch 1900, loss[loss=0.2515, simple_loss=0.3195, pruned_loss=0.06733, ctc_loss=0.122, over 19649.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3058, pruned_loss=0.07591, ctc_loss=0.1427, over 3861782.54 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:32:38,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103066.66666666667, ans=0.125
+2024-08-25 15:32:44,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=103066.66666666667, ans=0.125
+2024-08-25 15:32:45,410 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=103066.66666666667, ans=0.125
+2024-08-25 15:32:52,958 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.872e+02 2.139e+02 2.618e+02 5.849e+02, threshold=4.279e+02, percent-clipped=4.0
+2024-08-25 15:32:54,236 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=103120.0, ans=0.035
+2024-08-25 15:33:18,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=103226.66666666667, ans=0.125
+2024-08-25 15:33:37,635 INFO [train.py:1114] (0/4) Epoch 8, batch 1950, loss[loss=0.2433, simple_loss=0.2979, pruned_loss=0.0673, ctc_loss=0.1352, over 19589.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3072, pruned_loss=0.07613, ctc_loss=0.1435, over 3870478.34 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:33:50,191 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.80 vs. limit=12.0
+2024-08-25 15:33:51,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=103333.33333333333, ans=0.125
+2024-08-25 15:34:08,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=103440.0, ans=0.0
+2024-08-25 15:34:23,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=103493.33333333333, ans=0.125
+2024-08-25 15:34:23,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103493.33333333333, ans=0.125
+2024-08-25 15:34:26,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=103493.33333333333, ans=0.125
+2024-08-25 15:34:34,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103546.66666666667, ans=0.125
+2024-08-25 15:34:42,878 INFO [train.py:1114] (0/4) Epoch 8, batch 2000, loss[loss=0.213, simple_loss=0.2638, pruned_loss=0.05975, ctc_loss=0.107, over 19643.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3082, pruned_loss=0.07708, ctc_loss=0.1449, over 3855829.59 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 32.0
+2024-08-25 15:34:52,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=103600.0, ans=0.025
+2024-08-25 15:34:55,662 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 2.022e+02 2.450e+02 4.734e+02, threshold=4.043e+02, percent-clipped=1.0
+2024-08-25 15:35:03,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=103653.33333333333, ans=0.125
+2024-08-25 15:35:05,883 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.min_positive, batch_count=103706.66666666667, ans=0.05
+2024-08-25 15:35:38,656 INFO [train.py:1114] (0/4) Epoch 8, batch 2050, loss[loss=0.2438, simple_loss=0.2878, pruned_loss=0.07309, ctc_loss=0.1339, over 19688.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3073, pruned_loss=0.07704, ctc_loss=0.1446, over 3851400.97 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:35:43,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=103866.66666666667, ans=0.0
+2024-08-25 15:35:46,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103866.66666666667, ans=0.125
+2024-08-25 15:35:47,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=103866.66666666667, ans=0.025
+2024-08-25 15:36:16,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=104026.66666666667, ans=0.125
+2024-08-25 15:36:24,444 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=104080.0, ans=0.125
+2024-08-25 15:36:32,754 INFO [train.py:1114] (0/4) Epoch 8, batch 2100, loss[loss=0.2474, simple_loss=0.3009, pruned_loss=0.07016, ctc_loss=0.1339, over 19772.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3058, pruned_loss=0.07594, ctc_loss=0.1429, over 3858466.81 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:36:41,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=104133.33333333333, ans=0.0
+2024-08-25 15:36:44,886 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.055e+02 2.348e+02 2.987e+02 4.948e+02, threshold=4.695e+02, percent-clipped=5.0
+2024-08-25 15:36:53,831 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104240.0, ans=0.1
+2024-08-25 15:36:59,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=104240.0, ans=0.0
+2024-08-25 15:37:25,674 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.41 vs. limit=22.5
+2024-08-25 15:37:27,179 INFO [train.py:1114] (0/4) Epoch 8, batch 2150, loss[loss=0.2381, simple_loss=0.2961, pruned_loss=0.0657, ctc_loss=0.1217, over 19585.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3048, pruned_loss=0.07538, ctc_loss=0.1417, over 3869125.10 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:37:42,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=104453.33333333333, ans=0.0
+2024-08-25 15:37:48,316 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.36 vs. limit=15.0
+2024-08-25 15:37:58,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=104506.66666666667, ans=0.04949747468305833
+2024-08-25 15:38:23,253 INFO [train.py:1114] (0/4) Epoch 8, batch 2200, loss[loss=0.2823, simple_loss=0.3251, pruned_loss=0.08663, ctc_loss=0.1659, over 19594.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3053, pruned_loss=0.07581, ctc_loss=0.1424, over 3867395.04 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:38:32,501 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.59 vs. limit=22.5
+2024-08-25 15:38:35,673 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.961e+02 2.280e+02 3.038e+02 5.675e+02, threshold=4.560e+02, percent-clipped=2.0
+2024-08-25 15:38:45,909 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104773.33333333333, ans=0.1
+2024-08-25 15:38:57,995 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:39:02,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104826.66666666667, ans=0.125
+2024-08-25 15:39:05,673 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104826.66666666667, ans=0.125
+2024-08-25 15:39:18,578 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.58 vs. limit=5.0
+2024-08-25 15:39:19,044 INFO [train.py:1114] (0/4) Epoch 8, batch 2250, loss[loss=0.24, simple_loss=0.304, pruned_loss=0.06424, ctc_loss=0.1191, over 19628.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3052, pruned_loss=0.07565, ctc_loss=0.1419, over 3867405.77 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:39:33,137 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.86 vs. limit=22.5
+2024-08-25 15:39:44,817 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.97 vs. limit=22.5
+2024-08-25 15:39:45,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=105040.0, ans=0.125
+2024-08-25 15:39:52,215 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=105093.33333333333, ans=0.07
+2024-08-25 15:39:52,737 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.69 vs. limit=15.0
+2024-08-25 15:40:01,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105093.33333333333, ans=0.125
+2024-08-25 15:40:13,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=105200.0, ans=0.2
+2024-08-25 15:40:14,491 INFO [train.py:1114] (0/4) Epoch 8, batch 2300, loss[loss=0.2142, simple_loss=0.2725, pruned_loss=0.05682, ctc_loss=0.1058, over 19513.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3039, pruned_loss=0.07544, ctc_loss=0.1417, over 3860937.02 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:40:28,024 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.907e+02 2.167e+02 2.593e+02 4.976e+02, threshold=4.335e+02, percent-clipped=1.0
+2024-08-25 15:40:40,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105306.66666666667, ans=0.125
+2024-08-25 15:41:11,101 INFO [train.py:1114] (0/4) Epoch 8, batch 2350, loss[loss=0.2924, simple_loss=0.3378, pruned_loss=0.08855, ctc_loss=0.1745, over 19669.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3038, pruned_loss=0.07526, ctc_loss=0.1413, over 3863603.50 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:41:22,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=105520.0, ans=0.125
+2024-08-25 15:41:40,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=105573.33333333333, ans=0.09899494936611666
+2024-08-25 15:41:40,995 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.75 vs. limit=15.0
+2024-08-25 15:41:53,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=105680.0, ans=0.125
+2024-08-25 15:42:05,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=105733.33333333333, ans=0.125
+2024-08-25 15:42:06,007 INFO [train.py:1114] (0/4) Epoch 8, batch 2400, loss[loss=0.2521, simple_loss=0.3134, pruned_loss=0.06942, ctc_loss=0.1297, over 19477.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.306, pruned_loss=0.07608, ctc_loss=0.1427, over 3858382.89 frames. ], batch size: 67, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:42:07,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=105733.33333333333, ans=0.0
+2024-08-25 15:42:14,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=105733.33333333333, ans=0.125
+2024-08-25 15:42:18,060 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.983e+02 2.255e+02 2.870e+02 5.067e+02, threshold=4.510e+02, percent-clipped=2.0
+2024-08-25 15:42:20,828 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.85 vs. limit=22.5
+2024-08-25 15:42:36,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=105840.0, ans=0.0
+2024-08-25 15:42:40,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=105893.33333333333, ans=0.125
+2024-08-25 15:43:01,715 INFO [train.py:1114] (0/4) Epoch 8, batch 2450, loss[loss=0.3471, simple_loss=0.354, pruned_loss=0.1234, ctc_loss=0.2331, over 13958.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3116, pruned_loss=0.0808, ctc_loss=0.1518, over 3736317.57 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:43:07,667 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.20 vs. limit=6.0
+2024-08-25 15:43:10,388 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=106000.0, ans=0.025
+2024-08-25 15:43:31,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=106106.66666666667, ans=0.05
+2024-08-25 15:43:43,377 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-8.pt
+2024-08-25 15:44:31,297 INFO [train.py:1114] (0/4) Epoch 9, batch 0, loss[loss=0.2416, simple_loss=0.2848, pruned_loss=0.07241, ctc_loss=0.1341, over 19818.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2848, pruned_loss=0.07241, ctc_loss=0.1341, over 19818.00 frames. ], batch size: 49, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:44:31,298 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 15:44:49,826 INFO [train.py:1146] (0/4) Epoch 9, validation: loss=0.21, simple_loss=0.2947, pruned_loss=0.04621, ctc_loss=0.08206, over 944034.00 frames.
+2024-08-25 15:44:49,826 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 15:45:00,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=106261.33333333333, ans=0.035
+2024-08-25 15:45:14,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106314.66666666667, ans=0.1
+2024-08-25 15:45:15,530 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.154e+02 2.510e+02 2.953e+02 5.707e+02, threshold=5.019e+02, percent-clipped=2.0
+2024-08-25 15:46:11,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106314.66666666667, ans=0.125
+2024-08-25 15:46:22,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=106368.0, ans=0.0
+2024-08-25 15:46:36,868 INFO [train.py:1114] (0/4) Epoch 9, batch 50, loss[loss=0.2125, simple_loss=0.2683, pruned_loss=0.0565, ctc_loss=0.1092, over 19734.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3084, pruned_loss=0.07768, ctc_loss=0.1467, over 844043.89 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:46:40,806 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.32 vs. limit=15.0
+2024-08-25 15:46:41,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=106474.66666666667, ans=0.025
+2024-08-25 15:47:03,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=106581.33333333333, ans=0.0
+2024-08-25 15:47:11,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=106634.66666666667, ans=0.0
+2024-08-25 15:47:19,599 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-20000.pt
+2024-08-25 15:47:44,309 INFO [train.py:1114] (0/4) Epoch 9, batch 100, loss[loss=0.2363, simple_loss=0.2927, pruned_loss=0.06473, ctc_loss=0.126, over 19724.00 frames. ], tot_loss[loss=0.2598, simple_loss=0.3085, pruned_loss=0.07646, ctc_loss=0.1454, over 1497202.38 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:47:53,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106741.33333333333, ans=0.1
+2024-08-25 15:48:01,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=106794.66666666667, ans=0.0
+2024-08-25 15:48:09,486 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.842e+02 2.163e+02 2.785e+02 4.838e+02, threshold=4.326e+02, percent-clipped=0.0
+2024-08-25 15:48:09,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106848.0, ans=0.1
+2024-08-25 15:48:13,306 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=106848.0, ans=0.0
+2024-08-25 15:48:15,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=106848.0, ans=0.125
+2024-08-25 15:48:23,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=106901.33333333333, ans=0.125
+2024-08-25 15:48:42,151 INFO [train.py:1114] (0/4) Epoch 9, batch 150, loss[loss=0.2247, simple_loss=0.2765, pruned_loss=0.06356, ctc_loss=0.1143, over 19693.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3047, pruned_loss=0.07438, ctc_loss=0.1411, over 2027503.00 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:48:57,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=107061.33333333333, ans=0.0
+2024-08-25 15:49:06,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=107114.66666666667, ans=0.2
+2024-08-25 15:49:13,727 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:49:41,060 INFO [train.py:1114] (0/4) Epoch 9, batch 200, loss[loss=0.2864, simple_loss=0.3219, pruned_loss=0.09117, ctc_loss=0.1712, over 18274.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3025, pruned_loss=0.07309, ctc_loss=0.1382, over 2435063.29 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:49:44,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=107274.66666666667, ans=0.125
+2024-08-25 15:49:47,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=107274.66666666667, ans=0.125
+2024-08-25 15:49:59,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=107328.0, ans=0.125
+2024-08-25 15:50:06,174 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.799e+02 2.039e+02 2.617e+02 5.282e+02, threshold=4.078e+02, percent-clipped=1.0
+2024-08-25 15:50:44,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107381.33333333333, ans=0.1
+2024-08-25 15:50:47,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=107381.33333333333, ans=0.125
+2024-08-25 15:50:53,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=107434.66666666667, ans=0.125
+2024-08-25 15:51:05,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=107488.0, ans=0.0
+2024-08-25 15:51:06,742 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=107488.0, ans=0.125
+2024-08-25 15:51:17,113 INFO [train.py:1114] (0/4) Epoch 9, batch 250, loss[loss=0.3086, simple_loss=0.3436, pruned_loss=0.1016, ctc_loss=0.1762, over 19360.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3034, pruned_loss=0.07382, ctc_loss=0.1391, over 2754598.94 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:51:17,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=107541.33333333333, ans=10.0
+2024-08-25 15:51:20,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=107541.33333333333, ans=0.125
+2024-08-25 15:51:54,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=107701.33333333333, ans=0.125
+2024-08-25 15:52:00,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=107701.33333333333, ans=0.125
+2024-08-25 15:52:01,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=107701.33333333333, ans=0.0
+2024-08-25 15:52:10,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-25 15:52:13,083 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-25 15:52:15,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-25 15:52:15,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=107754.66666666667, ans=0.125
+2024-08-25 15:52:18,773 INFO [train.py:1114] (0/4) Epoch 9, batch 300, loss[loss=0.2682, simple_loss=0.3129, pruned_loss=0.08198, ctc_loss=0.149, over 19553.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3021, pruned_loss=0.07312, ctc_loss=0.1375, over 2999865.94 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:52:30,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107861.33333333333, ans=0.1
+2024-08-25 15:52:31,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=107861.33333333333, ans=0.07
+2024-08-25 15:52:43,322 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107914.66666666667, ans=0.1
+2024-08-25 15:52:47,053 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 1.831e+02 2.248e+02 2.885e+02 5.251e+02, threshold=4.495e+02, percent-clipped=2.0
+2024-08-25 15:53:01,492 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.19 vs. limit=15.0
+2024-08-25 15:53:02,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=107968.0, ans=0.025
+2024-08-25 15:53:02,302 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107968.0, ans=0.1
+2024-08-25 15:53:18,339 INFO [train.py:1114] (0/4) Epoch 9, batch 350, loss[loss=0.2412, simple_loss=0.2865, pruned_loss=0.07006, ctc_loss=0.1392, over 19747.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3027, pruned_loss=0.07329, ctc_loss=0.138, over 3189846.95 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:53:19,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=108074.66666666667, ans=0.015
+2024-08-25 15:53:25,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=108074.66666666667, ans=0.0
+2024-08-25 15:53:33,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=108128.0, ans=0.125
+2024-08-25 15:53:39,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=108128.0, ans=0.0
+2024-08-25 15:53:40,828 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.29 vs. limit=15.0
+2024-08-25 15:53:41,984 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.15 vs. limit=22.5
+2024-08-25 15:53:55,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=108234.66666666667, ans=0.2
+2024-08-25 15:54:14,912 INFO [train.py:1114] (0/4) Epoch 9, batch 400, loss[loss=0.2434, simple_loss=0.307, pruned_loss=0.06575, ctc_loss=0.1206, over 19514.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3019, pruned_loss=0.07276, ctc_loss=0.137, over 3342190.73 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:54:21,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=108341.33333333333, ans=0.125
+2024-08-25 15:54:22,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=108341.33333333333, ans=15.0
+2024-08-25 15:54:43,462 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.039e+02 2.514e+02 3.062e+02 4.428e+02, threshold=5.028e+02, percent-clipped=0.0
+2024-08-25 15:54:56,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108501.33333333333, ans=0.1
+2024-08-25 15:55:18,514 INFO [train.py:1114] (0/4) Epoch 9, batch 450, loss[loss=0.2205, simple_loss=0.2911, pruned_loss=0.05381, ctc_loss=0.1057, over 19624.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3019, pruned_loss=0.07277, ctc_loss=0.1368, over 3450818.45 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:55:18,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108608.0, ans=0.125
+2024-08-25 15:55:23,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=108608.0, ans=0.125
+2024-08-25 15:55:24,548 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108608.0, ans=0.125
+2024-08-25 15:55:32,687 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.44 vs. limit=15.0
+2024-08-25 15:55:42,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=108714.66666666667, ans=0.125
+2024-08-25 15:59:08,945 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:59:11,154 INFO [train.py:1114] (0/4) Epoch 9, batch 500, loss[loss=0.2524, simple_loss=0.3116, pruned_loss=0.07077, ctc_loss=0.1288, over 19701.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3011, pruned_loss=0.07253, ctc_loss=0.1364, over 3545802.40 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:59:13,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=108874.66666666667, ans=0.0
+2024-08-25 15:59:14,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.97 vs. limit=22.5
+2024-08-25 15:59:24,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=108928.0, ans=0.125
+2024-08-25 15:59:30,136 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=108928.0, ans=0.0
+2024-08-25 15:59:33,283 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.29 vs. limit=22.5
+2024-08-25 15:59:37,503 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.839e+02 2.298e+02 3.023e+02 4.931e+02, threshold=4.596e+02, percent-clipped=0.0
+2024-08-25 15:59:58,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109088.0, ans=0.1
+2024-08-25 16:00:06,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=109088.0, ans=0.125
+2024-08-25 16:00:08,630 INFO [train.py:1114] (0/4) Epoch 9, batch 550, loss[loss=0.2638, simple_loss=0.309, pruned_loss=0.07944, ctc_loss=0.1491, over 19392.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3017, pruned_loss=0.07289, ctc_loss=0.1373, over 3607826.60 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:00:30,447 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.15 vs. limit=12.0
+2024-08-25 16:00:37,457 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.15 vs. limit=15.0
+2024-08-25 16:00:48,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=109301.33333333333, ans=0.0
+2024-08-25 16:01:00,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=109354.66666666667, ans=0.2
+2024-08-25 16:01:12,447 INFO [train.py:1114] (0/4) Epoch 9, batch 600, loss[loss=0.2718, simple_loss=0.3172, pruned_loss=0.08153, ctc_loss=0.1583, over 19429.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3022, pruned_loss=0.07288, ctc_loss=0.1373, over 3665188.56 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:01:43,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=109461.33333333333, ans=0.0
+2024-08-25 16:01:46,150 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=109461.33333333333, ans=0.125
+2024-08-25 16:01:48,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=109514.66666666667, ans=0.0
+2024-08-25 16:01:51,491 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.960e+02 2.208e+02 2.721e+02 5.490e+02, threshold=4.416e+02, percent-clipped=2.0
+2024-08-25 16:01:55,640 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.27 vs. limit=15.0
+2024-08-25 16:02:34,052 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109568.0, ans=0.1
+2024-08-25 16:02:47,609 INFO [train.py:1114] (0/4) Epoch 9, batch 650, loss[loss=0.218, simple_loss=0.2839, pruned_loss=0.05504, ctc_loss=0.1053, over 19758.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3012, pruned_loss=0.07221, ctc_loss=0.1362, over 3715812.23 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:02:53,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109674.66666666667, ans=0.125
+2024-08-25 16:03:22,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=109781.33333333333, ans=0.125
+2024-08-25 16:03:22,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=109781.33333333333, ans=0.2
+2024-08-25 16:03:31,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109834.66666666667, ans=0.125
+2024-08-25 16:03:47,859 INFO [train.py:1114] (0/4) Epoch 9, batch 700, loss[loss=0.2368, simple_loss=0.2901, pruned_loss=0.06647, ctc_loss=0.1262, over 19723.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3015, pruned_loss=0.07258, ctc_loss=0.1369, over 3748214.42 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:03:49,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=109941.33333333333, ans=0.025
+2024-08-25 16:04:02,989 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:04:06,823 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.92 vs. limit=15.0
+2024-08-25 16:04:06,862 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.16 vs. limit=22.5
+2024-08-25 16:04:09,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=109994.66666666667, ans=0.025
+2024-08-25 16:04:14,376 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 1.949e+02 2.382e+02 2.859e+02 4.618e+02, threshold=4.764e+02, percent-clipped=1.0
+2024-08-25 16:04:18,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=110048.0, ans=0.09899494936611666
+2024-08-25 16:04:22,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=110101.33333333333, ans=0.0
+2024-08-25 16:04:44,755 INFO [train.py:1114] (0/4) Epoch 9, batch 750, loss[loss=0.287, simple_loss=0.3244, pruned_loss=0.09087, ctc_loss=0.1699, over 19483.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3013, pruned_loss=0.07241, ctc_loss=0.1365, over 3774614.02 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:04:53,833 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.09 vs. limit=15.0
+2024-08-25 16:04:57,347 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=110261.33333333333, ans=0.125
+2024-08-25 16:05:20,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=110314.66666666667, ans=0.125
+2024-08-25 16:05:28,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=110368.0, ans=0.125
+2024-08-25 16:05:47,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=110474.66666666667, ans=0.2
+2024-08-25 16:05:48,071 INFO [train.py:1114] (0/4) Epoch 9, batch 800, loss[loss=0.207, simple_loss=0.2575, pruned_loss=0.05675, ctc_loss=0.1076, over 19841.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3012, pruned_loss=0.07241, ctc_loss=0.136, over 3796448.51 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:06:02,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=110528.0, ans=0.125
+2024-08-25 16:06:03,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=110528.0, ans=0.125
+2024-08-25 16:06:14,977 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.861e+02 2.104e+02 2.558e+02 4.618e+02, threshold=4.207e+02, percent-clipped=0.0
+2024-08-25 16:06:16,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=110581.33333333333, ans=0.0
+2024-08-25 16:06:32,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=110634.66666666667, ans=0.125
+2024-08-25 16:06:47,180 INFO [train.py:1114] (0/4) Epoch 9, batch 850, loss[loss=0.2375, simple_loss=0.2989, pruned_loss=0.06478, ctc_loss=0.1163, over 19657.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3006, pruned_loss=0.07217, ctc_loss=0.1356, over 3815637.27 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:06:49,004 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.47 vs. limit=15.0
+2024-08-25 16:07:02,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=110794.66666666667, ans=0.0
+2024-08-25 16:07:05,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=110794.66666666667, ans=0.0
+2024-08-25 16:07:23,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=110901.33333333333, ans=0.125
+2024-08-25 16:08:32,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=110954.66666666667, ans=0.125
+2024-08-25 16:08:42,730 INFO [train.py:1114] (0/4) Epoch 9, batch 900, loss[loss=0.2456, simple_loss=0.285, pruned_loss=0.07463, ctc_loss=0.1427, over 19805.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3011, pruned_loss=0.07264, ctc_loss=0.1365, over 3820448.07 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:09:12,338 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.982e+02 2.328e+02 2.784e+02 5.806e+02, threshold=4.657e+02, percent-clipped=1.0
+2024-08-25 16:09:12,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=111114.66666666667, ans=0.0
+2024-08-25 16:09:15,362 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.52 vs. limit=6.0
+2024-08-25 16:09:17,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=111114.66666666667, ans=0.125
+2024-08-25 16:09:20,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=111168.0, ans=0.125
+2024-08-25 16:09:29,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=111168.0, ans=0.2
+2024-08-25 16:09:47,306 INFO [train.py:1114] (0/4) Epoch 9, batch 950, loss[loss=0.2195, simple_loss=0.2746, pruned_loss=0.06068, ctc_loss=0.1076, over 19520.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3016, pruned_loss=0.07274, ctc_loss=0.1366, over 3822695.00 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:10:05,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=111328.0, ans=0.125
+2024-08-25 16:10:05,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=111328.0, ans=0.125
+2024-08-25 16:10:08,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=111328.0, ans=0.2
+2024-08-25 16:10:10,371 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.16 vs. limit=15.0
+2024-08-25 16:10:36,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=111488.0, ans=0.025
+2024-08-25 16:10:45,136 INFO [train.py:1114] (0/4) Epoch 9, batch 1000, loss[loss=0.2142, simple_loss=0.2785, pruned_loss=0.05403, ctc_loss=0.1048, over 19829.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3022, pruned_loss=0.07291, ctc_loss=0.1369, over 3819280.03 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:11:02,596 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=111594.66666666667, ans=0.125
+2024-08-25 16:11:06,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=111594.66666666667, ans=0.05
+2024-08-25 16:11:06,454 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.06 vs. limit=15.0
+2024-08-25 16:11:13,877 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.864e+02 2.156e+02 2.793e+02 4.751e+02, threshold=4.311e+02, percent-clipped=1.0
+2024-08-25 16:11:43,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=111754.66666666667, ans=0.0
+2024-08-25 16:11:45,639 INFO [train.py:1114] (0/4) Epoch 9, batch 1050, loss[loss=0.261, simple_loss=0.3121, pruned_loss=0.07658, ctc_loss=0.1419, over 19859.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3015, pruned_loss=0.07264, ctc_loss=0.1366, over 3823747.95 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:12:00,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111861.33333333333, ans=0.1
+2024-08-25 16:12:05,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=111861.33333333333, ans=0.0
+2024-08-25 16:12:51,802 INFO [train.py:1114] (0/4) Epoch 9, batch 1100, loss[loss=0.2397, simple_loss=0.2967, pruned_loss=0.06657, ctc_loss=0.1237, over 19592.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3005, pruned_loss=0.0719, ctc_loss=0.1354, over 3831698.90 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:13:04,365 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.04 vs. limit=15.0
+2024-08-25 16:13:09,056 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.28 vs. limit=22.5
+2024-08-25 16:13:19,827 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 1.820e+02 2.090e+02 2.645e+02 4.523e+02, threshold=4.179e+02, percent-clipped=2.0
+2024-08-25 16:13:21,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=112181.33333333333, ans=0.2
+2024-08-25 16:13:24,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=112181.33333333333, ans=0.125
+2024-08-25 16:13:46,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=112288.0, ans=0.0
+2024-08-25 16:13:50,956 INFO [train.py:1114] (0/4) Epoch 9, batch 1150, loss[loss=0.2223, simple_loss=0.2838, pruned_loss=0.05814, ctc_loss=0.1115, over 19600.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3001, pruned_loss=0.07192, ctc_loss=0.1353, over 3830700.83 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:13:51,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=112341.33333333333, ans=0.0
+2024-08-25 16:13:51,468 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.18 vs. limit=12.0
+2024-08-25 16:14:10,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=112394.66666666667, ans=0.0
+2024-08-25 16:14:15,347 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=112448.0, ans=0.125
+2024-08-25 16:14:51,114 INFO [train.py:1114] (0/4) Epoch 9, batch 1200, loss[loss=0.2296, simple_loss=0.2951, pruned_loss=0.05817, ctc_loss=0.1193, over 19834.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3006, pruned_loss=0.07203, ctc_loss=0.1355, over 3825832.07 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:15:06,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112661.33333333333, ans=0.1
+2024-08-25 16:16:05,769 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.875e+02 2.166e+02 2.598e+02 4.323e+02, threshold=4.331e+02, percent-clipped=2.0
+2024-08-25 16:16:24,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=112768.0, ans=0.2
+2024-08-25 16:16:28,798 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=112821.33333333333, ans=0.125
+2024-08-25 16:16:39,514 INFO [train.py:1114] (0/4) Epoch 9, batch 1250, loss[loss=0.3054, simple_loss=0.3372, pruned_loss=0.1, ctc_loss=0.1839, over 19548.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3009, pruned_loss=0.07188, ctc_loss=0.1353, over 3844137.32 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:16:44,540 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=112874.66666666667, ans=0.0
+2024-08-25 16:17:00,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=112928.0, ans=0.0
+2024-08-25 16:17:33,619 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.35 vs. limit=15.0
+2024-08-25 16:17:40,917 INFO [train.py:1114] (0/4) Epoch 9, batch 1300, loss[loss=0.3178, simple_loss=0.3449, pruned_loss=0.1055, ctc_loss=0.1992, over 18806.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3002, pruned_loss=0.07174, ctc_loss=0.1349, over 3847673.27 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:17:46,952 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=113141.33333333333, ans=0.2
+2024-08-25 16:17:47,251 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.48 vs. limit=22.5
+2024-08-25 16:18:00,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=113194.66666666667, ans=0.2
+2024-08-25 16:18:08,521 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 1.959e+02 2.315e+02 2.984e+02 4.812e+02, threshold=4.630e+02, percent-clipped=1.0
+2024-08-25 16:18:15,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=113301.33333333333, ans=0.125
+2024-08-25 16:18:30,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=113354.66666666667, ans=0.0
+2024-08-25 16:18:32,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=113354.66666666667, ans=0.125
+2024-08-25 16:18:42,123 INFO [train.py:1114] (0/4) Epoch 9, batch 1350, loss[loss=0.2196, simple_loss=0.2822, pruned_loss=0.05646, ctc_loss=0.11, over 19737.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.2989, pruned_loss=0.07073, ctc_loss=0.133, over 3859306.16 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:18:56,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=113461.33333333333, ans=0.125
+2024-08-25 16:19:04,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=113514.66666666667, ans=0.125
+2024-08-25 16:19:18,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=113568.0, ans=0.07
+2024-08-25 16:19:32,254 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.49 vs. limit=6.0
+2024-08-25 16:19:40,023 INFO [train.py:1114] (0/4) Epoch 9, batch 1400, loss[loss=0.2021, simple_loss=0.2625, pruned_loss=0.05168, ctc_loss=0.09587, over 19662.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.2995, pruned_loss=0.07113, ctc_loss=0.1337, over 3866130.34 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:19:50,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=113728.0, ans=0.125
+2024-08-25 16:20:07,559 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.860e+02 2.127e+02 2.545e+02 4.134e+02, threshold=4.253e+02, percent-clipped=0.0
+2024-08-25 16:20:07,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=113781.33333333333, ans=0.125
+2024-08-25 16:20:09,877 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113781.33333333333, ans=0.1
+2024-08-25 16:20:12,121 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:20:20,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=113834.66666666667, ans=0.0
+2024-08-25 16:20:25,398 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.39 vs. limit=15.0
+2024-08-25 16:20:43,034 INFO [train.py:1114] (0/4) Epoch 9, batch 1450, loss[loss=0.2751, simple_loss=0.3166, pruned_loss=0.08484, ctc_loss=0.16, over 19679.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.3002, pruned_loss=0.07162, ctc_loss=0.1346, over 3864108.67 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:20:43,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=113941.33333333333, ans=0.05
+2024-08-25 16:20:48,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=113941.33333333333, ans=0.09899494936611666
+2024-08-25 16:20:51,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=113941.33333333333, ans=0.125
+2024-08-25 16:20:55,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=113994.66666666667, ans=22.5
+2024-08-25 16:21:07,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=114048.0, ans=0.1
+2024-08-25 16:21:08,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=114048.0, ans=0.125
+2024-08-25 16:21:27,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=114101.33333333333, ans=0.0
+2024-08-25 16:21:45,885 INFO [train.py:1114] (0/4) Epoch 9, batch 1500, loss[loss=0.26, simple_loss=0.3149, pruned_loss=0.07356, ctc_loss=0.1451, over 19600.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3008, pruned_loss=0.07191, ctc_loss=0.135, over 3863198.78 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:21:46,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=114208.0, ans=0.95
+2024-08-25 16:21:49,120 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.41 vs. limit=15.0
+2024-08-25 16:21:54,682 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.41 vs. limit=10.0
+2024-08-25 16:22:02,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=114261.33333333333, ans=0.125
+2024-08-25 16:22:06,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=114261.33333333333, ans=10.0
+2024-08-25 16:22:08,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=114261.33333333333, ans=0.125
+2024-08-25 16:22:15,434 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.928e+02 2.180e+02 2.740e+02 4.350e+02, threshold=4.360e+02, percent-clipped=2.0
+2024-08-25 16:22:19,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=114314.66666666667, ans=0.125
+2024-08-25 16:22:45,662 INFO [train.py:1114] (0/4) Epoch 9, batch 1550, loss[loss=0.279, simple_loss=0.326, pruned_loss=0.08539, ctc_loss=0.153, over 19597.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3015, pruned_loss=0.07253, ctc_loss=0.136, over 3847363.39 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-25 16:23:10,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=114581.33333333333, ans=0.125
+2024-08-25 16:23:15,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=114581.33333333333, ans=0.0
+2024-08-25 16:23:17,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=114581.33333333333, ans=0.125
+2024-08-25 16:23:24,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=114634.66666666667, ans=0.025
+2024-08-25 16:23:32,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-25 16:23:46,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=114741.33333333333, ans=0.125
+2024-08-25 16:23:47,249 INFO [train.py:1114] (0/4) Epoch 9, batch 1600, loss[loss=0.2307, simple_loss=0.2928, pruned_loss=0.061, ctc_loss=0.1163, over 19838.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3013, pruned_loss=0.07233, ctc_loss=0.1359, over 3835919.64 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-25 16:23:59,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=114794.66666666667, ans=0.125
+2024-08-25 16:24:03,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=114794.66666666667, ans=0.125
+2024-08-25 16:24:05,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=114794.66666666667, ans=0.125
+2024-08-25 16:24:16,806 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.930e+02 2.504e+02 3.084e+02 5.673e+02, threshold=5.009e+02, percent-clipped=4.0
+2024-08-25 16:24:46,353 INFO [train.py:1114] (0/4) Epoch 9, batch 1650, loss[loss=0.2497, simple_loss=0.31, pruned_loss=0.06904, ctc_loss=0.1283, over 19639.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3012, pruned_loss=0.07218, ctc_loss=0.1356, over 3833183.94 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:25:26,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=115168.0, ans=0.125
+2024-08-25 16:25:34,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=115221.33333333333, ans=0.0
+2024-08-25 16:25:41,736 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=115221.33333333333, ans=0.125
+2024-08-25 16:25:45,143 INFO [train.py:1114] (0/4) Epoch 9, batch 1700, loss[loss=0.1959, simple_loss=0.2599, pruned_loss=0.04677, ctc_loss=0.0958, over 19661.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3004, pruned_loss=0.07129, ctc_loss=0.1342, over 3847890.26 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:25:47,168 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=115274.66666666667, ans=0.07
+2024-08-25 16:25:58,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=115328.0, ans=0.0
+2024-08-25 16:26:13,051 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.773e+02 1.969e+02 2.283e+02 4.673e+02, threshold=3.938e+02, percent-clipped=0.0
+2024-08-25 16:26:26,615 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.60 vs. limit=15.0
+2024-08-25 16:26:41,738 INFO [train.py:1114] (0/4) Epoch 9, batch 1750, loss[loss=0.245, simple_loss=0.2847, pruned_loss=0.075, ctc_loss=0.1383, over 19683.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.2998, pruned_loss=0.07119, ctc_loss=0.1339, over 3852696.95 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:26:42,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-25 16:26:47,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=115541.33333333333, ans=0.0
+2024-08-25 16:27:46,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=115648.0, ans=0.025
+2024-08-25 16:28:08,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=115754.66666666667, ans=0.125
+2024-08-25 16:28:12,464 INFO [train.py:1114] (0/4) Epoch 9, batch 1800, loss[loss=0.2636, simple_loss=0.3177, pruned_loss=0.07554, ctc_loss=0.1459, over 19616.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3003, pruned_loss=0.07163, ctc_loss=0.1348, over 3852814.33 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:28:22,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=115808.0, ans=0.125
+2024-08-25 16:28:40,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=115861.33333333333, ans=0.0
+2024-08-25 16:28:46,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=115914.66666666667, ans=0.95
+2024-08-25 16:28:49,004 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 1.840e+02 2.097e+02 2.711e+02 4.220e+02, threshold=4.193e+02, percent-clipped=2.0
+2024-08-25 16:28:59,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=115968.0, ans=0.125
+2024-08-25 16:29:10,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=115968.0, ans=0.0
+2024-08-25 16:29:25,118 INFO [train.py:1114] (0/4) Epoch 9, batch 1850, loss[loss=0.269, simple_loss=0.3232, pruned_loss=0.07808, ctc_loss=0.1467, over 19591.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3002, pruned_loss=0.07165, ctc_loss=0.1346, over 3856726.08 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:29:43,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=116128.0, ans=0.0
+2024-08-25 16:30:21,273 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:30:26,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=116181.33333333333, ans=0.0
+2024-08-25 16:30:33,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=116234.66666666667, ans=0.125
+2024-08-25 16:30:42,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=116234.66666666667, ans=0.025
+2024-08-25 16:30:55,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=116341.33333333333, ans=0.0
+2024-08-25 16:30:56,367 INFO [train.py:1114] (0/4) Epoch 9, batch 1900, loss[loss=0.2582, simple_loss=0.3155, pruned_loss=0.07306, ctc_loss=0.1371, over 19640.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3008, pruned_loss=0.07193, ctc_loss=0.135, over 3862410.33 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:32:01,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=116341.33333333333, ans=0.125
+2024-08-25 16:32:08,051 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.57 vs. limit=15.0
+2024-08-25 16:32:21,937 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.810e+02 2.075e+02 2.674e+02 4.757e+02, threshold=4.150e+02, percent-clipped=3.0
+2024-08-25 16:32:25,792 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=116448.0, ans=0.125
+2024-08-25 16:32:25,892 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.02 vs. limit=10.0
+2024-08-25 16:32:35,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=116501.33333333333, ans=0.0
+2024-08-25 16:32:45,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=116554.66666666667, ans=0.0
+2024-08-25 16:33:06,107 INFO [train.py:1114] (0/4) Epoch 9, batch 1950, loss[loss=0.2565, simple_loss=0.3082, pruned_loss=0.0732, ctc_loss=0.146, over 19575.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3016, pruned_loss=0.07191, ctc_loss=0.1352, over 3871997.96 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:33:09,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=116608.0, ans=0.2
+2024-08-25 16:33:23,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=116661.33333333333, ans=0.0
+2024-08-25 16:33:30,529 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:33:47,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=116768.0, ans=0.0
+2024-08-25 16:34:01,152 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.31 vs. limit=15.0
+2024-08-25 16:34:02,768 INFO [train.py:1114] (0/4) Epoch 9, batch 2000, loss[loss=0.1966, simple_loss=0.2506, pruned_loss=0.05241, ctc_loss=0.09474, over 19696.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3018, pruned_loss=0.07208, ctc_loss=0.1357, over 3856507.86 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:34:14,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=116928.0, ans=0.0
+2024-08-25 16:34:15,528 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.13 vs. limit=15.0
+2024-08-25 16:34:16,351 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=116928.0, ans=0.0
+2024-08-25 16:34:17,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=116928.0, ans=0.1
+2024-08-25 16:34:29,392 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.77 vs. limit=15.0
+2024-08-25 16:34:30,180 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=116981.33333333333, ans=0.125
+2024-08-25 16:34:30,982 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 1.787e+02 2.122e+02 2.673e+02 5.196e+02, threshold=4.245e+02, percent-clipped=10.0
+2024-08-25 16:34:42,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=117034.66666666667, ans=0.125
+2024-08-25 16:34:50,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=117088.0, ans=0.09899494936611666
+2024-08-25 16:34:59,605 INFO [train.py:1114] (0/4) Epoch 9, batch 2050, loss[loss=0.2136, simple_loss=0.2706, pruned_loss=0.05625, ctc_loss=0.1104, over 19686.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3011, pruned_loss=0.07206, ctc_loss=0.1357, over 3852956.16 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:35:12,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=117194.66666666667, ans=0.0
+2024-08-25 16:35:14,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=117194.66666666667, ans=0.0
+2024-08-25 16:35:28,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=117248.0, ans=0.125
+2024-08-25 16:35:32,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=117301.33333333333, ans=0.2
+2024-08-25 16:35:34,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=117301.33333333333, ans=0.125
+2024-08-25 16:36:04,968 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.27 vs. limit=15.0
+2024-08-25 16:36:57,814 INFO [train.py:1114] (0/4) Epoch 9, batch 2100, loss[loss=0.2543, simple_loss=0.3047, pruned_loss=0.07488, ctc_loss=0.1351, over 19767.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3004, pruned_loss=0.07159, ctc_loss=0.135, over 3859079.64 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:37:17,055 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.87 vs. limit=15.0
+2024-08-25 16:37:29,780 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.98 vs. limit=10.0
+2024-08-25 16:37:35,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=117514.66666666667, ans=0.05
+2024-08-25 16:37:36,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=117514.66666666667, ans=0.1
+2024-08-25 16:37:38,952 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.824e+02 2.012e+02 2.446e+02 4.504e+02, threshold=4.025e+02, percent-clipped=2.0
+2024-08-25 16:37:50,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=117568.0, ans=0.2
+2024-08-25 16:37:57,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=117621.33333333333, ans=0.07
+2024-08-25 16:38:06,806 INFO [train.py:1114] (0/4) Epoch 9, batch 2150, loss[loss=0.214, simple_loss=0.2694, pruned_loss=0.05747, ctc_loss=0.1091, over 19589.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.2992, pruned_loss=0.07095, ctc_loss=0.1335, over 3869879.56 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:38:12,993 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.82 vs. limit=15.0
+2024-08-25 16:38:14,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=117674.66666666667, ans=0.025
+2024-08-25 16:38:21,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=117728.0, ans=0.025
+2024-08-25 16:38:21,675 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.22 vs. limit=22.5
+2024-08-25 16:38:22,987 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.93 vs. limit=12.0
+2024-08-25 16:38:28,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=117781.33333333333, ans=0.1
+2024-08-25 16:38:33,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=117781.33333333333, ans=0.125
+2024-08-25 16:38:46,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=117834.66666666667, ans=0.125
+2024-08-25 16:38:57,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=117888.0, ans=0.125
+2024-08-25 16:39:02,671 INFO [train.py:1114] (0/4) Epoch 9, batch 2200, loss[loss=0.2637, simple_loss=0.312, pruned_loss=0.0766, ctc_loss=0.1557, over 19579.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.2993, pruned_loss=0.07082, ctc_loss=0.1333, over 3869284.31 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:39:04,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=117941.33333333333, ans=0.0
+2024-08-25 16:39:12,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=117941.33333333333, ans=0.125
+2024-08-25 16:39:27,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=118048.0, ans=0.025
+2024-08-25 16:39:27,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=118048.0, ans=0.0
+2024-08-25 16:39:30,923 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.840e+02 2.263e+02 2.882e+02 6.553e+02, threshold=4.526e+02, percent-clipped=9.0
+2024-08-25 16:39:38,460 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:39:57,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=118154.66666666667, ans=0.5
+2024-08-25 16:39:59,965 INFO [train.py:1114] (0/4) Epoch 9, batch 2250, loss[loss=0.2449, simple_loss=0.3016, pruned_loss=0.06772, ctc_loss=0.1318, over 19627.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.2996, pruned_loss=0.07097, ctc_loss=0.1337, over 3868578.45 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:40:14,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=118261.33333333333, ans=0.0
+2024-08-25 16:40:29,415 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=118314.66666666667, ans=0.2
+2024-08-25 16:40:35,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=118368.0, ans=0.125
+2024-08-25 16:40:54,823 INFO [train.py:1114] (0/4) Epoch 9, batch 2300, loss[loss=0.2173, simple_loss=0.2691, pruned_loss=0.06068, ctc_loss=0.1104, over 19510.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.2984, pruned_loss=0.07082, ctc_loss=0.1334, over 3861868.07 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:41:02,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=118474.66666666667, ans=0.125
+2024-08-25 16:41:24,907 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.864e+02 2.265e+02 3.023e+02 5.230e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 16:41:30,565 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.79 vs. limit=15.0
+2024-08-25 16:41:34,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=118634.66666666667, ans=10.0
+2024-08-25 16:41:40,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=118688.0, ans=0.125
+2024-08-25 16:41:44,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=118688.0, ans=0.2
+2024-08-25 16:41:51,058 INFO [train.py:1114] (0/4) Epoch 9, batch 2350, loss[loss=0.2707, simple_loss=0.3173, pruned_loss=0.07994, ctc_loss=0.1604, over 19635.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.2979, pruned_loss=0.07036, ctc_loss=0.1324, over 3864113.15 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:42:01,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=118741.33333333333, ans=0.025
+2024-08-25 16:42:04,487 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=118794.66666666667, ans=0.125
+2024-08-25 16:42:07,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=118794.66666666667, ans=0.04949747468305833
+2024-08-25 16:42:30,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=118848.0, ans=0.0
+2024-08-25 16:42:30,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=118848.0, ans=0.0
+2024-08-25 16:42:39,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=118901.33333333333, ans=0.2
+2024-08-25 16:42:44,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=118901.33333333333, ans=0.125
+2024-08-25 16:42:52,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=118954.66666666667, ans=0.0
+2024-08-25 16:42:59,716 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=118954.66666666667, ans=0.0
+2024-08-25 16:43:02,787 INFO [train.py:1114] (0/4) Epoch 9, batch 2400, loss[loss=0.2781, simple_loss=0.3222, pruned_loss=0.08454, ctc_loss=0.1622, over 19405.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.3002, pruned_loss=0.07157, ctc_loss=0.1346, over 3857848.50 frames. ], batch size: 67, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:43:12,965 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=119008.0, ans=0.125
+2024-08-25 16:43:27,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=119114.66666666667, ans=0.125
+2024-08-25 16:43:32,518 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.930e+02 2.301e+02 2.799e+02 4.768e+02, threshold=4.601e+02, percent-clipped=1.0
+2024-08-25 16:43:33,077 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.59 vs. limit=15.0
+2024-08-25 16:43:33,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=119114.66666666667, ans=0.125
+2024-08-25 16:43:48,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=119221.33333333333, ans=0.0
+2024-08-25 16:43:59,351 INFO [train.py:1114] (0/4) Epoch 9, batch 2450, loss[loss=0.3547, simple_loss=0.3567, pruned_loss=0.1287, ctc_loss=0.2384, over 13679.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3049, pruned_loss=0.07561, ctc_loss=0.1423, over 3728798.97 frames. ], batch size: 140, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:44:20,732 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=2.735e-03
+2024-08-25 16:44:22,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.92 vs. limit=10.0
+2024-08-25 16:44:36,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=119434.66666666667, ans=0.2
+2024-08-25 16:44:38,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=119434.66666666667, ans=0.0
+2024-08-25 16:44:40,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=119434.66666666667, ans=0.125
+2024-08-25 16:44:41,664 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-9.pt
+2024-08-25 16:45:25,693 INFO [train.py:1114] (0/4) Epoch 10, batch 0, loss[loss=0.2335, simple_loss=0.2831, pruned_loss=0.06725, ctc_loss=0.1234, over 19806.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.2831, pruned_loss=0.06725, ctc_loss=0.1234, over 19806.00 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 32.0
+2024-08-25 16:45:25,693 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 16:45:33,000 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.6104, 4.1222, 2.4195, 1.8686], device='cuda:0')
+2024-08-25 16:46:37,097 INFO [train.py:1146] (0/4) Epoch 10, validation: loss=0.2041, simple_loss=0.2903, pruned_loss=0.04356, ctc_loss=0.07708, over 944034.00 frames.
+2024-08-25 16:46:37,097 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 16:46:58,236 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.87 vs. limit=15.0
+2024-08-25 16:47:07,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=119589.33333333333, ans=0.125
+2024-08-25 16:47:17,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=119642.66666666667, ans=0.025
+2024-08-25 16:47:31,295 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=7.01 vs. limit=12.0
+2024-08-25 16:47:46,605 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 1.955e+02 2.116e+02 2.362e+02 4.652e+02, threshold=4.231e+02, percent-clipped=1.0
+2024-08-25 16:48:18,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=119696.0, ans=0.2
+2024-08-25 16:48:20,469 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=119696.0, ans=0.0
+2024-08-25 16:48:28,308 INFO [train.py:1114] (0/4) Epoch 10, batch 50, loss[loss=0.2072, simple_loss=0.2628, pruned_loss=0.05458, ctc_loss=0.1061, over 19711.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3031, pruned_loss=0.07283, ctc_loss=0.1373, over 843213.08 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:49:43,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.35 vs. limit=12.0
+2024-08-25 16:50:11,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=119856.0, ans=0.1
+2024-08-25 16:51:23,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=119962.66666666667, ans=0.0
+2024-08-25 16:51:36,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=119962.66666666667, ans=0.1
+2024-08-25 16:52:34,138 INFO [train.py:1114] (0/4) Epoch 10, batch 100, loss[loss=0.2276, simple_loss=0.2849, pruned_loss=0.06177, ctc_loss=0.1171, over 19710.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3045, pruned_loss=0.07305, ctc_loss=0.1376, over 1497783.82 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:52:37,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120016.0, ans=0.125
+2024-08-25 16:53:12,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=120069.33333333333, ans=0.125
+2024-08-25 16:53:40,061 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=120176.0, ans=0.05
+2024-08-25 16:53:47,830 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 1.798e+02 2.253e+02 2.860e+02 4.134e+02, threshold=4.507e+02, percent-clipped=0.0
+2024-08-25 16:54:30,891 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.17 vs. limit=15.0
+2024-08-25 16:54:47,463 INFO [train.py:1114] (0/4) Epoch 10, batch 150, loss[loss=0.2617, simple_loss=0.2978, pruned_loss=0.08234, ctc_loss=0.1522, over 19725.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3017, pruned_loss=0.07164, ctc_loss=0.1345, over 2026659.98 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:54:54,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=120282.66666666667, ans=0.025
+2024-08-25 16:55:05,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=120336.0, ans=0.125
+2024-08-25 16:55:39,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=120442.66666666667, ans=0.125
+2024-08-25 16:55:43,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=120442.66666666667, ans=0.125
+2024-08-25 16:56:01,839 INFO [train.py:1114] (0/4) Epoch 10, batch 200, loss[loss=0.262, simple_loss=0.3072, pruned_loss=0.07912, ctc_loss=0.1466, over 18272.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.2994, pruned_loss=0.07085, ctc_loss=0.1329, over 2434216.39 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:56:02,447 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.32 vs. limit=15.0
+2024-08-25 16:56:10,317 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.623e-03
+2024-08-25 16:56:11,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=120549.33333333333, ans=0.125
+2024-08-25 16:57:32,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=120656.0, ans=0.125
+2024-08-25 16:57:36,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=120656.0, ans=0.1
+2024-08-25 16:58:07,761 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.824e+02 2.064e+02 2.548e+02 6.143e+02, threshold=4.128e+02, percent-clipped=2.0
+2024-08-25 16:58:32,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120816.0, ans=0.125
+2024-08-25 16:58:32,994 INFO [train.py:1114] (0/4) Epoch 10, batch 250, loss[loss=0.2775, simple_loss=0.324, pruned_loss=0.08482, ctc_loss=0.1532, over 19445.00 frames. ], tot_loss[loss=0.246, simple_loss=0.2989, pruned_loss=0.07023, ctc_loss=0.1318, over 2755282.56 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:58:58,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-25 16:58:59,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=120869.33333333333, ans=0.0
+2024-08-25 16:59:47,532 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.87 vs. limit=22.5
+2024-08-25 16:59:56,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=121029.33333333333, ans=0.2
+2024-08-25 17:00:02,078 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.17 vs. limit=22.5
+2024-08-25 17:00:08,739 INFO [train.py:1114] (0/4) Epoch 10, batch 300, loss[loss=0.2971, simple_loss=0.3351, pruned_loss=0.09341, ctc_loss=0.1805, over 19529.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.2977, pruned_loss=0.06965, ctc_loss=0.1309, over 3000652.65 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:00:21,279 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=121082.66666666667, ans=0.0
+2024-08-25 17:00:22,718 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.47 vs. limit=15.0
+2024-08-25 17:00:23,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=121082.66666666667, ans=0.125
+2024-08-25 17:00:34,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=121136.0, ans=0.125
+2024-08-25 17:00:43,123 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.09 vs. limit=6.0
+2024-08-25 17:00:50,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=121189.33333333333, ans=0.125
+2024-08-25 17:01:01,168 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.908e+02 2.186e+02 2.769e+02 4.118e+02, threshold=4.372e+02, percent-clipped=0.0
+2024-08-25 17:02:40,284 INFO [train.py:1114] (0/4) Epoch 10, batch 350, loss[loss=0.2065, simple_loss=0.2655, pruned_loss=0.05429, ctc_loss=0.09698, over 19747.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.2977, pruned_loss=0.06953, ctc_loss=0.1309, over 3191619.59 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:02:49,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=121349.33333333333, ans=15.0
+2024-08-25 17:02:58,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=121402.66666666667, ans=0.2
+2024-08-25 17:03:42,414 INFO [train.py:1114] (0/4) Epoch 10, batch 400, loss[loss=0.2343, simple_loss=0.2967, pruned_loss=0.06266, ctc_loss=0.1162, over 19862.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2966, pruned_loss=0.06876, ctc_loss=0.1293, over 3342903.28 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:03:44,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=121616.0, ans=0.1
+2024-08-25 17:03:47,209 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=121616.0, ans=0.125
+2024-08-25 17:03:48,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=121616.0, ans=0.0
+2024-08-25 17:03:54,590 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=121669.33333333333, ans=0.125
+2024-08-25 17:03:54,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=121669.33333333333, ans=0.125
+2024-08-25 17:04:05,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=121669.33333333333, ans=0.015
+2024-08-25 17:04:33,762 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 1.874e+02 2.151e+02 2.761e+02 4.102e+02, threshold=4.302e+02, percent-clipped=0.0
+2024-08-25 17:04:42,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=121829.33333333333, ans=0.1
+2024-08-25 17:04:47,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=121829.33333333333, ans=0.0
+2024-08-25 17:04:50,479 INFO [train.py:1114] (0/4) Epoch 10, batch 450, loss[loss=0.232, simple_loss=0.2942, pruned_loss=0.06106, ctc_loss=0.1193, over 19603.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.297, pruned_loss=0.06914, ctc_loss=0.1302, over 3450466.04 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:07:13,225 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.66 vs. limit=15.0
+2024-08-25 17:07:15,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=121936.0, ans=0.2
+2024-08-25 17:07:43,066 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.28 vs. limit=12.0
+2024-08-25 17:07:48,197 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=121989.33333333333, ans=0.125
+2024-08-25 17:08:12,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=122042.66666666667, ans=0.2
+2024-08-25 17:09:04,069 INFO [train.py:1114] (0/4) Epoch 10, batch 500, loss[loss=0.2553, simple_loss=0.305, pruned_loss=0.07505, ctc_loss=0.1389, over 19671.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2957, pruned_loss=0.06848, ctc_loss=0.1291, over 3546117.80 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:09:09,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=122149.33333333333, ans=0.0
+2024-08-25 17:09:25,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=122202.66666666667, ans=0.1
+2024-08-25 17:09:29,048 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=122202.66666666667, ans=0.2
+2024-08-25 17:09:30,453 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.49 vs. limit=22.5
+2024-08-25 17:09:36,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=122202.66666666667, ans=0.125
+2024-08-25 17:09:41,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=122256.0, ans=0.2
+2024-08-25 17:10:03,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=122256.0, ans=0.125
+2024-08-25 17:10:21,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=122309.33333333333, ans=10.0
+2024-08-25 17:10:22,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=122309.33333333333, ans=0.0
+2024-08-25 17:10:36,242 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.797e+02 2.290e+02 2.870e+02 3.920e+02, threshold=4.579e+02, percent-clipped=0.0
+2024-08-25 17:10:38,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-25 17:10:51,440 INFO [train.py:1114] (0/4) Epoch 10, batch 550, loss[loss=0.2989, simple_loss=0.3358, pruned_loss=0.09475, ctc_loss=0.1812, over 19236.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2963, pruned_loss=0.06908, ctc_loss=0.1303, over 3606988.23 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:11:07,277 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.35 vs. limit=15.0
+2024-08-25 17:11:09,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=122416.0, ans=0.2
+2024-08-25 17:13:54,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=122576.0, ans=0.0
+2024-08-25 17:14:01,433 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.20 vs. limit=6.0
+2024-08-25 17:14:20,767 INFO [train.py:1114] (0/4) Epoch 10, batch 600, loss[loss=0.2906, simple_loss=0.3277, pruned_loss=0.0928, ctc_loss=0.1695, over 19406.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2964, pruned_loss=0.06888, ctc_loss=0.1301, over 3665624.41 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:14:22,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=122682.66666666667, ans=0.125
+2024-08-25 17:14:27,221 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.70 vs. limit=10.0
+2024-08-25 17:15:05,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=122842.66666666667, ans=0.125
+2024-08-25 17:15:08,638 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 2.061e+02 2.496e+02 4.365e+02, threshold=4.122e+02, percent-clipped=0.0
+2024-08-25 17:15:10,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=122896.0, ans=0.0
+2024-08-25 17:15:15,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=122896.0, ans=0.1
+2024-08-25 17:15:24,797 INFO [train.py:1114] (0/4) Epoch 10, batch 650, loss[loss=0.2502, simple_loss=0.2998, pruned_loss=0.07247, ctc_loss=0.1392, over 19767.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2955, pruned_loss=0.0681, ctc_loss=0.1286, over 3716191.05 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:15:43,920 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.47 vs. limit=22.5
+2024-08-25 17:15:51,398 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.60 vs. limit=6.0
+2024-08-25 17:16:01,210 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=123109.33333333333, ans=0.04949747468305833
+2024-08-25 17:16:05,117 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.37 vs. limit=15.0
+2024-08-25 17:16:34,664 INFO [train.py:1114] (0/4) Epoch 10, batch 700, loss[loss=0.2579, simple_loss=0.3053, pruned_loss=0.07646, ctc_loss=0.1438, over 19721.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.296, pruned_loss=0.06853, ctc_loss=0.1293, over 3747909.39 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:17:41,336 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.67 vs. limit=15.0
+2024-08-25 17:17:50,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=123269.33333333333, ans=0.125
+2024-08-25 17:18:07,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=123376.0, ans=0.2
+2024-08-25 17:18:13,482 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 1.934e+02 2.276e+02 3.026e+02 5.626e+02, threshold=4.552e+02, percent-clipped=3.0
+2024-08-25 17:18:16,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=123429.33333333333, ans=0.1
+2024-08-25 17:18:17,694 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.86 vs. limit=15.0
+2024-08-25 17:18:22,935 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=123429.33333333333, ans=0.0
+2024-08-25 17:18:28,275 INFO [train.py:1114] (0/4) Epoch 10, batch 750, loss[loss=0.252, simple_loss=0.3101, pruned_loss=0.07096, ctc_loss=0.1303, over 19860.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2954, pruned_loss=0.06796, ctc_loss=0.128, over 3774351.17 frames. ], batch size: 55, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:18:32,973 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:18:40,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=123536.0, ans=6.0
+2024-08-25 17:18:40,901 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.89 vs. limit=6.0
+2024-08-25 17:19:01,857 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.16 vs. limit=22.5
+2024-08-25 17:19:24,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=123696.0, ans=0.125
+2024-08-25 17:19:25,045 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.64 vs. limit=15.0
+2024-08-25 17:19:32,718 INFO [train.py:1114] (0/4) Epoch 10, batch 800, loss[loss=0.2283, simple_loss=0.2788, pruned_loss=0.06537, ctc_loss=0.1176, over 19816.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.2958, pruned_loss=0.06824, ctc_loss=0.1285, over 3795624.09 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:04,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=123802.66666666667, ans=0.125
+2024-08-25 17:20:04,502 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.71 vs. limit=10.0
+2024-08-25 17:20:13,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=123856.0, ans=0.1
+2024-08-25 17:20:24,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=123909.33333333333, ans=0.0
+2024-08-25 17:20:24,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=123909.33333333333, ans=0.125
+2024-08-25 17:20:26,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=123909.33333333333, ans=10.0
+2024-08-25 17:20:33,018 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.887e+02 2.136e+02 2.736e+02 3.984e+02, threshold=4.273e+02, percent-clipped=0.0
+2024-08-25 17:20:47,944 INFO [train.py:1114] (0/4) Epoch 10, batch 850, loss[loss=0.2385, simple_loss=0.2973, pruned_loss=0.06431, ctc_loss=0.1277, over 19633.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2955, pruned_loss=0.06821, ctc_loss=0.1284, over 3815807.51 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:59,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=124069.33333333333, ans=0.125
+2024-08-25 17:21:11,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=124122.66666666667, ans=0.125
+2024-08-25 17:21:13,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=124122.66666666667, ans=0.125
+2024-08-25 17:22:20,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=124229.33333333333, ans=0.0
+2024-08-25 17:22:28,570 INFO [train.py:1114] (0/4) Epoch 10, batch 900, loss[loss=0.2556, simple_loss=0.2873, pruned_loss=0.08098, ctc_loss=0.155, over 19398.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2955, pruned_loss=0.06868, ctc_loss=0.1292, over 3820191.06 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:22:36,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=124282.66666666667, ans=0.1
+2024-08-25 17:22:36,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_na.min_abs, batch_count=124282.66666666667, ans=0.02
+2024-08-25 17:23:12,937 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=124442.66666666667, ans=0.0
+2024-08-25 17:23:13,390 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.54 vs. limit=10.0
+2024-08-25 17:23:13,941 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.854e+02 2.167e+02 2.763e+02 5.395e+02, threshold=4.333e+02, percent-clipped=2.0
+2024-08-25 17:23:30,305 INFO [train.py:1114] (0/4) Epoch 10, batch 950, loss[loss=0.2249, simple_loss=0.2835, pruned_loss=0.05984, ctc_loss=0.1166, over 19493.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2962, pruned_loss=0.06907, ctc_loss=0.13, over 3821394.20 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:23:41,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=124602.66666666667, ans=0.125
+2024-08-25 17:23:50,675 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.30 vs. limit=22.5
+2024-08-25 17:24:24,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=124762.66666666667, ans=0.1
+2024-08-25 17:24:34,471 INFO [train.py:1114] (0/4) Epoch 10, batch 1000, loss[loss=0.2575, simple_loss=0.306, pruned_loss=0.07521, ctc_loss=0.1461, over 19839.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2965, pruned_loss=0.06926, ctc_loss=0.1301, over 3818105.96 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:24:43,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=124816.0, ans=0.0
+2024-08-25 17:24:50,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=124869.33333333333, ans=0.125
+2024-08-25 17:25:10,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=124976.0, ans=0.125
+2024-08-25 17:25:11,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=124976.0, ans=0.125
+2024-08-25 17:25:18,036 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 1.797e+02 2.069e+02 2.553e+02 4.130e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-25 17:25:25,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=125029.33333333333, ans=0.125
+2024-08-25 17:25:33,282 INFO [train.py:1114] (0/4) Epoch 10, batch 1050, loss[loss=0.2865, simple_loss=0.3298, pruned_loss=0.08846, ctc_loss=0.1655, over 19856.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2963, pruned_loss=0.06914, ctc_loss=0.1301, over 3822872.28 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:25:34,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=125082.66666666667, ans=0.025
+2024-08-25 17:26:00,305 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:26:13,146 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.66 vs. limit=15.0
+2024-08-25 17:26:20,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=125296.0, ans=0.125
+2024-08-25 17:26:21,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=125296.0, ans=0.1
+2024-08-25 17:26:25,798 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=125296.0, ans=0.125
+2024-08-25 17:26:32,056 INFO [train.py:1114] (0/4) Epoch 10, batch 1100, loss[loss=0.2563, simple_loss=0.3004, pruned_loss=0.07717, ctc_loss=0.1445, over 19586.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2956, pruned_loss=0.06839, ctc_loss=0.1287, over 3830875.15 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:26:32,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=125349.33333333333, ans=0.025
+2024-08-25 17:26:58,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=125456.0, ans=0.125
+2024-08-25 17:27:08,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=125456.0, ans=0.0
+2024-08-25 17:27:18,165 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.787e+02 2.060e+02 2.560e+02 4.808e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 17:27:33,334 INFO [train.py:1114] (0/4) Epoch 10, batch 1150, loss[loss=0.2148, simple_loss=0.2764, pruned_loss=0.05445, ctc_loss=0.1104, over 19602.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2954, pruned_loss=0.06826, ctc_loss=0.1284, over 3828605.14 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:27:40,395 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=125616.0, ans=0.125
+2024-08-25 17:27:40,590 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.30 vs. limit=10.0
+2024-08-25 17:28:10,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=125722.66666666667, ans=0.025
+2024-08-25 17:28:25,924 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=125776.0, ans=0.0
+2024-08-25 17:28:29,069 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.65 vs. limit=15.0
+2024-08-25 17:28:31,523 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.89 vs. limit=15.0
+2024-08-25 17:28:44,410 INFO [train.py:1114] (0/4) Epoch 10, batch 1200, loss[loss=0.2472, simple_loss=0.3056, pruned_loss=0.06717, ctc_loss=0.136, over 19844.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2958, pruned_loss=0.06827, ctc_loss=0.1286, over 3823900.47 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:28:56,348 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.63 vs. limit=15.0
+2024-08-25 17:29:04,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=125936.0, ans=0.2
+2024-08-25 17:29:30,100 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.823e+02 2.047e+02 2.358e+02 4.051e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 17:29:32,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=126096.0, ans=0.125
+2024-08-25 17:29:33,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=126096.0, ans=0.125
+2024-08-25 17:29:45,835 INFO [train.py:1114] (0/4) Epoch 10, batch 1250, loss[loss=0.2655, simple_loss=0.3108, pruned_loss=0.07998, ctc_loss=0.1506, over 19528.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.2965, pruned_loss=0.06859, ctc_loss=0.1288, over 3842182.81 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:30:57,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=126362.66666666667, ans=0.125
+2024-08-25 17:30:59,775 INFO [train.py:1114] (0/4) Epoch 10, batch 1300, loss[loss=0.2759, simple_loss=0.3174, pruned_loss=0.08596, ctc_loss=0.1563, over 18915.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2957, pruned_loss=0.06803, ctc_loss=0.1279, over 3846635.47 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:31:45,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=126469.33333333333, ans=0.125
+2024-08-25 17:32:13,113 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.900e+02 2.303e+02 2.970e+02 5.096e+02, threshold=4.606e+02, percent-clipped=7.0
+2024-08-25 17:32:19,441 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.47 vs. limit=15.0
+2024-08-25 17:32:20,403 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.29 vs. limit=22.5
+2024-08-25 17:32:28,194 INFO [train.py:1114] (0/4) Epoch 10, batch 1350, loss[loss=0.2241, simple_loss=0.285, pruned_loss=0.05938, ctc_loss=0.111, over 19767.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2953, pruned_loss=0.0676, ctc_loss=0.1269, over 3858173.24 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:32:53,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=126789.33333333333, ans=0.09899494936611666
+2024-08-25 17:33:01,714 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=126842.66666666667, ans=0.07
+2024-08-25 17:33:18,407 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-25 17:33:30,485 INFO [train.py:1114] (0/4) Epoch 10, batch 1400, loss[loss=0.2204, simple_loss=0.2691, pruned_loss=0.06153, ctc_loss=0.1218, over 19682.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.2946, pruned_loss=0.06727, ctc_loss=0.1261, over 3865225.95 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:33:38,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.33 vs. limit=15.0
+2024-08-25 17:33:44,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127002.66666666667, ans=0.1
+2024-08-25 17:33:49,710 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.63 vs. limit=12.0
+2024-08-25 17:33:57,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=127056.0, ans=0.125
+2024-08-25 17:34:42,450 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.856e+02 2.167e+02 2.631e+02 4.500e+02, threshold=4.335e+02, percent-clipped=0.0
+2024-08-25 17:35:02,138 INFO [train.py:1114] (0/4) Epoch 10, batch 1450, loss[loss=0.2367, simple_loss=0.3081, pruned_loss=0.06006, ctc_loss=0.113, over 19636.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2955, pruned_loss=0.06753, ctc_loss=0.1266, over 3863599.03 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:35:06,930 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=127216.0, ans=0.125
+2024-08-25 17:35:12,751 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.62 vs. limit=15.0
+2024-08-25 17:35:22,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=127269.33333333333, ans=0.0
+2024-08-25 17:35:57,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=127429.33333333333, ans=0.125
+2024-08-25 17:36:02,114 INFO [train.py:1114] (0/4) Epoch 10, batch 1500, loss[loss=0.2869, simple_loss=0.3256, pruned_loss=0.08966, ctc_loss=0.1721, over 19587.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2966, pruned_loss=0.06837, ctc_loss=0.1284, over 3862952.85 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:36:05,112 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.17 vs. limit=12.0
+2024-08-25 17:36:09,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=127482.66666666667, ans=0.025
+2024-08-25 17:36:43,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=127589.33333333333, ans=0.0
+2024-08-25 17:36:50,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=127642.66666666667, ans=0.125
+2024-08-25 17:36:55,685 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 1.877e+02 2.186e+02 2.626e+02 4.478e+02, threshold=4.372e+02, percent-clipped=1.0
+2024-08-25 17:37:23,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=127749.33333333333, ans=0.125
+2024-08-25 17:37:24,243 INFO [train.py:1114] (0/4) Epoch 10, batch 1550, loss[loss=0.2225, simple_loss=0.2841, pruned_loss=0.05873, ctc_loss=0.1084, over 19610.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2964, pruned_loss=0.06857, ctc_loss=0.1288, over 3847527.24 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-25 17:37:31,205 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=127749.33333333333, ans=0.125
+2024-08-25 17:37:40,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=127802.66666666667, ans=0.125
+2024-08-25 17:39:46,817 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=127962.66666666667, ans=0.0
+2024-08-25 17:39:52,372 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-24000.pt
+2024-08-25 17:41:06,795 INFO [train.py:1114] (0/4) Epoch 10, batch 1600, loss[loss=0.2342, simple_loss=0.2929, pruned_loss=0.06362, ctc_loss=0.1204, over 19839.00 frames. ], tot_loss[loss=0.243, simple_loss=0.2965, pruned_loss=0.06879, ctc_loss=0.1295, over 3836742.72 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:42:06,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=128016.0, ans=0.0
+2024-08-25 17:42:35,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=128016.0, ans=0.0
+2024-08-25 17:42:42,620 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.79 vs. limit=22.5
+2024-08-25 17:42:44,833 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.82 vs. limit=22.5
+2024-08-25 17:42:45,327 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=128069.33333333333, ans=0.04949747468305833
+2024-08-25 17:43:02,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=128176.0, ans=0.125
+2024-08-25 17:43:14,424 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=128176.0, ans=0.125
+2024-08-25 17:43:24,235 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 1.849e+02 2.080e+02 2.733e+02 5.175e+02, threshold=4.161e+02, percent-clipped=4.0
+2024-08-25 17:43:52,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=128229.33333333333, ans=0.125
+2024-08-25 17:43:54,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=128229.33333333333, ans=0.125
+2024-08-25 17:44:00,791 INFO [train.py:1114] (0/4) Epoch 10, batch 1650, loss[loss=0.2511, simple_loss=0.3065, pruned_loss=0.0702, ctc_loss=0.1381, over 19641.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2959, pruned_loss=0.06834, ctc_loss=0.1287, over 3834161.07 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:44:34,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=128336.0, ans=0.09899494936611666
+2024-08-25 17:45:16,124 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.63 vs. limit=15.0
+2024-08-25 17:45:19,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=128496.0, ans=0.125
+2024-08-25 17:45:46,338 INFO [train.py:1114] (0/4) Epoch 10, batch 1700, loss[loss=0.2366, simple_loss=0.2794, pruned_loss=0.06986, ctc_loss=0.1354, over 19696.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.2952, pruned_loss=0.06751, ctc_loss=0.1269, over 3847940.54 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:46:32,391 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.233e-01
+2024-08-25 17:46:34,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=128602.66666666667, ans=0.2
+2024-08-25 17:46:36,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=128602.66666666667, ans=0.125
+2024-08-25 17:46:50,718 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=128656.0, ans=0.125
+2024-08-25 17:47:10,886 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.94 vs. limit=15.0
+2024-08-25 17:47:11,344 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.773e+02 2.059e+02 2.527e+02 4.467e+02, threshold=4.119e+02, percent-clipped=1.0
+2024-08-25 17:48:08,448 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.75 vs. limit=6.0
+2024-08-25 17:48:12,453 INFO [train.py:1114] (0/4) Epoch 10, batch 1750, loss[loss=0.238, simple_loss=0.2779, pruned_loss=0.07156, ctc_loss=0.1376, over 19649.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2947, pruned_loss=0.06745, ctc_loss=0.1271, over 3852797.68 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:48:20,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=128816.0, ans=0.125
+2024-08-25 17:48:25,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=128869.33333333333, ans=0.125
+2024-08-25 17:48:39,200 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.25 vs. limit=10.0
+2024-08-25 17:48:45,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=128922.66666666667, ans=0.125
+2024-08-25 17:49:11,939 INFO [train.py:1114] (0/4) Epoch 10, batch 1800, loss[loss=0.2234, simple_loss=0.2901, pruned_loss=0.05637, ctc_loss=0.1099, over 19616.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.2951, pruned_loss=0.0677, ctc_loss=0.1276, over 3854022.31 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 17:50:12,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=129136.0, ans=0.95
+2024-08-25 18:00:10,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=129189.33333333333, ans=0.0
+2024-08-25 18:06:34,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129242.66666666667, ans=0.1
+2024-08-25 18:08:39,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=129242.66666666667, ans=0.0
+2024-08-25 18:11:17,736 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.930e+02 2.270e+02 3.115e+02 5.695e+02, threshold=4.540e+02, percent-clipped=10.0
+2024-08-25 18:13:41,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=129296.0, ans=0.125
+2024-08-25 18:17:46,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=129296.0, ans=0.125
+2024-08-25 18:19:59,315 INFO [train.py:1114] (0/4) Epoch 10, batch 1850, loss[loss=0.2533, simple_loss=0.3126, pruned_loss=0.07038, ctc_loss=0.1329, over 19577.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2948, pruned_loss=0.0675, ctc_loss=0.127, over 3858493.05 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:26:29,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=129456.0, ans=0.125
+2024-08-25 18:28:59,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=129509.33333333333, ans=0.125
+2024-08-25 18:29:10,893 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 18:29:51,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=129509.33333333333, ans=0.025
+2024-08-25 18:32:26,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=129562.66666666667, ans=0.2
+2024-08-25 18:32:37,431 INFO [train.py:1114] (0/4) Epoch 10, batch 1900, loss[loss=0.2149, simple_loss=0.2877, pruned_loss=0.05128, ctc_loss=0.09868, over 19653.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2959, pruned_loss=0.06815, ctc_loss=0.1279, over 3864230.46 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:33:08,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=129616.0, ans=0.125
+2024-08-25 18:35:38,493 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.39 vs. limit=15.0
+2024-08-25 18:35:38,800 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.55 vs. limit=15.0
+2024-08-25 18:36:33,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=129722.66666666667, ans=0.125
+2024-08-25 18:37:43,362 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.882e+02 2.156e+02 2.772e+02 4.689e+02, threshold=4.313e+02, percent-clipped=1.0
+2024-08-25 18:37:43,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=129776.0, ans=0.125
+2024-08-25 18:38:09,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=129829.33333333333, ans=0.0
+2024-08-25 18:38:51,199 INFO [train.py:1114] (0/4) Epoch 10, batch 1950, loss[loss=0.2462, simple_loss=0.2931, pruned_loss=0.07125, ctc_loss=0.1419, over 19591.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2964, pruned_loss=0.06802, ctc_loss=0.1277, over 3872510.94 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:38:51,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=129882.66666666667, ans=0.125
+2024-08-25 18:39:44,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=129882.66666666667, ans=0.0
+2024-08-25 18:40:13,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=129936.0, ans=0.125
+2024-08-25 18:40:23,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=129936.0, ans=0.125
+2024-08-25 18:42:07,650 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.05 vs. limit=22.5
+2024-08-25 18:43:02,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=130096.0, ans=0.025
+2024-08-25 18:44:04,313 INFO [train.py:1114] (0/4) Epoch 10, batch 2000, loss[loss=0.2399, simple_loss=0.2808, pruned_loss=0.07218, ctc_loss=0.1366, over 19679.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2968, pruned_loss=0.06816, ctc_loss=0.1281, over 3857015.78 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:44:04,438 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 18:44:10,606 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.93 vs. limit=22.5
+2024-08-25 18:44:40,207 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=130202.66666666667, ans=0.125
+2024-08-25 18:45:35,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=130256.0, ans=0.2
+2024-08-25 18:47:32,406 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.882e+02 2.262e+02 2.707e+02 4.864e+02, threshold=4.523e+02, percent-clipped=1.0
+2024-08-25 18:48:39,774 INFO [train.py:1114] (0/4) Epoch 10, batch 2050, loss[loss=0.2176, simple_loss=0.2625, pruned_loss=0.06243, ctc_loss=0.1197, over 19736.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.2952, pruned_loss=0.06745, ctc_loss=0.1271, over 3851578.47 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:49:07,435 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.75 vs. limit=15.0
+2024-08-25 18:50:09,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=130469.33333333333, ans=0.125
+2024-08-25 18:50:09,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=130469.33333333333, ans=0.1
+2024-08-25 18:51:50,966 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=130629.33333333333, ans=0.125
+2024-08-25 18:52:20,501 INFO [train.py:1114] (0/4) Epoch 10, batch 2100, loss[loss=0.2321, simple_loss=0.2872, pruned_loss=0.06427, ctc_loss=0.121, over 19749.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2957, pruned_loss=0.06814, ctc_loss=0.128, over 3858479.51 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 16.0
+2024-08-25 18:52:23,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=130682.66666666667, ans=0.125
+2024-08-25 18:52:25,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=130682.66666666667, ans=0.0
+2024-08-25 18:52:40,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=130736.0, ans=0.0
+2024-08-25 18:53:34,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=130789.33333333333, ans=0.125
+2024-08-25 18:53:50,011 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.03 vs. limit=15.0
+2024-08-25 18:53:53,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=130842.66666666667, ans=0.0
+2024-08-25 18:53:58,216 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.839e+02 2.296e+02 2.721e+02 6.154e+02, threshold=4.593e+02, percent-clipped=3.0
+2024-08-25 18:54:37,552 INFO [train.py:1114] (0/4) Epoch 10, batch 2150, loss[loss=0.2206, simple_loss=0.2792, pruned_loss=0.05918, ctc_loss=0.109, over 19579.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2946, pruned_loss=0.06747, ctc_loss=0.1265, over 3869417.32 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:54:57,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=131002.66666666667, ans=0.125
+2024-08-25 18:55:03,796 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131002.66666666667, ans=0.1
+2024-08-25 18:55:37,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=131056.0, ans=0.05
+2024-08-25 18:56:07,027 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.06 vs. limit=15.0
+2024-08-25 18:56:19,487 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131162.66666666666, ans=0.1
+2024-08-25 18:56:33,016 INFO [train.py:1114] (0/4) Epoch 10, batch 2200, loss[loss=0.2342, simple_loss=0.3019, pruned_loss=0.06098, ctc_loss=0.1115, over 19596.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.2943, pruned_loss=0.06713, ctc_loss=0.1259, over 3867611.73 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:56:44,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=131216.0, ans=0.125
+2024-08-25 18:57:10,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff3.min_abs, batch_count=131269.33333333334, ans=0.2
+2024-08-25 18:57:38,763 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.20 vs. limit=15.0
+2024-08-25 18:57:50,638 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=131376.0, ans=0.2
+2024-08-25 18:57:51,462 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.773e+02 2.006e+02 2.540e+02 3.937e+02, threshold=4.013e+02, percent-clipped=0.0
+2024-08-25 18:58:07,774 INFO [train.py:1114] (0/4) Epoch 10, batch 2250, loss[loss=0.256, simple_loss=0.3059, pruned_loss=0.0758, ctc_loss=0.1362, over 19624.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.2946, pruned_loss=0.06733, ctc_loss=0.1261, over 3867698.01 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:58:14,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=131482.66666666666, ans=0.125
+2024-08-25 18:58:24,150 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.34 vs. limit=12.0
+2024-08-25 18:58:29,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131536.0, ans=0.1
+2024-08-25 18:58:33,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=131589.33333333334, ans=0.2
+2024-08-25 18:58:50,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.24 vs. limit=15.0
+2024-08-25 18:58:51,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=131642.66666666666, ans=0.09899494936611666
+2024-08-25 18:59:05,063 INFO [train.py:1114] (0/4) Epoch 10, batch 2300, loss[loss=0.2089, simple_loss=0.2714, pruned_loss=0.05281, ctc_loss=0.1019, over 19495.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2935, pruned_loss=0.06699, ctc_loss=0.1256, over 3861259.43 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:59:06,736 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.94 vs. limit=15.0
+2024-08-25 18:59:22,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=131802.66666666666, ans=0.0
+2024-08-25 18:59:30,792 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=131856.0, ans=0.125
+2024-08-25 18:59:52,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=131909.33333333334, ans=0.0
+2024-08-25 18:59:54,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=131909.33333333334, ans=0.1
+2024-08-25 19:00:00,743 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.815e+02 2.310e+02 2.961e+02 4.661e+02, threshold=4.621e+02, percent-clipped=5.0
+2024-08-25 19:00:07,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=131962.66666666666, ans=0.2
+2024-08-25 19:00:14,686 INFO [train.py:1114] (0/4) Epoch 10, batch 2350, loss[loss=0.2514, simple_loss=0.3124, pruned_loss=0.07053, ctc_loss=0.1233, over 19695.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.2938, pruned_loss=0.0672, ctc_loss=0.126, over 3864159.33 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:00:21,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=132016.0, ans=0.0
+2024-08-25 19:00:32,597 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.12 vs. limit=15.0
+2024-08-25 19:00:53,238 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=132176.0, ans=0.125
+2024-08-25 19:00:56,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=132176.0, ans=0.125
+2024-08-25 19:01:03,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=132229.33333333334, ans=0.125
+2024-08-25 19:01:13,185 INFO [train.py:1114] (0/4) Epoch 10, batch 2400, loss[loss=0.247, simple_loss=0.2994, pruned_loss=0.07074, ctc_loss=0.1327, over 19387.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.2961, pruned_loss=0.06838, ctc_loss=0.1283, over 3858298.54 frames. ], batch size: 67, lr: 1.46e-02, grad_scale: 32.0
+2024-08-25 19:01:24,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=132336.0, ans=0.125
+2024-08-25 19:01:29,472 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=132336.0, ans=0.025
+2024-08-25 19:01:32,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=132336.0, ans=0.125
+2024-08-25 19:01:35,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=132336.0, ans=0.0
+2024-08-25 19:01:58,227 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=8.03 vs. limit=12.0
+2024-08-25 19:02:10,724 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.986e+02 2.279e+02 2.618e+02 8.799e+02, threshold=4.558e+02, percent-clipped=0.0
+2024-08-25 19:02:21,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=132549.33333333334, ans=0.125
+2024-08-25 19:02:22,031 INFO [train.py:1114] (0/4) Epoch 10, batch 2450, loss[loss=0.3213, simple_loss=0.3418, pruned_loss=0.11, ctc_loss=0.2022, over 14117.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3008, pruned_loss=0.07194, ctc_loss=0.1354, over 3732667.83 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:03:09,889 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-10.pt
+2024-08-25 19:04:28,611 INFO [train.py:1114] (0/4) Epoch 11, batch 0, loss[loss=0.2676, simple_loss=0.305, pruned_loss=0.08401, ctc_loss=0.1556, over 19429.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.305, pruned_loss=0.08401, ctc_loss=0.1556, over 19429.00 frames. ], batch size: 48, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:04:28,612 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 19:04:55,863 INFO [train.py:1146] (0/4) Epoch 11, validation: loss=0.2031, simple_loss=0.2887, pruned_loss=0.04339, ctc_loss=0.0768, over 944034.00 frames.
+2024-08-25 19:04:55,863 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-25 19:04:58,730 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.07 vs. limit=15.0
+2024-08-25 19:05:00,571 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=132757.33333333334, ans=0.125
+2024-08-25 19:05:26,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=132864.0, ans=0.0
+2024-08-25 19:06:02,239 INFO [train.py:1114] (0/4) Epoch 11, batch 50, loss[loss=0.2049, simple_loss=0.2636, pruned_loss=0.05351, ctc_loss=0.09771, over 19703.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3005, pruned_loss=0.07083, ctc_loss=0.1351, over 844145.34 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:06:03,359 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.050e+02 2.234e+02 2.552e+02 4.359e+02, threshold=4.468e+02, percent-clipped=1.0
+2024-08-25 19:06:14,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=133024.0, ans=0.025
+2024-08-25 19:06:34,618 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.03 vs. limit=6.0
+2024-08-25 19:06:38,349 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=133130.66666666666, ans=0.95
+2024-08-25 19:06:51,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=133184.0, ans=0.1
+2024-08-25 19:07:25,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=133184.0, ans=0.0
+2024-08-25 19:07:42,547 INFO [train.py:1114] (0/4) Epoch 11, batch 100, loss[loss=0.2239, simple_loss=0.2842, pruned_loss=0.05928, ctc_loss=0.1126, over 19719.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.2998, pruned_loss=0.06887, ctc_loss=0.1306, over 1498581.10 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:07:54,845 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:07:59,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.59 vs. limit=15.0
+2024-08-25 19:08:20,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=133450.66666666666, ans=0.2
+2024-08-25 19:08:23,313 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.38 vs. limit=10.0
+2024-08-25 19:08:23,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=133450.66666666666, ans=0.125
+2024-08-25 19:08:26,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=133450.66666666666, ans=0.04949747468305833
+2024-08-25 19:08:45,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=133504.0, ans=0.0
+2024-08-25 19:08:59,332 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.36 vs. limit=12.0
+2024-08-25 19:09:10,084 INFO [train.py:1114] (0/4) Epoch 11, batch 150, loss[loss=0.2312, simple_loss=0.2738, pruned_loss=0.06807, ctc_loss=0.1311, over 19738.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.2956, pruned_loss=0.0666, ctc_loss=0.1259, over 2028235.97 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:09:12,929 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.747e+02 2.015e+02 2.344e+02 3.708e+02, threshold=4.031e+02, percent-clipped=0.0
+2024-08-25 19:09:59,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=133717.33333333334, ans=0.2
+2024-08-25 19:10:24,024 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.17 vs. limit=15.0
+2024-08-25 19:10:34,574 INFO [train.py:1114] (0/4) Epoch 11, batch 200, loss[loss=0.2805, simple_loss=0.3263, pruned_loss=0.08527, ctc_loss=0.1606, over 18128.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2935, pruned_loss=0.06534, ctc_loss=0.1233, over 2435787.07 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:11:34,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=133930.66666666666, ans=0.125
+2024-08-25 19:11:54,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=134037.33333333334, ans=0.2
+2024-08-25 19:11:59,247 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.35 vs. limit=15.0
+2024-08-25 19:12:01,076 INFO [train.py:1114] (0/4) Epoch 11, batch 250, loss[loss=0.2648, simple_loss=0.311, pruned_loss=0.07982, ctc_loss=0.1474, over 19397.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.293, pruned_loss=0.06547, ctc_loss=0.1235, over 2755511.01 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:12:02,131 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.716e+02 2.023e+02 2.469e+02 5.021e+02, threshold=4.046e+02, percent-clipped=3.0
+2024-08-25 19:12:14,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=134144.0, ans=0.1
+2024-08-25 19:12:18,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=134144.0, ans=0.025
+2024-08-25 19:12:19,616 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.95 vs. limit=15.0
+2024-08-25 19:12:21,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=134144.0, ans=0.2
+2024-08-25 19:12:32,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=134197.33333333334, ans=0.0
+2024-08-25 19:12:58,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=134304.0, ans=0.125
+2024-08-25 19:13:03,621 INFO [train.py:1114] (0/4) Epoch 11, batch 300, loss[loss=0.2855, simple_loss=0.323, pruned_loss=0.09191, ctc_loss=0.1605, over 19497.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2921, pruned_loss=0.06496, ctc_loss=0.1226, over 2999613.45 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:13:06,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=134357.33333333334, ans=0.0
+2024-08-25 19:13:12,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=134357.33333333334, ans=0.0
+2024-08-25 19:13:39,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=134464.0, ans=0.1
+2024-08-25 19:13:57,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=134570.66666666666, ans=0.125
+2024-08-25 19:14:07,053 INFO [train.py:1114] (0/4) Epoch 11, batch 350, loss[loss=0.219, simple_loss=0.2709, pruned_loss=0.06068, ctc_loss=0.1143, over 19775.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2923, pruned_loss=0.06496, ctc_loss=0.1222, over 3190014.24 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:14:08,106 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.838e+02 2.258e+02 2.898e+02 4.827e+02, threshold=4.516e+02, percent-clipped=2.0
+2024-08-25 19:14:08,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=134624.0, ans=0.125
+2024-08-25 19:14:48,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.87 vs. limit=22.5
+2024-08-25 19:14:57,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=134730.66666666666, ans=0.125
+2024-08-25 19:15:05,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=134730.66666666666, ans=0.125
+2024-08-25 19:15:14,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=134784.0, ans=0.1
+2024-08-25 19:15:49,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=134837.33333333334, ans=0.0
+2024-08-25 19:15:49,704 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.58 vs. limit=10.0
+2024-08-25 19:15:57,894 INFO [train.py:1114] (0/4) Epoch 11, batch 400, loss[loss=0.2365, simple_loss=0.3, pruned_loss=0.06358, ctc_loss=0.1145, over 19835.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2923, pruned_loss=0.0649, ctc_loss=0.1221, over 3342096.11 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:15:59,598 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.53 vs. limit=15.0
+2024-08-25 19:16:15,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=134944.0, ans=0.0
+2024-08-25 19:16:30,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=134944.0, ans=0.035
+2024-08-25 19:16:31,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=134944.0, ans=0.09899494936611666
+2024-08-25 19:16:38,930 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=134997.33333333334, ans=0.125
+2024-08-25 19:17:13,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=135104.0, ans=0.05
+2024-08-25 19:17:22,222 INFO [train.py:1114] (0/4) Epoch 11, batch 450, loss[loss=0.2196, simple_loss=0.2866, pruned_loss=0.05533, ctc_loss=0.1048, over 19612.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2922, pruned_loss=0.06509, ctc_loss=0.1223, over 3450879.35 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:17:31,726 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.841e+02 2.102e+02 2.681e+02 4.407e+02, threshold=4.204e+02, percent-clipped=0.0
+2024-08-25 19:17:41,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=135210.66666666666, ans=0.0
+2024-08-25 19:17:49,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=135210.66666666666, ans=0.125
+2024-08-25 19:18:31,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=135370.66666666666, ans=0.125
+2024-08-25 19:18:34,775 INFO [train.py:1114] (0/4) Epoch 11, batch 500, loss[loss=0.2582, simple_loss=0.3072, pruned_loss=0.07608, ctc_loss=0.1427, over 19700.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2916, pruned_loss=0.06514, ctc_loss=0.1225, over 3546311.09 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:18:54,494 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.51 vs. limit=15.0
+2024-08-25 19:19:42,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=135584.0, ans=0.1
+2024-08-25 19:20:13,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=135637.33333333334, ans=0.2
+2024-08-25 19:20:13,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=135637.33333333334, ans=0.125
+2024-08-25 19:20:17,206 INFO [train.py:1114] (0/4) Epoch 11, batch 550, loss[loss=0.217, simple_loss=0.2835, pruned_loss=0.05417, ctc_loss=0.1053, over 19187.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.2918, pruned_loss=0.0653, ctc_loss=0.1228, over 3609092.84 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:20:18,391 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.822e+02 2.069e+02 2.386e+02 4.149e+02, threshold=4.137e+02, percent-clipped=0.0
+2024-08-25 19:20:21,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=135690.66666666666, ans=0.0
+2024-08-25 19:20:22,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-25 19:20:42,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=135744.0, ans=0.125
+2024-08-25 19:20:49,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=135797.33333333334, ans=0.125
+2024-08-25 19:21:08,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=135850.66666666666, ans=0.125
+2024-08-25 19:21:22,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=135904.0, ans=0.0
+2024-08-25 19:21:24,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=135904.0, ans=0.0
+2024-08-25 19:21:30,824 INFO [train.py:1114] (0/4) Epoch 11, batch 600, loss[loss=0.2449, simple_loss=0.3064, pruned_loss=0.06652, ctc_loss=0.1261, over 19392.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2918, pruned_loss=0.06505, ctc_loss=0.1224, over 3667260.93 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:21:43,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=135957.33333333334, ans=0.125
+2024-08-25 19:22:28,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=136010.66666666666, ans=0.07
+2024-08-25 19:22:34,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.74 vs. limit=12.0
+2024-08-25 19:22:48,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=136064.0, ans=0.025
+2024-08-25 19:23:41,199 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=136170.66666666666, ans=0.125
+2024-08-25 19:23:54,558 INFO [train.py:1114] (0/4) Epoch 11, batch 650, loss[loss=0.2156, simple_loss=0.2866, pruned_loss=0.05134, ctc_loss=0.1046, over 19769.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.291, pruned_loss=0.06468, ctc_loss=0.1218, over 3717034.93 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:23:55,634 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.913e+02 2.094e+02 2.738e+02 4.984e+02, threshold=4.187e+02, percent-clipped=5.0
+2024-08-25 19:24:42,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=136330.66666666666, ans=0.2
+2024-08-25 19:24:43,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=136330.66666666666, ans=0.025
+2024-08-25 19:24:57,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=136384.0, ans=0.0
+2024-08-25 19:25:06,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=136437.33333333334, ans=0.125
+2024-08-25 19:25:12,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=136437.33333333334, ans=0.2
+2024-08-25 19:25:18,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=136490.66666666666, ans=0.025
+2024-08-25 19:25:34,150 INFO [train.py:1114] (0/4) Epoch 11, batch 700, loss[loss=0.225, simple_loss=0.2877, pruned_loss=0.05883, ctc_loss=0.1117, over 19722.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2915, pruned_loss=0.06457, ctc_loss=0.1215, over 3748776.55 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:26:11,734 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.95 vs. limit=15.0
+2024-08-25 19:26:32,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=136544.0, ans=0.0
+2024-08-25 19:26:43,146 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.75 vs. limit=10.0
+2024-08-25 19:26:52,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=136597.33333333334, ans=0.07
+2024-08-25 19:27:12,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=136650.66666666666, ans=0.2
+2024-08-25 19:27:49,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=136704.0, ans=0.125
+2024-08-25 19:28:08,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=136757.33333333334, ans=0.2
+2024-08-25 19:28:09,974 INFO [train.py:1114] (0/4) Epoch 11, batch 750, loss[loss=0.2403, simple_loss=0.295, pruned_loss=0.06881, ctc_loss=0.1201, over 19492.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2906, pruned_loss=0.0642, ctc_loss=0.1206, over 3775041.64 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:28:25,939 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.821e+02 2.028e+02 2.720e+02 4.524e+02, threshold=4.057e+02, percent-clipped=2.0
+2024-08-25 19:28:49,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=136810.66666666666, ans=10.0
+2024-08-25 19:29:57,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=136917.33333333334, ans=0.125
+2024-08-25 19:30:12,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=136970.66666666666, ans=0.1
+2024-08-25 19:32:08,680 INFO [train.py:1114] (0/4) Epoch 11, batch 800, loss[loss=0.2108, simple_loss=0.2625, pruned_loss=0.0583, ctc_loss=0.1065, over 19826.00 frames. ], tot_loss[loss=0.234, simple_loss=0.291, pruned_loss=0.06435, ctc_loss=0.121, over 3797332.69 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:33:43,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=137237.33333333334, ans=0.0
+2024-08-25 19:33:47,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=137237.33333333334, ans=0.2
+2024-08-25 19:33:49,180 INFO [train.py:1114] (0/4) Epoch 11, batch 850, loss[loss=0.255, simple_loss=0.3096, pruned_loss=0.07322, ctc_loss=0.1348, over 19636.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.291, pruned_loss=0.06445, ctc_loss=0.1213, over 3815886.78 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:33:50,254 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.863e+02 2.065e+02 2.415e+02 4.305e+02, threshold=4.130e+02, percent-clipped=1.0
+2024-08-25 19:33:52,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=137290.66666666666, ans=0.0
+2024-08-25 19:33:55,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=137290.66666666666, ans=0.125
+2024-08-25 19:34:09,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=137344.0, ans=0.0
+2024-08-25 19:34:20,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=137397.33333333334, ans=0.0
+2024-08-25 19:34:40,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=137450.66666666666, ans=0.125
+2024-08-25 19:34:44,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=137450.66666666666, ans=0.035
+2024-08-25 19:34:47,638 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:35:05,261 INFO [train.py:1114] (0/4) Epoch 11, batch 900, loss[loss=0.2383, simple_loss=0.2797, pruned_loss=0.07069, ctc_loss=0.139, over 19416.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2917, pruned_loss=0.06509, ctc_loss=0.1224, over 3820477.17 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:35:23,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=137610.66666666666, ans=0.0
+2024-08-25 19:35:46,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=137664.0, ans=0.0
+2024-08-25 19:36:16,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.36 vs. limit=15.0
+2024-08-25 19:36:19,821 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.36 vs. limit=15.0
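+
+The `Whitening` lines compare a per-module statistic ("metric") against a scheduled limit; the statistic is minimal when the grouped feature covariance is proportional to the identity and grows as channels become correlated or unevenly scaled. One plausible formulation of such a metric, shown as a sketch (not necessarily the exact statistic scaling.py computes):
+
+import torch
+
+def whitening_metric_sketch(x: torch.Tensor, num_groups: int) -> torch.Tensor:
+    """x: (num_frames, num_channels); returns 1.0 iff cov is a multiple of I."""
+    n, c = x.shape
+    d = c // num_groups
+    x = x.reshape(n, num_groups, d).transpose(0, 1)      # (groups, n, d)
+    cov = x.transpose(1, 2) @ x / n                      # (groups, d, d)
+    num = (cov ** 2).mean(dim=(1, 2)) * d                # mean squared entry, rescaled
+    den = cov.diagonal(dim1=1, dim2=2).mean(dim=1) ** 2  # squared mean variance
+    return (num / den).mean()                            # >= 1.0; == 1.0 for cov = s*I
+
+# e.g. whitening_metric_sketch(torch.randn(100000, 64), num_groups=1) is close
+# to 1.0, while strongly correlated channels push it toward the logged limits.
+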
+2024-08-25 19:36:50,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=137770.66666666666, ans=0.125
+2024-08-25 19:36:53,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=137770.66666666666, ans=0.07
+2024-08-25 19:37:18,561 INFO [train.py:1114] (0/4) Epoch 11, batch 950, loss[loss=0.2694, simple_loss=0.3036, pruned_loss=0.08605, ctc_loss=0.1579, over 19507.00 frames. ], tot_loss[loss=0.236, simple_loss=0.292, pruned_loss=0.06539, ctc_loss=0.1229, over 3820471.57 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:37:19,709 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.805e+02 2.081e+02 2.536e+02 4.211e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-25 19:37:41,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=137877.33333333334, ans=0.125
+2024-08-25 19:37:41,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=137877.33333333334, ans=0.025
+2024-08-25 19:37:42,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137930.66666666666, ans=0.1
+2024-08-25 19:38:40,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=138090.66666666666, ans=0.125
+2024-08-25 19:38:48,986 INFO [train.py:1114] (0/4) Epoch 11, batch 1000, loss[loss=0.2112, simple_loss=0.2747, pruned_loss=0.05381, ctc_loss=0.1003, over 19840.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2924, pruned_loss=0.06571, ctc_loss=0.1233, over 3815736.60 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:39:27,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=138197.33333333334, ans=0.025
+2024-08-25 19:39:40,676 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.82 vs. limit=15.0
+2024-08-25 19:39:50,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=138250.66666666666, ans=0.2
+2024-08-25 19:40:03,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=138304.0, ans=0.125
+2024-08-25 19:40:14,835 INFO [train.py:1114] (0/4) Epoch 11, batch 1050, loss[loss=0.2528, simple_loss=0.3092, pruned_loss=0.07007, ctc_loss=0.1405, over 19837.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.2918, pruned_loss=0.06554, ctc_loss=0.1231, over 3821539.10 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:40:16,861 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.874e+02 2.329e+02 2.645e+02 4.211e+02, threshold=4.658e+02, percent-clipped=2.0
+2024-08-25 19:40:38,737 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.46 vs. limit=10.0
+2024-08-25 19:40:38,765 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.65 vs. limit=10.0
+2024-08-25 19:40:46,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=138464.0, ans=0.1
+2024-08-25 19:40:50,938 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.91 vs. limit=10.0
+2024-08-25 19:41:02,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=138517.33333333334, ans=0.1
+2024-08-25 19:41:26,135 INFO [train.py:1114] (0/4) Epoch 11, batch 1100, loss[loss=0.22, simple_loss=0.2796, pruned_loss=0.05806, ctc_loss=0.111, over 19566.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2915, pruned_loss=0.06509, ctc_loss=0.1221, over 3828733.98 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:41:26,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=138624.0, ans=0.125
+2024-08-25 19:41:27,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=138624.0, ans=0.125
+2024-08-25 19:41:55,522 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=138624.0, ans=0.1
+2024-08-25 19:42:26,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=138677.33333333334, ans=0.1
+2024-08-25 19:42:40,571 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=138677.33333333334, ans=0.1
+2024-08-25 19:43:03,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=138730.66666666666, ans=0.2
+2024-08-25 19:43:18,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=138784.0, ans=0.125
+2024-08-25 19:43:18,983 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.99 vs. limit=15.0
+2024-08-25 19:43:36,017 INFO [train.py:1114] (0/4) Epoch 11, batch 1150, loss[loss=0.2081, simple_loss=0.2754, pruned_loss=0.05111, ctc_loss=0.09626, over 19585.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2916, pruned_loss=0.06532, ctc_loss=0.1224, over 3827621.95 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:43:37,198 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.797e+02 2.039e+02 2.453e+02 4.580e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-25 19:44:29,118 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=139104.0, ans=0.04949747468305833
+2024-08-25 19:44:39,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=139104.0, ans=0.1
+2024-08-25 19:44:41,837 INFO [train.py:1114] (0/4) Epoch 11, batch 1200, loss[loss=0.2769, simple_loss=0.3301, pruned_loss=0.08174, ctc_loss=0.1507, over 19840.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2928, pruned_loss=0.06575, ctc_loss=0.1235, over 3823615.73 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:44:54,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=139210.66666666666, ans=0.0
+2024-08-25 19:45:15,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=139264.0, ans=0.025
+2024-08-25 19:45:37,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=139264.0, ans=0.0
+2024-08-25 19:46:04,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=139370.66666666666, ans=0.1
+2024-08-25 19:46:08,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=139370.66666666666, ans=0.125
+2024-08-25 19:46:15,640 INFO [train.py:1114] (0/4) Epoch 11, batch 1250, loss[loss=0.2163, simple_loss=0.2809, pruned_loss=0.05549, ctc_loss=0.1016, over 19520.00 frames. ], tot_loss[loss=0.236, simple_loss=0.2926, pruned_loss=0.06519, ctc_loss=0.1226, over 3842516.75 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:46:16,714 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.769e+02 1.992e+02 2.545e+02 3.633e+02, threshold=3.984e+02, percent-clipped=0.0
+2024-08-25 19:46:17,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=139424.0, ans=0.05
+2024-08-25 19:46:17,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=139424.0, ans=0.125
+2024-08-25 19:47:40,574 INFO [train.py:1114] (0/4) Epoch 11, batch 1300, loss[loss=0.274, simple_loss=0.3172, pruned_loss=0.084, ctc_loss=0.157, over 18823.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2922, pruned_loss=0.06514, ctc_loss=0.1225, over 3846394.09 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:47:53,909 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=139744.0, ans=0.025
+2024-08-25 19:47:53,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=139744.0, ans=0.2
+2024-08-25 19:48:21,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=139797.33333333334, ans=0.0
+2024-08-25 19:48:21,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=139797.33333333334, ans=0.125
+2024-08-25 19:48:31,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=139850.66666666666, ans=0.04949747468305833
+2024-08-25 19:48:56,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=139904.0, ans=0.125
+2024-08-25 19:48:59,253 INFO [train.py:1114] (0/4) Epoch 11, batch 1350, loss[loss=0.2473, simple_loss=0.3023, pruned_loss=0.0704, ctc_loss=0.1287, over 19769.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2918, pruned_loss=0.06497, ctc_loss=0.1221, over 3857051.96 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:49:01,647 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.851e+02 2.124e+02 2.742e+02 4.665e+02, threshold=4.248e+02, percent-clipped=3.0
+2024-08-25 19:49:29,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=140064.0, ans=0.125
+2024-08-25 19:49:34,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=140064.0, ans=0.1
+2024-08-25 19:49:51,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=140170.66666666666, ans=0.125
+2024-08-25 19:50:00,540 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=140170.66666666666, ans=0.125
+2024-08-25 19:50:07,171 INFO [train.py:1114] (0/4) Epoch 11, batch 1400, loss[loss=0.2027, simple_loss=0.2618, pruned_loss=0.05297, ctc_loss=0.09407, over 19656.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.292, pruned_loss=0.06534, ctc_loss=0.1225, over 3863704.71 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:50:08,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=140224.0, ans=0.0
+2024-08-25 19:50:08,990 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.96 vs. limit=15.0
+2024-08-25 19:50:40,785 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.23 vs. limit=12.0
+2024-08-25 19:50:52,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=140330.66666666666, ans=0.125
+2024-08-25 19:51:04,738 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=140330.66666666666, ans=0.1
+2024-08-25 19:51:08,517 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:51:23,835 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.27 vs. limit=22.5
+2024-08-25 19:51:38,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=140437.33333333334, ans=0.0
+2024-08-25 19:51:42,657 INFO [train.py:1114] (0/4) Epoch 11, batch 1450, loss[loss=0.2562, simple_loss=0.3027, pruned_loss=0.07657, ctc_loss=0.1415, over 19695.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.292, pruned_loss=0.06504, ctc_loss=0.1221, over 3861968.91 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:51:44,160 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:51:45,003 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.813e+02 2.052e+02 2.523e+02 4.896e+02, threshold=4.103e+02, percent-clipped=2.0
+2024-08-25 19:51:58,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=140544.0, ans=0.0
+2024-08-25 19:52:08,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=140544.0, ans=0.0
+2024-08-25 19:52:08,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=140544.0, ans=0.0
+2024-08-25 19:52:09,223 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.52 vs. limit=15.0
+2024-08-25 19:53:14,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=140704.0, ans=0.125
+2024-08-25 19:53:19,925 INFO [train.py:1114] (0/4) Epoch 11, batch 1500, loss[loss=0.2838, simple_loss=0.3285, pruned_loss=0.08556, ctc_loss=0.1702, over 19582.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.2925, pruned_loss=0.06524, ctc_loss=0.1228, over 3860681.04 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:53:27,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=140757.33333333334, ans=0.0
+2024-08-25 19:53:41,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=140757.33333333334, ans=0.1
+2024-08-25 19:53:50,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140810.66666666666, ans=0.125
+2024-08-25 19:54:52,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=140970.66666666666, ans=0.07
+2024-08-25 19:54:58,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=140970.66666666666, ans=0.0
+2024-08-25 19:55:04,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=140970.66666666666, ans=0.0
+2024-08-25 19:55:07,085 INFO [train.py:1114] (0/4) Epoch 11, batch 1550, loss[loss=0.2648, simple_loss=0.3125, pruned_loss=0.07705, ctc_loss=0.1576, over 19578.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.2931, pruned_loss=0.06593, ctc_loss=0.124, over 3845422.91 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:55:10,759 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 1.804e+02 2.014e+02 2.422e+02 4.168e+02, threshold=4.028e+02, percent-clipped=1.0
+2024-08-25 19:55:12,470 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.05 vs. limit=22.5
+2024-08-25 19:55:47,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=141077.33333333334, ans=0.2
+2024-08-25 19:55:58,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=141130.66666666666, ans=0.125
+2024-08-25 19:56:13,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=141130.66666666666, ans=0.125
+2024-08-25 19:57:03,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=141184.0, ans=0.025
+2024-08-25 19:57:19,245 INFO [train.py:1114] (0/4) Epoch 11, batch 1600, loss[loss=0.2156, simple_loss=0.2928, pruned_loss=0.04997, ctc_loss=0.09643, over 19843.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.2925, pruned_loss=0.06559, ctc_loss=0.1233, over 3834287.04 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:58:03,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=141397.33333333334, ans=0.0
+2024-08-25 19:58:30,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=141504.0, ans=0.125
+2024-08-25 19:58:45,632 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.30 vs. limit=15.0
+2024-08-25 19:59:27,402 INFO [train.py:1114] (0/4) Epoch 11, batch 1650, loss[loss=0.2303, simple_loss=0.2951, pruned_loss=0.06074, ctc_loss=0.1103, over 19656.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2925, pruned_loss=0.06567, ctc_loss=0.1233, over 3831344.31 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:59:29,886 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.768e+02 1.990e+02 2.303e+02 4.438e+02, threshold=3.979e+02, percent-clipped=2.0
+2024-08-25 19:59:49,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=141610.66666666666, ans=0.035
+2024-08-25 19:59:49,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.96 vs. limit=15.0
+2024-08-25 20:00:01,265 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.69 vs. limit=15.0
+2024-08-25 20:00:06,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=141610.66666666666, ans=0.0
+2024-08-25 20:00:29,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=141717.33333333334, ans=0.125
+2024-08-25 20:01:05,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=141770.66666666666, ans=0.2
+2024-08-25 20:01:17,967 INFO [train.py:1114] (0/4) Epoch 11, batch 1700, loss[loss=0.2242, simple_loss=0.2691, pruned_loss=0.0651, ctc_loss=0.1229, over 19650.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2916, pruned_loss=0.06514, ctc_loss=0.1224, over 3845513.70 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:01:35,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=141877.33333333334, ans=0.125
+2024-08-25 20:01:45,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=141930.66666666666, ans=0.125
+2024-08-25 20:02:16,970 INFO [train.py:1114] (0/4) Epoch 11, batch 1750, loss[loss=0.193, simple_loss=0.252, pruned_loss=0.04819, ctc_loss=0.09381, over 19641.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.291, pruned_loss=0.06468, ctc_loss=0.1216, over 3851280.81 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:02:20,527 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.814e+02 2.107e+02 2.366e+02 3.890e+02, threshold=4.214e+02, percent-clipped=0.0
+2024-08-25 20:02:59,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=142144.0, ans=0.0
+2024-08-25 20:03:13,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=142197.33333333334, ans=15.0
+2024-08-25 20:03:27,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=142250.66666666666, ans=0.1
+2024-08-25 20:03:33,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=142250.66666666666, ans=0.125
+2024-08-25 20:04:24,829 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=142304.0, ans=0.125
+2024-08-25 20:04:26,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=142357.33333333334, ans=0.125
+2024-08-25 20:04:27,016 INFO [train.py:1114] (0/4) Epoch 11, batch 1800, loss[loss=0.228, simple_loss=0.2977, pruned_loss=0.05735, ctc_loss=0.1091, over 19620.00 frames. ], tot_loss[loss=0.235, simple_loss=0.2914, pruned_loss=0.06491, ctc_loss=0.122, over 3852829.33 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:04:45,794 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.61 vs. limit=15.0
+2024-08-25 20:05:38,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=142517.33333333334, ans=0.125
+2024-08-25 20:05:47,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=142570.66666666666, ans=0.125
+2024-08-25 20:06:13,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=142570.66666666666, ans=0.125
+2024-08-25 20:06:15,200 INFO [train.py:1114] (0/4) Epoch 11, batch 1850, loss[loss=0.2233, simple_loss=0.2886, pruned_loss=0.05687, ctc_loss=0.1107, over 19558.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2913, pruned_loss=0.0649, ctc_loss=0.122, over 3855883.92 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:06:17,716 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=142624.0, ans=0.2
+2024-08-25 20:06:18,513 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.849e+02 2.256e+02 2.966e+02 5.642e+02, threshold=4.511e+02, percent-clipped=6.0
+2024-08-25 20:06:19,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=142624.0, ans=0.05
+2024-08-25 20:06:25,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=142677.33333333334, ans=0.04949747468305833
+2024-08-25 20:06:33,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=142677.33333333334, ans=0.025
+2024-08-25 20:06:47,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=142677.33333333334, ans=0.025
+2024-08-25 20:07:00,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=142730.66666666666, ans=0.0
+2024-08-25 20:07:07,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=142730.66666666666, ans=0.125
+2024-08-25 20:07:24,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=142784.0, ans=0.0
+2024-08-25 20:07:51,895 INFO [train.py:1114] (0/4) Epoch 11, batch 1900, loss[loss=0.2329, simple_loss=0.3008, pruned_loss=0.06092, ctc_loss=0.1078, over 19643.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.2915, pruned_loss=0.06471, ctc_loss=0.1216, over 3860584.81 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:07:56,460 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=142890.66666666666, ans=0.125
+2024-08-25 20:08:07,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=142944.0, ans=0.09899494936611666
+2024-08-25 20:09:49,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=142997.33333333334, ans=0.125
+2024-08-25 20:28:43,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=143050.66666666666, ans=0.125
+2024-08-25 20:42:44,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=143104.0, ans=0.2
+2024-08-25 20:55:30,006 INFO [train.py:1114] (0/4) Epoch 11, batch 1950, loss[loss=0.2085, simple_loss=0.2806, pruned_loss=0.04923, ctc_loss=0.09483, over 19582.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2926, pruned_loss=0.06477, ctc_loss=0.1216, over 3869804.74 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 21:03:39,813 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.850e+02 2.123e+02 2.695e+02 5.282e+02, threshold=4.246e+02, percent-clipped=2.0
+2024-08-25 21:05:39,956 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=143157.33333333334, ans=0.125
+2024-08-25 21:13:01,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=143210.66666666666, ans=0.0
+2024-08-25 21:29:56,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=143317.33333333334, ans=0.025
+2024-08-25 21:36:26,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=143317.33333333334, ans=0.125
+2024-08-25 21:45:06,472 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=143370.66666666666, ans=0.09899494936611666
+2024-08-25 21:46:38,199 INFO [train.py:1114] (0/4) Epoch 11, batch 2000, loss[loss=0.2224, simple_loss=0.2713, pruned_loss=0.06336, ctc_loss=0.1168, over 19676.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2929, pruned_loss=0.06525, ctc_loss=0.1225, over 3855510.50 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 21:56:49,682 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=6.843e-02
+2024-08-25 22:05:28,136 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=143530.66666666666, ans=0.07
+2024-08-25 22:14:45,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=143637.33333333334, ans=0.2
+2024-08-25 22:18:48,477 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=143637.33333333334, ans=0.1
+2024-08-25 22:19:42,815 INFO [train.py:1114] (0/4) Epoch 11, batch 2050, loss[loss=0.223, simple_loss=0.2797, pruned_loss=0.05985, ctc_loss=0.1165, over 19730.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.2926, pruned_loss=0.06548, ctc_loss=0.1229, over 3852352.43 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:19:45,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=143690.66666666666, ans=0.0
+2024-08-25 22:20:13,491 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.838e+02 2.216e+02 2.724e+02 4.008e+02, threshold=4.432e+02, percent-clipped=0.0
+2024-08-25 22:20:41,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=143690.66666666666, ans=0.125
+2024-08-25 22:24:23,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=143744.0, ans=0.1
+2024-08-25 22:24:52,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=143797.33333333334, ans=0.0
+2024-08-25 22:30:50,492 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 22:32:23,598 INFO [train.py:1114] (0/4) Epoch 11, batch 2100, loss[loss=0.2195, simple_loss=0.2789, pruned_loss=0.05835, ctc_loss=0.1085, over 19789.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.2917, pruned_loss=0.06485, ctc_loss=0.1219, over 3859782.18 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:35:16,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=144010.66666666666, ans=0.125
+2024-08-25 22:35:17,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=144010.66666666666, ans=0.2
+2024-08-25 22:35:19,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=144010.66666666666, ans=0.95
+2024-08-25 22:35:47,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=144064.0, ans=0.0
+2024-08-25 22:35:47,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.92 vs. limit=22.5
+2024-08-25 22:37:32,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=144117.33333333334, ans=0.125
+2024-08-25 22:38:44,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=144170.66666666666, ans=0.0
+2024-08-25 22:39:07,558 INFO [train.py:1114] (0/4) Epoch 11, batch 2150, loss[loss=0.2493, simple_loss=0.2984, pruned_loss=0.0739, ctc_loss=0.1309, over 19591.00 frames. ], tot_loss[loss=0.235, simple_loss=0.2917, pruned_loss=0.06486, ctc_loss=0.1217, over 3869376.40 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:39:51,932 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 1.804e+02 2.068e+02 2.942e+02 5.639e+02, threshold=4.136e+02, percent-clipped=4.0
+2024-08-25 22:41:18,258 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.52 vs. limit=6.0
+2024-08-25 22:41:30,576 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.97 vs. limit=15.0
+2024-08-25 22:42:30,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=144330.66666666666, ans=0.125
+2024-08-25 22:43:27,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=144437.33333333334, ans=0.125
+2024-08-25 22:44:02,551 INFO [train.py:1114] (0/4) Epoch 11, batch 2200, loss[loss=0.2403, simple_loss=0.3045, pruned_loss=0.06425, ctc_loss=0.1193, over 19588.00 frames. ], tot_loss[loss=0.234, simple_loss=0.291, pruned_loss=0.06438, ctc_loss=0.1207, over 3869041.31 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:44:32,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=144490.66666666666, ans=0.0
+2024-08-25 22:47:57,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=144650.66666666666, ans=0.1
+2024-08-25 22:49:03,020 INFO [train.py:1114] (0/4) Epoch 11, batch 2250, loss[loss=0.2291, simple_loss=0.2884, pruned_loss=0.06179, ctc_loss=0.1154, over 19619.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.291, pruned_loss=0.06424, ctc_loss=0.1204, over 3868584.66 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:49:09,615 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.818e+02 2.110e+02 2.782e+02 6.628e+02, threshold=4.220e+02, percent-clipped=3.0
+2024-08-25 22:49:29,244 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 22:49:40,286 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=144810.66666666666, ans=0.0
+2024-08-25 22:50:03,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=144917.33333333334, ans=0.0
+2024-08-25 22:50:03,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=144917.33333333334, ans=0.125
+2024-08-25 22:50:10,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=144917.33333333334, ans=0.025
+2024-08-25 22:50:46,978 INFO [train.py:1114] (0/4) Epoch 11, batch 2300, loss[loss=0.2334, simple_loss=0.2797, pruned_loss=0.07007, ctc_loss=0.1175, over 19508.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2902, pruned_loss=0.06424, ctc_loss=0.1204, over 3861329.49 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:51:17,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=145024.0, ans=0.025
+2024-08-25 22:51:18,626 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145024.0, ans=0.1
+2024-08-25 22:51:31,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=145077.33333333334, ans=0.09899494936611666
+2024-08-25 22:51:32,199 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=145077.33333333334, ans=0.125
+2024-08-25 22:51:39,151 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.54 vs. limit=22.5
+2024-08-25 22:52:42,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=145237.33333333334, ans=0.1
+2024-08-25 22:52:51,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=145237.33333333334, ans=0.025
+2024-08-25 22:52:55,290 INFO [train.py:1114] (0/4) Epoch 11, batch 2350, loss[loss=0.2301, simple_loss=0.3022, pruned_loss=0.05776, ctc_loss=0.1061, over 19678.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.29, pruned_loss=0.06436, ctc_loss=0.1207, over 3863696.93 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:52:56,719 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.67 vs. limit=15.0
+2024-08-25 22:53:01,247 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.317e+02 1.788e+02 2.141e+02 2.380e+02 3.835e+02, threshold=4.282e+02, percent-clipped=0.0
+2024-08-25 22:53:27,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=145344.0, ans=0.2
+2024-08-25 22:53:30,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145344.0, ans=0.1
+2024-08-25 22:53:44,731 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.46 vs. limit=22.5
+2024-08-25 22:54:11,483 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.89 vs. limit=15.0
+2024-08-25 22:54:16,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=145504.0, ans=0.125
+2024-08-25 22:54:26,026 INFO [train.py:1114] (0/4) Epoch 11, batch 2400, loss[loss=0.2543, simple_loss=0.3224, pruned_loss=0.06836, ctc_loss=0.124, over 19327.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2925, pruned_loss=0.06544, ctc_loss=0.1225, over 3858441.92 frames. ], batch size: 67, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:54:27,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=145557.33333333334, ans=0.0
+2024-08-25 22:54:28,797 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.10 vs. limit=10.0
+2024-08-25 22:54:43,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=145610.66666666666, ans=0.125
+2024-08-25 22:54:47,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=145610.66666666666, ans=6.0
+2024-08-25 22:55:11,285 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145717.33333333334, ans=0.1
+2024-08-25 22:55:20,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=145717.33333333334, ans=0.0
+2024-08-25 22:55:44,076 INFO [train.py:1114] (0/4) Epoch 11, batch 2450, loss[loss=0.3518, simple_loss=0.3607, pruned_loss=0.1214, ctc_loss=0.2499, over 13466.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.297, pruned_loss=0.06881, ctc_loss=0.1294, over 3734133.43 frames. ], batch size: 141, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:56:00,768 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.910e+02 2.208e+02 2.594e+02 5.356e+02, threshold=4.415e+02, percent-clipped=1.0
+2024-08-25 22:56:01,333 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.00 vs. limit=10.0
+2024-08-25 22:56:53,872 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.24 vs. limit=22.5
+2024-08-25 22:56:56,007 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.57 vs. limit=10.0
+2024-08-25 22:57:26,218 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.26 vs. limit=22.5
+2024-08-25 22:57:27,679 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-11.pt
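+
+The checkpoint line above persists training state at the epoch boundary. A minimal sketch of such a save step; the keys are illustrative, and the real epoch-11.pt likely also carries scheduler, sampler, and grad-scaler state:
+
+import torch
+
+def save_checkpoint_sketch(path, model, optimizer, epoch, batch_count):
+    # Bundle everything needed to resume training into one file.
+    torch.save(
+        {
+            "model": model.state_dict(),
+            "optimizer": optimizer.state_dict(),
+            "epoch": epoch,
+            "batch_count": batch_count,
+        },
+        path,
+    )
+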
+2024-08-25 22:58:44,105 INFO [train.py:1114] (0/4) Epoch 12, batch 0, loss[loss=0.2364, simple_loss=0.2812, pruned_loss=0.06971, ctc_loss=0.1304, over 19444.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2812, pruned_loss=0.06971, ctc_loss=0.1304, over 19444.00 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 22:58:44,106 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 23:00:02,934 INFO [train.py:1146] (0/4) Epoch 12, validation: loss=0.1972, simple_loss=0.2841, pruned_loss=0.04086, ctc_loss=0.07109, over 944034.00 frames.
+2024-08-25 23:00:02,935 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
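+
+The validation block above reports one loss "over 944034.00 frames", i.e. an average in which each batch contributes in proportion to its frame count, just like the running `tot_loss` in the batch lines. A hedged sketch of that frame-weighted aggregation, with assumed batch-dict keys and an assumed model that returns a mean per-frame loss:
+
+import torch
+
+@torch.no_grad()
+def frame_weighted_validation_loss(model, loader, device="cuda"):
+    total_loss, total_frames = 0.0, 0.0
+    for batch in loader:                    # assumed keys: "features", "num_frames"
+        feats = batch["features"].to(device)
+        frames = float(batch["num_frames"])
+        loss = model(feats)                 # assumed: returns mean loss per frame
+        total_loss += float(loss) * frames  # weight each batch by its frames
+        total_frames += frames
+    return total_loss / max(total_frames, 1.0)
+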
+2024-08-25 23:00:33,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=146138.66666666666, ans=0.125
+2024-08-25 23:01:08,423 INFO [train.py:1114] (0/4) Epoch 12, batch 50, loss[loss=0.2002, simple_loss=0.2577, pruned_loss=0.05189, ctc_loss=0.09718, over 19701.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2961, pruned_loss=0.06816, ctc_loss=0.129, over 843793.79 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:01:21,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=146352.0, ans=0.125
+2024-08-25 23:01:26,978 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.43 vs. limit=22.5
+2024-08-25 23:01:27,718 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.810e+02 2.073e+02 2.436e+02 4.057e+02, threshold=4.147e+02, percent-clipped=0.0
+2024-08-25 23:01:37,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=146405.33333333334, ans=0.125
+2024-08-25 23:01:50,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=146458.66666666666, ans=0.125
+2024-08-25 23:02:22,999 INFO [train.py:1114] (0/4) Epoch 12, batch 100, loss[loss=0.2136, simple_loss=0.274, pruned_loss=0.05636, ctc_loss=0.1012, over 19716.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.2957, pruned_loss=0.06632, ctc_loss=0.1253, over 1498987.01 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:02:52,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=146618.66666666666, ans=0.125
+2024-08-25 23:02:55,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=146672.0, ans=0.05
+2024-08-25 23:03:08,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff2.min_abs, batch_count=146672.0, ans=0.1
+2024-08-25 23:03:10,063 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:03:18,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=146725.33333333334, ans=0.0
+2024-08-25 23:03:19,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=146725.33333333334, ans=10.0
+2024-08-25 23:03:24,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=146725.33333333334, ans=0.0
+2024-08-25 23:03:39,241 INFO [train.py:1114] (0/4) Epoch 12, batch 150, loss[loss=0.2283, simple_loss=0.2775, pruned_loss=0.06459, ctc_loss=0.1248, over 19734.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2919, pruned_loss=0.06435, ctc_loss=0.1213, over 2027803.58 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:03:39,826 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.29 vs. limit=15.0
+2024-08-25 23:04:09,862 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.659e+02 1.880e+02 2.314e+02 3.650e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-25 23:04:12,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=146885.33333333334, ans=0.025
+2024-08-25 23:04:19,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=146938.66666666666, ans=0.125
+2024-08-25 23:04:30,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=146938.66666666666, ans=0.125
+2024-08-25 23:04:44,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=146992.0, ans=0.125
+2024-08-25 23:04:51,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=146992.0, ans=0.125
+2024-08-25 23:04:55,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=147045.33333333334, ans=0.025
+2024-08-25 23:05:07,039 INFO [train.py:1114] (0/4) Epoch 12, batch 200, loss[loss=0.2644, simple_loss=0.3115, pruned_loss=0.07933, ctc_loss=0.1464, over 18565.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2897, pruned_loss=0.06325, ctc_loss=0.1192, over 2435789.11 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:05:13,107 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=147098.66666666666, ans=0.125
+2024-08-25 23:06:20,735 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:06:24,456 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.30 vs. limit=6.0
+2024-08-25 23:06:39,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=147258.66666666666, ans=0.0
+2024-08-25 23:06:40,717 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.64 vs. limit=15.0
+2024-08-25 23:06:51,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=147312.0, ans=0.05
+2024-08-25 23:07:01,819 INFO [train.py:1114] (0/4) Epoch 12, batch 250, loss[loss=0.2464, simple_loss=0.3045, pruned_loss=0.06939, ctc_loss=0.1237, over 19348.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2901, pruned_loss=0.06302, ctc_loss=0.1187, over 2755129.31 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:07:22,627 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 1.825e+02 2.154e+02 2.499e+02 3.884e+02, threshold=4.307e+02, percent-clipped=2.0
+2024-08-25 23:07:33,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=147472.0, ans=0.2
+2024-08-25 23:07:49,169 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:08:13,949 INFO [train.py:1114] (0/4) Epoch 12, batch 300, loss[loss=0.2722, simple_loss=0.3118, pruned_loss=0.08555, ctc_loss=0.1539, over 19533.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2898, pruned_loss=0.06314, ctc_loss=0.1188, over 3000376.94 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:08:20,280 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=6.98 vs. limit=15.0
+2024-08-25 23:08:33,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=147685.33333333334, ans=0.5
+2024-08-25 23:08:54,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=147792.0, ans=0.0
+2024-08-25 23:08:57,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=147792.0, ans=0.125
+2024-08-25 23:08:59,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=147792.0, ans=0.0
+2024-08-25 23:09:03,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=147845.33333333334, ans=0.125
+2024-08-25 23:09:17,484 INFO [train.py:1114] (0/4) Epoch 12, batch 350, loss[loss=0.2255, simple_loss=0.2728, pruned_loss=0.0658, ctc_loss=0.1165, over 19738.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.29, pruned_loss=0.06304, ctc_loss=0.1186, over 3190558.33 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:09:25,103 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.91 vs. limit=15.0
+2024-08-25 23:09:36,451 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.749e+02 2.047e+02 2.740e+02 4.170e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 23:09:37,203 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.59 vs. limit=15.0
+2024-08-25 23:09:45,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=148005.33333333334, ans=0.0
+2024-08-25 23:09:56,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=148058.66666666666, ans=0.07
+2024-08-25 23:09:58,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=148058.66666666666, ans=0.0
+2024-08-25 23:10:01,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=148058.66666666666, ans=0.125
+2024-08-25 23:10:12,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148058.66666666666, ans=0.1
+2024-08-25 23:10:13,241 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=148112.0, ans=0.09899494936611666
+2024-08-25 23:10:25,926 INFO [train.py:1114] (0/4) Epoch 12, batch 400, loss[loss=0.2552, simple_loss=0.3113, pruned_loss=0.07254, ctc_loss=0.1353, over 19522.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2889, pruned_loss=0.06231, ctc_loss=0.1172, over 3341150.43 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:10:58,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=148272.0, ans=0.2
+2024-08-25 23:11:16,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=148325.33333333334, ans=0.02
+2024-08-25 23:11:27,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148325.33333333334, ans=0.1
+2024-08-25 23:12:05,920 INFO [train.py:1114] (0/4) Epoch 12, batch 450, loss[loss=0.2561, simple_loss=0.3106, pruned_loss=0.0729, ctc_loss=0.1396, over 19609.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.2893, pruned_loss=0.06245, ctc_loss=0.1175, over 3450543.39 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:12:16,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=148432.0, ans=0.0
+2024-08-25 23:12:28,367 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.830e+02 2.201e+02 2.765e+02 4.484e+02, threshold=4.403e+02, percent-clipped=1.0
+2024-08-25 23:12:56,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148645.33333333334, ans=0.1
+2024-08-25 23:13:01,100 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.22 vs. limit=15.0
+2024-08-25 23:13:21,160 INFO [train.py:1114] (0/4) Epoch 12, batch 500, loss[loss=0.2295, simple_loss=0.2973, pruned_loss=0.05947, ctc_loss=0.107, over 19667.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2886, pruned_loss=0.06236, ctc_loss=0.1172, over 3546574.60 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:13:21,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=148698.66666666666, ans=0.125
+2024-08-25 23:13:21,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=148698.66666666666, ans=0.0
+2024-08-25 23:13:22,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=148698.66666666666, ans=0.125
+2024-08-25 23:13:27,180 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=148698.66666666666, ans=0.2
+2024-08-25 23:13:31,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=148698.66666666666, ans=0.07
+2024-08-25 23:13:33,582 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.26 vs. limit=6.0
+2024-08-25 23:13:43,615 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.54 vs. limit=15.0
+2024-08-25 23:13:58,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=148805.33333333334, ans=0.0
+2024-08-25 23:14:12,265 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.15 vs. limit=15.0
+2024-08-25 23:14:41,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=148858.66666666666, ans=0.2
+2024-08-25 23:14:44,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=148912.0, ans=0.0
+2024-08-25 23:14:45,652 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.29 vs. limit=15.0
+2024-08-25 23:14:59,250 INFO [train.py:1114] (0/4) Epoch 12, batch 550, loss[loss=0.2719, simple_loss=0.317, pruned_loss=0.08293, ctc_loss=0.1522, over 19334.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2885, pruned_loss=0.06249, ctc_loss=0.1174, over 3608185.56 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:15:42,256 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.692e+02 2.049e+02 2.499e+02 4.022e+02, threshold=4.098e+02, percent-clipped=0.0
+2024-08-25 23:16:01,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=149072.0, ans=0.125
+2024-08-25 23:16:46,429 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.70 vs. limit=15.0
+2024-08-25 23:16:54,716 INFO [train.py:1114] (0/4) Epoch 12, batch 600, loss[loss=0.2767, simple_loss=0.3262, pruned_loss=0.08223, ctc_loss=0.1569, over 19414.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.2888, pruned_loss=0.06268, ctc_loss=0.1178, over 3665590.69 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:17:22,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=149285.33333333334, ans=0.125
+2024-08-25 23:17:25,003 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-28000.pt
+2024-08-25 23:18:46,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=149285.33333333334, ans=0.1
+2024-08-25 23:18:52,432 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.24 vs. limit=15.0
+2024-08-25 23:19:03,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=149392.0, ans=0.0
+2024-08-25 23:19:22,625 INFO [train.py:1114] (0/4) Epoch 12, batch 650, loss[loss=0.2112, simple_loss=0.2836, pruned_loss=0.05065, ctc_loss=0.09372, over 19757.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.288, pruned_loss=0.06225, ctc_loss=0.1171, over 3715431.52 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:19:40,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=149552.0, ans=0.2
+2024-08-25 23:19:42,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=149552.0, ans=0.0
+2024-08-25 23:19:48,484 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.911e+02 2.346e+02 2.911e+02 5.072e+02, threshold=4.691e+02, percent-clipped=6.0
+2024-08-25 23:19:51,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=149552.0, ans=0.025
+2024-08-25 23:20:29,969 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=149658.66666666666, ans=0.5
+2024-08-25 23:20:37,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=149712.0, ans=0.125
+2024-08-25 23:20:43,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=149712.0, ans=0.125
+2024-08-25 23:20:46,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=149712.0, ans=0.0
+2024-08-25 23:20:49,397 INFO [train.py:1114] (0/4) Epoch 12, batch 700, loss[loss=0.2096, simple_loss=0.2759, pruned_loss=0.05194, ctc_loss=0.09859, over 19738.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2884, pruned_loss=0.06232, ctc_loss=0.1171, over 3747405.65 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:20:58,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=149765.33333333334, ans=10.0
+2024-08-25 23:21:04,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=149818.66666666666, ans=0.2
+2024-08-25 23:21:14,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=149872.0, ans=0.0
+2024-08-25 23:21:15,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=149872.0, ans=0.0
+2024-08-25 23:21:44,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=149978.66666666666, ans=0.125
+2024-08-25 23:21:51,364 INFO [train.py:1114] (0/4) Epoch 12, batch 750, loss[loss=0.2193, simple_loss=0.2852, pruned_loss=0.05536, ctc_loss=0.1071, over 19491.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.2877, pruned_loss=0.06193, ctc_loss=0.1164, over 3774030.89 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:22:14,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=150085.33333333334, ans=0.05
+2024-08-25 23:22:20,742 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.992e+02 2.563e+02 3.460e+02 5.252e+02, threshold=5.125e+02, percent-clipped=3.0
+2024-08-25 23:22:25,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=150085.33333333334, ans=0.125
+2024-08-25 23:22:43,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=150138.66666666666, ans=0.1
+2024-08-25 23:23:04,530 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.30 vs. limit=10.0
+2024-08-25 23:23:04,718 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.71 vs. limit=15.0
+2024-08-25 23:23:10,619 INFO [train.py:1114] (0/4) Epoch 12, batch 800, loss[loss=0.239, simple_loss=0.2895, pruned_loss=0.06815, ctc_loss=0.1307, over 19782.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.2877, pruned_loss=0.06194, ctc_loss=0.1163, over 3796126.48 frames. ], batch size: 49, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:23:18,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=150298.66666666666, ans=0.1
+2024-08-25 23:23:20,107 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.47 vs. limit=6.0
+2024-08-25 23:23:32,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=150405.33333333334, ans=0.0
+2024-08-25 23:23:36,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=150405.33333333334, ans=0.125
+2024-08-25 23:24:03,023 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.77 vs. limit=22.5
+2024-08-25 23:24:07,596 INFO [train.py:1114] (0/4) Epoch 12, batch 850, loss[loss=0.2019, simple_loss=0.2765, pruned_loss=0.04583, ctc_loss=0.0894, over 19663.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.287, pruned_loss=0.06164, ctc_loss=0.1157, over 3814370.38 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:24:15,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=150565.33333333334, ans=0.0
+2024-08-25 23:24:30,659 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.376e+02 1.732e+02 2.149e+02 2.756e+02 4.869e+02, threshold=4.297e+02, percent-clipped=0.0
+2024-08-25 23:25:39,226 INFO [train.py:1114] (0/4) Epoch 12, batch 900, loss[loss=0.2283, simple_loss=0.2775, pruned_loss=0.06514, ctc_loss=0.1222, over 19826.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2877, pruned_loss=0.06231, ctc_loss=0.1172, over 3818421.65 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:25:43,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=150832.0, ans=0.5
+2024-08-25 23:25:47,243 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.37 vs. limit=10.0
+2024-08-25 23:26:19,260 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.54 vs. limit=22.5
+2024-08-25 23:26:28,774 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=150938.66666666666, ans=0.125
+2024-08-25 23:26:41,953 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.43 vs. limit=15.0
+2024-08-25 23:26:49,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.68 vs. limit=15.0
+2024-08-25 23:27:07,270 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=151098.66666666666, ans=0.05
+2024-08-25 23:27:21,987 INFO [train.py:1114] (0/4) Epoch 12, batch 950, loss[loss=0.227, simple_loss=0.276, pruned_loss=0.0641, ctc_loss=0.1242, over 19519.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2879, pruned_loss=0.06242, ctc_loss=0.1174, over 3821068.99 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:27:32,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=151098.66666666666, ans=0.0
+2024-08-25 23:27:39,207 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.51 vs. limit=6.0
+2024-08-25 23:27:39,924 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=151098.66666666666, ans=0.0
+2024-08-25 23:27:44,054 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.39 vs. limit=15.0
+2024-08-25 23:27:47,805 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 1.727e+02 2.047e+02 2.468e+02 3.873e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-08-25 23:28:00,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=151152.0, ans=0.0
+2024-08-25 23:28:03,875 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=151205.33333333334, ans=0.0
+2024-08-25 23:28:31,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=151258.66666666666, ans=0.025
+2024-08-25 23:28:45,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=151312.0, ans=10.0
+2024-08-25 23:28:55,957 INFO [train.py:1114] (0/4) Epoch 12, batch 1000, loss[loss=0.23, simple_loss=0.2846, pruned_loss=0.06349, ctc_loss=0.1211, over 19850.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.2886, pruned_loss=0.06273, ctc_loss=0.118, over 3816496.46 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:30:03,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=151418.66666666666, ans=0.05
+2024-08-25 23:30:03,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=151418.66666666666, ans=0.0
+2024-08-25 23:30:14,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=151418.66666666666, ans=0.025
+2024-08-25 23:30:15,991 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:30:30,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=151525.33333333334, ans=0.125
+2024-08-25 23:30:37,716 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.09 vs. limit=15.0
+2024-08-25 23:30:38,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=151525.33333333334, ans=0.0
+2024-08-25 23:30:52,123 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=151578.66666666666, ans=0.125
+2024-08-25 23:30:55,451 INFO [train.py:1114] (0/4) Epoch 12, batch 1050, loss[loss=0.2224, simple_loss=0.2853, pruned_loss=0.05718, ctc_loss=0.1126, over 19841.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.2875, pruned_loss=0.06226, ctc_loss=0.1169, over 3823111.14 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:31:06,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=151632.0, ans=0.125
+2024-08-25 23:31:14,267 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.748e+02 2.222e+02 2.883e+02 4.562e+02, threshold=4.445e+02, percent-clipped=3.0
+2024-08-25 23:31:45,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=151845.33333333334, ans=0.035
+2024-08-25 23:31:50,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=151845.33333333334, ans=0.125
+2024-08-25 23:32:14,291 INFO [train.py:1114] (0/4) Epoch 12, batch 1100, loss[loss=0.224, simple_loss=0.2788, pruned_loss=0.0618, ctc_loss=0.1139, over 19597.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.2875, pruned_loss=0.06224, ctc_loss=0.1169, over 3830721.63 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:32:14,611 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:32:23,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=151898.66666666666, ans=0.0
+2024-08-25 23:32:36,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=151952.0, ans=0.2
+2024-08-25 23:32:49,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=152005.33333333334, ans=0.125
+2024-08-25 23:32:54,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=152058.66666666666, ans=15.0
+2024-08-25 23:33:01,718 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=152058.66666666666, ans=0.125
+2024-08-25 23:33:01,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=152058.66666666666, ans=0.125
+2024-08-25 23:33:32,438 INFO [train.py:1114] (0/4) Epoch 12, batch 1150, loss[loss=0.2321, simple_loss=0.2879, pruned_loss=0.06385, ctc_loss=0.1215, over 19581.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.288, pruned_loss=0.06266, ctc_loss=0.1176, over 3829672.44 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:33:42,511 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.54 vs. limit=15.0
+2024-08-25 23:33:43,611 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=152165.33333333334, ans=0.1
+2024-08-25 23:34:07,251 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.763e+02 2.002e+02 2.335e+02 5.298e+02, threshold=4.005e+02, percent-clipped=1.0
+2024-08-25 23:34:09,066 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.12 vs. limit=15.0
+2024-08-25 23:34:19,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=152272.0, ans=0.025
+2024-08-25 23:34:28,903 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.10 vs. limit=6.0
+2024-08-25 23:34:46,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=152378.66666666666, ans=0.0
+2024-08-25 23:34:59,071 INFO [train.py:1114] (0/4) Epoch 12, batch 1200, loss[loss=0.2404, simple_loss=0.3029, pruned_loss=0.06295, ctc_loss=0.1302, over 19837.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.289, pruned_loss=0.06295, ctc_loss=0.1186, over 3826395.84 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:35:14,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=152432.0, ans=0.025
+2024-08-25 23:35:14,511 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.44 vs. limit=10.0
+2024-08-25 23:35:23,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=152485.33333333334, ans=0.125
+2024-08-25 23:35:32,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=152538.66666666666, ans=0.025
+2024-08-25 23:35:44,219 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.75 vs. limit=15.0
+2024-08-25 23:35:52,214 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=152592.0, ans=0.125
+2024-08-25 23:36:09,982 INFO [train.py:1114] (0/4) Epoch 12, batch 1250, loss[loss=0.2488, simple_loss=0.3091, pruned_loss=0.06958, ctc_loss=0.1232, over 19529.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2894, pruned_loss=0.06277, ctc_loss=0.118, over 3844268.22 frames. ], batch size: 61, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:36:23,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.19 vs. limit=15.0
+2024-08-25 23:36:25,237 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.18 vs. limit=15.0
+2024-08-25 23:36:34,031 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.907e+02 2.265e+02 2.785e+02 4.753e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 23:36:34,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=152752.0, ans=0.125
+2024-08-25 23:36:35,122 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.86 vs. limit=15.0
+2024-08-25 23:36:42,606 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.83 vs. limit=10.0
+2024-08-25 23:37:18,965 INFO [train.py:1114] (0/4) Epoch 12, batch 1300, loss[loss=0.2658, simple_loss=0.3092, pruned_loss=0.08124, ctc_loss=0.1498, over 18869.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2886, pruned_loss=0.06247, ctc_loss=0.1176, over 3847329.36 frames. ], batch size: 76, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:37:31,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=152965.33333333334, ans=15.0
+2024-08-25 23:37:52,115 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.94 vs. limit=22.5
+2024-08-25 23:37:55,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=153072.0, ans=0.125
+2024-08-25 23:38:03,550 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=153125.33333333334, ans=0.0
+2024-08-25 23:38:28,986 INFO [train.py:1114] (0/4) Epoch 12, batch 1350, loss[loss=0.2469, simple_loss=0.3051, pruned_loss=0.06805, ctc_loss=0.1315, over 19756.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2881, pruned_loss=0.06197, ctc_loss=0.1167, over 3858031.15 frames. ], batch size: 54, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:38:40,746 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.94 vs. limit=15.0
+2024-08-25 23:38:46,294 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.707e+02 2.039e+02 2.408e+02 4.402e+02, threshold=4.078e+02, percent-clipped=0.0
+2024-08-25 23:38:47,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=153285.33333333334, ans=0.2
+2024-08-25 23:39:01,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=153338.66666666666, ans=0.1
+2024-08-25 23:39:15,550 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=153392.0, ans=0.2
+2024-08-25 23:39:33,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=153445.33333333334, ans=0.0
+2024-08-25 23:39:37,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=153445.33333333334, ans=0.07
+2024-08-25 23:39:43,079 INFO [train.py:1114] (0/4) Epoch 12, batch 1400, loss[loss=0.1836, simple_loss=0.2485, pruned_loss=0.04383, ctc_loss=0.07755, over 19661.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.2876, pruned_loss=0.06169, ctc_loss=0.116, over 3864670.14 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:40:48,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=153658.66666666666, ans=0.025
+2024-08-25 23:40:52,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=153712.0, ans=0.0
+2024-08-25 23:40:57,357 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.62 vs. limit=6.0
+2024-08-25 23:41:07,359 INFO [train.py:1114] (0/4) Epoch 12, batch 1450, loss[loss=0.2582, simple_loss=0.3093, pruned_loss=0.07555, ctc_loss=0.1402, over 19681.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2881, pruned_loss=0.06194, ctc_loss=0.1165, over 3862329.37 frames. ], batch size: 63, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:41:23,128 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.74 vs. limit=15.0
+2024-08-25 23:41:27,989 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 1.773e+02 2.135e+02 2.639e+02 4.435e+02, threshold=4.270e+02, percent-clipped=2.0
+2024-08-25 23:42:43,082 INFO [train.py:1114] (0/4) Epoch 12, batch 1500, loss[loss=0.2134, simple_loss=0.2835, pruned_loss=0.05238, ctc_loss=0.09619, over 19579.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2883, pruned_loss=0.06187, ctc_loss=0.1163, over 3861668.37 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:43:10,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=154085.33333333334, ans=0.125
+2024-08-25 23:43:10,317 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.55 vs. limit=15.0
+2024-08-25 23:43:20,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=154138.66666666666, ans=0.025
+2024-08-25 23:43:55,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=154192.0, ans=0.0
+2024-08-25 23:44:09,930 INFO [train.py:1114] (0/4) Epoch 12, batch 1550, loss[loss=0.2599, simple_loss=0.3142, pruned_loss=0.07407, ctc_loss=0.1436, over 19607.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2884, pruned_loss=0.06196, ctc_loss=0.1166, over 3846894.07 frames. ], batch size: 60, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:44:10,209 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:44:16,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=154298.66666666666, ans=0.1
+2024-08-25 23:44:43,865 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.860e+02 2.194e+02 2.828e+02 4.590e+02, threshold=4.388e+02, percent-clipped=1.0
+2024-08-25 23:45:05,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=154405.33333333334, ans=0.1
+2024-08-25 23:46:12,534 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.13 vs. limit=12.0
+2024-08-25 23:46:23,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=154458.66666666666, ans=0.1
+2024-08-25 23:46:27,553 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.74 vs. limit=22.5
+2024-08-25 23:46:37,567 INFO [train.py:1114] (0/4) Epoch 12, batch 1600, loss[loss=0.2631, simple_loss=0.3153, pruned_loss=0.07653, ctc_loss=0.1445, over 19819.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.2884, pruned_loss=0.06194, ctc_loss=0.1166, over 3835404.97 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:46:39,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=154565.33333333334, ans=0.125
+2024-08-25 23:46:43,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=154565.33333333334, ans=0.125
+2024-08-25 23:48:01,984 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.75 vs. limit=22.5
+2024-08-25 23:48:12,948 INFO [train.py:1114] (0/4) Epoch 12, batch 1650, loss[loss=0.2288, simple_loss=0.3001, pruned_loss=0.05635, ctc_loss=0.1118, over 19658.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2882, pruned_loss=0.06184, ctc_loss=0.1167, over 3832233.24 frames. ], batch size: 59, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:48:13,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=154832.0, ans=0.125
+2024-08-25 23:48:14,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=154832.0, ans=0.0
+2024-08-25 23:48:25,176 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.33 vs. limit=15.0
+2024-08-25 23:48:27,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=154885.33333333334, ans=0.2
+2024-08-25 23:48:32,098 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:48:32,982 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.751e+02 2.060e+02 2.481e+02 4.497e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 23:48:34,477 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:48:57,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=154992.0, ans=0.1
+2024-08-25 23:49:17,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=155045.33333333334, ans=0.0
+2024-08-25 23:49:18,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=155098.66666666666, ans=0.0
+2024-08-25 23:49:19,225 INFO [train.py:1114] (0/4) Epoch 12, batch 1700, loss[loss=0.2128, simple_loss=0.2591, pruned_loss=0.06105, ctc_loss=0.1112, over 19678.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.2878, pruned_loss=0.06149, ctc_loss=0.1159, over 3846671.64 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:49:37,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=155152.0, ans=0.125
+2024-08-25 23:49:43,789 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:50:19,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=155258.66666666666, ans=0.09899494936611666
+2024-08-25 23:50:36,465 INFO [train.py:1114] (0/4) Epoch 12, batch 1750, loss[loss=0.1894, simple_loss=0.2513, pruned_loss=0.04628, ctc_loss=0.08734, over 19690.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.287, pruned_loss=0.06111, ctc_loss=0.1152, over 3852665.87 frames. ], batch size: 45, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:51:09,600 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.61 vs. limit=6.0
+2024-08-25 23:51:09,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.88 vs. limit=15.0
+2024-08-25 23:51:12,450 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.691e+02 1.944e+02 2.310e+02 4.068e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-25 23:51:17,338 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=155418.66666666666, ans=0.125
+2024-08-25 23:51:23,927 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=155472.0, ans=0.125
+2024-08-25 23:51:25,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=155472.0, ans=0.0
+2024-08-25 23:51:42,760 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=155525.33333333334, ans=0.0
+2024-08-25 23:51:48,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=155578.66666666666, ans=0.125
+2024-08-25 23:51:55,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=155578.66666666666, ans=0.0
+2024-08-25 23:52:03,819 INFO [train.py:1114] (0/4) Epoch 12, batch 1800, loss[loss=0.2499, simple_loss=0.3089, pruned_loss=0.07006, ctc_loss=0.1268, over 19602.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.2875, pruned_loss=0.0614, ctc_loss=0.1155, over 3853341.67 frames. ], batch size: 55, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:52:47,616 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.18 vs. limit=15.0
+2024-08-25 23:52:50,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=155738.66666666666, ans=0.125
+2024-08-25 23:53:17,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=155792.0, ans=0.0
+2024-08-25 23:53:18,324 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=155792.0, ans=0.0
+2024-08-25 23:53:18,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=155792.0, ans=0.04949747468305833
+2024-08-25 23:53:29,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=155792.0, ans=0.1
+2024-08-25 23:53:37,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=155845.33333333334, ans=0.025
+2024-08-25 23:54:05,056 INFO [train.py:1114] (0/4) Epoch 12, batch 1850, loss[loss=0.2305, simple_loss=0.2916, pruned_loss=0.06141, ctc_loss=0.1163, over 19579.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.2868, pruned_loss=0.06105, ctc_loss=0.1146, over 3854769.03 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:54:25,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=155898.66666666666, ans=0.125
+2024-08-25 23:54:44,938 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 1.785e+02 2.050e+02 2.712e+02 4.249e+02, threshold=4.100e+02, percent-clipped=1.0
+2024-08-25 23:54:56,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=155952.0, ans=0.0
+2024-08-25 23:55:11,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=156005.33333333334, ans=0.1
+2024-08-25 23:55:19,904 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.66 vs. limit=22.5
+2024-08-25 23:55:24,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=156058.66666666666, ans=0.04949747468305833
+2024-08-25 23:55:37,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=156112.0, ans=0.2
+2024-08-25 23:56:02,306 INFO [train.py:1114] (0/4) Epoch 12, batch 1900, loss[loss=0.2393, simple_loss=0.2983, pruned_loss=0.0649, ctc_loss=0.1261, over 19655.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.288, pruned_loss=0.06168, ctc_loss=0.1158, over 3859826.76 frames. ], batch size: 59, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:56:20,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=156165.33333333334, ans=0.0
+2024-08-25 23:56:49,714 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=156272.0, ans=0.2
+2024-08-25 23:57:00,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=156272.0, ans=0.0
+2024-08-25 23:57:03,919 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.62 vs. limit=6.0
+2024-08-25 23:57:18,047 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.17 vs. limit=15.0
+2024-08-25 23:57:31,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=156325.33333333334, ans=0.04949747468305833
+2024-08-25 23:57:35,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=156378.66666666666, ans=0.1
+2024-08-25 23:58:05,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=156378.66666666666, ans=0.125
+2024-08-25 23:58:28,732 INFO [train.py:1114] (0/4) Epoch 12, batch 1950, loss[loss=0.2328, simple_loss=0.2879, pruned_loss=0.06529, ctc_loss=0.1179, over 19587.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2893, pruned_loss=0.0621, ctc_loss=0.1165, over 3869081.57 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:58:31,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=156432.0, ans=0.0
+2024-08-25 23:59:03,816 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 1.700e+02 2.031e+02 2.417e+02 3.778e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-08-25 23:59:31,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=156592.0, ans=0.125
+2024-08-25 23:59:51,579 INFO [train.py:1114] (0/4) Epoch 12, batch 2000, loss[loss=0.1799, simple_loss=0.2418, pruned_loss=0.04368, ctc_loss=0.0765, over 19697.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2896, pruned_loss=0.06233, ctc_loss=0.117, over 3855269.57 frames. ], batch size: 45, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:59:56,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=156698.66666666666, ans=0.2
+2024-08-26 00:00:00,611 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=156698.66666666666, ans=0.125
+2024-08-26 00:01:28,934 INFO [train.py:1114] (0/4) Epoch 12, batch 2050, loss[loss=0.2093, simple_loss=0.2629, pruned_loss=0.05739, ctc_loss=0.1025, over 19728.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2888, pruned_loss=0.06226, ctc_loss=0.1171, over 3851162.19 frames. ], batch size: 47, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:01:46,859 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.778e+02 1.977e+02 2.412e+02 4.440e+02, threshold=3.953e+02, percent-clipped=1.0
+2024-08-26 00:02:05,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=157072.0, ans=0.125
+2024-08-26 00:02:16,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=157072.0, ans=0.025
+2024-08-26 00:02:19,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=157072.0, ans=0.125
+2024-08-26 00:02:33,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=157125.33333333334, ans=0.2
+2024-08-26 00:02:47,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=157178.66666666666, ans=0.125
+2024-08-26 00:02:59,585 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=20.05 vs. limit=15.0
+2024-08-26 00:03:00,082 INFO [train.py:1114] (0/4) Epoch 12, batch 2100, loss[loss=0.2146, simple_loss=0.2832, pruned_loss=0.05398, ctc_loss=0.09531, over 19782.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2881, pruned_loss=0.06192, ctc_loss=0.1164, over 3859765.02 frames. ], batch size: 54, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:03:02,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=157232.0, ans=0.125
+2024-08-26 00:25:13,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=157338.66666666666, ans=0.1
+2024-08-26 00:33:21,226 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.75 vs. limit=15.0
+2024-08-26 00:33:35,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157392.0, ans=0.1
+2024-08-26 00:56:07,943 INFO [train.py:1114] (0/4) Epoch 12, batch 2150, loss[loss=0.2025, simple_loss=0.269, pruned_loss=0.04919, ctc_loss=0.09439, over 19584.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.288, pruned_loss=0.06192, ctc_loss=0.1162, over 3870387.59 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:59:27,491 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.12 vs. limit=15.0
+2024-08-26 01:06:20,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=157552.0, ans=0.0
+2024-08-26 01:09:51,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157552.0, ans=0.1
+2024-08-26 01:09:53,319 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.788e+02 2.174e+02 2.705e+02 6.148e+02, threshold=4.348e+02, percent-clipped=11.0
+2024-08-26 01:11:53,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=157552.0, ans=0.1
+2024-08-26 01:11:54,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=157552.0, ans=0.025
+2024-08-26 01:13:56,141 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=13.43 vs. limit=15.0
+2024-08-26 01:35:47,646 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.91 vs. limit=6.0
+2024-08-26 01:37:35,684 INFO [train.py:1114] (0/4) Epoch 12, batch 2200, loss[loss=0.2218, simple_loss=0.2953, pruned_loss=0.05451, ctc_loss=0.09846, over 19582.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.2875, pruned_loss=0.06152, ctc_loss=0.1156, over 3868343.65 frames. ], batch size: 57, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 01:38:03,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=157765.33333333334, ans=0.2
+2024-08-26 01:43:11,241 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=157818.66666666666, ans=0.1
+2024-08-26 01:44:09,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=157818.66666666666, ans=0.125
+2024-08-26 01:47:42,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=157872.0, ans=0.0
+2024-08-26 01:49:49,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=157925.33333333334, ans=0.125
+2024-08-26 01:50:18,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=157925.33333333334, ans=0.125
+2024-08-26 01:55:39,503 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.08 vs. limit=15.0
+2024-08-26 01:57:29,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=158032.0, ans=0.125
+2024-08-26 01:57:30,331 INFO [train.py:1114] (0/4) Epoch 12, batch 2250, loss[loss=0.2438, simple_loss=0.3034, pruned_loss=0.06669, ctc_loss=0.1273, over 19636.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.287, pruned_loss=0.06127, ctc_loss=0.115, over 3868134.48 frames. ], batch size: 55, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:03:20,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158085.33333333334, ans=0.1
+2024-08-26 02:04:28,482 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.839e+02 2.199e+02 2.577e+02 6.358e+02, threshold=4.399e+02, percent-clipped=1.0
+2024-08-26 02:08:01,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=158192.0, ans=0.0
+2024-08-26 02:09:46,483 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.76 vs. limit=15.0
+2024-08-26 02:12:18,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=158245.33333333334, ans=0.025
+2024-08-26 02:13:21,300 INFO [train.py:1114] (0/4) Epoch 12, batch 2300, loss[loss=0.2004, simple_loss=0.2647, pruned_loss=0.04923, ctc_loss=0.09407, over 19490.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2865, pruned_loss=0.06143, ctc_loss=0.1153, over 3862323.12 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:13:56,669 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.58 vs. limit=15.0
+2024-08-26 02:17:35,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158405.33333333334, ans=0.1
+2024-08-26 02:17:37,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=158405.33333333334, ans=10.0
+2024-08-26 02:21:51,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=158512.0, ans=0.025
+2024-08-26 02:22:39,621 INFO [train.py:1114] (0/4) Epoch 12, batch 2350, loss[loss=0.2453, simple_loss=0.3093, pruned_loss=0.0667, ctc_loss=0.12, over 19671.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.2863, pruned_loss=0.06121, ctc_loss=0.115, over 3864821.42 frames. ], batch size: 63, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:23:11,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=158565.33333333334, ans=0.025
+2024-08-26 02:25:18,442 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.991e+02 2.536e+02 3.183e+02 5.552e+02, threshold=5.072e+02, percent-clipped=5.0
+2024-08-26 02:28:01,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=158725.33333333334, ans=0.125
+2024-08-26 02:30:26,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158778.66666666666, ans=0.1
+2024-08-26 02:30:53,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=158778.66666666666, ans=0.0
+2024-08-26 02:30:58,360 INFO [train.py:1114] (0/4) Epoch 12, batch 2400, loss[loss=0.2609, simple_loss=0.3108, pruned_loss=0.0771, ctc_loss=0.1421, over 19390.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.2884, pruned_loss=0.06227, ctc_loss=0.1169, over 3859457.85 frames. ], batch size: 67, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:31:10,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=158832.0, ans=0.025
+2024-08-26 02:31:28,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=158885.33333333334, ans=0.2
+2024-08-26 02:37:41,011 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.03 vs. limit=15.0
+2024-08-26 02:38:16,207 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.35 vs. limit=10.0
+2024-08-26 02:38:22,378 INFO [train.py:1114] (0/4) Epoch 12, batch 2450, loss[loss=0.2844, simple_loss=0.318, pruned_loss=0.09148, ctc_loss=0.1695, over 13431.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2926, pruned_loss=0.0655, ctc_loss=0.1231, over 3736242.85 frames. ], batch size: 141, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:38:42,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=159098.66666666666, ans=0.2
+2024-08-26 02:39:07,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.18 vs. limit=15.0
+2024-08-26 02:39:11,645 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.25 vs. limit=15.0
+2024-08-26 02:39:42,314 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.859e+02 2.162e+02 2.447e+02 4.124e+02, threshold=4.324e+02, percent-clipped=0.0
+2024-08-26 02:39:42,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=159152.0, ans=0.05
+2024-08-26 02:40:01,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=159152.0, ans=0.125
+2024-08-26 02:40:14,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=159152.0, ans=0.125
+2024-08-26 02:40:15,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=159205.33333333334, ans=0.0
+2024-08-26 02:40:40,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=159205.33333333334, ans=0.0
+2024-08-26 02:41:48,774 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-12.pt
+2024-08-26 02:43:45,614 INFO [train.py:1114] (0/4) Epoch 13, batch 0, loss[loss=0.223, simple_loss=0.2715, pruned_loss=0.06214, ctc_loss=0.1254, over 19800.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2715, pruned_loss=0.06214, ctc_loss=0.1254, over 19800.00 frames. ], batch size: 49, lr: 1.18e-02, grad_scale: 32.0
+2024-08-26 02:43:45,616 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 02:45:27,908 INFO [train.py:1146] (0/4) Epoch 13, validation: loss=0.1972, simple_loss=0.2835, pruned_loss=0.04113, ctc_loss=0.07151, over 944034.00 frames.
+2024-08-26 02:45:27,909 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-26 02:45:31,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=159306.66666666666, ans=0.0
+2024-08-26 02:45:32,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=159306.66666666666, ans=0.125
+2024-08-26 02:46:06,322 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.08 vs. limit=15.0
+2024-08-26 02:46:07,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=159360.0, ans=0.025
+2024-08-26 02:46:14,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=159413.33333333334, ans=0.2
+2024-08-26 02:46:22,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=159413.33333333334, ans=0.04949747468305833
+2024-08-26 02:47:02,951 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=159520.0, ans=0.1
+2024-08-26 02:47:15,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=159520.0, ans=0.0
+2024-08-26 02:48:06,366 INFO [train.py:1114] (0/4) Epoch 13, batch 50, loss[loss=0.1752, simple_loss=0.2456, pruned_loss=0.03811, ctc_loss=0.07125, over 19681.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2893, pruned_loss=0.06277, ctc_loss=0.1198, over 844325.49 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:48:32,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=159626.66666666666, ans=0.1
+2024-08-26 02:48:55,481 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.822e+02 2.122e+02 2.766e+02 5.339e+02, threshold=4.244e+02, percent-clipped=3.0
+2024-08-26 02:49:16,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=159786.66666666666, ans=0.0
+2024-08-26 02:49:20,721 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=159786.66666666666, ans=0.125
+2024-08-26 02:49:26,937 INFO [train.py:1114] (0/4) Epoch 13, batch 100, loss[loss=0.2069, simple_loss=0.2663, pruned_loss=0.05282, ctc_loss=0.1049, over 19711.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2894, pruned_loss=0.06168, ctc_loss=0.1175, over 1498893.67 frames. ], batch size: 51, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:49:33,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=159840.0, ans=0.1
+2024-08-26 02:50:00,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=159946.66666666666, ans=0.125
+2024-08-26 02:50:00,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=159946.66666666666, ans=0.0
+2024-08-26 02:50:21,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=159946.66666666666, ans=0.125
+2024-08-26 02:50:45,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=160000.0, ans=0.125
+2024-08-26 02:50:46,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=160000.0, ans=0.0
+2024-08-26 02:51:16,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 02:51:20,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=160053.33333333334, ans=0.2
+2024-08-26 02:51:23,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160053.33333333334, ans=0.1
+2024-08-26 02:51:27,248 INFO [train.py:1114] (0/4) Epoch 13, batch 150, loss[loss=0.2121, simple_loss=0.2703, pruned_loss=0.0568, ctc_loss=0.1009, over 19717.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.2872, pruned_loss=0.06134, ctc_loss=0.1165, over 2027521.62 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:51:42,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=160106.66666666666, ans=0.0
+2024-08-26 02:51:54,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=160160.0, ans=0.0
+2024-08-26 02:52:05,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.56 vs. limit=12.0
+2024-08-26 02:52:48,530 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.693e+02 1.889e+02 2.276e+02 3.515e+02, threshold=3.778e+02, percent-clipped=0.0
+2024-08-26 02:53:36,276 INFO [train.py:1114] (0/4) Epoch 13, batch 200, loss[loss=0.2545, simple_loss=0.3105, pruned_loss=0.07242, ctc_loss=0.134, over 18324.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2861, pruned_loss=0.06061, ctc_loss=0.1146, over 2435886.34 frames. ], batch size: 85, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:53:52,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=160426.66666666666, ans=0.04949747468305833
+2024-08-26 02:54:01,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=160426.66666666666, ans=0.07
+2024-08-26 02:54:10,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=160426.66666666666, ans=0.2
+2024-08-26 02:54:32,304 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=160480.0, ans=0.0
+2024-08-26 02:54:36,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=160480.0, ans=0.0
+2024-08-26 02:54:49,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=160480.0, ans=0.2
+2024-08-26 02:55:15,723 INFO [train.py:1114] (0/4) Epoch 13, batch 250, loss[loss=0.2129, simple_loss=0.2799, pruned_loss=0.05318, ctc_loss=0.09903, over 19389.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2851, pruned_loss=0.05994, ctc_loss=0.1135, over 2755928.06 frames. ], batch size: 67, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:55:22,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=160640.0, ans=0.125
+2024-08-26 02:55:30,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=160693.33333333334, ans=0.125
+2024-08-26 02:55:34,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=160693.33333333334, ans=0.125
+2024-08-26 02:55:47,659 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.754e+02 2.188e+02 2.577e+02 4.403e+02, threshold=4.375e+02, percent-clipped=2.0
+2024-08-26 02:55:53,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=160800.0, ans=0.2
+2024-08-26 02:55:53,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=160800.0, ans=0.05
+2024-08-26 02:56:10,209 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=160853.33333333334, ans=0.1
+2024-08-26 02:56:15,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=160853.33333333334, ans=0.1
+2024-08-26 02:56:23,966 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=160906.66666666666, ans=0.125
+2024-08-26 02:56:43,566 INFO [train.py:1114] (0/4) Epoch 13, batch 300, loss[loss=0.2551, simple_loss=0.3096, pruned_loss=0.07454, ctc_loss=0.1288, over 19506.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2854, pruned_loss=0.06006, ctc_loss=0.1135, over 3000371.21 frames. ], batch size: 61, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:56:50,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=160906.66666666666, ans=0.2
+2024-08-26 02:56:56,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=160906.66666666666, ans=0.125
+2024-08-26 02:57:19,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=161013.33333333334, ans=0.2
+2024-08-26 02:57:34,859 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.84 vs. limit=10.0
+2024-08-26 02:57:37,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.99 vs. limit=15.0
+2024-08-26 02:57:45,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer_ff2.min_abs, batch_count=161120.0, ans=0.1
+2024-08-26 02:57:50,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.20 vs. limit=22.5
+2024-08-26 02:57:50,484 INFO [train.py:1114] (0/4) Epoch 13, batch 350, loss[loss=0.2038, simple_loss=0.257, pruned_loss=0.05432, ctc_loss=0.1049, over 19781.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2855, pruned_loss=0.05992, ctc_loss=0.1133, over 3190650.97 frames. ], batch size: 48, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:57:57,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=161173.33333333334, ans=0.125
+2024-08-26 02:57:57,996 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.79 vs. limit=10.0
+2024-08-26 02:58:09,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=161226.66666666666, ans=0.0
+2024-08-26 02:58:17,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=161280.0, ans=0.125
+2024-08-26 02:58:25,614 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.772e+02 2.039e+02 2.354e+02 3.759e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-26 02:59:24,157 INFO [train.py:1114] (0/4) Epoch 13, batch 400, loss[loss=0.2535, simple_loss=0.3033, pruned_loss=0.07513, ctc_loss=0.1338, over 19512.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2856, pruned_loss=0.0603, ctc_loss=0.1136, over 3342951.10 frames. ], batch size: 54, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 02:59:29,399 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.39 vs. limit=10.0
+2024-08-26 02:59:53,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=161440.0, ans=0.2
+2024-08-26 03:00:07,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=161493.33333333334, ans=0.0
+2024-08-26 03:00:43,178 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.32 vs. limit=15.0
+2024-08-26 03:00:44,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=161600.0, ans=0.125
+2024-08-26 03:00:46,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=161600.0, ans=0.025
+2024-08-26 03:00:52,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=161600.0, ans=0.125
+2024-08-26 03:01:10,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=161653.33333333334, ans=0.1
+2024-08-26 03:01:53,835 INFO [train.py:1114] (0/4) Epoch 13, batch 450, loss[loss=0.2194, simple_loss=0.2934, pruned_loss=0.05228, ctc_loss=0.102, over 19616.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2863, pruned_loss=0.06059, ctc_loss=0.1143, over 3451082.16 frames. ], batch size: 55, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:01:54,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=161706.66666666666, ans=0.125
+2024-08-26 03:02:04,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=161706.66666666666, ans=15.0
+2024-08-26 03:02:14,628 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.85 vs. limit=10.0
+2024-08-26 03:02:50,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=161813.33333333334, ans=0.125
+2024-08-26 03:02:57,039 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.35 vs. limit=15.0
+2024-08-26 03:03:10,115 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.726e+02 2.085e+02 2.754e+02 4.301e+02, threshold=4.170e+02, percent-clipped=3.0
+2024-08-26 03:03:29,141 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.84 vs. limit=15.0
+2024-08-26 03:03:51,757 INFO [train.py:1114] (0/4) Epoch 13, batch 500, loss[loss=0.2272, simple_loss=0.2924, pruned_loss=0.05933, ctc_loss=0.1084, over 19664.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2853, pruned_loss=0.06001, ctc_loss=0.113, over 3546593.79 frames. ], batch size: 63, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:04:16,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=161973.33333333334, ans=0.125
+2024-08-26 03:04:36,753 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=162026.66666666666, ans=0.0
+2024-08-26 03:05:02,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=162026.66666666666, ans=0.125
+2024-08-26 03:05:04,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=162080.0, ans=0.125
+2024-08-26 03:05:06,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=162080.0, ans=0.0
+2024-08-26 03:05:20,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=162080.0, ans=0.0
+2024-08-26 03:05:27,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=162133.33333333334, ans=0.05
+2024-08-26 03:05:35,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=162186.66666666666, ans=0.125
+2024-08-26 03:06:00,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=162186.66666666666, ans=0.2
+2024-08-26 03:06:03,081 INFO [train.py:1114] (0/4) Epoch 13, batch 550, loss[loss=0.245, simple_loss=0.3025, pruned_loss=0.06901, ctc_loss=0.1235, over 19314.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2854, pruned_loss=0.06031, ctc_loss=0.1136, over 3608138.28 frames. ], batch size: 71, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:06:10,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=162240.0, ans=0.125
+2024-08-26 03:06:26,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=162293.33333333334, ans=0.125
+2024-08-26 03:06:37,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=162346.66666666666, ans=0.0
+2024-08-26 03:06:38,275 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.07 vs. limit=15.0
+2024-08-26 03:06:39,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.01 vs. limit=15.0
+2024-08-26 03:06:47,057 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 1.758e+02 1.954e+02 2.485e+02 4.688e+02, threshold=3.908e+02, percent-clipped=2.0
+2024-08-26 03:07:01,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=162400.0, ans=0.0
+2024-08-26 03:07:24,286 INFO [train.py:1114] (0/4) Epoch 13, batch 600, loss[loss=0.2683, simple_loss=0.3173, pruned_loss=0.08046, ctc_loss=0.146, over 19428.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2852, pruned_loss=0.06004, ctc_loss=0.1128, over 3665328.45 frames. ], batch size: 67, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:07:27,587 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.91 vs. limit=15.0
+2024-08-26 03:07:48,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=162560.0, ans=0.0
+2024-08-26 03:07:52,004 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.84 vs. limit=22.5
+2024-08-26 03:08:34,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=162666.66666666666, ans=0.125
+2024-08-26 03:09:14,963 INFO [train.py:1114] (0/4) Epoch 13, batch 650, loss[loss=0.2021, simple_loss=0.2763, pruned_loss=0.04641, ctc_loss=0.08764, over 19748.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.285, pruned_loss=0.05988, ctc_loss=0.1125, over 3715452.02 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:09:25,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=162773.33333333334, ans=0.125
+2024-08-26 03:09:26,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=162773.33333333334, ans=0.125
+2024-08-26 03:09:54,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=162880.0, ans=0.0
+2024-08-26 03:09:56,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=162880.0, ans=0.0
+2024-08-26 03:10:09,866 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.755e+02 2.119e+02 2.960e+02 5.119e+02, threshold=4.237e+02, percent-clipped=6.0
+2024-08-26 03:10:16,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=162933.33333333334, ans=0.0
+2024-08-26 03:10:16,525 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.64 vs. limit=22.5
+2024-08-26 03:10:17,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162933.33333333334, ans=0.1
+2024-08-26 03:10:36,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=162986.66666666666, ans=0.125
+2024-08-26 03:10:36,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=162986.66666666666, ans=0.0
+2024-08-26 03:10:39,747 INFO [train.py:1114] (0/4) Epoch 13, batch 700, loss[loss=0.2031, simple_loss=0.2731, pruned_loss=0.04795, ctc_loss=0.09305, over 19719.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2858, pruned_loss=0.06019, ctc_loss=0.1132, over 3748105.30 frames. ], batch size: 51, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:11:10,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=163146.66666666666, ans=0.125
+2024-08-26 03:11:32,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=163200.0, ans=0.125
+2024-08-26 03:11:56,189 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.46 vs. limit=15.0
+2024-08-26 03:12:00,798 INFO [train.py:1114] (0/4) Epoch 13, batch 750, loss[loss=0.2366, simple_loss=0.3016, pruned_loss=0.0618, ctc_loss=0.12, over 19514.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2853, pruned_loss=0.05996, ctc_loss=0.1129, over 3775227.81 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:12:09,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=163306.66666666666, ans=0.1
+2024-08-26 03:12:20,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=163360.0, ans=0.025
+2024-08-26 03:12:36,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=163413.33333333334, ans=0.1
+2024-08-26 03:12:43,032 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.800e+02 2.310e+02 2.882e+02 4.749e+02, threshold=4.619e+02, percent-clipped=2.0
+2024-08-26 03:12:45,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=163466.66666666666, ans=0.125
+2024-08-26 03:12:56,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=163520.0, ans=0.125
+2024-08-26 03:13:56,360 INFO [train.py:1114] (0/4) Epoch 13, batch 800, loss[loss=0.1997, simple_loss=0.263, pruned_loss=0.04875, ctc_loss=0.09744, over 19405.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2846, pruned_loss=0.05953, ctc_loss=0.1121, over 3796428.28 frames. ], batch size: 48, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:14:17,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=163626.66666666666, ans=0.0
+2024-08-26 03:14:32,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=163680.0, ans=0.125
+2024-08-26 03:14:41,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=163680.0, ans=0.07
+2024-08-26 03:14:50,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=163733.33333333334, ans=0.125
+2024-08-26 03:15:04,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=163786.66666666666, ans=0.125
+2024-08-26 03:15:13,886 INFO [train.py:1114] (0/4) Epoch 13, batch 850, loss[loss=0.235, simple_loss=0.3005, pruned_loss=0.06111, ctc_loss=0.1182, over 19647.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2839, pruned_loss=0.05917, ctc_loss=0.1113, over 3815611.85 frames. ], batch size: 59, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:15:23,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=163840.0, ans=0.125
+2024-08-26 03:15:25,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=163893.33333333334, ans=0.0
+2024-08-26 03:16:04,999 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=163946.66666666666, ans=0.0
+2024-08-26 03:16:11,613 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.727e+02 1.948e+02 2.271e+02 3.773e+02, threshold=3.897e+02, percent-clipped=0.0
+2024-08-26 03:16:17,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=164000.0, ans=0.0
+2024-08-26 03:16:38,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=164106.66666666666, ans=0.125
+2024-08-26 03:16:39,641 INFO [train.py:1114] (0/4) Epoch 13, batch 900, loss[loss=0.2042, simple_loss=0.2607, pruned_loss=0.05372, ctc_loss=0.1009, over 19817.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2847, pruned_loss=0.05964, ctc_loss=0.1122, over 3819191.02 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:16:58,289 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.30 vs. limit=22.5
+2024-08-26 03:17:11,912 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=164213.33333333334, ans=0.125
+2024-08-26 03:17:24,780 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:17:39,456 INFO [train.py:1114] (0/4) Epoch 13, batch 950, loss[loss=0.2366, simple_loss=0.2818, pruned_loss=0.06987, ctc_loss=0.1291, over 19491.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2853, pruned_loss=0.06016, ctc_loss=0.113, over 3821463.59 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:18:42,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=164480.0, ans=0.025
+2024-08-26 03:18:47,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=164480.0, ans=0.125
+2024-08-26 03:18:52,283 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.763e+02 2.081e+02 2.549e+02 5.575e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-26 03:18:56,680 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=164533.33333333334, ans=0.2
+2024-08-26 03:18:57,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=164533.33333333334, ans=0.125
+2024-08-26 03:19:26,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=164586.66666666666, ans=0.1
+2024-08-26 03:19:29,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=164640.0, ans=0.1
+2024-08-26 03:19:29,891 INFO [train.py:1114] (0/4) Epoch 13, batch 1000, loss[loss=0.1883, simple_loss=0.2548, pruned_loss=0.04514, ctc_loss=0.07869, over 19854.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2856, pruned_loss=0.06052, ctc_loss=0.1135, over 3817729.86 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:19:45,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=164693.33333333334, ans=0.125
+2024-08-26 03:19:51,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=164693.33333333334, ans=0.125
+2024-08-26 03:19:57,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=164746.66666666666, ans=0.0
+2024-08-26 03:20:26,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=164853.33333333334, ans=0.125
+2024-08-26 03:20:27,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=164853.33333333334, ans=0.0
+2024-08-26 03:20:32,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=164853.33333333334, ans=0.0
+2024-08-26 03:20:35,683 INFO [train.py:1114] (0/4) Epoch 13, batch 1050, loss[loss=0.2247, simple_loss=0.2904, pruned_loss=0.05894, ctc_loss=0.1027, over 19837.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2848, pruned_loss=0.06013, ctc_loss=0.1129, over 3823736.33 frames. ], batch size: 57, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:20:44,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=164906.66666666666, ans=0.1
+2024-08-26 03:20:58,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=165013.33333333334, ans=0.07
+2024-08-26 03:20:58,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=165013.33333333334, ans=0.025
+2024-08-26 03:21:05,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=165013.33333333334, ans=0.125
+2024-08-26 03:21:08,101 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.698e+02 1.997e+02 2.318e+02 3.616e+02, threshold=3.994e+02, percent-clipped=0.0
+2024-08-26 03:21:18,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=165066.66666666666, ans=0.0
+2024-08-26 03:21:37,109 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.39 vs. limit=15.0
+2024-08-26 03:21:44,506 INFO [train.py:1114] (0/4) Epoch 13, batch 1100, loss[loss=0.2278, simple_loss=0.2893, pruned_loss=0.06085, ctc_loss=0.1113, over 19566.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2848, pruned_loss=0.05997, ctc_loss=0.1126, over 3830771.65 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:22:16,222 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.09 vs. limit=15.0
+2024-08-26 03:22:17,136 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:22:36,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=165386.66666666666, ans=0.0
+2024-08-26 03:22:53,424 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.52 vs. limit=15.0
+2024-08-26 03:22:57,619 INFO [train.py:1114] (0/4) Epoch 13, batch 1150, loss[loss=0.2392, simple_loss=0.2891, pruned_loss=0.06833, ctc_loss=0.1317, over 19597.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2847, pruned_loss=0.06004, ctc_loss=0.1128, over 3830498.68 frames. ], batch size: 52, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:23:15,358 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.18 vs. limit=22.5
+2024-08-26 03:23:19,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=165493.33333333334, ans=0.07
+2024-08-26 03:23:37,743 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=165546.66666666666, ans=0.125
+2024-08-26 03:23:38,677 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.729e+02 2.006e+02 2.456e+02 7.202e+02, threshold=4.012e+02, percent-clipped=3.0
+2024-08-26 03:23:40,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=165600.0, ans=0.125
+2024-08-26 03:24:03,510 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.47 vs. limit=15.0
+2024-08-26 03:24:11,590 INFO [train.py:1114] (0/4) Epoch 13, batch 1200, loss[loss=0.2083, simple_loss=0.2795, pruned_loss=0.05089, ctc_loss=0.08842, over 19841.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.286, pruned_loss=0.06054, ctc_loss=0.1139, over 3825729.86 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:24:23,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=165706.66666666666, ans=0.2
+2024-08-26 03:24:34,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=165760.0, ans=0.2
+2024-08-26 03:26:10,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=165920.0, ans=0.125
+2024-08-26 03:26:18,753 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.78 vs. limit=15.0
+2024-08-26 03:26:20,506 INFO [train.py:1114] (0/4) Epoch 13, batch 1250, loss[loss=0.2155, simple_loss=0.2831, pruned_loss=0.05276, ctc_loss=0.1059, over 19518.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.285, pruned_loss=0.05952, ctc_loss=0.1119, over 3843655.39 frames. ], batch size: 61, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:26:25,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=165973.33333333334, ans=0.1
+2024-08-26 03:26:45,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=166026.66666666666, ans=0.125
+2024-08-26 03:27:18,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=166080.0, ans=0.0
+2024-08-26 03:27:23,469 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.311e+02 1.715e+02 1.869e+02 2.285e+02 3.930e+02, threshold=3.738e+02, percent-clipped=0.0
+2024-08-26 03:27:28,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=166080.0, ans=0.1
+2024-08-26 03:27:46,282 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.07 vs. limit=12.0
+2024-08-26 03:28:00,547 INFO [train.py:1114] (0/4) Epoch 13, batch 1300, loss[loss=0.2488, simple_loss=0.3018, pruned_loss=0.07149, ctc_loss=0.1319, over 18789.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2844, pruned_loss=0.05943, ctc_loss=0.1116, over 3847040.83 frames. ], batch size: 76, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:28:43,349 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=166293.33333333334, ans=0.0
+2024-08-26 03:29:33,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=166453.33333333334, ans=0.125
+2024-08-26 03:30:19,054 INFO [train.py:1114] (0/4) Epoch 13, batch 1350, loss[loss=0.2186, simple_loss=0.2801, pruned_loss=0.05651, ctc_loss=0.1102, over 19774.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.2835, pruned_loss=0.05876, ctc_loss=0.1105, over 3858652.72 frames. ], batch size: 54, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:30:34,813 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=166560.0, ans=0.125
+2024-08-26 03:30:54,600 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=166613.33333333334, ans=0.0
+2024-08-26 03:31:08,773 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.736e+02 2.053e+02 2.622e+02 5.263e+02, threshold=4.106e+02, percent-clipped=6.0
+2024-08-26 03:31:10,624 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.10 vs. limit=10.0
+2024-08-26 03:31:12,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=166666.66666666666, ans=0.0
+2024-08-26 03:31:40,386 INFO [train.py:1114] (0/4) Epoch 13, batch 1400, loss[loss=0.2035, simple_loss=0.2556, pruned_loss=0.0546, ctc_loss=0.1053, over 19692.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2834, pruned_loss=0.05886, ctc_loss=0.1108, over 3864667.05 frames. ], batch size: 46, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:31:53,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=166773.33333333334, ans=0.1
+2024-08-26 03:32:37,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=166880.0, ans=0.125
+2024-08-26 03:32:56,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=166933.33333333334, ans=0.125
+2024-08-26 03:33:07,201 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.75 vs. limit=15.0
+2024-08-26 03:33:14,110 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.33 vs. limit=15.0
+2024-08-26 03:33:18,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=166986.66666666666, ans=0.125
+2024-08-26 03:33:21,477 INFO [train.py:1114] (0/4) Epoch 13, batch 1450, loss[loss=0.212, simple_loss=0.2774, pruned_loss=0.05286, ctc_loss=0.1023, over 19679.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.284, pruned_loss=0.05917, ctc_loss=0.1112, over 3862526.47 frames. ], batch size: 63, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:33:26,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=167040.0, ans=0.0
+2024-08-26 03:33:59,242 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.756e+02 1.937e+02 2.380e+02 3.895e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-26 03:34:20,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=167253.33333333334, ans=0.04949747468305833
+2024-08-26 03:34:30,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=167253.33333333334, ans=0.1
+2024-08-26 03:34:33,941 INFO [train.py:1114] (0/4) Epoch 13, batch 1500, loss[loss=0.2257, simple_loss=0.2935, pruned_loss=0.05792, ctc_loss=0.1051, over 19580.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2854, pruned_loss=0.06008, ctc_loss=0.1127, over 3861682.56 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:34:37,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=167306.66666666666, ans=0.125
+2024-08-26 03:34:55,999 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.22 vs. limit=10.0
+2024-08-26 03:35:21,501 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.17 vs. limit=15.0
+2024-08-26 03:35:25,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=167520.0, ans=0.0
+2024-08-26 03:35:32,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=167520.0, ans=0.0
+2024-08-26 03:35:43,249 INFO [train.py:1114] (0/4) Epoch 13, batch 1550, loss[loss=0.2555, simple_loss=0.3075, pruned_loss=0.07421, ctc_loss=0.1375, over 19588.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2852, pruned_loss=0.06003, ctc_loss=0.1128, over 3846022.74 frames. ], batch size: 60, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:35:56,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=167626.66666666666, ans=0.2
+2024-08-26 03:35:57,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=167626.66666666666, ans=0.0
+2024-08-26 03:36:18,497 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.13 vs. limit=15.0
+2024-08-26 03:36:25,732 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.65 vs. limit=12.0
+2024-08-26 03:36:35,365 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.778e+02 2.054e+02 2.767e+02 5.252e+02, threshold=4.108e+02, percent-clipped=7.0
+2024-08-26 03:36:44,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=167733.33333333334, ans=0.09899494936611666
+2024-08-26 03:37:02,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=167786.66666666666, ans=0.125
+2024-08-26 03:37:05,306 INFO [train.py:1114] (0/4) Epoch 13, batch 1600, loss[loss=0.2075, simple_loss=0.285, pruned_loss=0.04666, ctc_loss=0.09161, over 19856.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2853, pruned_loss=0.06003, ctc_loss=0.113, over 3836438.34 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:37:40,187 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.75 vs. limit=10.0
+2024-08-26 03:37:46,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=167946.66666666666, ans=0.125
+2024-08-26 03:38:19,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=168000.0, ans=0.125
+2024-08-26 03:38:35,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=168106.66666666666, ans=0.1
+2024-08-26 03:38:36,006 INFO [train.py:1114] (0/4) Epoch 13, batch 1650, loss[loss=0.2236, simple_loss=0.2877, pruned_loss=0.05634, ctc_loss=0.1171, over 19662.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2852, pruned_loss=0.06005, ctc_loss=0.1131, over 3833199.88 frames. ], batch size: 59, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:38:48,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=168106.66666666666, ans=0.125
+2024-08-26 03:39:17,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=168213.33333333334, ans=0.0
+2024-08-26 03:39:20,045 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.825e+02 2.209e+02 2.614e+02 4.167e+02, threshold=4.418e+02, percent-clipped=2.0
+2024-08-26 03:39:37,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=168320.0, ans=0.0
+2024-08-26 03:39:42,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=168320.0, ans=0.0
+2024-08-26 03:40:00,095 INFO [train.py:1114] (0/4) Epoch 13, batch 1700, loss[loss=0.1931, simple_loss=0.2542, pruned_loss=0.04796, ctc_loss=0.09034, over 19674.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2845, pruned_loss=0.05933, ctc_loss=0.1117, over 3847041.83 frames. ], batch size: 46, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:40:14,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=168373.33333333334, ans=0.125
+2024-08-26 03:40:51,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=168533.33333333334, ans=0.2
+2024-08-26 03:41:06,697 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=168586.66666666666, ans=0.125
+2024-08-26 03:41:10,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=168640.0, ans=0.125
+2024-08-26 03:41:17,849 INFO [train.py:1114] (0/4) Epoch 13, batch 1750, loss[loss=0.1964, simple_loss=0.255, pruned_loss=0.04963, ctc_loss=0.09642, over 19627.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2837, pruned_loss=0.05898, ctc_loss=0.1111, over 3852460.98 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:42:01,100 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.702e+02 2.065e+02 2.813e+02 5.109e+02, threshold=4.129e+02, percent-clipped=2.0
+2024-08-26 03:42:15,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=168800.0, ans=0.025
+2024-08-26 03:42:29,306 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=168853.33333333334, ans=0.025
+2024-08-26 03:42:45,946 INFO [train.py:1114] (0/4) Epoch 13, batch 1800, loss[loss=0.2513, simple_loss=0.3125, pruned_loss=0.069, ctc_loss=0.1304, over 19597.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2841, pruned_loss=0.05913, ctc_loss=0.1112, over 3854137.29 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:43:05,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=168960.0, ans=0.0
+2024-08-26 03:43:10,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=169013.33333333334, ans=0.2
+2024-08-26 03:43:22,767 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=169013.33333333334, ans=0.125
+2024-08-26 03:43:29,584 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.53 vs. limit=15.0
+2024-08-26 03:43:41,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=169120.0, ans=0.0
+2024-08-26 03:43:47,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=169120.0, ans=0.0
+2024-08-26 03:43:53,523 INFO [train.py:1114] (0/4) Epoch 13, batch 1850, loss[loss=0.2421, simple_loss=0.3019, pruned_loss=0.06442, ctc_loss=0.1335, over 19579.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2838, pruned_loss=0.05924, ctc_loss=0.1113, over 3856853.35 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:43:59,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=169173.33333333334, ans=0.0
+2024-08-26 03:44:11,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=169226.66666666666, ans=0.125
+2024-08-26 03:44:21,463 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=169280.0, ans=0.05
+2024-08-26 03:44:22,655 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:44:29,682 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.936e+02 2.666e+02 3.402e+02 5.252e+02, threshold=5.332e+02, percent-clipped=13.0
+2024-08-26 03:44:43,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=169386.66666666666, ans=0.125
+2024-08-26 03:44:56,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=169386.66666666666, ans=0.125
+2024-08-26 03:45:07,841 INFO [train.py:1114] (0/4) Epoch 13, batch 1900, loss[loss=0.2428, simple_loss=0.3022, pruned_loss=0.066, ctc_loss=0.1284, over 19653.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2845, pruned_loss=0.05957, ctc_loss=0.112, over 3861046.47 frames. ], batch size: 59, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:45:15,043 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.21 vs. limit=15.0
+2024-08-26 03:45:18,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=169493.33333333334, ans=0.1
+2024-08-26 03:45:23,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=169493.33333333334, ans=0.125
+2024-08-26 03:45:48,909 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=169600.0, ans=0.1
+2024-08-26 03:45:54,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=169600.0, ans=0.1
+2024-08-26 03:46:02,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=169600.0, ans=0.125
+2024-08-26 03:46:24,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=169706.66666666666, ans=0.1
+2024-08-26 03:46:29,180 INFO [train.py:1114] (0/4) Epoch 13, batch 1950, loss[loss=0.2093, simple_loss=0.2687, pruned_loss=0.05452, ctc_loss=0.1021, over 19587.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.285, pruned_loss=0.0595, ctc_loss=0.1117, over 3870178.17 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:46:44,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=169760.0, ans=0.0
+2024-08-26 03:46:55,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=169760.0, ans=0.125
+2024-08-26 03:47:25,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=16.65 vs. limit=15.0
+2024-08-26 03:50:26,633 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.795e+02 2.018e+02 2.323e+02 3.502e+02, threshold=4.036e+02, percent-clipped=0.0
+2024-08-26 04:18:42,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=169920.0, ans=0.125
+2024-08-26 04:22:39,281 INFO [train.py:1114] (0/4) Epoch 13, batch 2000, loss[loss=0.2162, simple_loss=0.2719, pruned_loss=0.05887, ctc_loss=0.107, over 19613.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2861, pruned_loss=0.06017, ctc_loss=0.1131, over 3853235.11 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 04:22:40,550 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=169973.33333333334, ans=0.125
+2024-08-26 04:28:30,736 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.58 vs. limit=10.0
+2024-08-26 04:34:05,978 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.50 vs. limit=6.0
+2024-08-26 04:49:48,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=170080.0, ans=0.125
+2024-08-26 04:49:49,235 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.34 vs. limit=15.0
+2024-08-26 04:58:16,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=170133.33333333334, ans=0.125
+2024-08-26 05:04:26,351 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=170133.33333333334, ans=0.0
+2024-08-26 05:08:12,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=170186.66666666666, ans=0.1
+2024-08-26 05:17:15,370 INFO [train.py:1114] (0/4) Epoch 13, batch 2050, loss[loss=0.2317, simple_loss=0.2845, pruned_loss=0.06497, ctc_loss=0.1221, over 19708.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2856, pruned_loss=0.06038, ctc_loss=0.1134, over 3851156.12 frames. ], batch size: 47, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:17:50,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=170240.0, ans=0.125
+2024-08-26 05:17:51,314 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.08 vs. limit=15.0
+2024-08-26 05:20:46,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=170240.0, ans=0.0
+2024-08-26 05:22:11,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=170293.33333333334, ans=0.125
+2024-08-26 05:22:12,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=170293.33333333334, ans=0.2
+2024-08-26 05:34:32,862 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.739e+02 2.095e+02 2.592e+02 3.598e+02, threshold=4.189e+02, percent-clipped=0.0
+2024-08-26 05:35:15,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=170400.0, ans=0.035
+2024-08-26 05:37:54,966 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.62 vs. limit=15.0
+2024-08-26 05:38:21,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=170400.0, ans=0.0
+2024-08-26 05:45:21,792 INFO [train.py:1114] (0/4) Epoch 13, batch 2100, loss[loss=0.1964, simple_loss=0.2652, pruned_loss=0.04663, ctc_loss=0.0859, over 19762.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2848, pruned_loss=0.05969, ctc_loss=0.1121, over 3858379.74 frames. ], batch size: 54, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:47:11,785 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=170506.66666666666, ans=0.2
+2024-08-26 05:50:52,143 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.76 vs. limit=15.0
+2024-08-26 05:51:45,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=170560.0, ans=0.125
+2024-08-26 05:52:25,916 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.62 vs. limit=15.0
+2024-08-26 05:53:15,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=170613.33333333334, ans=0.125
+2024-08-26 05:53:29,280 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-32000.pt
+2024-08-26 05:56:41,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=170720.0, ans=0.0
+2024-08-26 05:57:05,751 INFO [train.py:1114] (0/4) Epoch 13, batch 2150, loss[loss=0.2078, simple_loss=0.2746, pruned_loss=0.05189, ctc_loss=0.09315, over 19575.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.284, pruned_loss=0.05937, ctc_loss=0.1114, over 3869607.32 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:58:18,622 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.97 vs. limit=15.0
+2024-08-26 06:00:56,697 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=170826.66666666666, ans=0.0
+2024-08-26 06:01:20,231 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=170880.0, ans=0.05
+2024-08-26 06:02:02,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=170880.0, ans=0.125
+2024-08-26 06:02:10,737 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.801e+02 2.071e+02 2.646e+02 5.963e+02, threshold=4.141e+02, percent-clipped=6.0
+2024-08-26 06:03:39,260 INFO [train.py:1114] (0/4) Epoch 13, batch 2200, loss[loss=0.2534, simple_loss=0.3168, pruned_loss=0.06919, ctc_loss=0.1293, over 19592.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2838, pruned_loss=0.05901, ctc_loss=0.1106, over 3867172.51 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 06:03:46,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=171040.0, ans=0.1
+2024-08-26 06:04:25,405 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.01 vs. limit=15.0
+2024-08-26 06:05:08,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=171200.0, ans=0.1
+2024-08-26 06:05:28,855 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=171200.0, ans=0.05
+2024-08-26 06:05:57,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=171253.33333333334, ans=0.0
+2024-08-26 06:06:26,735 INFO [train.py:1114] (0/4) Epoch 13, batch 2250, loss[loss=0.267, simple_loss=0.3206, pruned_loss=0.07899, ctc_loss=0.1388, over 19623.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2843, pruned_loss=0.05926, ctc_loss=0.1112, over 3867954.74 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 06:06:30,659 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.08 vs. limit=22.5
+2024-08-26 06:06:31,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=171306.66666666666, ans=0.1
+2024-08-26 06:08:30,404 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.765e+02 2.070e+02 2.599e+02 3.761e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-26 06:09:25,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=171466.66666666666, ans=0.04949747468305833
+2024-08-26 06:10:16,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=171520.0, ans=0.2
+2024-08-26 06:10:19,073 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.59 vs. limit=22.5
+2024-08-26 06:10:19,750 INFO [train.py:1114] (0/4) Epoch 13, batch 2300, loss[loss=0.1933, simple_loss=0.2575, pruned_loss=0.04745, ctc_loss=0.08533, over 19511.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2837, pruned_loss=0.05932, ctc_loss=0.1116, over 3862602.56 frames. ], batch size: 49, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:10:27,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=171573.33333333334, ans=0.0
+2024-08-26 06:11:02,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=171680.0, ans=0.0
+2024-08-26 06:11:15,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=171680.0, ans=0.125
+2024-08-26 06:11:22,323 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.23 vs. limit=22.5
+2024-08-26 06:11:41,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=171786.66666666666, ans=22.5
+2024-08-26 06:11:43,315 INFO [train.py:1114] (0/4) Epoch 13, batch 2350, loss[loss=0.219, simple_loss=0.2834, pruned_loss=0.05625, ctc_loss=0.1055, over 19693.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2835, pruned_loss=0.059, ctc_loss=0.1109, over 3865403.74 frames. ], batch size: 63, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:11:47,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=171840.0, ans=0.125
+2024-08-26 06:12:04,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=171946.66666666666, ans=0.125
+2024-08-26 06:12:09,763 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=171946.66666666666, ans=0.125
+2024-08-26 06:12:16,624 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.773e+02 2.247e+02 3.255e+02 4.983e+02, threshold=4.494e+02, percent-clipped=2.0
+2024-08-26 06:12:20,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=172000.0, ans=0.0
+2024-08-26 06:12:44,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=172053.33333333334, ans=0.0
+2024-08-26 06:12:46,290 INFO [train.py:1114] (0/4) Epoch 13, batch 2400, loss[loss=0.2608, simple_loss=0.3182, pruned_loss=0.07306, ctc_loss=0.1434, over 19379.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2859, pruned_loss=0.06023, ctc_loss=0.1132, over 3859690.14 frames. ], batch size: 67, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:12:58,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=172106.66666666666, ans=0.125
+2024-08-26 06:13:12,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=172160.0, ans=0.125
+2024-08-26 06:13:23,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=172160.0, ans=0.2
+2024-08-26 06:13:29,970 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.57 vs. limit=15.0
+2024-08-26 06:13:41,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=172266.66666666666, ans=0.125
+2024-08-26 06:14:03,511 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=9.72 vs. limit=12.0
+2024-08-26 06:14:08,367 INFO [train.py:1114] (0/4) Epoch 13, batch 2450, loss[loss=0.3072, simple_loss=0.3251, pruned_loss=0.1032, ctc_loss=0.2069, over 12724.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.2902, pruned_loss=0.06354, ctc_loss=0.1196, over 3732461.70 frames. ], batch size: 140, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:14:11,490 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.92 vs. limit=15.0
+2024-08-26 06:14:34,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=172480.0, ans=0.1
+2024-08-26 06:14:35,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=172480.0, ans=0.125
+2024-08-26 06:14:43,293 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.935e+02 2.072e+02 2.350e+02 4.711e+02, threshold=4.143e+02, percent-clipped=2.0
+2024-08-26 06:14:57,421 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-26 06:16:27,490 INFO [train.py:1114] (0/4) Epoch 14, batch 0, loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:16:27,490 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 06:17:56,655 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.1238, 3.1051, 3.6303, 2.8044], device='cuda:0')
+2024-08-26 06:17:58,789 INFO [train.py:1146] (0/4) Epoch 14, validation: loss=0.1898, simple_loss=0.2778, pruned_loss=0.03769, ctc_loss=0.06578, over 944034.00 frames.
+2024-08-26 06:18:12,591 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 14058MB
+2024-08-26 06:18:29,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=172634.66666666666, ans=0.125
+2024-08-26 06:18:32,906 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.71 vs. limit=12.0
+2024-08-26 06:19:24,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=172688.0, ans=0.0
+2024-08-26 06:19:28,054 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.76 vs. limit=22.5
+2024-08-26 06:19:33,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=172741.33333333334, ans=0.125
+2024-08-26 06:19:53,752 INFO [train.py:1114] (0/4) Epoch 14, batch 50, loss[loss=0.2156, simple_loss=0.2724, pruned_loss=0.05824, ctc_loss=0.1059, over 19713.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2863, pruned_loss=0.05912, ctc_loss=0.1119, over 844800.35 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:20:03,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172848.0, ans=0.1
+2024-08-26 06:20:05,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=172848.0, ans=0.125
+2024-08-26 06:20:19,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=172901.33333333334, ans=0.0
+2024-08-26 06:20:25,160 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.00 vs. limit=22.5
+2024-08-26 06:20:42,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172901.33333333334, ans=0.1
+2024-08-26 06:20:45,405 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.54 vs. limit=10.0
+2024-08-26 06:20:55,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=172954.66666666666, ans=0.0
+2024-08-26 06:21:17,215 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.738e+02 2.047e+02 2.487e+02 4.948e+02, threshold=4.095e+02, percent-clipped=4.0
+2024-08-26 06:21:19,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=173061.33333333334, ans=0.0
+2024-08-26 06:21:51,844 INFO [train.py:1114] (0/4) Epoch 14, batch 100, loss[loss=0.2338, simple_loss=0.2884, pruned_loss=0.0645, ctc_loss=0.1257, over 19723.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2889, pruned_loss=0.06068, ctc_loss=0.1147, over 1499143.56 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:21:54,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=173114.66666666666, ans=0.125
+2024-08-26 06:22:47,273 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:22:55,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=173274.66666666666, ans=0.0
+2024-08-26 06:23:30,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=173328.0, ans=0.125
+2024-08-26 06:23:38,131 INFO [train.py:1114] (0/4) Epoch 14, batch 150, loss[loss=0.1824, simple_loss=0.2476, pruned_loss=0.04345, ctc_loss=0.07581, over 19719.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2858, pruned_loss=0.05892, ctc_loss=0.1113, over 2027452.09 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:24:00,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.34 vs. limit=15.0
+2024-08-26 06:24:12,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=173434.66666666666, ans=0.2
+2024-08-26 06:24:31,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=173541.33333333334, ans=0.0
+2024-08-26 06:24:46,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=173594.66666666666, ans=0.125
+2024-08-26 06:24:49,746 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.676e+02 1.898e+02 2.213e+02 4.155e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 06:25:00,466 INFO [train.py:1114] (0/4) Epoch 14, batch 200, loss[loss=0.2518, simple_loss=0.3073, pruned_loss=0.07121, ctc_loss=0.1345, over 18353.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2834, pruned_loss=0.05849, ctc_loss=0.1104, over 2435099.22 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:25:10,607 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.55 vs. limit=22.5
+2024-08-26 06:25:22,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=173648.0, ans=0.0
+2024-08-26 06:25:30,930 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=173701.33333333334, ans=0.0
+2024-08-26 06:25:44,598 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.46 vs. limit=22.5
+2024-08-26 06:25:53,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=173808.0, ans=0.125
+2024-08-26 06:26:09,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=173861.33333333334, ans=0.2
+2024-08-26 06:26:13,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=173861.33333333334, ans=0.0
+2024-08-26 06:26:16,062 INFO [train.py:1114] (0/4) Epoch 14, batch 250, loss[loss=0.2341, simple_loss=0.2899, pruned_loss=0.06461, ctc_loss=0.1225, over 19394.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2836, pruned_loss=0.05869, ctc_loss=0.1109, over 2755806.45 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:26:29,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=173914.66666666666, ans=0.125
+2024-08-26 06:26:58,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=174021.33333333334, ans=0.0
+2024-08-26 06:27:18,006 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.683e+02 2.061e+02 2.648e+02 4.927e+02, threshold=4.123e+02, percent-clipped=4.0
+2024-08-26 06:27:21,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=174128.0, ans=0.015
+2024-08-26 06:27:28,140 INFO [train.py:1114] (0/4) Epoch 14, batch 300, loss[loss=0.2278, simple_loss=0.2836, pruned_loss=0.06351, ctc_loss=0.1124, over 19521.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2827, pruned_loss=0.05813, ctc_loss=0.1098, over 3001481.83 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:27:28,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=174181.33333333334, ans=0.125
+2024-08-26 06:27:49,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=174234.66666666666, ans=0.125
+2024-08-26 06:27:53,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=174288.0, ans=0.125
+2024-08-26 06:28:02,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174288.0, ans=0.1
+2024-08-26 06:28:28,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=174394.66666666666, ans=0.125
+2024-08-26 06:28:34,433 INFO [train.py:1114] (0/4) Epoch 14, batch 350, loss[loss=0.1805, simple_loss=0.2447, pruned_loss=0.04208, ctc_loss=0.08036, over 19759.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2822, pruned_loss=0.05768, ctc_loss=0.1088, over 3191850.72 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:29:16,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=174554.66666666666, ans=0.0
+2024-08-26 06:29:28,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=174608.0, ans=0.05
+2024-08-26 06:29:32,482 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.657e+02 1.894e+02 2.440e+02 4.007e+02, threshold=3.787e+02, percent-clipped=0.0
+2024-08-26 06:29:42,958 INFO [train.py:1114] (0/4) Epoch 14, batch 400, loss[loss=0.2454, simple_loss=0.3014, pruned_loss=0.06946, ctc_loss=0.1262, over 19496.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2821, pruned_loss=0.05783, ctc_loss=0.109, over 3342545.93 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:29:58,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=174768.0, ans=0.0
+2024-08-26 06:30:51,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=174928.0, ans=0.0
+2024-08-26 06:30:58,872 INFO [train.py:1114] (0/4) Epoch 14, batch 450, loss[loss=0.2194, simple_loss=0.2907, pruned_loss=0.0533, ctc_loss=0.1039, over 19609.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2825, pruned_loss=0.05801, ctc_loss=0.1095, over 3450915.96 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:31:05,553 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.00 vs. limit=15.0
+2024-08-26 06:31:39,257 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.83 vs. limit=10.0
+2024-08-26 06:32:11,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=175141.33333333334, ans=0.0
+2024-08-26 06:32:14,233 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.63 vs. limit=22.5
+2024-08-26 06:32:20,554 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.45 vs. limit=5.0
+2024-08-26 06:32:27,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=175141.33333333334, ans=0.125
+2024-08-26 06:32:32,612 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.702e+02 1.875e+02 2.205e+02 3.904e+02, threshold=3.749e+02, percent-clipped=2.0
+2024-08-26 06:32:59,817 INFO [train.py:1114] (0/4) Epoch 14, batch 500, loss[loss=0.2444, simple_loss=0.3138, pruned_loss=0.06432, ctc_loss=0.116, over 19681.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2816, pruned_loss=0.05745, ctc_loss=0.1083, over 3546652.26 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:32:59,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=175248.0, ans=0.125
+2024-08-26 06:33:25,582 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.255e-01
+2024-08-26 06:33:25,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=175248.0, ans=0.07
+2024-08-26 06:33:26,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=175301.33333333334, ans=0.125
+2024-08-26 06:34:21,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=175461.33333333334, ans=0.025
+2024-08-26 06:34:32,952 INFO [train.py:1114] (0/4) Epoch 14, batch 550, loss[loss=0.2502, simple_loss=0.3082, pruned_loss=0.06965, ctc_loss=0.1323, over 19273.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2819, pruned_loss=0.05737, ctc_loss=0.1083, over 3608532.80 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:34:36,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=175514.66666666666, ans=0.125
+2024-08-26 06:34:47,156 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.86 vs. limit=6.0
+2024-08-26 06:35:00,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=175568.0, ans=0.125
+2024-08-26 06:35:00,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=175568.0, ans=0.2
+2024-08-26 06:35:24,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=175674.66666666666, ans=0.0
+2024-08-26 06:35:32,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=175674.66666666666, ans=0.125
+2024-08-26 06:35:36,411 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.729e+02 1.957e+02 2.291e+02 4.042e+02, threshold=3.913e+02, percent-clipped=2.0
+2024-08-26 06:35:59,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=175728.0, ans=0.5
+2024-08-26 06:36:15,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=175728.0, ans=0.125
+2024-08-26 06:36:18,853 INFO [train.py:1114] (0/4) Epoch 14, batch 600, loss[loss=0.2363, simple_loss=0.3006, pruned_loss=0.06274, ctc_loss=0.1165, over 19370.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2827, pruned_loss=0.05769, ctc_loss=0.1091, over 3666425.54 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:36:19,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=175781.33333333334, ans=0.0
+2024-08-26 06:36:24,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=175781.33333333334, ans=0.0
+2024-08-26 06:36:27,327 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.02 vs. limit=15.0
+2024-08-26 06:37:41,301 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.52 vs. limit=15.0
+2024-08-26 06:37:44,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.65 vs. limit=22.5
+2024-08-26 06:38:39,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=175888.0, ans=0.125
+2024-08-26 06:38:48,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=175888.0, ans=0.0
+2024-08-26 06:38:54,366 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.31 vs. limit=15.0
+2024-08-26 06:39:24,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=175994.66666666666, ans=0.125
+2024-08-26 06:39:25,933 INFO [train.py:1114] (0/4) Epoch 14, batch 650, loss[loss=0.2378, simple_loss=0.3028, pruned_loss=0.06222, ctc_loss=0.1211, over 19762.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2823, pruned_loss=0.05745, ctc_loss=0.1085, over 3716649.43 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:40:13,731 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.36 vs. limit=15.0
+2024-08-26 06:40:38,354 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.67 vs. limit=15.0
+2024-08-26 06:40:41,709 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.57 vs. limit=15.0
+2024-08-26 06:40:54,862 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.34 vs. limit=15.0
+2024-08-26 06:40:55,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=176154.66666666666, ans=0.125
+2024-08-26 06:40:58,855 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=176154.66666666666, ans=0.2
+2024-08-26 06:41:14,819 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.89 vs. limit=15.0
+2024-08-26 06:41:22,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=176208.0, ans=10.0
+2024-08-26 06:41:24,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=176208.0, ans=0.125
+2024-08-26 06:41:31,385 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.772e+02 2.123e+02 2.635e+02 4.354e+02, threshold=4.247e+02, percent-clipped=3.0
+2024-08-26 06:41:33,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176261.33333333334, ans=0.1
+2024-08-26 06:41:35,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=176261.33333333334, ans=0.025
+2024-08-26 06:41:45,038 INFO [train.py:1114] (0/4) Epoch 14, batch 700, loss[loss=0.2009, simple_loss=0.2688, pruned_loss=0.04809, ctc_loss=0.09225, over 19729.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2827, pruned_loss=0.05752, ctc_loss=0.1087, over 3748596.56 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:41:47,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=176314.66666666666, ans=0.125
+2024-08-26 06:41:48,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=176314.66666666666, ans=0.04949747468305833
+2024-08-26 06:41:51,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=176314.66666666666, ans=0.0
+2024-08-26 06:41:52,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=176314.66666666666, ans=0.025
+2024-08-26 06:42:03,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=176368.0, ans=0.125
+2024-08-26 06:42:08,791 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.62 vs. limit=6.0
+2024-08-26 06:42:20,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=176421.33333333334, ans=0.2
+2024-08-26 06:42:27,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=176474.66666666666, ans=0.125
+2024-08-26 06:42:45,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=176528.0, ans=0.07
+2024-08-26 06:42:51,113 INFO [train.py:1114] (0/4) Epoch 14, batch 750, loss[loss=0.2169, simple_loss=0.2821, pruned_loss=0.05548, ctc_loss=0.1016, over 19505.00 frames. ], tot_loss[loss=0.22, simple_loss=0.282, pruned_loss=0.05738, ctc_loss=0.1081, over 3775051.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:42:56,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 06:43:07,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 06:43:18,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=176634.66666666666, ans=0.0
+2024-08-26 06:44:13,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=176741.33333333334, ans=0.125
+2024-08-26 06:44:25,339 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=176794.66666666666, ans=0.125
+2024-08-26 06:44:27,361 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.803e+02 2.358e+02 3.080e+02 4.835e+02, threshold=4.715e+02, percent-clipped=7.0
+2024-08-26 06:44:29,884 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:44:32,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=176794.66666666666, ans=0.04949747468305833
+2024-08-26 06:44:39,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176794.66666666666, ans=0.1
+2024-08-26 06:44:41,993 INFO [train.py:1114] (0/4) Epoch 14, batch 800, loss[loss=0.1853, simple_loss=0.2533, pruned_loss=0.04223, ctc_loss=0.08197, over 19409.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2813, pruned_loss=0.05681, ctc_loss=0.1072, over 3795596.12 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:44:52,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=176901.33333333334, ans=0.125
+2024-08-26 06:44:55,090 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:45:37,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=177061.33333333334, ans=0.125
+2024-08-26 06:45:52,035 INFO [train.py:1114] (0/4) Epoch 14, batch 850, loss[loss=0.2294, simple_loss=0.2995, pruned_loss=0.05749, ctc_loss=0.1109, over 19642.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2814, pruned_loss=0.05716, ctc_loss=0.1078, over 3814865.22 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:46:17,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=177114.66666666666, ans=0.07
+2024-08-26 06:46:47,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.55 vs. limit=15.0
+2024-08-26 06:46:49,512 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.87 vs. limit=15.0
+2024-08-26 06:47:11,725 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.690e+02 1.974e+02 2.351e+02 3.908e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 06:47:24,575 INFO [train.py:1114] (0/4) Epoch 14, batch 900, loss[loss=0.2022, simple_loss=0.263, pruned_loss=0.05119, ctc_loss=0.09751, over 19784.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2818, pruned_loss=0.05748, ctc_loss=0.1082, over 3818440.25 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:47:27,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=177381.33333333334, ans=0.0
+2024-08-26 06:47:35,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=177434.66666666666, ans=0.2
+2024-08-26 06:48:02,588 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.44 vs. limit=12.0
+2024-08-26 06:48:06,006 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.36 vs. limit=15.0
+2024-08-26 06:48:38,038 INFO [train.py:1114] (0/4) Epoch 14, batch 950, loss[loss=0.209, simple_loss=0.2698, pruned_loss=0.05439, ctc_loss=0.09887, over 19503.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2821, pruned_loss=0.05769, ctc_loss=0.1086, over 3820636.09 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:48:41,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=177648.0, ans=0.2
+2024-08-26 06:48:52,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-26 06:49:08,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=177754.66666666666, ans=0.0
+2024-08-26 06:49:15,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=177754.66666666666, ans=0.0
+2024-08-26 06:49:28,945 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=177808.0, ans=0.0
+2024-08-26 06:49:34,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=177861.33333333334, ans=0.0
+2024-08-26 06:49:36,189 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.810e+02 2.092e+02 2.519e+02 4.035e+02, threshold=4.185e+02, percent-clipped=1.0
+2024-08-26 06:49:44,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=177861.33333333334, ans=0.025
+2024-08-26 06:50:04,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=177861.33333333334, ans=0.0
+2024-08-26 06:50:06,722 INFO [train.py:1114] (0/4) Epoch 14, batch 1000, loss[loss=0.2074, simple_loss=0.2651, pruned_loss=0.05506, ctc_loss=0.09895, over 19854.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2831, pruned_loss=0.05852, ctc_loss=0.1102, over 3816509.39 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:50:17,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=177968.0, ans=0.125
+2024-08-26 06:50:31,304 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177968.0, ans=0.1
+2024-08-26 06:50:46,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=178074.66666666666, ans=0.0
+2024-08-26 06:50:47,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=178074.66666666666, ans=0.125
+2024-08-26 06:50:50,779 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:51:23,205 INFO [train.py:1114] (0/4) Epoch 14, batch 1050, loss[loss=0.2143, simple_loss=0.2849, pruned_loss=0.05178, ctc_loss=0.1003, over 19845.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2821, pruned_loss=0.05799, ctc_loss=0.1092, over 3823872.80 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:51:40,779 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.45 vs. limit=10.0
+2024-08-26 06:51:41,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=178234.66666666666, ans=0.125
+2024-08-26 06:51:51,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=178288.0, ans=0.125
+2024-08-26 06:52:05,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=178341.33333333334, ans=0.0
+2024-08-26 06:52:17,054 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 1.767e+02 2.034e+02 2.568e+02 4.426e+02, threshold=4.067e+02, percent-clipped=2.0
+2024-08-26 06:52:20,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=178394.66666666666, ans=0.125
+2024-08-26 06:52:39,173 INFO [train.py:1114] (0/4) Epoch 14, batch 1100, loss[loss=0.2002, simple_loss=0.2668, pruned_loss=0.04857, ctc_loss=0.09117, over 19612.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2815, pruned_loss=0.05758, ctc_loss=0.1083, over 3831458.89 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:52:48,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=178448.0, ans=0.2
+2024-08-26 06:52:49,705 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.57 vs. limit=22.5
+2024-08-26 06:53:36,508 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=8.76 vs. limit=12.0
+2024-08-26 06:53:39,054 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.76 vs. limit=15.0
+2024-08-26 06:53:49,716 INFO [train.py:1114] (0/4) Epoch 14, batch 1150, loss[loss=0.2215, simple_loss=0.2785, pruned_loss=0.05933, ctc_loss=0.1145, over 19593.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2813, pruned_loss=0.05756, ctc_loss=0.1084, over 3828830.16 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:54:09,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=178768.0, ans=0.125
+2024-08-26 06:54:11,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=178768.0, ans=0.2
+2024-08-26 06:54:24,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=178821.33333333334, ans=0.025
+2024-08-26 06:54:29,732 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.54 vs. limit=15.0
+2024-08-26 06:54:34,377 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=178874.66666666666, ans=0.0
+2024-08-26 06:54:47,795 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.255e+02 1.672e+02 1.916e+02 2.259e+02 4.129e+02, threshold=3.832e+02, percent-clipped=1.0
+2024-08-26 06:54:58,197 INFO [train.py:1114] (0/4) Epoch 14, batch 1200, loss[loss=0.2426, simple_loss=0.3023, pruned_loss=0.06683, ctc_loss=0.123, over 19839.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2827, pruned_loss=0.058, ctc_loss=0.1094, over 3825820.21 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:55:10,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=179034.66666666666, ans=0.1
+2024-08-26 06:55:13,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=179034.66666666666, ans=0.0
+2024-08-26 06:55:20,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=179034.66666666666, ans=0.1
+2024-08-26 06:55:20,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=179034.66666666666, ans=0.0
+2024-08-26 06:55:36,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179141.33333333334, ans=0.1
+2024-08-26 06:56:27,985 INFO [train.py:1114] (0/4) Epoch 14, batch 1250, loss[loss=0.2243, simple_loss=0.2905, pruned_loss=0.0578, ctc_loss=0.1063, over 19531.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2826, pruned_loss=0.05765, ctc_loss=0.1084, over 3843334.00 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:56:32,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=179248.0, ans=0.125
+2024-08-26 06:56:44,516 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.94 vs. limit=6.0
+2024-08-26 06:56:47,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=179301.33333333334, ans=0.2
+2024-08-26 06:57:56,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=179408.0, ans=0.125
+2024-08-26 06:57:56,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=179408.0, ans=0.0
+2024-08-26 06:57:57,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=179408.0, ans=0.2
+2024-08-26 06:58:13,350 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.864e+02 2.134e+02 2.537e+02 3.723e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-08-26 06:58:14,065 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=6.78 vs. limit=15.0
+2024-08-26 06:58:31,276 INFO [train.py:1114] (0/4) Epoch 14, batch 1300, loss[loss=0.2353, simple_loss=0.2991, pruned_loss=0.0609, ctc_loss=0.1242, over 18833.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.282, pruned_loss=0.05724, ctc_loss=0.1077, over 3847330.15 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:58:36,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=179514.66666666666, ans=0.2
+2024-08-26 06:58:39,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-26 06:58:59,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=179514.66666666666, ans=0.2
+2024-08-26 06:59:00,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=179568.0, ans=0.2
+2024-08-26 06:59:10,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=179568.0, ans=0.125
+2024-08-26 06:59:20,342 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.86 vs. limit=15.0
+2024-08-26 06:59:50,578 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.45 vs. limit=6.0
+2024-08-26 07:00:13,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=179728.0, ans=0.125
+2024-08-26 07:00:24,875 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:00:32,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=179728.0, ans=0.0
+2024-08-26 07:00:35,357 INFO [train.py:1114] (0/4) Epoch 14, batch 1350, loss[loss=0.2216, simple_loss=0.279, pruned_loss=0.05973, ctc_loss=0.1115, over 19770.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2818, pruned_loss=0.05726, ctc_loss=0.1077, over 3858687.41 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:01:19,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=179834.66666666666, ans=0.025
+2024-08-26 07:02:10,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=179941.33333333334, ans=0.125
+2024-08-26 07:02:26,030 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.690e+02 1.870e+02 2.214e+02 3.706e+02, threshold=3.740e+02, percent-clipped=0.0
+2024-08-26 07:02:31,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.85 vs. limit=22.5
+2024-08-26 07:02:47,345 INFO [train.py:1114] (0/4) Epoch 14, batch 1400, loss[loss=0.2116, simple_loss=0.2713, pruned_loss=0.05417, ctc_loss=0.1088, over 19674.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2819, pruned_loss=0.05722, ctc_loss=0.1075, over 3864940.55 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:03:02,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=180048.0, ans=0.015
+2024-08-26 07:03:29,696 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=21.26 vs. limit=22.5
+2024-08-26 07:03:34,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.94 vs. limit=22.5
+2024-08-26 07:03:38,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=180154.66666666666, ans=0.125
+2024-08-26 07:04:20,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=180261.33333333334, ans=0.125
+2024-08-26 07:04:21,203 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.14 vs. limit=6.0
+2024-08-26 07:04:25,282 INFO [train.py:1114] (0/4) Epoch 14, batch 1450, loss[loss=0.2382, simple_loss=0.2963, pruned_loss=0.06556, ctc_loss=0.1224, over 19668.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2825, pruned_loss=0.05746, ctc_loss=0.1081, over 3862868.32 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:04:46,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=180368.0, ans=0.0
+2024-08-26 07:04:46,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180368.0, ans=0.1
+2024-08-26 07:05:05,330 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:05:21,404 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=6.98 vs. limit=15.0
+2024-08-26 07:05:24,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.47 vs. limit=15.0
+2024-08-26 07:05:28,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-26 07:05:40,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=180528.0, ans=0.025
+2024-08-26 07:05:41,163 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.716e+02 1.963e+02 2.339e+02 6.137e+02, threshold=3.925e+02, percent-clipped=1.0
+2024-08-26 07:05:57,995 INFO [train.py:1114] (0/4) Epoch 14, batch 1500, loss[loss=0.2157, simple_loss=0.2815, pruned_loss=0.055, ctc_loss=0.09994, over 19588.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2827, pruned_loss=0.05756, ctc_loss=0.1083, over 3861664.46 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:07:02,589 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.44 vs. limit=12.0
+2024-08-26 07:07:07,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=180741.33333333334, ans=0.05
+2024-08-26 07:07:18,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=180794.66666666666, ans=0.125
+2024-08-26 07:07:26,408 INFO [train.py:1114] (0/4) Epoch 14, batch 1550, loss[loss=0.2374, simple_loss=0.2973, pruned_loss=0.06405, ctc_loss=0.1236, over 19602.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2822, pruned_loss=0.05747, ctc_loss=0.1083, over 3847541.67 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:07:52,872 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.74 vs. limit=15.0
+2024-08-26 07:07:54,873 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:07:58,460 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:08:00,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=180954.66666666666, ans=0.2
+2024-08-26 07:08:20,838 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.735e+02 1.996e+02 2.323e+02 4.332e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-26 07:08:46,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=181114.66666666666, ans=0.2
+2024-08-26 07:08:46,938 INFO [train.py:1114] (0/4) Epoch 14, batch 1600, loss[loss=0.2169, simple_loss=0.2818, pruned_loss=0.05606, ctc_loss=0.09974, over 19828.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2814, pruned_loss=0.057, ctc_loss=0.1074, over 3837473.80 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:09:17,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=181168.0, ans=0.125
+2024-08-26 07:09:21,125 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:09:26,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=181221.33333333334, ans=0.125
+2024-08-26 07:09:31,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=181221.33333333334, ans=0.2
+2024-08-26 07:09:38,999 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=181274.66666666666, ans=0.1
+2024-08-26 07:10:18,945 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=181328.0, ans=0.125
+2024-08-26 07:10:22,357 INFO [train.py:1114] (0/4) Epoch 14, batch 1650, loss[loss=0.2128, simple_loss=0.2888, pruned_loss=0.049, ctc_loss=0.09699, over 19654.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2814, pruned_loss=0.05712, ctc_loss=0.1076, over 3833134.13 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:10:23,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=181381.33333333334, ans=0.5
+2024-08-26 07:10:36,234 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:10:36,829 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=181434.66666666666, ans=0.0
+2024-08-26 07:11:10,771 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.857e+02 2.243e+02 2.957e+02 5.258e+02, threshold=4.486e+02, percent-clipped=5.0
+2024-08-26 07:11:28,242 INFO [train.py:1114] (0/4) Epoch 14, batch 1700, loss[loss=0.1856, simple_loss=0.2466, pruned_loss=0.04521, ctc_loss=0.0856, over 19703.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2809, pruned_loss=0.05656, ctc_loss=0.1064, over 3847514.77 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:11:40,082 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.63 vs. limit=6.0
+2024-08-26 07:12:23,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=181914.66666666666, ans=0.025
+2024-08-26 07:12:24,392 INFO [train.py:1114] (0/4) Epoch 14, batch 1750, loss[loss=0.1641, simple_loss=0.2277, pruned_loss=0.03648, ctc_loss=0.06901, over 19617.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2805, pruned_loss=0.0563, ctc_loss=0.1061, over 3852353.23 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:12:30,054 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.76 vs. limit=22.5
+2024-08-26 07:12:35,920 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.80 vs. limit=15.0
+2024-08-26 07:13:14,689 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=182021.33333333334, ans=0.0
+2024-08-26 07:13:19,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=182021.33333333334, ans=0.125
+2024-08-26 07:13:35,902 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.769e+02 2.123e+02 2.747e+02 4.234e+02, threshold=4.245e+02, percent-clipped=0.0
+2024-08-26 07:13:38,163 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182128.0, ans=0.1
+2024-08-26 07:13:42,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=182128.0, ans=0.1
+2024-08-26 07:13:51,683 INFO [train.py:1114] (0/4) Epoch 14, batch 1800, loss[loss=0.2173, simple_loss=0.2756, pruned_loss=0.05814, ctc_loss=0.1068, over 19592.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2809, pruned_loss=0.05647, ctc_loss=0.1064, over 3852353.41 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:14:13,902 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.15 vs. limit=6.0
+2024-08-26 07:14:25,172 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.21 vs. limit=15.0
+2024-08-26 07:14:33,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff3.min_abs, batch_count=182341.33333333334, ans=0.2
+2024-08-26 07:14:36,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=182341.33333333334, ans=0.125
+2024-08-26 07:14:49,558 INFO [train.py:1114] (0/4) Epoch 14, batch 1850, loss[loss=0.2094, simple_loss=0.282, pruned_loss=0.05006, ctc_loss=0.09181, over 19596.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.281, pruned_loss=0.05648, ctc_loss=0.1063, over 3854748.92 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:14:53,201 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.29 vs. limit=10.0
+2024-08-26 07:14:57,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=182448.0, ans=0.125
+2024-08-26 07:14:58,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=182448.0, ans=0.125
+2024-08-26 07:15:01,500 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.34 vs. limit=15.0
+2024-08-26 07:15:14,017 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.02 vs. limit=12.0
+2024-08-26 07:15:20,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=182554.66666666666, ans=0.0
+2024-08-26 07:15:31,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=182608.0, ans=0.025
+2024-08-26 07:15:35,871 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.755e+02 2.000e+02 2.500e+02 5.147e+02, threshold=4.001e+02, percent-clipped=3.0
+2024-08-26 07:15:52,271 INFO [train.py:1114] (0/4) Epoch 14, batch 1900, loss[loss=0.2068, simple_loss=0.2914, pruned_loss=0.0443, ctc_loss=0.08373, over 19628.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2815, pruned_loss=0.05662, ctc_loss=0.1065, over 3861097.39 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:16:02,015 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.47 vs. limit=15.0
+2024-08-26 07:16:02,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=182714.66666666666, ans=0.025
+2024-08-26 07:16:12,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=182768.0, ans=0.125
+2024-08-26 07:16:18,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=182821.33333333334, ans=0.015
+2024-08-26 07:16:19,071 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:16:41,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=182928.0, ans=0.125
+2024-08-26 07:16:56,694 INFO [train.py:1114] (0/4) Epoch 14, batch 1950, loss[loss=0.1885, simple_loss=0.2594, pruned_loss=0.0431, ctc_loss=0.07879, over 19583.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2823, pruned_loss=0.05654, ctc_loss=0.1063, over 3870300.04 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:17:16,623 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.34 vs. limit=15.0
+2024-08-26 07:17:47,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183141.33333333334, ans=0.1
+2024-08-26 07:17:51,728 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.75 vs. limit=6.0
+2024-08-26 07:17:55,527 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.666e+02 1.941e+02 2.281e+02 4.229e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-26 07:18:05,617 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:18:11,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=183194.66666666666, ans=0.1
+2024-08-26 07:18:14,113 INFO [train.py:1114] (0/4) Epoch 14, batch 2000, loss[loss=0.1698, simple_loss=0.2401, pruned_loss=0.03565, ctc_loss=0.07049, over 19643.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2829, pruned_loss=0.05695, ctc_loss=0.1072, over 3854479.09 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 64.0
+2024-08-26 07:18:30,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=183301.33333333334, ans=0.2
+2024-08-26 07:18:43,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=183354.66666666666, ans=0.125
+2024-08-26 07:19:11,484 INFO [train.py:1114] (0/4) Epoch 14, batch 2050, loss[loss=0.1877, simple_loss=0.2491, pruned_loss=0.04625, ctc_loss=0.08475, over 19725.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2821, pruned_loss=0.05706, ctc_loss=0.1073, over 3850346.95 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:19:38,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=183568.0, ans=0.125
+2024-08-26 07:19:38,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=183568.0, ans=0.5
+2024-08-26 07:19:43,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-26 07:19:53,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=183674.66666666666, ans=0.125
+2024-08-26 07:20:51,599 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.705e+02 1.994e+02 2.461e+02 3.917e+02, threshold=3.988e+02, percent-clipped=1.0
+2024-08-26 07:24:48,279 INFO [train.py:1114] (0/4) Epoch 14, batch 2100, loss[loss=0.2134, simple_loss=0.2771, pruned_loss=0.05456, ctc_loss=0.1011, over 19758.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.281, pruned_loss=0.05658, ctc_loss=0.1065, over 3857367.71 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:41:49,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=183834.66666666666, ans=0.125
+2024-08-26 07:45:05,533 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.38 vs. limit=10.0
+2024-08-26 07:50:50,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=183888.0, ans=0.125
+2024-08-26 08:13:15,360 INFO [train.py:1114] (0/4) Epoch 14, batch 2150, loss[loss=0.2287, simple_loss=0.2881, pruned_loss=0.0614, ctc_loss=0.1163, over 19570.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2811, pruned_loss=0.05695, ctc_loss=0.1068, over 3868244.08 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 08:24:49,148 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.19 vs. limit=12.0
+2024-08-26 08:31:59,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=184101.33333333334, ans=0.2
+2024-08-26 08:40:58,792 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.82 vs. limit=15.0
+2024-08-26 08:59:37,607 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.765e+02 2.052e+02 2.784e+02 6.261e+02, threshold=4.104e+02, percent-clipped=7.0
+2024-08-26 09:03:09,781 INFO [train.py:1114] (0/4) Epoch 14, batch 2200, loss[loss=0.2102, simple_loss=0.2827, pruned_loss=0.05042, ctc_loss=0.09214, over 19567.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.281, pruned_loss=0.05674, ctc_loss=0.1065, over 3866837.54 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 09:05:46,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=184314.66666666666, ans=0.125
+2024-08-26 09:05:47,259 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.76 vs. limit=22.5
+2024-08-26 09:11:59,956 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=184368.0, ans=0.2
+2024-08-26 09:12:27,213 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.57 vs. limit=15.0
+2024-08-26 09:16:07,865 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=184421.33333333334, ans=0.125
+2024-08-26 09:17:25,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184421.33333333334, ans=0.1
+2024-08-26 09:18:58,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=184474.66666666666, ans=0.1
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-1
new file mode 100644
index 0000000000000000000000000000000000000000..5cd3ef91edecdb8892b315ee1903c8529e197518
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-1
@@ -0,0 +1,4963 @@
+2024-08-25 02:23:27,598 INFO [train.py:1182] (1/4) Training started
+2024-08-25 02:23:28,628 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-25 02:23:28,631 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-25 02:23:28,631 INFO [train.py:1212] (1/4) About to create model
+2024-08-25 02:23:29,330 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-25 02:23:29,457 INFO [train.py:1231] (1/4) Using DDP
+2024-08-25 02:23:51,118 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-25 02:23:53,073 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-25 02:23:53,074 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-25 02:23:53,293 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-25 02:23:53,346 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-25 02:23:53,652 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-25 02:23:53,652 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 02:27:49,592 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=9.26 vs. limit=3.0
+2024-08-25 02:27:50,706 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12815MB
+2024-08-25 02:27:52,179 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-25 02:28:01,908 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-25 02:28:03,367 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-25 02:28:25,926 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=256, metric=46.49 vs. limit=7.5
+2024-08-25 02:28:26,159 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-25 02:28:27,781 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-25 02:29:15,517 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.95 vs. limit=7.5
+2024-08-25 02:29:16,119 INFO [train.py:1114] (1/4) Epoch 1, batch 0, loss[loss=8.848, simple_loss=7.21, pruned_loss=6.822, ctc_loss=4.776, over 19825.00 frames. ], tot_loss[loss=8.848, simple_loss=7.21, pruned_loss=6.822, ctc_loss=4.776, over 19825.00 frames. ], batch size: 49, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 02:29:16,120 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 02:29:29,444 INFO [train.py:1146] (1/4) Epoch 1, validation: loss=8.973, simple_loss=7.311, pruned_loss=6.819, ctc_loss=4.895, over 944034.00 frames.
+2024-08-25 02:29:29,445 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-25 02:29:37,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=0.0, ans=0.2
+2024-08-25 02:29:37,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=0.0, ans=0.1
+2024-08-25 02:29:40,026 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.31 vs. limit=7.5
+2024-08-25 02:29:47,571 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.30 vs. limit=7.5
+2024-08-25 02:30:12,490 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=19.35 vs. limit=7.52
+2024-08-25 02:30:23,438 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.714e+03 3.750e+03 4.817e+03 5.615e+03 6.551e+03, threshold=1.927e+04, percent-clipped=0.0
+2024-08-25 02:30:23,976 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=15.55 vs. limit=7.52
+2024-08-25 02:30:47,098 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=72.34 vs. limit=7.52
+2024-08-25 02:32:25,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106.66666666666667, ans=0.4866666666666667
+2024-08-25 02:32:26,056 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.867e+02 1.019e+03 3.714e+03 5.063e+03 6.846e+03, threshold=1.486e+04, percent-clipped=0.0
+2024-08-25 02:32:44,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=106.66666666666667, ans=0.196
+2024-08-25 02:32:44,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=71.66 vs. limit=7.54
+2024-08-25 02:33:14,345 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=39.59 vs. limit=5.08
+2024-08-25 02:33:20,404 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=323.22 vs. limit=7.56
+2024-08-25 02:33:28,329 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.22 vs. limit=3.032
+2024-08-25 02:33:29,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=213.33333333333334, ans=0.49
+2024-08-25 02:33:34,442 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 02:33:36,820 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.544e+02 7.649e+02 1.076e+03 3.731e+03 6.846e+03, threshold=4.304e+03, percent-clipped=0.0
+2024-08-25 02:33:41,047 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=33.01 vs. limit=7.58
+2024-08-25 02:33:47,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=213.33333333333334, ans=0.8925333333333334
+2024-08-25 02:33:59,696 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=109.20 vs. limit=7.66
+2024-08-25 02:34:03,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=266.6666666666667, ans=0.20400000000000001
+2024-08-25 02:34:04,704 INFO [train.py:1114] (1/4) Epoch 1, batch 50, loss[loss=1.447, simple_loss=1.077, pruned_loss=1.253, ctc_loss=1.145, over 19710.00 frames. ], tot_loss[loss=3.556, simple_loss=2.937, pruned_loss=2.558, ctc_loss=1.778, over 844643.19 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 02:34:24,008 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=95.63 vs. limit=7.6
+2024-08-25 02:34:25,213 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=40.68 vs. limit=7.62
+2024-08-25 02:34:33,142 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=42.81 vs. limit=7.62
+2024-08-25 02:34:34,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=320.0, ans=0.008
+2024-08-25 02:34:41,310 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=35.33 vs. limit=5.08
+2024-08-25 02:35:24,778 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=11.15 vs. limit=4.149333333333333
+2024-08-25 02:35:30,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=426.6666666666667, ans=0.48
+2024-08-25 02:35:32,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=426.6666666666667, ans=0.184
+2024-08-25 02:35:36,419 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=38.62 vs. limit=7.82
+2024-08-25 02:37:51,538 INFO [train.py:1114] (1/4) Epoch 1, batch 100, loss[loss=1.337, simple_loss=0.9588, pruned_loss=1.217, ctc_loss=1.128, over 19760.00 frames. ], tot_loss[loss=2.415, simple_loss=1.917, pruned_loss=1.866, ctc_loss=1.472, over 1498832.64 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 02:37:55,735 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.639e+01 1.517e+02 3.832e+02 1.019e+03 9.054e+03, threshold=7.665e+02, percent-clipped=2.0
+2024-08-25 02:38:07,589 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=586.6666666666666, ans=0.2941333333333333
+2024-08-25 02:38:08,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=586.6666666666666, ans=0.0868
+2024-08-25 02:38:17,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=586.6666666666666, ans=7.94
+2024-08-25 02:38:23,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=586.6666666666666, ans=5.366666666666666
+2024-08-25 02:38:34,894 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=113.18 vs. limit=7.72
+2024-08-25 02:38:34,957 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=44.73 vs. limit=7.94
+2024-08-25 02:38:53,873 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=38.46 vs. limit=8.02
+2024-08-25 02:38:56,501 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=133.57 vs. limit=7.76
+2024-08-25 02:39:01,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=693.3333333333334, ans=0.4675
+2024-08-25 02:39:02,225 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=71.15 vs. limit=5.346666666666667
+2024-08-25 02:39:02,671 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=208.72 vs. limit=5.346666666666667
+2024-08-25 02:39:10,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_na.min_abs, batch_count=746.6666666666666, ans=0.006986666666666667
+2024-08-25 02:39:11,009 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=57.76 vs. limit=7.78
+2024-08-25 02:39:12,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=746.6666666666666, ans=0.2925333333333333
+2024-08-25 02:39:12,613 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=131.28 vs. limit=5.373333333333333
+2024-08-25 02:39:18,428 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=38.71 vs. limit=5.1866666666666665
+2024-08-25 02:39:22,877 INFO [train.py:1114] (1/4) Epoch 1, batch 150, loss[loss=1.132, simple_loss=0.7871, pruned_loss=0.9836, ctc_loss=1.049, over 19707.00 frames. ], tot_loss[loss=1.944, simple_loss=1.493, pruned_loss=1.565, ctc_loss=1.345, over 2028052.72 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 02:39:26,566 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=268.75 vs. limit=7.8
+2024-08-25 02:39:28,842 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=800.0, ans=0.872
+2024-08-25 02:39:30,645 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=12.42 vs. limit=5.2
+2024-08-25 02:39:36,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=97.72 vs. limit=7.82
+2024-08-25 02:39:46,102 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=91.39 vs. limit=7.82
+2024-08-25 02:39:46,255 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=180.44 vs. limit=4.1706666666666665
+2024-08-25 02:39:51,668 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.00 vs. limit=8.18
+2024-08-25 02:39:55,411 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=14.20 vs. limit=7.84
+2024-08-25 02:40:03,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=960.0, ans=0.455
+2024-08-25 02:40:06,955 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=30.67 vs. limit=5.48
+2024-08-25 02:40:08,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=327.07 vs. limit=7.86
+2024-08-25 02:40:18,013 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=51.96 vs. limit=7.88
+2024-08-25 02:40:27,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=28.21 vs. limit=5.506666666666667
+2024-08-25 02:40:31,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=1066.6666666666667, ans=0.16
+2024-08-25 02:40:32,728 INFO [train.py:1114] (1/4) Epoch 1, batch 200, loss[loss=1.274, simple_loss=0.8773, pruned_loss=1.021, ctc_loss=1.229, over 18088.00 frames. ], tot_loss[loss=1.688, simple_loss=1.263, pruned_loss=1.375, ctc_loss=1.278, over 2435338.20 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 02:40:33,215 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=9.13 vs. limit=5.266666666666667
+2024-08-25 02:40:33,700 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=31.47 vs. limit=7.9
+2024-08-25 02:40:34,962 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=10.88 vs. limit=4.426666666666667
+2024-08-25 02:40:36,941 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.587e+01 1.185e+02 1.545e+02 1.999e+02 4.229e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-25 02:40:57,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1066.6666666666667, ans=0.28933333333333333
+2024-08-25 02:40:59,298 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=18.70 vs. limit=7.9
+2024-08-25 02:41:07,561 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=82.19 vs. limit=7.92
+2024-08-25 02:41:13,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=1120.0, ans=0.8608
+2024-08-25 02:41:15,434 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=13.22 vs. limit=5.28
+2024-08-25 02:41:15,516 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=84.98 vs. limit=7.92
+2024-08-25 02:41:17,927 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 02:41:22,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=1173.3333333333333, ans=0.445
+2024-08-25 02:41:24,100 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.90 vs. limit=8.38
+2024-08-25 02:41:31,116 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=28.45 vs. limit=8.38
+2024-08-25 02:41:34,073 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=95.54 vs. limit=7.94
+2024-08-25 02:41:45,032 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=59.78 vs. limit=7.96
+2024-08-25 02:41:50,908 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=181.32 vs. limit=7.96
+2024-08-25 02:41:57,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=1280.0, ans=0.44
+2024-08-25 02:42:06,918 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=113.56 vs. limit=5.64
+2024-08-25 02:42:11,294 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=26.09 vs. limit=8.0
+2024-08-25 02:42:11,847 INFO [train.py:1114] (1/4) Epoch 1, batch 250, loss[loss=1.243, simple_loss=0.8419, pruned_loss=0.991, ctc_loss=1.214, over 19372.00 frames. ], tot_loss[loss=1.534, simple_loss=1.122, pruned_loss=1.247, ctc_loss=1.242, over 2755780.74 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 02:42:15,192 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=12.07 vs. limit=5.333333333333333
+2024-08-25 02:42:26,884 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=77.01 vs. limit=8.0
+2024-08-25 02:42:32,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=1386.6666666666667, ans=0.5
+2024-08-25 02:42:34,289 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=6.14 vs. limit=4.554666666666667
+2024-08-25 02:42:38,149 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=10.09 vs. limit=5.346666666666667
+2024-08-25 02:42:39,409 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=106.23 vs. limit=8.02
+2024-08-25 02:42:48,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=1440.0, ans=0.0676
+2024-08-25 02:42:52,287 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=5.708e+00
+2024-08-25 02:42:52,575 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=89.65 vs. limit=8.04
+2024-08-25 02:42:54,144 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=76.40 vs. limit=8.04
+2024-08-25 02:42:54,540 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=39.68 vs. limit=8.04
+2024-08-25 02:42:56,879 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=114.09 vs. limit=8.06
+2024-08-25 02:43:01,020 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=26.58 vs. limit=8.620000000000001
+2024-08-25 02:43:02,715 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.53 vs. limit=8.620000000000001
+2024-08-25 02:43:08,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=1546.6666666666667, ans=0.4275
+2024-08-25 02:43:10,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=1546.6666666666667, ans=0.163
+2024-08-25 02:43:14,521 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=249.09 vs. limit=8.08
+2024-08-25 02:43:17,758 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=7.10 vs. limit=5.386666666666667
+2024-08-25 02:43:18,724 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=32.79 vs. limit=4.618666666666667
+2024-08-25 02:43:21,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=1546.6666666666667, ans=0.06520000000000001
+2024-08-25 02:43:23,906 INFO [train.py:1114] (1/4) Epoch 1, batch 300, loss[loss=1.224, simple_loss=0.819, pruned_loss=0.9581, ctc_loss=1.207, over 19521.00 frames. ], tot_loss[loss=1.433, simple_loss=1.028, pruned_loss=1.158, ctc_loss=1.218, over 3000944.94 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 02:43:24,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=1600.0, ans=0.16
+2024-08-25 02:43:24,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=74.68 vs. limit=8.7
+2024-08-25 02:43:26,075 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=40.53 vs. limit=8.1
+2024-08-25 02:43:27,978 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 8.516e+01 1.281e+02 1.784e+02 2.457e+02 1.092e+03, threshold=3.568e+02, percent-clipped=12.0
+2024-08-25 02:43:30,110 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=9.03 vs. limit=5.4
+2024-08-25 02:43:37,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=1653.3333333333333, ans=0.157
+2024-08-25 02:43:44,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=1653.3333333333333, ans=0.29333333333333333
+2024-08-25 02:43:50,587 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=9.98 vs. limit=5.413333333333333
+2024-08-25 02:44:00,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=1706.6666666666667, ans=0.42
+2024-08-25 02:44:00,830 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=28.57 vs. limit=5.8533333333333335
+2024-08-25 02:44:03,217 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=28.73 vs. limit=8.78
+2024-08-25 02:44:03,408 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=8.98 vs. limit=5.426666666666667
+2024-08-25 02:44:08,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=1760.0, ans=0.4175
+2024-08-25 02:44:19,198 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=9.00 vs. limit=4.704
+2024-08-25 02:44:24,616 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=182.27 vs. limit=5.906666666666666
+2024-08-25 02:44:27,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.29 vs. limit=8.86
+2024-08-25 02:44:29,687 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=9.306e-01
+2024-08-25 02:44:31,621 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.29 vs. limit=8.86
+2024-08-25 02:44:31,634 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=1813.3333333333333, ans=8.86
+2024-08-25 02:44:31,892 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=24.75 vs. limit=8.18
+2024-08-25 02:44:35,055 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=34.33 vs. limit=8.18
+2024-08-25 02:44:36,730 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.97 vs. limit=5.906666666666666
+2024-08-25 02:44:37,794 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=1866.6666666666667, ans=0.4125
+2024-08-25 02:44:38,844 INFO [train.py:1114] (1/4) Epoch 1, batch 350, loss[loss=1.019, simple_loss=0.6725, pruned_loss=0.7912, ctc_loss=1.006, over 19751.00 frames. ], tot_loss[loss=1.367, simple_loss=0.9642, pruned_loss=1.095, ctc_loss=1.205, over 3190480.39 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 02:44:42,760 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=24.80 vs. limit=5.933333333333334
+2024-08-25 02:44:44,233 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=1866.6666666666667, ans=8.2
+2024-08-25 02:44:46,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=1866.6666666666667, ans=0.4125
+2024-08-25 02:44:49,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1866.6666666666667, ans=0.2813333333333333
+2024-08-25 02:45:01,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=1920.0, ans=0.0568
+2024-08-25 02:45:02,255 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.82 vs. limit=5.48
+2024-08-25 02:45:06,435 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=41.56 vs. limit=8.24
+2024-08-25 02:45:15,737 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.15 vs. limit=8.98
+2024-08-25 02:45:17,497 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.35 vs. limit=8.98
+2024-08-25 02:45:26,347 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=2026.6666666666667, ans=0.24666666666666665
+2024-08-25 02:45:28,212 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=75.45 vs. limit=8.26
+2024-08-25 02:45:36,056 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=87.72 vs. limit=8.26
+2024-08-25 02:46:55,721 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=40.82 vs. limit=8.28
+2024-08-25 02:47:02,592 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=57.15 vs. limit=8.28
+2024-08-25 02:47:08,195 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=35.89 vs. limit=9.1
+2024-08-25 02:47:09,641 INFO [train.py:1114] (1/4) Epoch 1, batch 400, loss[loss=1.217, simple_loss=0.8005, pruned_loss=0.9253, ctc_loss=1.186, over 19878.00 frames. ], tot_loss[loss=1.317, simple_loss=0.9151, pruned_loss=1.045, ctc_loss=1.19, over 3342068.16 frames. ], batch size: 55, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 02:47:10,151 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=75.98 vs. limit=8.3
+2024-08-25 02:47:13,218 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=30.55 vs. limit=8.3
+2024-08-25 02:47:13,850 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 9.873e+01 1.501e+02 1.913e+02 2.464e+02 6.763e+02, threshold=3.826e+02, percent-clipped=7.0
+2024-08-25 02:47:14,614 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=21.34 vs. limit=8.3
+2024-08-25 02:47:20,288 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.98 vs. limit=4.8533333333333335
+2024-08-25 02:47:21,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=96.05 vs. limit=8.3
+2024-08-25 02:47:26,987 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=83.05 vs. limit=8.32
+2024-08-25 02:47:32,410 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=91.27 vs. limit=8.32
+2024-08-25 02:47:37,979 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=86.41 vs. limit=8.32
+2024-08-25 02:47:42,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1.whitening_limit, batch_count=2240.0, ans=5.5600000000000005
+2024-08-25 02:47:42,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=2240.0, ans=0.395
+2024-08-25 02:47:43,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.35 vs. limit=4.896
+2024-08-25 02:47:52,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=2293.3333333333335, ans=0.121
+2024-08-25 02:47:52,902 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=70.88 vs. limit=8.36
+2024-08-25 02:47:58,320 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.77 vs. limit=9.22
+2024-08-25 02:48:06,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=6.54 vs. limit=4.938666666666666
+2024-08-25 02:48:21,737 INFO [train.py:1114] (1/4) Epoch 1, batch 450, loss[loss=1.239, simple_loss=0.8127, pruned_loss=0.9277, ctc_loss=1.183, over 19609.00 frames. ], tot_loss[loss=1.283, simple_loss=0.88, pruned_loss=1.006, ctc_loss=1.178, over 3450267.38 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 02:48:27,713 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=28.04 vs. limit=8.4
+2024-08-25 02:48:29,274 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=6.63 vs. limit=4.96
+2024-08-25 02:48:30,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=2400.0, ans=0.11
+2024-08-25 02:48:38,474 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.78 vs. limit=9.34
+2024-08-25 02:48:40,962 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=33.24 vs. limit=8.42
+2024-08-25 02:48:45,544 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=26.46 vs. limit=8.42
+2024-08-25 02:48:54,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=2506.6666666666665, ans=0.3825
+2024-08-25 02:49:00,145 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=32.08 vs. limit=8.44
+2024-08-25 02:49:03,476 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.70 vs. limit=5.024
+2024-08-25 02:49:06,500 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.27 vs. limit=5.64
+2024-08-25 02:49:10,663 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=19.76 vs. limit=8.46
+2024-08-25 02:49:14,706 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.51 vs. limit=8.48
+2024-08-25 02:49:15,930 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=48.20 vs. limit=8.48
+2024-08-25 02:49:24,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=2613.3333333333335, ans=0.3775
+2024-08-25 02:49:27,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=2666.6666666666665, ans=0.375
+2024-08-25 02:49:28,254 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.61 vs. limit=9.5
+2024-08-25 02:49:28,637 INFO [train.py:1114] (1/4) Epoch 1, batch 500, loss[loss=1.183, simple_loss=0.7805, pruned_loss=0.8439, ctc_loss=1.128, over 19664.00 frames. ], tot_loss[loss=1.25, simple_loss=0.8495, pruned_loss=0.9663, ctc_loss=1.16, over 3545445.64 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:49:29,059 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=34.63 vs. limit=8.5
+2024-08-25 02:49:29,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.whiten.whitening_limit, batch_count=2666.6666666666665, ans=5.066666666666666
+2024-08-25 02:49:30,977 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=4.47 vs. limit=4.533333333333333
+2024-08-25 02:49:32,572 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.834e+02 2.411e+02 2.968e+02 6.409e+02, threshold=4.822e+02, percent-clipped=7.0
+2024-08-25 02:49:39,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=2666.6666666666665, ans=0.16666666666666669
+2024-08-25 02:49:53,848 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.40 vs. limit=8.52
+2024-08-25 02:49:57,712 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.28 vs. limit=9.58
+2024-08-25 02:49:58,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=2773.3333333333335, ans=0.37
+2024-08-25 02:50:04,697 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.95 vs. limit=8.54
+2024-08-25 02:50:04,855 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.50 vs. limit=8.54
+2024-08-25 02:50:19,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=2826.6666666666665, ans=0.08233333333333334
+2024-08-25 02:50:27,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=2880.0, ans=0.365
+2024-08-25 02:50:30,274 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=19.04 vs. limit=8.58
+2024-08-25 02:50:35,481 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=2880.0, ans=0.088
+2024-08-25 02:50:39,203 INFO [train.py:1114] (1/4) Epoch 1, batch 550, loss[loss=1.1, simple_loss=0.7419, pruned_loss=0.7123, ctc_loss=1.062, over 19313.00 frames. ], tot_loss[loss=1.219, simple_loss=0.8256, pruned_loss=0.9171, ctc_loss=1.137, over 3607780.47 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:50:46,742 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=8.69 vs. limit=5.733333333333333
+2024-08-25 02:50:56,022 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=2933.3333333333335, ans=0.5
+2024-08-25 02:50:59,966 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=1.349e+01
+2024-08-25 02:51:13,020 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.04 vs. limit=5.216
+2024-08-25 02:51:19,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=3040.0, ans=0.35750000000000004
+2024-08-25 02:51:23,388 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=6.071e+01
+2024-08-25 02:51:50,151 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=20.39 vs. limit=8.68
+2024-08-25 02:51:55,122 INFO [train.py:1114] (1/4) Epoch 1, batch 600, loss[loss=1.002, simple_loss=0.6891, pruned_loss=0.5985, ctc_loss=0.9742, over 19416.00 frames. ], tot_loss[loss=1.173, simple_loss=0.7967, pruned_loss=0.8506, ctc_loss=1.1, over 3665856.03 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:51:59,171 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.677e+02 3.553e+02 4.456e+02 9.241e+02, threshold=7.106e+02, percent-clipped=18.0
+2024-08-25 02:52:20,053 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.15 vs. limit=8.72
+2024-08-25 02:52:40,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=3360.0, ans=7.1
+2024-08-25 02:52:40,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=3360.0, ans=0.02439999999999999
+2024-08-25 02:53:01,029 INFO [train.py:1114] (1/4) Epoch 1, batch 650, loss[loss=0.8533, simple_loss=0.6013, pruned_loss=0.4708, ctc_loss=0.8244, over 19771.00 frames. ], tot_loss[loss=1.112, simple_loss=0.7585, pruned_loss=0.7746, ctc_loss=1.048, over 3716267.31 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 4.0
+2024-08-25 02:53:10,869 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.12 vs. limit=10.1
+2024-08-25 02:53:23,664 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.97 vs. limit=8.82
+2024-08-25 02:53:25,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=3573.3333333333335, ans=0.04899999999999999
+2024-08-25 02:53:25,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3573.3333333333335, ans=0.26426666666666665
+2024-08-25 02:53:39,276 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.07 vs. limit=3.544
+2024-08-25 02:53:48,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=3626.6666666666665, ans=0.06399999999999997
+2024-08-25 02:53:55,627 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.15 vs. limit=5.92
+2024-08-25 02:53:58,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 02:54:01,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 02:54:09,015 INFO [train.py:1114] (1/4) Epoch 1, batch 700, loss[loss=0.7897, simple_loss=0.5598, pruned_loss=0.4316, ctc_loss=0.7434, over 19715.00 frames. ], tot_loss[loss=1.05, simple_loss=0.7215, pruned_loss=0.7016, ctc_loss=0.9917, over 3749077.52 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:54:14,188 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.975e+02 3.878e+02 5.385e+02 1.936e+03, threshold=7.756e+02, percent-clipped=10.0
+2024-08-25 02:54:22,809 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.85 vs. limit=5.514666666666667
+2024-08-25 02:54:24,103 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.80 vs. limit=8.92
+2024-08-25 02:54:40,292 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.24 vs. limit=6.92
+2024-08-25 02:54:45,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=3840.0, ans=0.013600000000000001
+2024-08-25 02:54:57,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=3893.3333333333335, ans=0.3175
+2024-08-25 02:55:03,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=3946.6666666666665, ans=0.315
+2024-08-25 02:55:03,949 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.13 vs. limit=8.98
+2024-08-25 02:55:05,034 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.48 vs. limit=5.578666666666667
+2024-08-25 02:55:06,464 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.97 vs. limit=8.98
+2024-08-25 02:55:15,172 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.03 vs. limit=5.986666666666666
+2024-08-25 02:55:16,795 INFO [train.py:1114] (1/4) Epoch 1, batch 750, loss[loss=0.7512, simple_loss=0.55, pruned_loss=0.3742, ctc_loss=0.7029, over 19495.00 frames. ], tot_loss[loss=0.988, simple_loss=0.6852, pruned_loss=0.6333, ctc_loss=0.9334, over 3775728.42 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:55:18,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 02:55:37,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=4053.3333333333335, ans=0.7581333333333333
+2024-08-25 02:56:02,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=4160.0, ans=0.305
+2024-08-25 02:56:04,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=4160.0, ans=0.2624
+2024-08-25 02:56:15,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=4213.333333333333, ans=0.3025
+2024-08-25 02:56:15,588 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.74 vs. limit=9.08
+2024-08-25 02:56:23,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=4266.666666666667, ans=0.025
+2024-08-25 02:56:24,844 INFO [train.py:1114] (1/4) Epoch 1, batch 800, loss[loss=0.6423, simple_loss=0.4807, pruned_loss=0.3093, ctc_loss=0.577, over 19827.00 frames. ], tot_loss[loss=0.9285, simple_loss=0.6512, pruned_loss=0.5711, ctc_loss=0.8726, over 3797367.18 frames. ], batch size: 49, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:56:25,437 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.31 vs. limit=10.7
+2024-08-25 02:56:29,872 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.945e+02 3.956e+02 5.210e+02 9.107e+02, threshold=7.913e+02, percent-clipped=4.0
+2024-08-25 02:56:57,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=4373.333333333333, ans=0.29500000000000004
+2024-08-25 02:57:05,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=4426.666666666667, ans=0.2557333333333333
+2024-08-25 02:57:07,631 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=4426.666666666667, ans=0.07
+2024-08-25 02:57:15,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=4426.666666666667, ans=0.04822222222222222
+2024-08-25 02:57:30,597 INFO [train.py:1114] (1/4) Epoch 1, batch 850, loss[loss=0.6783, simple_loss=0.5149, pruned_loss=0.3159, ctc_loss=0.602, over 19671.00 frames. ], tot_loss[loss=0.8746, simple_loss=0.621, pruned_loss=0.5167, ctc_loss=0.8152, over 3815221.61 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:57:33,891 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.68 vs. limit=3.68
+2024-08-25 02:57:34,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=4533.333333333333, ans=0.2875
+2024-08-25 02:57:36,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=4533.333333333333, ans=0.009884057971014493
+2024-08-25 02:57:36,443 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=2.81 vs. limit=3.68
+2024-08-25 02:57:40,921 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=4533.333333333333, ans=0.7413333333333334
+2024-08-25 02:57:53,610 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=4.622e-01
+2024-08-25 02:58:06,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=4640.0, ans=0.2536
+2024-08-25 02:58:09,568 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.08 vs. limit=9.24
+2024-08-25 02:58:36,753 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.56 vs. limit=3.7119999999999997
+2024-08-25 02:58:42,813 INFO [train.py:1114] (1/4) Epoch 1, batch 900, loss[loss=0.5668, simple_loss=0.4403, pruned_loss=0.2531, ctc_loss=0.4886, over 19823.00 frames. ], tot_loss[loss=0.8287, simple_loss=0.5955, pruned_loss=0.4714, ctc_loss=0.7649, over 3819074.93 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 02:58:48,904 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.783e+02 3.682e+02 4.971e+02 1.764e+03, threshold=7.364e+02, percent-clipped=6.0
+2024-08-25 02:58:53,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=4800.0, ans=0.275
+2024-08-25 02:59:01,629 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=4853.333333333333, ans=0.7301333333333334
+2024-08-25 02:59:19,999 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.70 vs. limit=3.7359999999999998
+2024-08-25 02:59:20,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=4906.666666666667, ans=0.27
+2024-08-25 02:59:31,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=4960.0, ans=0.7264
+2024-08-25 02:59:50,550 INFO [train.py:1114] (1/4) Epoch 1, batch 950, loss[loss=0.579, simple_loss=0.4513, pruned_loss=0.2569, ctc_loss=0.4969, over 19494.00 frames. ], tot_loss[loss=0.7856, simple_loss=0.5719, pruned_loss=0.4309, ctc_loss=0.7168, over 3820940.71 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:00:29,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=5226.666666666667, ans=0.07
+2024-08-25 03:00:33,179 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:00:51,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=5280.0, ans=0.24719999999999998
+2024-08-25 03:00:54,649 INFO [train.py:1114] (1/4) Epoch 1, batch 1000, loss[loss=0.6094, simple_loss=0.4714, pruned_loss=0.2765, ctc_loss=0.5187, over 19853.00 frames. ], tot_loss[loss=0.751, simple_loss=0.5535, pruned_loss=0.3982, ctc_loss=0.6767, over 3817984.21 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:01:01,304 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.847e+02 3.463e+02 4.611e+02 9.717e+02, threshold=6.926e+02, percent-clipped=4.0
+2024-08-25 03:01:01,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 03:01:16,873 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=5386.666666666667, ans=0.0
+2024-08-25 03:01:16,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=5386.666666666667, ans=0.0
+2024-08-25 03:01:28,095 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.91 vs. limit=7.720000000000001
+2024-08-25 03:01:53,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=5546.666666666667, ans=0.24
+2024-08-25 03:01:55,477 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=5546.666666666667, ans=0.00966376811594203
+2024-08-25 03:01:57,086 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.69 vs. limit=6.2186666666666675
+2024-08-25 03:02:07,677 INFO [train.py:1114] (1/4) Epoch 1, batch 1050, loss[loss=0.6225, simple_loss=0.4916, pruned_loss=0.2723, ctc_loss=0.5204, over 19861.00 frames. ], tot_loss[loss=0.7153, simple_loss=0.5342, pruned_loss=0.367, ctc_loss=0.6359, over 3823633.07 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:02:17,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=5600.0, ans=0.194
+2024-08-25 03:02:39,265 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=5706.666666666667, ans=0.24293333333333333
+2024-08-25 03:02:46,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=5760.0, ans=0.6984
+2024-08-25 03:02:54,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=5760.0, ans=0.2864
+2024-08-25 03:02:59,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=5813.333333333333, ans=0.009605797101449275
+2024-08-25 03:03:13,747 INFO [train.py:1114] (1/4) Epoch 1, batch 1100, loss[loss=0.594, simple_loss=0.4711, pruned_loss=0.2582, ctc_loss=0.4939, over 19577.00 frames. ], tot_loss[loss=0.6842, simple_loss=0.5174, pruned_loss=0.3405, ctc_loss=0.6001, over 3830869.71 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:03:16,809 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.35 vs. limit=9.7
+2024-08-25 03:03:20,118 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.626e+02 3.754e+02 4.559e+02 6.965e+02, threshold=7.509e+02, percent-clipped=1.0
+2024-08-25 03:03:20,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=5866.666666666667, ans=0.22499999999999998
+2024-08-25 03:03:24,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5866.666666666667, ans=0.24133333333333332
+2024-08-25 03:03:24,912 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.06 vs. limit=6.346666666666667
+2024-08-25 03:03:27,111 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.89 vs. limit=11.940000000000001
+2024-08-25 03:03:34,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=5920.0, ans=0.22249999999999998
+2024-08-25 03:03:34,382 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:03:38,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=5973.333333333333, ans=0.025
+2024-08-25 03:03:46,424 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.99 vs. limit=9.74
+2024-08-25 03:03:47,940 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.44 vs. limit=9.74
+2024-08-25 03:03:48,511 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:03:57,868 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.94 vs. limit=12.02
+2024-08-25 03:04:14,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=6080.0, ans=0.21500000000000002
+2024-08-25 03:04:18,511 INFO [train.py:1114] (1/4) Epoch 1, batch 1150, loss[loss=0.5848, simple_loss=0.4674, pruned_loss=0.2497, ctc_loss=0.4878, over 19567.00 frames. ], tot_loss[loss=0.6605, simple_loss=0.5051, pruned_loss=0.3199, ctc_loss=0.5724, over 3829809.04 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:04:49,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=6240.0, ans=0.09899494936611666
+2024-08-25 03:04:54,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=6240.0, ans=0.20750000000000002
+2024-08-25 03:05:08,726 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.80 vs. limit=12.219999999999999
+2024-08-25 03:05:16,326 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.49 vs. limit=8.173333333333334
+2024-08-25 03:05:20,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=6346.666666666667, ans=0.2025
+2024-08-25 03:05:20,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=6346.666666666667, ans=0.2025
+2024-08-25 03:05:24,499 INFO [train.py:1114] (1/4) Epoch 1, batch 1200, loss[loss=0.5452, simple_loss=0.4531, pruned_loss=0.2213, ctc_loss=0.4323, over 19852.00 frames. ], tot_loss[loss=0.6399, simple_loss=0.495, pruned_loss=0.3023, ctc_loss=0.548, over 3825553.02 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 03:05:30,708 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.702e+02 3.344e+02 4.028e+02 1.038e+03, threshold=6.687e+02, percent-clipped=4.0
+2024-08-25 03:05:34,029 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.77 vs. limit=8.2
+2024-08-25 03:06:12,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 03:06:16,672 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.21 vs. limit=12.42
+2024-08-25 03:06:33,175 INFO [train.py:1114] (1/4) Epoch 1, batch 1250, loss[loss=0.5484, simple_loss=0.4521, pruned_loss=0.2282, ctc_loss=0.4321, over 19519.00 frames. ], tot_loss[loss=0.6188, simple_loss=0.4848, pruned_loss=0.2852, ctc_loss=0.5224, over 3843501.71 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:06:46,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=6720.0, ans=0.009408695652173914
+2024-08-25 03:06:51,428 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=6720.0, ans=0.185
+2024-08-25 03:06:56,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=6720.0, ans=0.0
+2024-08-25 03:07:29,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=6880.0, ans=0.0
+2024-08-25 03:07:36,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=6933.333333333333, ans=0.175
+2024-08-25 03:07:53,431 INFO [train.py:1114] (1/4) Epoch 1, batch 1300, loss[loss=0.5643, simple_loss=0.4603, pruned_loss=0.2378, ctc_loss=0.4536, over 18874.00 frames. ], tot_loss[loss=0.5964, simple_loss=0.473, pruned_loss=0.2688, ctc_loss=0.4969, over 3846323.59 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:08:00,986 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.595e+02 3.171e+02 4.007e+02 5.829e+02, threshold=6.342e+02, percent-clipped=0.0
+2024-08-25 03:08:22,669 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.17 vs. limit=12.780000000000001
+2024-08-25 03:08:31,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=7093.333333333333, ans=0.8209333333333333
+2024-08-25 03:08:52,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=7146.666666666667, ans=0.025
+2024-08-25 03:08:59,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=7200.0, ans=0.009304347826086957
+2024-08-25 03:09:00,194 INFO [train.py:1114] (1/4) Epoch 1, batch 1350, loss[loss=0.4781, simple_loss=0.4108, pruned_loss=0.1863, ctc_loss=0.3709, over 19760.00 frames. ], tot_loss[loss=0.579, simple_loss=0.4642, pruned_loss=0.2559, ctc_loss=0.4768, over 3858477.95 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:09:05,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=7200.0, ans=0.16249999999999998
+2024-08-25 03:09:11,417 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.13 vs. limit=8.6
+2024-08-25 03:09:11,912 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=7253.333333333333, ans=9.533333333333333
+2024-08-25 03:10:09,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=7253.333333333333, ans=0.036444444444444446
+2024-08-25 03:10:22,133 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=7306.666666666667, ans=0.15749999999999997
+2024-08-25 03:11:52,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=7360.0, ans=0.036000000000000004
+2024-08-25 03:12:03,222 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.11 vs. limit=6.965333333333334
+2024-08-25 03:12:05,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=7413.333333333333, ans=0.009257971014492754
+2024-08-25 03:12:10,366 INFO [train.py:1114] (1/4) Epoch 1, batch 1400, loss[loss=0.4261, simple_loss=0.3677, pruned_loss=0.1678, ctc_loss=0.3231, over 19668.00 frames. ], tot_loss[loss=0.5637, simple_loss=0.4566, pruned_loss=0.245, ctc_loss=0.4592, over 3864290.12 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:12:32,369 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.490e+02 2.974e+02 4.034e+02 6.918e+02, threshold=5.948e+02, percent-clipped=1.0
+2024-08-25 03:13:09,471 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.36 vs. limit=10.36
+2024-08-25 03:13:26,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=7680.0, ans=0.2232
+2024-08-25 03:13:28,397 INFO [train.py:1114] (1/4) Epoch 1, batch 1450, loss[loss=0.5238, simple_loss=0.4481, pruned_loss=0.2108, ctc_loss=0.3977, over 19686.00 frames. ], tot_loss[loss=0.5518, simple_loss=0.4515, pruned_loss=0.2362, ctc_loss=0.4444, over 3861926.89 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:13:29,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=7733.333333333333, ans=0.1375
+2024-08-25 03:13:30,333 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.06 vs. limit=13.3
+2024-08-25 03:13:36,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=7733.333333333333, ans=0.1375
+2024-08-25 03:13:50,950 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.62 vs. limit=10.42
+2024-08-25 03:13:59,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=7840.0, ans=0.034
+2024-08-25 03:14:15,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=7893.333333333333, ans=0.13
+2024-08-25 03:14:24,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=7946.666666666667, ans=0.033555555555555554
+2024-08-25 03:14:30,703 INFO [train.py:1114] (1/4) Epoch 1, batch 1500, loss[loss=0.5177, simple_loss=0.4487, pruned_loss=0.2029, ctc_loss=0.4004, over 19597.00 frames. ], tot_loss[loss=0.5407, simple_loss=0.4465, pruned_loss=0.2284, ctc_loss=0.4317, over 3862190.24 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:14:38,503 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.576e+02 3.382e+02 4.091e+02 7.597e+02, threshold=6.763e+02, percent-clipped=6.0
+2024-08-25 03:14:40,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=8000.0, ans=0.125
+2024-08-25 03:14:47,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=8053.333333333333, ans=0.025
+2024-08-25 03:15:12,741 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.09 vs. limit=13.620000000000001
+2024-08-25 03:15:16,523 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.07 vs. limit=10.56
+2024-08-25 03:15:19,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=8160.0, ans=0.125
+2024-08-25 03:15:24,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=8213.333333333334, ans=0.03244444444444444
+2024-08-25 03:15:40,014 INFO [train.py:1114] (1/4) Epoch 1, batch 1550, loss[loss=0.5474, simple_loss=0.4547, pruned_loss=0.2274, ctc_loss=0.4375, over 19624.00 frames. ], tot_loss[loss=0.5307, simple_loss=0.442, pruned_loss=0.2218, ctc_loss=0.4205, over 3846925.98 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 8.0
+2024-08-25 03:15:51,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=8320.0, ans=0.1668
+2024-08-25 03:15:57,877 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.70 vs. limit=13.74
+2024-08-25 03:16:23,464 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=8426.666666666666, ans=0.125
+2024-08-25 03:16:30,839 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8480.0, ans=0.2152
+2024-08-25 03:16:31,706 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.61 vs. limit=13.86
+2024-08-25 03:16:36,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=8480.0, ans=0.125
+2024-08-25 03:16:49,350 INFO [train.py:1114] (1/4) Epoch 1, batch 1600, loss[loss=0.4744, simple_loss=0.4255, pruned_loss=0.1787, ctc_loss=0.3629, over 19831.00 frames. ], tot_loss[loss=0.5214, simple_loss=0.4376, pruned_loss=0.2159, ctc_loss=0.4102, over 3836866.91 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:16:55,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=8533.333333333334, ans=0.09899494936611666
+2024-08-25 03:16:57,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=8533.333333333334, ans=0.125
+2024-08-25 03:16:59,538 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.604e+02 3.125e+02 4.170e+02 2.617e+03, threshold=6.251e+02, percent-clipped=7.0
+2024-08-25 03:17:03,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=8586.666666666666, ans=0.125
+2024-08-25 03:17:08,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=8586.666666666666, ans=0.05
+2024-08-25 03:17:19,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=8640.0, ans=0.125
+2024-08-25 03:17:24,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=8640.0, ans=0.5976
+2024-08-25 03:17:40,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=8693.333333333334, ans=0.125
+2024-08-25 03:19:09,295 INFO [train.py:1114] (1/4) Epoch 1, batch 1650, loss[loss=0.4358, simple_loss=0.3984, pruned_loss=0.1621, ctc_loss=0.3257, over 19652.00 frames. ], tot_loss[loss=0.512, simple_loss=0.4332, pruned_loss=0.21, ctc_loss=0.4003, over 3831942.97 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:19:10,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=8800.0, ans=0.125
+2024-08-25 03:19:17,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=8800.0, ans=0.125
+2024-08-25 03:19:19,071 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.65 vs. limit=10.8
+2024-08-25 03:19:52,621 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.14 vs. limit=5.0
+2024-08-25 03:19:57,108 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.49 vs. limit=14.219999999999999
+2024-08-25 03:20:12,473 INFO [train.py:1114] (1/4) Epoch 1, batch 1700, loss[loss=0.39, simple_loss=0.3524, pruned_loss=0.1486, ctc_loss=0.2942, over 19670.00 frames. ], tot_loss[loss=0.5017, simple_loss=0.4288, pruned_loss=0.2039, ctc_loss=0.3894, over 3845983.60 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:20:15,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=9066.666666666666, ans=0.125
+2024-08-25 03:20:19,814 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.828e+02 2.395e+02 2.888e+02 3.702e+02 8.491e+02, threshold=5.776e+02, percent-clipped=2.0
+2024-08-25 03:20:21,483 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.94 vs. limit=9.533333333333333
+2024-08-25 03:20:29,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=9120.0, ans=0.02866666666666667
+2024-08-25 03:22:14,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 03:22:22,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=9280.0, ans=0.125
+2024-08-25 03:22:22,545 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=9280.0, ans=0.125
+2024-08-25 03:22:27,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=9280.0, ans=0.125
+2024-08-25 03:22:33,180 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.11 vs. limit=4.4
+2024-08-25 03:22:33,852 INFO [train.py:1114] (1/4) Epoch 1, batch 1750, loss[loss=0.3899, simple_loss=0.3639, pruned_loss=0.1449, ctc_loss=0.2831, over 19627.00 frames. ], tot_loss[loss=0.4934, simple_loss=0.4251, pruned_loss=0.199, ctc_loss=0.3807, over 3850913.43 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:22:37,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=9333.333333333334, ans=0.125
+2024-08-25 03:22:47,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=9386.666666666666, ans=0.5714666666666668
+2024-08-25 03:22:51,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=9386.666666666666, ans=0.20613333333333334
+2024-08-25 03:22:52,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=9386.666666666666, ans=0.125
+2024-08-25 03:23:04,157 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.23 vs. limit=14.58
+2024-08-25 03:23:29,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=9546.666666666666, ans=0.125
+2024-08-25 03:23:31,435 INFO [train.py:1114] (1/4) Epoch 1, batch 1800, loss[loss=0.4977, simple_loss=0.4344, pruned_loss=0.2018, ctc_loss=0.3762, over 19608.00 frames. ], tot_loss[loss=0.4891, simple_loss=0.4241, pruned_loss=0.1964, ctc_loss=0.3761, over 3852586.37 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 8.0
+2024-08-25 03:23:39,408 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.646e+02 3.473e+02 4.220e+02 8.344e+02, threshold=6.945e+02, percent-clipped=3.0
+2024-08-25 03:23:49,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=9653.333333333334, ans=0.125
+2024-08-25 03:24:05,978 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=9706.666666666666, ans=0.5602666666666667
+2024-08-25 03:24:15,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=9760.0, ans=0.026000000000000002
+2024-08-25 03:24:16,921 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.92 vs. limit=11.16
+2024-08-25 03:24:23,019 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=9813.333333333334, ans=0.025777777777777778
+2024-08-25 03:24:35,842 INFO [train.py:1114] (1/4) Epoch 1, batch 1850, loss[loss=0.4235, simple_loss=0.3908, pruned_loss=0.1637, ctc_loss=0.3044, over 19578.00 frames. ], tot_loss[loss=0.48, simple_loss=0.4204, pruned_loss=0.1913, ctc_loss=0.367, over 3856379.05 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:24:44,823 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=9866.666666666666, ans=0.5546666666666666
+2024-08-25 03:24:51,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.73 vs. limit=11.2
+2024-08-25 03:25:01,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=9920.0, ans=0.125
+2024-08-25 03:25:18,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10026.666666666666, ans=0.19973333333333332
+2024-08-25 03:25:38,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=10080.0, ans=0.02466666666666667
+2024-08-25 03:25:40,387 INFO [train.py:1114] (1/4) Epoch 1, batch 1900, loss[loss=0.4883, simple_loss=0.4366, pruned_loss=0.1937, ctc_loss=0.371, over 19629.00 frames. ], tot_loss[loss=0.4752, simple_loss=0.4192, pruned_loss=0.1886, ctc_loss=0.3618, over 3860757.23 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:25:46,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=10133.333333333334, ans=0.19866666666666666
+2024-08-25 03:25:48,461 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.873e+02 2.554e+02 2.990e+02 4.033e+02 8.041e+02, threshold=5.979e+02, percent-clipped=3.0
+2024-08-25 03:26:08,612 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.96 vs. limit=15.18
+2024-08-25 03:26:09,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=10240.0, ans=0.125
+2024-08-25 03:26:09,503 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.88 vs. limit=8.096
+2024-08-25 03:26:14,311 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.83 vs. limit=11.36
+2024-08-25 03:26:19,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=10293.333333333334, ans=0.125
+2024-08-25 03:26:25,484 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=10346.666666666666, ans=0.125
+2024-08-25 03:26:38,010 INFO [train.py:1114] (1/4) Epoch 1, batch 1950, loss[loss=0.4296, simple_loss=0.3962, pruned_loss=0.1663, ctc_loss=0.32, over 19583.00 frames. ], tot_loss[loss=0.4682, simple_loss=0.4171, pruned_loss=0.1847, ctc_loss=0.355, over 3870139.29 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:26:47,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=10400.0, ans=0.125
+2024-08-25 03:27:03,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=10506.666666666666, ans=0.19493333333333335
+2024-08-25 03:27:07,760 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.62 vs. limit=15.379999999999999
+2024-08-25 03:27:15,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=10560.0, ans=0.008573913043478262
+2024-08-25 03:27:23,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10613.333333333334, ans=0.19386666666666666
+2024-08-25 03:27:36,523 INFO [train.py:1114] (1/4) Epoch 1, batch 2000, loss[loss=0.408, simple_loss=0.3765, pruned_loss=0.157, ctc_loss=0.3136, over 19677.00 frames. ], tot_loss[loss=0.4639, simple_loss=0.4158, pruned_loss=0.1826, ctc_loss=0.3511, over 3854520.47 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:27:42,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=10666.666666666666, ans=0.0
+2024-08-25 03:27:44,891 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.508e+02 3.011e+02 3.695e+02 6.472e+02, threshold=6.022e+02, percent-clipped=1.0
+2024-08-25 03:27:46,524 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.53 vs. limit=11.5
+2024-08-25 03:27:51,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=10720.0, ans=0.125
+2024-08-25 03:27:54,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=10720.0, ans=0.5248
+2024-08-25 03:28:27,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=10880.0, ans=0.008504347826086956
+2024-08-25 03:28:41,990 INFO [train.py:1114] (1/4) Epoch 1, batch 2050, loss[loss=0.3962, simple_loss=0.3767, pruned_loss=0.1497, ctc_loss=0.2908, over 19750.00 frames. ], tot_loss[loss=0.4573, simple_loss=0.4124, pruned_loss=0.1796, ctc_loss=0.3451, over 3850741.77 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:30:05,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=11093.333333333334, ans=0.5117333333333334
+2024-08-25 03:31:02,643 INFO [train.py:1114] (1/4) Epoch 1, batch 2100, loss[loss=0.4076, simple_loss=0.3939, pruned_loss=0.151, ctc_loss=0.2982, over 19772.00 frames. ], tot_loss[loss=0.4489, simple_loss=0.4085, pruned_loss=0.1752, ctc_loss=0.3372, over 3857647.84 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:31:03,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.18 vs. limit=8.48
+2024-08-25 03:31:04,125 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.33 vs. limit=11.7
+2024-08-25 03:31:14,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=11200.0, ans=0.020000000000000004
+2024-08-25 03:31:19,369 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 2.443e+02 2.901e+02 4.101e+02 7.108e+02, threshold=5.802e+02, percent-clipped=5.0
+2024-08-25 03:31:44,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=11306.666666666666, ans=0.035
+2024-08-25 03:32:07,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=11360.0, ans=0.5024000000000001
+2024-08-25 03:32:32,797 INFO [train.py:1114] (1/4) Epoch 1, batch 2150, loss[loss=0.4253, simple_loss=0.395, pruned_loss=0.1647, ctc_loss=0.3154, over 19609.00 frames. ], tot_loss[loss=0.4419, simple_loss=0.4053, pruned_loss=0.1716, ctc_loss=0.3303, over 3868410.32 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:32:53,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=11520.0, ans=0.01866666666666667
+2024-08-25 03:32:58,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=11520.0, ans=0.1848
+2024-08-25 03:33:39,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=11626.666666666666, ans=0.125
+2024-08-25 03:33:39,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11626.666666666666, ans=0.18373333333333333
+2024-08-25 03:33:54,225 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.78 vs. limit=4.752
+2024-08-25 03:33:57,383 INFO [train.py:1114] (1/4) Epoch 1, batch 2200, loss[loss=0.4627, simple_loss=0.4278, pruned_loss=0.1788, ctc_loss=0.3496, over 19555.00 frames. ], tot_loss[loss=0.4386, simple_loss=0.4041, pruned_loss=0.1699, ctc_loss=0.3274, over 3866197.78 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:34:01,383 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=11733.333333333334, ans=0.18266666666666664
+2024-08-25 03:34:05,547 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.75 vs. limit=11.9
+2024-08-25 03:34:08,398 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.628e+02 3.380e+02 4.438e+02 7.655e+02, threshold=6.760e+02, percent-clipped=12.0
+2024-08-25 03:34:10,119 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=17.28 vs. limit=11.92
+2024-08-25 03:34:13,137 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=11786.666666666666, ans=0.0
+2024-08-25 03:34:13,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.91 vs. limit=11.92
+2024-08-25 03:34:13,790 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.90 vs. limit=7.946666666666666
+2024-08-25 03:34:14,669 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.84 vs. limit=11.92
+2024-08-25 03:34:30,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=11840.0, ans=0.025
+2024-08-25 03:34:56,346 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.66 vs. limit=16.46
+2024-08-25 03:35:03,295 INFO [train.py:1114] (1/4) Epoch 1, batch 2250, loss[loss=0.3898, simple_loss=0.3856, pruned_loss=0.1401, ctc_loss=0.2842, over 19598.00 frames. ], tot_loss[loss=0.4345, simple_loss=0.4025, pruned_loss=0.1677, ctc_loss=0.3228, over 3866141.34 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:35:03,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=12000.0, ans=0.125
+2024-08-25 03:35:18,667 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.48 vs. limit=11.026666666666667
+2024-08-25 03:35:40,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=12160.0, ans=0.125
+2024-08-25 03:35:53,579 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.77 vs. limit=11.106666666666667
+2024-08-25 03:35:54,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=12213.333333333334, ans=0.025
+2024-08-25 03:36:03,055 INFO [train.py:1114] (1/4) Epoch 1, batch 2300, loss[loss=0.394, simple_loss=0.378, pruned_loss=0.1475, ctc_loss=0.2871, over 19507.00 frames. ], tot_loss[loss=0.4306, simple_loss=0.4002, pruned_loss=0.1659, ctc_loss=0.3191, over 3860747.31 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:36:07,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=12266.666666666666, ans=0.125
+2024-08-25 03:36:12,308 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.546e+02 3.099e+02 3.956e+02 8.242e+02, threshold=6.199e+02, percent-clipped=6.0
+2024-08-25 03:36:46,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12426.666666666666, ans=0.17573333333333335
+2024-08-25 03:36:54,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=12480.0, ans=0.125
+2024-08-25 03:37:00,696 INFO [train.py:1114] (1/4) Epoch 1, batch 2350, loss[loss=0.4892, simple_loss=0.4365, pruned_loss=0.1945, ctc_loss=0.3824, over 19700.00 frames. ], tot_loss[loss=0.4258, simple_loss=0.3982, pruned_loss=0.1633, ctc_loss=0.3142, over 3863328.77 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:37:24,261 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=12640.0, ans=0.4576
+2024-08-25 03:37:35,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.34 vs. limit=12.26
+2024-08-25 03:37:38,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=12693.333333333334, ans=0.125
+2024-08-25 03:37:40,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=12693.333333333334, ans=0.013777777777777778
+2024-08-25 03:37:42,136 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.49 vs. limit=17.02
+2024-08-25 03:37:51,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=12746.666666666666, ans=0.125
+2024-08-25 03:37:51,106 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12746.666666666666, ans=0.17253333333333334
+2024-08-25 03:37:59,408 INFO [train.py:1114] (1/4) Epoch 1, batch 2400, loss[loss=0.4502, simple_loss=0.4223, pruned_loss=0.1727, ctc_loss=0.3319, over 19379.00 frames. ], tot_loss[loss=0.4273, simple_loss=0.4004, pruned_loss=0.1638, ctc_loss=0.3145, over 3858170.43 frames. ], batch size: 67, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:38:02,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=12800.0, ans=0.07
+2024-08-25 03:38:08,244 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.522e+02 3.053e+02 3.990e+02 1.210e+03, threshold=6.106e+02, percent-clipped=3.0
+2024-08-25 03:38:15,553 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.00 vs. limit=12.32
+2024-08-25 03:38:18,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=12853.333333333334, ans=0.013111111111111108
+2024-08-25 03:38:30,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=12906.666666666666, ans=0.125
+2024-08-25 03:38:37,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=12960.0, ans=0.08012000000000001
+2024-08-25 03:38:38,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=12960.0, ans=0.125
+2024-08-25 03:38:41,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=12960.0, ans=0.125
+2024-08-25 03:39:03,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.05 vs. limit=17.3
+2024-08-25 03:39:03,887 INFO [train.py:1114] (1/4) Epoch 1, batch 2450, loss[loss=0.5034, simple_loss=0.4388, pruned_loss=0.2057, ctc_loss=0.3917, over 13653.00 frames. ], tot_loss[loss=0.4376, simple_loss=0.4062, pruned_loss=0.1693, ctc_loss=0.3239, over 3732983.70 frames. ], batch size: 140, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:39:07,584 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=13066.666666666666, ans=0.0
+2024-08-25 03:40:43,719 INFO [train.py:1114] (1/4) Epoch 2, batch 0, loss[loss=0.4347, simple_loss=0.3982, pruned_loss=0.1704, ctc_loss=0.326, over 19390.00 frames. ], tot_loss[loss=0.4347, simple_loss=0.3982, pruned_loss=0.1704, ctc_loss=0.326, over 19390.00 frames. ], batch size: 48, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 03:40:43,720 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 03:40:51,836 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.5.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.6884, 3.4234, 2.7515, 3.4895], device='cuda:1')
+2024-08-25 03:40:53,952 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.7451, 3.1147, 3.4833, 3.2353], device='cuda:1')
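+
+At the start of each validation pass, `zipformer.py` prints a tensor of attention-weight entropies per self-attention module (apparently one value per head) as a diagnostic of how sharply each head attends. A hedged sketch of what such a statistic could look like; the exact reduction used in zipformer.py is an assumption here:
+
+```python
+import torch
+
+def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
+    """attn: (num_heads, num_queries, num_keys) weights summing to 1 over keys.
+    Returns the mean per-head entropy in nats; low values mean peaky heads,
+    values near log(num_keys) mean near-uniform attention."""
+    ent = -(attn * (attn + 1e-20).log()).sum(dim=-1)   # (num_heads, num_queries)
+    return ent.mean(dim=-1)
+```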
+2024-08-25 03:40:55,163 INFO [train.py:1146] (1/4) Epoch 2, validation: loss=0.3317, simple_loss=0.3718, pruned_loss=0.1058, ctc_loss=0.2, over 944034.00 frames.
+2024-08-25 03:40:55,164 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
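+
+Each validation block ends by reporting the validation loss over the ~944k-frame dev set and the peak CUDA memory for this rank (here 14083MB on rank 1 of 4). The memory figure most plausibly comes from PyTorch's caching-allocator high-water mark; a sketch under that assumption, since train.py itself is not shown here:
+
+```python
+import torch
+
+# Hypothetical reconstruction of the memory log line: query the CUDA
+# allocator's peak for the current device and report it in MB.
+peak_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
+print(f"Maximum memory allocated so far is {peak_mb}MB")
+```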
+2024-08-25 03:41:17,122 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.388e+02 2.818e+02 3.444e+02 6.577e+02, threshold=5.636e+02, percent-clipped=3.0
+2024-08-25 03:41:19,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=13386.666666666666, ans=0.025
+2024-08-25 03:41:20,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 03:41:25,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 03:41:32,815 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=13440.0, ans=0.0
+2024-08-25 03:41:57,951 INFO [train.py:1114] (1/4) Epoch 2, batch 50, loss[loss=0.346, simple_loss=0.3437, pruned_loss=0.1264, ctc_loss=0.2384, over 19743.00 frames. ], tot_loss[loss=0.4242, simple_loss=0.4003, pruned_loss=0.162, ctc_loss=0.3102, over 845814.23 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:41:59,905 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.78 vs. limit=12.58
+2024-08-25 03:42:07,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=13546.666666666666, ans=0.4258666666666667
+2024-08-25 03:42:07,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.84 vs. limit=5.032
+2024-08-25 03:42:12,092 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=13600.0, ans=0.025
+2024-08-25 03:42:22,992 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.19 vs. limit=17.740000000000002
+2024-08-25 03:43:11,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=13706.666666666666, ans=0.125
+2024-08-25 03:43:29,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=13760.0, ans=0.00933333333333334
+2024-08-25 03:43:36,875 INFO [train.py:1114] (1/4) Epoch 2, batch 100, loss[loss=0.3597, simple_loss=0.3616, pruned_loss=0.1285, ctc_loss=0.2518, over 19718.00 frames. ], tot_loss[loss=0.421, simple_loss=0.4006, pruned_loss=0.1594, ctc_loss=0.3062, over 1499532.21 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:43:37,929 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.96 vs. limit=5.072
+2024-08-25 03:43:39,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=13813.333333333334, ans=0.125
+2024-08-25 03:43:40,963 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:43:44,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=13813.333333333334, ans=0.025
+2024-08-25 03:43:50,080 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.53 vs. limit=12.68
+2024-08-25 03:43:50,991 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=13866.666666666666, ans=0.00888888888888889
+2024-08-25 03:44:02,825 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.500e+02 2.916e+02 3.893e+02 6.295e+02, threshold=5.832e+02, percent-clipped=2.0
+2024-08-25 03:44:11,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=13920.0, ans=0.4128
+2024-08-25 03:44:22,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=13973.333333333334, ans=0.125
+2024-08-25 03:44:23,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=13973.333333333334, ans=0.125
+2024-08-25 03:44:39,621 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.33 vs. limit=18.02
+2024-08-25 03:44:42,788 INFO [train.py:1114] (1/4) Epoch 2, batch 150, loss[loss=0.3788, simple_loss=0.3614, pruned_loss=0.1415, ctc_loss=0.2831, over 19716.00 frames. ], tot_loss[loss=0.4117, simple_loss=0.3946, pruned_loss=0.155, ctc_loss=0.2973, over 2028297.15 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:44:58,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=14133.333333333334, ans=0.15866666666666665
+2024-08-25 03:45:00,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=14133.333333333334, ans=0.007777777777777772
+2024-08-25 03:45:14,980 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=14186.666666666666, ans=0.007785507246376812
+2024-08-25 03:45:25,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=14240.0, ans=0.125
+2024-08-25 03:45:42,127 INFO [train.py:1114] (1/4) Epoch 2, batch 200, loss[loss=0.4496, simple_loss=0.4089, pruned_loss=0.1768, ctc_loss=0.3416, over 18347.00 frames. ], tot_loss[loss=0.4043, simple_loss=0.3896, pruned_loss=0.1513, ctc_loss=0.2907, over 2435894.34 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:45:54,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14346.666666666666, ans=0.15653333333333333
+2024-08-25 03:46:05,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=14400.0, ans=0.396
+2024-08-25 03:46:06,460 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.951e+02 2.445e+02 2.940e+02 3.728e+02 6.995e+02, threshold=5.880e+02, percent-clipped=3.0
+2024-08-25 03:46:18,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=14453.333333333334, ans=0.0
+2024-08-25 03:46:22,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=14506.666666666666, ans=0.125
+2024-08-25 03:46:45,918 INFO [train.py:1114] (1/4) Epoch 2, batch 250, loss[loss=0.448, simple_loss=0.4253, pruned_loss=0.1719, ctc_loss=0.3172, over 19410.00 frames. ], tot_loss[loss=0.4039, simple_loss=0.3894, pruned_loss=0.1513, ctc_loss=0.2894, over 2755875.80 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:47:10,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=14720.0, ans=0.007669565217391304
+2024-08-25 03:47:21,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=14773.333333333334, ans=0.0
+2024-08-25 03:47:40,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=14826.666666666666, ans=0.04949747468305833
+2024-08-25 03:47:41,821 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.85 vs. limit=13.059999999999999
+2024-08-25 03:47:45,019 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=14826.666666666666, ans=0.125
+2024-08-25 03:47:50,841 INFO [train.py:1114] (1/4) Epoch 2, batch 300, loss[loss=0.4209, simple_loss=0.4101, pruned_loss=0.1566, ctc_loss=0.2963, over 19532.00 frames. ], tot_loss[loss=0.4004, simple_loss=0.3875, pruned_loss=0.1494, ctc_loss=0.2859, over 3000767.60 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:47:53,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=14880.0, ans=0.007634782608695653
+2024-08-25 03:47:56,770 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=14880.0, ans=0.125
+2024-08-25 03:47:59,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=14880.0, ans=0.025
+2024-08-25 03:48:13,155 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.396e+02 2.818e+02 3.488e+02 8.647e+02, threshold=5.636e+02, percent-clipped=6.0
+2024-08-25 03:48:50,383 INFO [train.py:1114] (1/4) Epoch 2, batch 350, loss[loss=0.3374, simple_loss=0.3444, pruned_loss=0.119, ctc_loss=0.2309, over 19746.00 frames. ], tot_loss[loss=0.4001, simple_loss=0.3876, pruned_loss=0.1492, ctc_loss=0.2855, over 3189874.42 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:49:33,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=15200.0, ans=0.42800000000000005
+2024-08-25 03:50:01,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=15306.666666666666, ans=0.125
+2024-08-25 03:50:17,350 INFO [train.py:1114] (1/4) Epoch 2, batch 400, loss[loss=0.4106, simple_loss=0.3951, pruned_loss=0.1546, ctc_loss=0.292, over 19498.00 frames. ], tot_loss[loss=0.3978, simple_loss=0.3868, pruned_loss=0.1479, ctc_loss=0.2829, over 3341436.16 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:50:39,710 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.407e+02 2.984e+02 3.456e+02 5.488e+02, threshold=5.968e+02, percent-clipped=0.0
+2024-08-25 03:50:43,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15520.0, ans=0.125
+2024-08-25 03:50:48,426 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.55 vs. limit=13.32
+2024-08-25 03:50:49,371 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=2.700e-02
+2024-08-25 03:50:50,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=15520.0, ans=19.14
+2024-08-25 03:51:03,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=15626.666666666666, ans=0.125
+2024-08-25 03:51:19,334 INFO [train.py:1114] (1/4) Epoch 2, batch 450, loss[loss=0.396, simple_loss=0.3946, pruned_loss=0.1438, ctc_loss=0.2745, over 19607.00 frames. ], tot_loss[loss=0.3967, simple_loss=0.3859, pruned_loss=0.1475, ctc_loss=0.2816, over 3450016.16 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:51:24,549 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=15680.0, ans=0.125
+2024-08-25 03:51:32,132 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.39 vs. limit=5.359999999999999
+2024-08-25 03:51:38,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=15733.333333333334, ans=0.125
+2024-08-25 03:51:43,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=15733.333333333334, ans=0.125
+2024-08-25 03:51:49,927 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.10 vs. limit=13.42
+2024-08-25 03:51:54,680 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.82 vs. limit=19.34
+2024-08-25 03:52:05,146 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.66 vs. limit=13.440000000000001
+2024-08-25 03:52:10,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=15893.333333333334, ans=0.34373333333333334
+2024-08-25 03:52:20,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15946.666666666666, ans=0.14053333333333334
+2024-08-25 03:52:21,860 INFO [train.py:1114] (1/4) Epoch 2, batch 500, loss[loss=0.4456, simple_loss=0.4176, pruned_loss=0.1723, ctc_loss=0.3222, over 19633.00 frames. ], tot_loss[loss=0.3934, simple_loss=0.384, pruned_loss=0.1457, ctc_loss=0.2784, over 3545224.30 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:53:00,693 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.27 vs. limit=9.0
+2024-08-25 03:53:07,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=16000.0, ans=0.007391304347826087
+2024-08-25 03:53:11,993 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.425e+02 3.079e+02 3.995e+02 1.154e+03, threshold=6.159e+02, percent-clipped=13.0
+2024-08-25 03:53:12,360 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=16053.333333333334, ans=0.9105333333333333
+2024-08-25 03:53:19,801 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.33 vs. limit=19.54
+2024-08-25 03:53:19,945 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.68 vs. limit=19.54
+2024-08-25 03:53:24,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=16106.666666666666, ans=0.13893333333333333
+2024-08-25 03:53:49,313 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.87 vs. limit=13.58
+2024-08-25 03:53:49,957 INFO [train.py:1114] (1/4) Epoch 2, batch 550, loss[loss=0.3737, simple_loss=0.3772, pruned_loss=0.1325, ctc_loss=0.2632, over 19278.00 frames. ], tot_loss[loss=0.3916, simple_loss=0.3834, pruned_loss=0.1447, ctc_loss=0.2765, over 3606845.61 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:54:10,282 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.64 vs. limit=5.4399999999999995
+2024-08-25 03:54:27,163 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.21 vs. limit=13.64
+2024-08-25 03:54:30,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=16373.333333333334, ans=0.3269333333333333
+2024-08-25 03:54:51,528 INFO [train.py:1114] (1/4) Epoch 2, batch 600, loss[loss=0.4331, simple_loss=0.4156, pruned_loss=0.1626, ctc_loss=0.3136, over 19448.00 frames. ], tot_loss[loss=0.3894, simple_loss=0.3824, pruned_loss=0.1434, ctc_loss=0.2741, over 3664934.48 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:54:52,264 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.70 vs. limit=19.86
+2024-08-25 03:55:02,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=16533.333333333332, ans=0.125
+2024-08-25 03:55:02,655 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.10 vs. limit=13.7
+2024-08-25 03:55:04,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=16533.333333333332, ans=0.13466666666666668
+2024-08-25 03:55:11,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=16533.333333333332, ans=0.125
+2024-08-25 03:55:14,977 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.336e+02 2.753e+02 3.494e+02 8.105e+02, threshold=5.507e+02, percent-clipped=1.0
+2024-08-25 03:55:30,464 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.23 vs. limit=19.98
+2024-08-25 03:55:56,157 INFO [train.py:1114] (1/4) Epoch 2, batch 650, loss[loss=0.3689, simple_loss=0.3702, pruned_loss=0.1322, ctc_loss=0.2578, over 19762.00 frames. ], tot_loss[loss=0.3882, simple_loss=0.3814, pruned_loss=0.1429, ctc_loss=0.273, over 3715274.43 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:56:27,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16853.333333333332, ans=0.13146666666666668
+2024-08-25 03:56:40,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=16906.666666666668, ans=0.125
+2024-08-25 03:56:46,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=16960.0, ans=0.025
+2024-08-25 03:56:56,407 INFO [train.py:1114] (1/4) Epoch 2, batch 700, loss[loss=0.3288, simple_loss=0.3428, pruned_loss=0.1121, ctc_loss=0.2263, over 19719.00 frames. ], tot_loss[loss=0.3872, simple_loss=0.3811, pruned_loss=0.1423, ctc_loss=0.2718, over 3747259.36 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:57:05,163 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=17013.333333333332, ans=0.125
+2024-08-25 03:57:13,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=17066.666666666668, ans=0.07
+2024-08-25 03:57:23,239 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.519e+02 2.895e+02 3.628e+02 6.087e+02, threshold=5.790e+02, percent-clipped=2.0
+2024-08-25 03:58:00,564 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.14 vs. limit=13.98
+2024-08-25 03:58:01,082 INFO [train.py:1114] (1/4) Epoch 2, batch 750, loss[loss=0.3486, simple_loss=0.363, pruned_loss=0.1198, ctc_loss=0.2364, over 19500.00 frames. ], tot_loss[loss=0.3869, simple_loss=0.3809, pruned_loss=0.1422, ctc_loss=0.2712, over 3774238.27 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:58:21,203 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=17333.333333333332, ans=0.125
+2024-08-25 03:58:23,874 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.23 vs. limit=5.6
+2024-08-25 03:58:27,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=17386.666666666668, ans=0.29146666666666676
+2024-08-25 03:58:27,402 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.57 vs. limit=14.02
+2024-08-25 03:58:28,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=17386.666666666668, ans=0.125
+2024-08-25 04:00:08,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=17493.333333333332, ans=0.125
+2024-08-25 04:00:16,090 INFO [train.py:1114] (1/4) Epoch 2, batch 800, loss[loss=0.3618, simple_loss=0.3632, pruned_loss=0.1313, ctc_loss=0.2446, over 19415.00 frames. ], tot_loss[loss=0.3853, simple_loss=0.38, pruned_loss=0.1414, ctc_loss=0.2694, over 3796416.54 frames. ], batch size: 48, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:00:17,824 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.89 vs. limit=14.08
+2024-08-25 04:00:20,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=17546.666666666668, ans=0.1245333333333333
+2024-08-25 04:00:25,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=17546.666666666668, ans=0.125
+2024-08-25 04:00:30,523 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.13 vs. limit=20.7
+2024-08-25 04:00:39,331 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.611e+02 3.088e+02 3.881e+02 9.768e+02, threshold=6.176e+02, percent-clipped=6.0
+2024-08-25 04:00:57,257 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.14 vs. limit=14.14
+2024-08-25 04:01:00,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=17706.666666666668, ans=0.125
+2024-08-25 04:01:09,446 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:01:15,035 INFO [train.py:1114] (1/4) Epoch 2, batch 850, loss[loss=0.3971, simple_loss=0.3955, pruned_loss=0.1462, ctc_loss=0.2659, over 19672.00 frames. ], tot_loss[loss=0.3827, simple_loss=0.3785, pruned_loss=0.1401, ctc_loss=0.2668, over 3814423.31 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:01:21,620 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.83 vs. limit=20.86
+2024-08-25 04:01:22,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=17813.333333333332, ans=0.12186666666666668
+2024-08-25 04:01:26,004 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=17866.666666666668, ans=0.2746666666666667
+2024-08-25 04:01:40,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=17920.0, ans=0.125
+2024-08-25 04:01:47,547 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:01:49,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=17920.0, ans=0.0
+2024-08-25 04:01:51,679 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.47 vs. limit=5.696
+2024-08-25 04:01:56,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=17973.333333333332, ans=0.07026666666666667
+2024-08-25 04:01:57,392 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.78 vs. limit=9.493333333333332
+2024-08-25 04:02:00,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=17973.333333333332, ans=0.00696231884057971
+2024-08-25 04:02:09,684 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.00 vs. limit=14.26
+2024-08-25 04:02:18,993 INFO [train.py:1114] (1/4) Epoch 2, batch 900, loss[loss=0.3408, simple_loss=0.3496, pruned_loss=0.1197, ctc_loss=0.2317, over 19803.00 frames. ], tot_loss[loss=0.383, simple_loss=0.3787, pruned_loss=0.1404, ctc_loss=0.2666, over 3818215.99 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:03:03,829 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.814e+02 2.530e+02 3.033e+02 3.602e+02 3.379e+03, threshold=6.066e+02, percent-clipped=6.0
+2024-08-25 04:03:06,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=18186.666666666668, ans=0.26346666666666674
+2024-08-25 04:03:17,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=18240.0, ans=0.125
+2024-08-25 04:03:36,914 INFO [train.py:1114] (1/4) Epoch 2, batch 950, loss[loss=0.3957, simple_loss=0.3641, pruned_loss=0.1554, ctc_loss=0.2913, over 19517.00 frames. ], tot_loss[loss=0.3832, simple_loss=0.3788, pruned_loss=0.1404, ctc_loss=0.2669, over 3819215.99 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:03:37,756 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.19 vs. limit=9.586666666666666
+2024-08-25 04:03:40,857 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=18346.666666666668, ans=0.0
+2024-08-25 04:04:05,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18453.333333333332, ans=0.11546666666666669
+2024-08-25 04:04:18,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=18506.666666666668, ans=0.25226666666666675
+2024-08-25 04:04:19,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=18506.666666666668, ans=0.0
+2024-08-25 04:04:26,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=18560.0, ans=0.0
+2024-08-25 04:04:39,324 INFO [train.py:1114] (1/4) Epoch 2, batch 1000, loss[loss=0.3534, simple_loss=0.3626, pruned_loss=0.1234, ctc_loss=0.2439, over 19834.00 frames. ], tot_loss[loss=0.3816, simple_loss=0.3785, pruned_loss=0.1393, ctc_loss=0.2649, over 3814199.32 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:04:53,226 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.57 vs. limit=9.666666666666668
+2024-08-25 04:05:03,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=18720.0, ans=0.24480000000000002
+2024-08-25 04:05:05,784 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.321e+02 2.743e+02 3.485e+02 6.350e+02, threshold=5.486e+02, percent-clipped=2.0
+2024-08-25 04:05:07,688 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.45 vs. limit=14.52
+2024-08-25 04:05:18,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=18773.333333333332, ans=10.0
+2024-08-25 04:05:41,810 INFO [train.py:1114] (1/4) Epoch 2, batch 1050, loss[loss=0.3724, simple_loss=0.3785, pruned_loss=0.1337, ctc_loss=0.2472, over 19836.00 frames. ], tot_loss[loss=0.3796, simple_loss=0.3771, pruned_loss=0.1384, ctc_loss=0.2628, over 3821203.38 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:05:45,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=18880.0, ans=0.11120000000000002
+2024-08-25 04:05:50,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=18880.0, ans=0.006765217391304348
+2024-08-25 04:05:54,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18933.333333333332, ans=0.11066666666666669
+2024-08-25 04:06:06,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.72 vs. limit=14.620000000000001
+2024-08-25 04:06:10,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=18986.666666666668, ans=0.125
+2024-08-25 04:06:29,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=19040.0, ans=0.0
+2024-08-25 04:06:33,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=19093.333333333332, ans=0.125
+2024-08-25 04:06:44,163 INFO [train.py:1114] (1/4) Epoch 2, batch 1100, loss[loss=0.3708, simple_loss=0.3691, pruned_loss=0.1342, ctc_loss=0.2603, over 19593.00 frames. ], tot_loss[loss=0.3777, simple_loss=0.3761, pruned_loss=0.1375, ctc_loss=0.2607, over 3829216.55 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:06:44,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=19146.666666666668, ans=0.0
+2024-08-25 04:07:11,086 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.465e+02 2.960e+02 4.039e+02 7.406e+02, threshold=5.919e+02, percent-clipped=11.0
+2024-08-25 04:07:45,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=19306.666666666668, ans=0.125
+2024-08-25 04:08:08,071 INFO [train.py:1114] (1/4) Epoch 2, batch 1150, loss[loss=0.3785, simple_loss=0.3687, pruned_loss=0.1414, ctc_loss=0.264, over 19580.00 frames. ], tot_loss[loss=0.3777, simple_loss=0.376, pruned_loss=0.1375, ctc_loss=0.2607, over 3829591.84 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:08:16,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=19413.333333333332, ans=0.125
+2024-08-25 04:08:35,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=19520.0, ans=0.125
+2024-08-25 04:08:53,188 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:08:58,112 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.97 vs. limit=14.86
+2024-08-25 04:09:03,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=19626.666666666668, ans=0.21306666666666674
+2024-08-25 04:09:08,084 INFO [train.py:1114] (1/4) Epoch 2, batch 1200, loss[loss=0.3709, simple_loss=0.3821, pruned_loss=0.1294, ctc_loss=0.2524, over 19837.00 frames. ], tot_loss[loss=0.3774, simple_loss=0.3761, pruned_loss=0.1372, ctc_loss=0.2605, over 3824515.07 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 04:09:21,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=19733.333333333332, ans=0.10266666666666668
+2024-08-25 04:09:36,224 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.637e+02 3.065e+02 4.000e+02 6.600e+02, threshold=6.130e+02, percent-clipped=2.0
+2024-08-25 04:09:36,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=19786.666666666668, ans=0.10213333333333333
+2024-08-25 04:09:44,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=19786.666666666668, ans=0.125
+2024-08-25 04:09:49,326 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.83 vs. limit=14.940000000000001
+2024-08-25 04:09:53,240 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.88 vs. limit=14.940000000000001
+2024-08-25 04:10:10,937 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=19946.666666666668, ans=0.006533333333333334
+2024-08-25 04:10:11,978 INFO [train.py:1114] (1/4) Epoch 2, batch 1250, loss[loss=0.4142, simple_loss=0.4028, pruned_loss=0.1554, ctc_loss=0.2874, over 19534.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.375, pruned_loss=0.1356, ctc_loss=0.2573, over 3842435.77 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:10:12,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=19946.666666666668, ans=0.025
+2024-08-25 04:10:17,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19946.666666666668, ans=0.10053333333333334
+2024-08-25 04:10:19,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=19946.666666666668, ans=0.10053333333333334
+2024-08-25 04:10:25,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.37 vs. limit=6.0
+2024-08-25 04:10:29,427 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.15 vs. limit=6.0
+2024-08-25 04:10:38,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=20053.333333333332, ans=10.0
+2024-08-25 04:10:46,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=20106.666666666668, ans=0.00649855072463768
+2024-08-25 04:10:52,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20106.666666666668, ans=0.1
+2024-08-25 04:10:56,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=20106.666666666668, ans=0.125
+2024-08-25 04:11:08,861 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:11:15,949 INFO [train.py:1114] (1/4) Epoch 2, batch 1300, loss[loss=0.3919, simple_loss=0.3894, pruned_loss=0.1433, ctc_loss=0.269, over 18869.00 frames. ], tot_loss[loss=0.3732, simple_loss=0.3742, pruned_loss=0.1349, ctc_loss=0.2561, over 3846646.55 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:11:41,991 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.187e+02 2.429e+02 2.931e+02 4.736e+02, threshold=4.858e+02, percent-clipped=0.0
+2024-08-25 04:11:47,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=20320.0, ans=0.2
+2024-08-25 04:11:53,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=20373.333333333332, ans=0.125
+2024-08-25 04:11:53,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=20373.333333333332, ans=0.0
+2024-08-25 04:11:54,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=20373.333333333332, ans=0.125
+2024-08-25 04:12:05,245 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=20426.666666666668, ans=0.125
+2024-08-25 04:12:15,276 INFO [train.py:1114] (1/4) Epoch 2, batch 1350, loss[loss=0.3624, simple_loss=0.3657, pruned_loss=0.1302, ctc_loss=0.247, over 19775.00 frames. ], tot_loss[loss=0.3724, simple_loss=0.3738, pruned_loss=0.1345, ctc_loss=0.2551, over 3858167.57 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:12:15,965 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.64 vs. limit=15.0
+2024-08-25 04:12:21,381 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=20480.0, ans=0.125
+2024-08-25 04:12:31,487 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.76 vs. limit=15.0
+2024-08-25 04:12:34,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=20533.333333333332, ans=0.125
+2024-08-25 04:12:46,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=20586.666666666668, ans=0.125
+2024-08-25 04:12:48,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=20586.666666666668, ans=0.125
+2024-08-25 04:13:18,521 INFO [train.py:1114] (1/4) Epoch 2, batch 1400, loss[loss=0.2754, simple_loss=0.3096, pruned_loss=0.08623, ctc_loss=0.1717, over 19678.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.3723, pruned_loss=0.1329, ctc_loss=0.2525, over 3864535.33 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:13:19,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=20746.666666666668, ans=0.125
+2024-08-25 04:13:20,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=20746.666666666668, ans=0.125
+2024-08-25 04:13:31,562 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=20800.0, ans=0.0
+2024-08-25 04:13:59,308 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.04 vs. limit=22.5
+2024-08-25 04:14:03,159 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.385e+02 2.674e+02 3.744e+02 6.684e+02, threshold=5.347e+02, percent-clipped=6.0
+2024-08-25 04:14:10,023 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.29 vs. limit=15.0
+2024-08-25 04:14:18,423 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=19.26 vs. limit=22.5
+2024-08-25 04:14:21,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=20906.666666666668, ans=0.2
+2024-08-25 04:14:26,285 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=20960.0, ans=0.125
+2024-08-25 04:14:37,932 INFO [train.py:1114] (1/4) Epoch 2, batch 1450, loss[loss=0.3926, simple_loss=0.3934, pruned_loss=0.1418, ctc_loss=0.2705, over 19723.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.3725, pruned_loss=0.1329, ctc_loss=0.2519, over 3862719.65 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:14:40,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=21013.333333333332, ans=0.125
+2024-08-25 04:14:54,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=21066.666666666668, ans=0.125
+2024-08-25 04:14:57,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=21066.666666666668, ans=0.0
+2024-08-25 04:16:08,022 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.07 vs. limit=22.5
+2024-08-25 04:16:11,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=21173.333333333332, ans=0.125
+2024-08-25 04:16:15,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=21173.333333333332, ans=0.125
+2024-08-25 04:16:20,170 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=21226.666666666668, ans=0.2
+2024-08-25 04:16:22,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=21226.666666666668, ans=0.95
+2024-08-25 04:16:23,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=21226.666666666668, ans=0.125
+2024-08-25 04:16:32,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21280.0, ans=0.1
+2024-08-25 04:16:33,079 INFO [train.py:1114] (1/4) Epoch 2, batch 1500, loss[loss=0.3947, simple_loss=0.3886, pruned_loss=0.1446, ctc_loss=0.2793, over 19583.00 frames. ], tot_loss[loss=0.3695, simple_loss=0.3728, pruned_loss=0.1327, ctc_loss=0.2517, over 3861872.94 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:16:41,807 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=21280.0, ans=0.125
+2024-08-25 04:16:44,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=21333.333333333332, ans=0.125
+2024-08-25 04:17:08,004 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.509e+02 2.906e+02 4.274e+02 8.598e+02, threshold=5.813e+02, percent-clipped=13.0
+2024-08-25 04:17:10,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=21386.666666666668, ans=0.0
+2024-08-25 04:17:13,137 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=21386.666666666668, ans=0.025
+2024-08-25 04:17:20,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=21440.0, ans=0.0
+2024-08-25 04:17:22,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21440.0, ans=0.1
+2024-08-25 04:17:25,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=21440.0, ans=0.2
+2024-08-25 04:17:42,726 INFO [train.py:1114] (1/4) Epoch 2, batch 1550, loss[loss=0.3928, simple_loss=0.3949, pruned_loss=0.1417, ctc_loss=0.268, over 19584.00 frames. ], tot_loss[loss=0.3691, simple_loss=0.3726, pruned_loss=0.1326, ctc_loss=0.2513, over 3846711.90 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 04:17:46,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=21546.666666666668, ans=0.5
+2024-08-25 04:17:52,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=21546.666666666668, ans=0.125
+2024-08-25 04:17:58,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=21600.0, ans=0.015
+2024-08-25 04:17:59,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=21600.0, ans=0.2
+2024-08-25 04:18:21,993 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.92 vs. limit=15.0
+2024-08-25 04:18:28,196 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.51 vs. limit=22.5
+2024-08-25 04:18:44,946 INFO [train.py:1114] (1/4) Epoch 2, batch 1600, loss[loss=0.3803, simple_loss=0.3874, pruned_loss=0.1355, ctc_loss=0.2556, over 19837.00 frames. ], tot_loss[loss=0.3672, simple_loss=0.3712, pruned_loss=0.1317, ctc_loss=0.2495, over 3835638.61 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:19:01,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=21866.666666666668, ans=0.125
+2024-08-25 04:19:05,527 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=21866.666666666668, ans=0.006115942028985508
+2024-08-25 04:19:13,738 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.370e+02 2.902e+02 3.664e+02 6.938e+02, threshold=5.803e+02, percent-clipped=2.0
+2024-08-25 04:19:24,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=21973.333333333332, ans=0.0060927536231884065
+2024-08-25 04:19:35,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=21973.333333333332, ans=0.125
+2024-08-25 04:19:36,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=22026.666666666668, ans=0.125
+2024-08-25 04:19:49,396 INFO [train.py:1114] (1/4) Epoch 2, batch 1650, loss[loss=0.4021, simple_loss=0.3978, pruned_loss=0.1484, ctc_loss=0.2742, over 19641.00 frames. ], tot_loss[loss=0.3664, simple_loss=0.3704, pruned_loss=0.1314, ctc_loss=0.2491, over 3833478.71 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:19:49,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=22080.0, ans=0.125
+2024-08-25 04:19:50,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=22080.0, ans=0.125
+2024-08-25 04:20:06,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=22133.333333333332, ans=0.0
+2024-08-25 04:20:36,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=22293.333333333332, ans=0.0
+2024-08-25 04:20:43,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=22293.333333333332, ans=0.125
+2024-08-25 04:20:48,531 INFO [train.py:1114] (1/4) Epoch 2, batch 1700, loss[loss=0.3303, simple_loss=0.3364, pruned_loss=0.117, ctc_loss=0.2255, over 19679.00 frames. ], tot_loss[loss=0.3651, simple_loss=0.3699, pruned_loss=0.1306, ctc_loss=0.2478, over 3848170.98 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:21:16,624 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.264e+02 2.715e+02 3.253e+02 5.462e+02, threshold=5.430e+02, percent-clipped=0.0
+2024-08-25 04:21:25,607 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=10.78 vs. limit=10.0
+2024-08-25 04:21:27,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=22506.666666666668, ans=0.1
+2024-08-25 04:21:48,261 INFO [train.py:1114] (1/4) Epoch 2, batch 1750, loss[loss=0.349, simple_loss=0.3446, pruned_loss=0.1291, ctc_loss=0.2383, over 19643.00 frames. ], tot_loss[loss=0.3628, simple_loss=0.3683, pruned_loss=0.1295, ctc_loss=0.2456, over 3853458.26 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:21:55,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=22613.333333333332, ans=0.95
+2024-08-25 04:22:33,415 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.00 vs. limit=15.0
+2024-08-25 04:22:50,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=22826.666666666668, ans=0.025
+2024-08-25 04:22:56,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=22826.666666666668, ans=0.125
+2024-08-25 04:23:00,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=22826.666666666668, ans=0.0
+2024-08-25 04:23:02,441 INFO [train.py:1114] (1/4) Epoch 2, batch 1800, loss[loss=0.3592, simple_loss=0.3724, pruned_loss=0.1251, ctc_loss=0.2394, over 19617.00 frames. ], tot_loss[loss=0.3642, simple_loss=0.3692, pruned_loss=0.1303, ctc_loss=0.2466, over 3854744.74 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:23:10,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=22880.0, ans=0.2
+2024-08-25 04:23:28,012 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.473e+02 2.913e+02 3.585e+02 6.262e+02, threshold=5.825e+02, percent-clipped=5.0
+2024-08-25 04:23:38,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=23040.0, ans=0.025
+2024-08-25 04:23:59,522 INFO [train.py:1114] (1/4) Epoch 2, batch 1850, loss[loss=0.3396, simple_loss=0.3626, pruned_loss=0.1151, ctc_loss=0.2161, over 19579.00 frames. ], tot_loss[loss=0.3625, simple_loss=0.3686, pruned_loss=0.1293, ctc_loss=0.2447, over 3858462.73 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:24:12,820 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.59 vs. limit=15.0
+2024-08-25 04:24:17,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=23200.0, ans=0.0
+2024-08-25 04:24:25,535 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.51 vs. limit=15.0
+2024-08-25 04:24:29,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=23253.333333333332, ans=0.2
+2024-08-25 04:24:43,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=23360.0, ans=0.005791304347826087
+2024-08-25 04:24:56,424 INFO [train.py:1114] (1/4) Epoch 2, batch 1900, loss[loss=0.353, simple_loss=0.3725, pruned_loss=0.1209, ctc_loss=0.2294, over 19635.00 frames. ], tot_loss[loss=0.3616, simple_loss=0.3683, pruned_loss=0.1287, ctc_loss=0.2436, over 3861789.55 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 32.0
+2024-08-25 04:25:12,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=23466.666666666668, ans=0.125
+2024-08-25 04:25:21,309 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.247e+02 2.781e+02 3.399e+02 7.136e+02, threshold=5.561e+02, percent-clipped=3.0
+2024-08-25 04:25:21,923 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.36 vs. limit=6.0
+2024-08-25 04:25:48,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=23626.666666666668, ans=0.005733333333333333
+2024-08-25 04:25:53,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=23626.666666666668, ans=0.125
+2024-08-25 04:25:55,287 INFO [train.py:1114] (1/4) Epoch 2, batch 1950, loss[loss=0.3779, simple_loss=0.3784, pruned_loss=0.1358, ctc_loss=0.2646, over 19600.00 frames. ], tot_loss[loss=0.3623, simple_loss=0.3692, pruned_loss=0.1289, ctc_loss=0.2439, over 3870452.80 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:25:56,861 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.05 vs. limit=15.0
+2024-08-25 04:26:07,121 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.39 vs. limit=12.0
+2024-08-25 04:26:20,610 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.45 vs. limit=15.0
+2024-08-25 04:26:27,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23786.666666666668, ans=0.1
+2024-08-25 04:26:28,399 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.81 vs. limit=15.0
+2024-08-25 04:26:30,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23840.0, ans=0.1
+2024-08-25 04:26:34,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=23840.0, ans=0.95
+2024-08-25 04:26:40,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23840.0, ans=0.1
+2024-08-25 04:26:41,667 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=10.61 vs. limit=10.0
+2024-08-25 04:26:46,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=23893.333333333332, ans=0.025
+2024-08-25 04:26:52,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=23893.333333333332, ans=0.125
+2024-08-25 04:26:53,992 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.12 vs. limit=15.0
+2024-08-25 04:26:54,476 INFO [train.py:1114] (1/4) Epoch 2, batch 2000, loss[loss=0.3134, simple_loss=0.3244, pruned_loss=0.1085, ctc_loss=0.2134, over 19636.00 frames. ], tot_loss[loss=0.3637, simple_loss=0.37, pruned_loss=0.1297, ctc_loss=0.245, over 3854595.39 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:27:05,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=24000.0, ans=0.0
+2024-08-25 04:27:15,155 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=24000.0, ans=0.125
+2024-08-25 04:27:20,444 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.625e+02 3.128e+02 3.968e+02 6.078e+02, threshold=6.255e+02, percent-clipped=2.0
+2024-08-25 04:27:28,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=24106.666666666668, ans=0.1
+2024-08-25 04:27:30,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=24106.666666666668, ans=0.2
+2024-08-25 04:27:30,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=24106.666666666668, ans=0.0
+2024-08-25 04:27:32,377 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.40 vs. limit=15.0
+2024-08-25 04:27:36,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=24106.666666666668, ans=0.125
+2024-08-25 04:27:38,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=24160.0, ans=0.125
+2024-08-25 04:27:51,118 INFO [train.py:1114] (1/4) Epoch 2, batch 2050, loss[loss=0.2978, simple_loss=0.3172, pruned_loss=0.1007, ctc_loss=0.1925, over 19707.00 frames. ], tot_loss[loss=0.3617, simple_loss=0.3682, pruned_loss=0.1288, ctc_loss=0.2436, over 3851137.52 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:28:08,030 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=24266.666666666668, ans=0.125
+2024-08-25 04:28:19,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=24320.0, ans=0.125
+2024-08-25 04:28:35,284 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.03 vs. limit=15.0
+2024-08-25 04:28:36,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=24426.666666666668, ans=0.005559420289855072
+2024-08-25 04:28:47,790 INFO [train.py:1114] (1/4) Epoch 2, batch 2100, loss[loss=0.3552, simple_loss=0.368, pruned_loss=0.1244, ctc_loss=0.2339, over 19760.00 frames. ], tot_loss[loss=0.3598, simple_loss=0.3669, pruned_loss=0.128, ctc_loss=0.2419, over 3857910.48 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:28:51,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=24480.0, ans=0.125
+2024-08-25 04:29:06,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=24533.333333333332, ans=0.0
+2024-08-25 04:29:13,224 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=24586.666666666668, ans=0.125
+2024-08-25 04:29:14,128 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.311e+02 2.619e+02 3.137e+02 5.086e+02, threshold=5.238e+02, percent-clipped=0.0
+2024-08-25 04:29:18,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=24586.666666666668, ans=0.07
+2024-08-25 04:29:19,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=24586.666666666668, ans=0.2
+2024-08-25 04:29:22,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=24640.0, ans=0.125
+2024-08-25 04:29:35,397 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=24693.333333333332, ans=0.2
+2024-08-25 04:29:37,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=24693.333333333332, ans=0.0
+2024-08-25 04:29:44,336 INFO [train.py:1114] (1/4) Epoch 2, batch 2150, loss[loss=0.3015, simple_loss=0.3338, pruned_loss=0.09751, ctc_loss=0.1856, over 19577.00 frames. ], tot_loss[loss=0.3578, simple_loss=0.3655, pruned_loss=0.127, ctc_loss=0.24, over 3868715.03 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 04:29:46,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=24746.666666666668, ans=0.1
+2024-08-25 04:29:53,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=24746.666666666668, ans=0.025
+2024-08-25 04:30:03,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=24800.0, ans=0.125
+2024-08-25 04:30:11,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=24853.333333333332, ans=0.025
+2024-08-25 04:30:27,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=24960.0, ans=0.125
+2024-08-25 04:30:33,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=24960.0, ans=0.125
+2024-08-25 04:30:33,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=24960.0, ans=0.125
+2024-08-25 04:30:35,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=24960.0, ans=0.125
+2024-08-25 04:30:40,042 INFO [train.py:1114] (1/4) Epoch 2, batch 2200, loss[loss=0.3799, simple_loss=0.392, pruned_loss=0.1339, ctc_loss=0.2499, over 19601.00 frames. ], tot_loss[loss=0.3573, simple_loss=0.3656, pruned_loss=0.1267, ctc_loss=0.239, over 3867598.72 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:31:06,347 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.398e+02 2.814e+02 3.505e+02 8.042e+02, threshold=5.628e+02, percent-clipped=3.0
+2024-08-25 04:31:23,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=25173.333333333332, ans=0.125
+2024-08-25 04:31:37,460 INFO [train.py:1114] (1/4) Epoch 2, batch 2250, loss[loss=0.3346, simple_loss=0.3636, pruned_loss=0.1106, ctc_loss=0.2109, over 19604.00 frames. ], tot_loss[loss=0.3576, simple_loss=0.366, pruned_loss=0.1268, ctc_loss=0.2391, over 3867623.39 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:31:46,396 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:31:47,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=25333.333333333332, ans=0.05
+2024-08-25 04:31:55,380 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25333.333333333332, ans=0.1
+2024-08-25 04:32:09,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 04:32:14,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=25440.0, ans=0.1
+2024-08-25 04:32:18,662 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.53 vs. limit=6.0
+2024-08-25 04:32:31,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=25493.333333333332, ans=0.0
+2024-08-25 04:32:33,458 INFO [train.py:1114] (1/4) Epoch 2, batch 2300, loss[loss=0.3058, simple_loss=0.3347, pruned_loss=0.1003, ctc_loss=0.1903, over 19519.00 frames. ], tot_loss[loss=0.3554, simple_loss=0.3641, pruned_loss=0.126, ctc_loss=0.2371, over 3861882.93 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 16.0
+2024-08-25 04:32:40,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=25546.666666666668, ans=0.005315942028985506
+2024-08-25 04:32:47,283 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.10 vs. limit=15.0
+2024-08-25 04:33:03,051 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.317e+02 2.709e+02 3.466e+02 6.027e+02, threshold=5.417e+02, percent-clipped=4.0
+2024-08-25 04:33:04,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=25653.333333333332, ans=0.125
+2024-08-25 04:33:04,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=25653.333333333332, ans=0.125
+2024-08-25 04:33:23,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=25760.0, ans=0.1
+2024-08-25 04:33:32,444 INFO [train.py:1114] (1/4) Epoch 2, batch 2350, loss[loss=0.3511, simple_loss=0.3732, pruned_loss=0.1194, ctc_loss=0.2254, over 19672.00 frames. ], tot_loss[loss=0.3564, simple_loss=0.3646, pruned_loss=0.1265, ctc_loss=0.238, over 3864853.50 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 16.0
+2024-08-25 04:33:42,525 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.22 vs. limit=15.0
+2024-08-25 04:33:45,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=25866.666666666668, ans=0.0
+2024-08-25 04:33:58,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=25920.0, ans=0.0
+2024-08-25 04:34:04,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=25920.0, ans=0.125
+2024-08-25 04:34:17,613 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=15.87 vs. limit=22.5
+2024-08-25 04:34:20,916 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.02 vs. limit=22.5
+2024-08-25 04:34:22,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=26026.666666666668, ans=0.125
+2024-08-25 04:34:23,239 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.97 vs. limit=15.0
+2024-08-25 04:34:30,678 INFO [train.py:1114] (1/4) Epoch 2, batch 2400, loss[loss=0.4125, simple_loss=0.4019, pruned_loss=0.1535, ctc_loss=0.2904, over 19187.00 frames. ], tot_loss[loss=0.3585, simple_loss=0.3667, pruned_loss=0.1273, ctc_loss=0.2394, over 3859043.96 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 04:34:57,146 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.184e+02 2.505e+02 3.102e+02 8.045e+02, threshold=5.010e+02, percent-clipped=5.0
+2024-08-25 04:35:06,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=26240.0, ans=0.2
+2024-08-25 04:35:24,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=26293.333333333332, ans=0.125
+2024-08-25 04:35:27,217 INFO [train.py:1114] (1/4) Epoch 2, batch 2450, loss[loss=0.4679, simple_loss=0.4143, pruned_loss=0.1894, ctc_loss=0.3566, over 13465.00 frames. ], tot_loss[loss=0.3687, simple_loss=0.3724, pruned_loss=0.1326, ctc_loss=0.2489, over 3732884.53 frames. ], batch size: 141, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 04:35:34,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=26346.666666666668, ans=0.125
+2024-08-25 04:35:49,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=26453.333333333332, ans=0.125
+2024-08-25 04:35:49,976 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.47 vs. limit=22.5
+2024-08-25 04:36:02,190 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=6.43 vs. limit=12.0
+2024-08-25 04:36:05,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=26506.666666666668, ans=0.125
+2024-08-25 04:36:50,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=26554.666666666668, ans=0.07
+2024-08-25 04:36:55,747 INFO [train.py:1114] (1/4) Epoch 3, batch 0, loss[loss=0.3225, simple_loss=0.3344, pruned_loss=0.1136, ctc_loss=0.2084, over 19823.00 frames. ], tot_loss[loss=0.3225, simple_loss=0.3344, pruned_loss=0.1136, ctc_loss=0.2084, over 19823.00 frames. ], batch size: 49, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 04:36:55,748 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 04:37:08,022 INFO [train.py:1146] (1/4) Epoch 3, validation: loss=0.2847, simple_loss=0.3461, pruned_loss=0.08168, ctc_loss=0.1499, over 944034.00 frames.
+2024-08-25 04:37:08,022 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 04:37:10,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=26554.666666666668, ans=0.005096811594202898
+2024-08-25 04:37:17,240 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.80 vs. limit=15.0
+2024-08-25 04:37:27,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=26608.0, ans=0.0
+2024-08-25 04:37:45,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=26714.666666666668, ans=0.025
+2024-08-25 04:37:50,805 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.252e+02 2.580e+02 3.143e+02 6.401e+02, threshold=5.159e+02, percent-clipped=2.0
+2024-08-25 04:38:10,074 INFO [train.py:1114] (1/4) Epoch 3, batch 50, loss[loss=0.2604, simple_loss=0.2978, pruned_loss=0.0813, ctc_loss=0.1508, over 19667.00 frames. ], tot_loss[loss=0.3616, simple_loss=0.3681, pruned_loss=0.129, ctc_loss=0.2429, over 844347.37 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:38:17,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.77 vs. limit=15.0
+2024-08-25 04:38:33,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=26821.333333333332, ans=0.0
+2024-08-25 04:38:47,239 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.68 vs. limit=15.0
+2024-08-25 04:38:51,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=26928.0, ans=0.2
+2024-08-25 04:39:12,336 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.77 vs. limit=15.0
+2024-08-25 04:39:19,442 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.42 vs. limit=15.0
+2024-08-25 04:39:28,426 INFO [train.py:1114] (1/4) Epoch 3, batch 100, loss[loss=0.3049, simple_loss=0.3376, pruned_loss=0.09751, ctc_loss=0.1931, over 19702.00 frames. ], tot_loss[loss=0.3609, simple_loss=0.369, pruned_loss=0.128, ctc_loss=0.2417, over 1499014.27 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:39:32,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27088.0, ans=0.1
+2024-08-25 04:39:38,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=27088.0, ans=10.0
+2024-08-25 04:39:40,717 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.68 vs. limit=15.0
+2024-08-25 04:39:50,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=27194.666666666668, ans=0.025
+2024-08-25 04:40:11,087 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.221e+02 2.583e+02 3.158e+02 4.904e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-08-25 04:40:27,485 INFO [train.py:1114] (1/4) Epoch 3, batch 150, loss[loss=0.3358, simple_loss=0.3444, pruned_loss=0.1201, ctc_loss=0.2179, over 19725.00 frames. ], tot_loss[loss=0.3543, simple_loss=0.3645, pruned_loss=0.1249, ctc_loss=0.2355, over 2028246.68 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 04:40:30,482 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.93 vs. limit=6.0
+2024-08-25 04:40:32,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 04:40:34,887 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.23 vs. limit=22.5
+2024-08-25 04:41:07,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=27514.666666666668, ans=0.0
+2024-08-25 04:41:29,363 INFO [train.py:1114] (1/4) Epoch 3, batch 200, loss[loss=0.4263, simple_loss=0.4112, pruned_loss=0.1611, ctc_loss=0.2977, over 18329.00 frames. ], tot_loss[loss=0.3502, simple_loss=0.3616, pruned_loss=0.123, ctc_loss=0.2316, over 2436541.20 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:41:36,634 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27621.333333333332, ans=0.1
+2024-08-25 04:41:52,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=27674.666666666668, ans=0.125
+2024-08-25 04:42:14,175 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.192e+02 2.550e+02 3.125e+02 5.269e+02, threshold=5.099e+02, percent-clipped=1.0
+2024-08-25 04:42:34,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=27888.0, ans=10.0
+2024-08-25 04:42:35,061 INFO [train.py:1114] (1/4) Epoch 3, batch 250, loss[loss=0.3762, simple_loss=0.3882, pruned_loss=0.1335, ctc_loss=0.2431, over 19373.00 frames. ], tot_loss[loss=0.3492, simple_loss=0.3611, pruned_loss=0.1226, ctc_loss=0.2307, over 2755941.42 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:42:38,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=27888.0, ans=0.015
+2024-08-25 04:42:53,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=27941.333333333332, ans=0.0
+2024-08-25 04:42:55,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 04:42:57,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=27994.666666666668, ans=0.125
+2024-08-25 04:42:57,957 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.01 vs. limit=15.0
+2024-08-25 04:43:02,546 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.36 vs. limit=15.0
+2024-08-25 04:43:03,463 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.40 vs. limit=10.0
+2024-08-25 04:43:12,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=28048.0, ans=0.125
+2024-08-25 04:43:16,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=28048.0, ans=0.025
+2024-08-25 04:43:21,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=28101.333333333332, ans=0.125
+2024-08-25 04:43:27,226 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=4.68 vs. limit=15.0
+2024-08-25 04:43:28,113 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.99 vs. limit=15.0
+2024-08-25 04:43:29,378 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.78 vs. limit=15.0
+2024-08-25 04:43:33,534 INFO [train.py:1114] (1/4) Epoch 3, batch 300, loss[loss=0.382, simple_loss=0.376, pruned_loss=0.1417, ctc_loss=0.2612, over 19525.00 frames. ], tot_loss[loss=0.3492, simple_loss=0.3606, pruned_loss=0.1226, ctc_loss=0.2309, over 3000471.32 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:43:36,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=28154.666666666668, ans=0.1
+2024-08-25 04:43:37,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=28154.666666666668, ans=0.004748985507246377
+2024-08-25 04:43:38,013 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.33 vs. limit=15.0
+2024-08-25 04:43:48,757 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.29 vs. limit=12.0
+2024-08-25 04:43:55,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28208.0, ans=0.125
+2024-08-25 04:44:09,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 04:44:10,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 04:44:18,924 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.242e+02 2.624e+02 3.299e+02 5.169e+02, threshold=5.248e+02, percent-clipped=1.0
+2024-08-25 04:44:36,146 INFO [train.py:1114] (1/4) Epoch 3, batch 350, loss[loss=0.3184, simple_loss=0.3349, pruned_loss=0.1107, ctc_loss=0.2014, over 19763.00 frames. ], tot_loss[loss=0.349, simple_loss=0.3605, pruned_loss=0.1226, ctc_loss=0.2306, over 3190566.06 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:44:38,286 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=28421.333333333332, ans=0.0
+2024-08-25 04:44:53,792 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.61 vs. limit=15.0
+2024-08-25 04:44:58,871 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.24 vs. limit=15.0
+2024-08-25 04:45:00,841 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=28528.0, ans=0.125
+2024-08-25 04:45:14,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=28528.0, ans=0.025
+2024-08-25 04:45:36,074 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=28581.333333333332, ans=0.125
+2024-08-25 04:45:48,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=28581.333333333332, ans=0.5
+2024-08-25 04:46:55,912 INFO [train.py:1114] (1/4) Epoch 3, batch 400, loss[loss=0.331, simple_loss=0.3558, pruned_loss=0.1103, ctc_loss=0.2143, over 19530.00 frames. ], tot_loss[loss=0.3477, simple_loss=0.3599, pruned_loss=0.1219, ctc_loss=0.2294, over 3342662.72 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 04:47:20,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=28741.333333333332, ans=0.2
+2024-08-25 04:47:24,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=28741.333333333332, ans=0.0
+2024-08-25 04:47:25,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=28741.333333333332, ans=0.125
+2024-08-25 04:47:28,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=28741.333333333332, ans=0.2
+2024-08-25 04:47:40,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=28794.666666666668, ans=0.025
+2024-08-25 04:48:22,794 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.232e+02 2.568e+02 3.025e+02 1.134e+03, threshold=5.136e+02, percent-clipped=4.0
+2024-08-25 04:48:48,325 INFO [train.py:1114] (1/4) Epoch 3, batch 450, loss[loss=0.3204, simple_loss=0.3515, pruned_loss=0.1054, ctc_loss=0.1964, over 19611.00 frames. ], tot_loss[loss=0.347, simple_loss=0.3596, pruned_loss=0.1215, ctc_loss=0.2285, over 3450195.22 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:48:51,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=28954.666666666668, ans=0.004575072463768116
+2024-08-25 04:48:53,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=28954.666666666668, ans=0.025
+2024-08-25 04:48:58,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=28954.666666666668, ans=0.0
+2024-08-25 04:49:19,442 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.01 vs. limit=15.0
+2024-08-25 04:50:03,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=29168.0, ans=0.125
+2024-08-25 04:50:05,186 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.88 vs. limit=15.0
+2024-08-25 04:50:09,368 INFO [train.py:1114] (1/4) Epoch 3, batch 500, loss[loss=0.3476, simple_loss=0.3702, pruned_loss=0.1163, ctc_loss=0.2308, over 19675.00 frames. ], tot_loss[loss=0.3468, simple_loss=0.3593, pruned_loss=0.1215, ctc_loss=0.2287, over 3545219.68 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:50:27,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29274.666666666668, ans=0.1
+2024-08-25 04:50:53,117 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=29328.0, ans=0.125
+2024-08-25 04:51:02,430 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.16 vs. limit=15.0
+2024-08-25 04:51:03,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=29381.333333333332, ans=0.125
+2024-08-25 04:51:09,140 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.370e+02 2.734e+02 3.745e+02 5.336e+02, threshold=5.469e+02, percent-clipped=1.0
+2024-08-25 04:51:26,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=29434.666666666668, ans=0.125
+2024-08-25 04:51:28,439 INFO [train.py:1114] (1/4) Epoch 3, batch 550, loss[loss=0.3897, simple_loss=0.3836, pruned_loss=0.1447, ctc_loss=0.2662, over 19305.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.3594, pruned_loss=0.1212, ctc_loss=0.2278, over 3607695.08 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:51:46,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 04:51:54,285 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:52:52,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=29648.0, ans=0.07
+2024-08-25 04:52:53,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=29701.333333333332, ans=0.1
+2024-08-25 04:52:53,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=29701.333333333332, ans=0.2
+2024-08-25 04:53:06,025 INFO [train.py:1114] (1/4) Epoch 3, batch 600, loss[loss=0.3442, simple_loss=0.3629, pruned_loss=0.1181, ctc_loss=0.2229, over 19375.00 frames. ], tot_loss[loss=0.3452, simple_loss=0.3587, pruned_loss=0.1205, ctc_loss=0.227, over 3665416.30 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:53:06,495 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.09 vs. limit=15.0
+2024-08-25 04:53:11,451 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.11 vs. limit=10.0
+2024-08-25 04:53:24,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29808.0, ans=0.1
+2024-08-25 04:53:41,081 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.23 vs. limit=15.0
+2024-08-25 04:53:41,966 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=29914.666666666668, ans=0.125
+2024-08-25 04:53:48,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=29914.666666666668, ans=0.0
+2024-08-25 04:53:49,296 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.141e+02 2.536e+02 3.031e+02 6.622e+02, threshold=5.071e+02, percent-clipped=2.0
+2024-08-25 04:53:54,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=29968.0, ans=0.05
+2024-08-25 04:53:55,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=29968.0, ans=0.2
+2024-08-25 04:54:04,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=29968.0, ans=0.125
+2024-08-25 04:54:06,084 INFO [train.py:1114] (1/4) Epoch 3, batch 650, loss[loss=0.3396, simple_loss=0.3571, pruned_loss=0.1173, ctc_loss=0.2186, over 19774.00 frames. ], tot_loss[loss=0.3435, simple_loss=0.3574, pruned_loss=0.1197, ctc_loss=0.2256, over 3716429.58 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 32.0
+2024-08-25 04:54:06,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 04:54:09,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 04:54:10,190 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.70 vs. limit=10.0
+2024-08-25 04:54:13,353 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=30021.333333333332, ans=0.0
+2024-08-25 04:54:15,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 04:54:22,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:37,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:55,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=30181.333333333332, ans=0.2
+2024-08-25 04:54:59,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=30181.333333333332, ans=0.125
+2024-08-25 04:55:18,360 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.48 vs. limit=22.5
+2024-08-25 04:55:19,010 INFO [train.py:1114] (1/4) Epoch 3, batch 700, loss[loss=0.3055, simple_loss=0.3349, pruned_loss=0.1016, ctc_loss=0.1821, over 19739.00 frames. ], tot_loss[loss=0.3446, simple_loss=0.3583, pruned_loss=0.1202, ctc_loss=0.2264, over 3749289.09 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:55:45,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30394.666666666668, ans=0.1
+2024-08-25 04:56:32,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=30448.0, ans=0.2
+2024-08-25 04:56:35,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=30448.0, ans=0.1
+2024-08-25 04:56:38,911 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.292e+02 2.520e+02 3.192e+02 5.203e+02, threshold=5.040e+02, percent-clipped=1.0
+2024-08-25 04:56:39,628 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.87 vs. limit=15.0
+2024-08-25 04:56:41,087 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.44 vs. limit=15.0
+2024-08-25 04:56:55,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=30501.333333333332, ans=0.125
+2024-08-25 04:56:57,177 INFO [train.py:1114] (1/4) Epoch 3, batch 750, loss[loss=0.3274, simple_loss=0.3565, pruned_loss=0.1077, ctc_loss=0.2073, over 19487.00 frames. ], tot_loss[loss=0.3429, simple_loss=0.3569, pruned_loss=0.1195, ctc_loss=0.2249, over 3775271.28 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:57:08,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=30554.666666666668, ans=0.025
+2024-08-25 04:57:11,556 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=30608.0, ans=0.004215652173913044
+2024-08-25 04:57:16,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=30608.0, ans=0.0
+2024-08-25 04:57:32,648 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.37 vs. limit=15.0
+2024-08-25 04:57:57,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30768.0, ans=0.1
+2024-08-25 04:57:59,141 INFO [train.py:1114] (1/4) Epoch 3, batch 800, loss[loss=0.2969, simple_loss=0.3249, pruned_loss=0.09724, ctc_loss=0.1862, over 19807.00 frames. ], tot_loss[loss=0.3421, simple_loss=0.3564, pruned_loss=0.1191, ctc_loss=0.2242, over 3796174.33 frames. ], batch size: 49, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:58:16,485 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.92 vs. limit=15.0
+2024-08-25 04:58:23,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30928.0, ans=0.125
+2024-08-25 04:58:25,415 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.62 vs. limit=15.0
+2024-08-25 04:58:35,970 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.74 vs. limit=15.0
+2024-08-25 04:58:38,904 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.90 vs. limit=15.0
+2024-08-25 04:58:42,759 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.211e+02 2.622e+02 3.205e+02 5.257e+02, threshold=5.244e+02, percent-clipped=1.0
+2024-08-25 04:59:01,968 INFO [train.py:1114] (1/4) Epoch 3, batch 850, loss[loss=0.3609, simple_loss=0.3706, pruned_loss=0.1293, ctc_loss=0.2315, over 19655.00 frames. ], tot_loss[loss=0.3402, simple_loss=0.355, pruned_loss=0.1182, ctc_loss=0.2226, over 3815868.27 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:59:04,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=31088.0, ans=0.2
+2024-08-25 04:59:10,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31088.0, ans=0.1
+2024-08-25 04:59:30,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=31194.666666666668, ans=0.2
+2024-08-25 04:59:43,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=31248.0, ans=0.0
+2024-08-25 04:59:59,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=31301.333333333332, ans=0.125
+2024-08-25 05:00:04,394 INFO [train.py:1114] (1/4) Epoch 3, batch 900, loss[loss=0.3022, simple_loss=0.3197, pruned_loss=0.1034, ctc_loss=0.1948, over 19422.00 frames. ], tot_loss[loss=0.3424, simple_loss=0.3562, pruned_loss=0.1194, ctc_loss=0.2245, over 3819560.69 frames. ], batch size: 48, lr: 3.72e-02, grad_scale: 8.0
+2024-08-25 05:00:19,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=31408.0, ans=0.0040417391304347836
+2024-08-25 05:00:29,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=31461.333333333332, ans=0.0
+2024-08-25 05:00:49,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=31514.666666666668, ans=0.125
+2024-08-25 05:00:54,421 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.296e+02 2.736e+02 3.525e+02 1.528e+03, threshold=5.472e+02, percent-clipped=4.0
+2024-08-25 05:01:08,272 INFO [train.py:1114] (1/4) Epoch 3, batch 950, loss[loss=0.3452, simple_loss=0.3483, pruned_loss=0.1237, ctc_loss=0.237, over 19491.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3559, pruned_loss=0.119, ctc_loss=0.224, over 3821438.83 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:01:22,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=31674.666666666668, ans=0.003983768115942029
+2024-08-25 05:01:27,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=31674.666666666668, ans=0.025
+2024-08-25 05:01:33,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=31728.0, ans=0.125
+2024-08-25 05:01:40,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=31728.0, ans=0.2
+2024-08-25 05:02:05,436 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.76 vs. limit=6.0
+2024-08-25 05:02:08,119 INFO [train.py:1114] (1/4) Epoch 3, batch 1000, loss[loss=0.3014, simple_loss=0.3337, pruned_loss=0.09818, ctc_loss=0.1819, over 19850.00 frames. ], tot_loss[loss=0.3432, simple_loss=0.3573, pruned_loss=0.1196, ctc_loss=0.2247, over 3817613.11 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:02:19,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=31941.333333333332, ans=0.025
+2024-08-25 05:02:55,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=32048.0, ans=0.125
+2024-08-25 05:02:56,467 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.163e+02 2.492e+02 3.027e+02 5.724e+02, threshold=4.983e+02, percent-clipped=1.0
+2024-08-25 05:03:13,723 INFO [train.py:1114] (1/4) Epoch 3, batch 1050, loss[loss=0.3461, simple_loss=0.357, pruned_loss=0.1217, ctc_loss=0.2291, over 19827.00 frames. ], tot_loss[loss=0.3428, simple_loss=0.3567, pruned_loss=0.1196, ctc_loss=0.2247, over 3824558.96 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:03:20,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 05:03:21,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 05:03:24,761 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:04:08,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=32261.333333333332, ans=0.1
+2024-08-25 05:04:17,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=32261.333333333332, ans=0.125
+2024-08-25 05:05:04,400 INFO [train.py:1114] (1/4) Epoch 3, batch 1100, loss[loss=0.3235, simple_loss=0.3488, pruned_loss=0.1085, ctc_loss=0.2033, over 19577.00 frames. ], tot_loss[loss=0.3419, simple_loss=0.3564, pruned_loss=0.119, ctc_loss=0.2238, over 3831467.53 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:05:50,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=32581.333333333332, ans=0.2
+2024-08-25 05:05:51,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=32581.333333333332, ans=0.0037866666666666665
+2024-08-25 05:06:00,567 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.355e+02 2.517e+02 3.019e+02 4.945e+02, threshold=5.033e+02, percent-clipped=0.0
+2024-08-25 05:06:03,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=32634.666666666668, ans=0.2
+2024-08-25 05:06:20,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=32634.666666666668, ans=0.2
+2024-08-25 05:06:23,027 INFO [train.py:1114] (1/4) Epoch 3, batch 1150, loss[loss=0.3365, simple_loss=0.3558, pruned_loss=0.1146, ctc_loss=0.22, over 19585.00 frames. ], tot_loss[loss=0.3418, simple_loss=0.3564, pruned_loss=0.1189, ctc_loss=0.2234, over 3831042.27 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 8.0
+2024-08-25 05:06:24,542 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=32688.0, ans=15.0
+2024-08-25 05:06:32,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=32688.0, ans=0.125
+2024-08-25 05:06:41,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=32741.333333333332, ans=0.125
+2024-08-25 05:06:46,324 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.95 vs. limit=15.0
+2024-08-25 05:06:50,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=32794.666666666664, ans=0.003740289855072464
+2024-08-25 05:06:53,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=32794.666666666664, ans=0.0
+2024-08-25 05:07:10,035 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.15 vs. limit=10.0
+2024-08-25 05:07:10,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=32848.0, ans=0.125
+2024-08-25 05:07:15,103 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.85 vs. limit=15.0
+2024-08-25 05:07:25,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=32901.333333333336, ans=0.125
+2024-08-25 05:07:28,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=32901.333333333336, ans=22.5
+2024-08-25 05:07:32,070 INFO [train.py:1114] (1/4) Epoch 3, batch 1200, loss[loss=0.3686, simple_loss=0.3816, pruned_loss=0.1299, ctc_loss=0.2396, over 19842.00 frames. ], tot_loss[loss=0.3431, simple_loss=0.3577, pruned_loss=0.1194, ctc_loss=0.224, over 3825539.56 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:07:34,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=32954.666666666664, ans=0.1
+2024-08-25 05:08:12,083 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.82 vs. limit=15.0
+2024-08-25 05:08:19,680 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.128e+02 2.359e+02 2.757e+02 6.653e+02, threshold=4.718e+02, percent-clipped=2.0
+2024-08-25 05:08:38,022 INFO [train.py:1114] (1/4) Epoch 3, batch 1250, loss[loss=0.3388, simple_loss=0.3672, pruned_loss=0.1129, ctc_loss=0.2114, over 19541.00 frames. ], tot_loss[loss=0.3416, simple_loss=0.3574, pruned_loss=0.1185, ctc_loss=0.2222, over 3843396.04 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:08:53,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.43 vs. limit=15.0
+2024-08-25 05:08:56,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=33274.666666666664, ans=0.07
+2024-08-25 05:08:59,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=33274.666666666664, ans=0.125
+2024-08-25 05:09:00,333 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.07 vs. limit=10.0
+2024-08-25 05:09:01,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=33328.0, ans=0.0036243478260869558
+2024-08-25 05:09:17,696 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.68 vs. limit=10.0
+2024-08-25 05:09:19,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=33381.333333333336, ans=0.003612753623188405
+2024-08-25 05:09:26,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=33381.333333333336, ans=0.0
+2024-08-25 05:09:41,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=33488.0, ans=0.2
+2024-08-25 05:09:42,096 INFO [train.py:1114] (1/4) Epoch 3, batch 1300, loss[loss=0.3676, simple_loss=0.3739, pruned_loss=0.1326, ctc_loss=0.2401, over 18793.00 frames. ], tot_loss[loss=0.3394, simple_loss=0.3557, pruned_loss=0.1175, ctc_loss=0.2201, over 3847174.36 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:10:04,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=33594.666666666664, ans=0.0
+2024-08-25 05:10:33,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=33594.666666666664, ans=0.125
+2024-08-25 05:10:48,151 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.161e+02 2.525e+02 2.896e+02 5.464e+02, threshold=5.050e+02, percent-clipped=3.0
+2024-08-25 05:10:50,189 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.32 vs. limit=15.0
+2024-08-25 05:10:52,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33701.333333333336, ans=0.1
+2024-08-25 05:11:02,307 INFO [train.py:1114] (1/4) Epoch 3, batch 1350, loss[loss=0.3587, simple_loss=0.3752, pruned_loss=0.1244, ctc_loss=0.2337, over 19771.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.3548, pruned_loss=0.1167, ctc_loss=0.2188, over 3856257.20 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:11:07,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=33754.666666666664, ans=0.003531594202898552
+2024-08-25 05:11:42,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.08 vs. limit=15.0
+2024-08-25 05:11:49,551 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=10.40 vs. limit=15.0
+2024-08-25 05:12:10,571 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.69 vs. limit=22.5
+2024-08-25 05:12:26,305 INFO [train.py:1114] (1/4) Epoch 3, batch 1400, loss[loss=0.2883, simple_loss=0.3166, pruned_loss=0.09457, ctc_loss=0.1771, over 19699.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3542, pruned_loss=0.1166, ctc_loss=0.2186, over 3864232.26 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 16.0
+2024-08-25 05:12:37,865 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.28 vs. limit=15.0
+2024-08-25 05:12:49,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=34074.666666666664, ans=0.003462028985507247
+2024-08-25 05:13:29,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34181.333333333336, ans=0.1
+2024-08-25 05:13:31,969 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.210e+02 2.531e+02 3.096e+02 9.067e+02, threshold=5.062e+02, percent-clipped=2.0
+2024-08-25 05:14:12,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=34234.666666666664, ans=0.025
+2024-08-25 05:14:24,482 INFO [train.py:1114] (1/4) Epoch 3, batch 1450, loss[loss=0.3425, simple_loss=0.3625, pruned_loss=0.1184, ctc_loss=0.2144, over 19654.00 frames. ], tot_loss[loss=0.3383, simple_loss=0.3549, pruned_loss=0.117, ctc_loss=0.219, over 3862401.94 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:14:36,608 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=34288.0, ans=0.125
+2024-08-25 05:14:39,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=34288.0, ans=0.025
+2024-08-25 05:14:48,719 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.89 vs. limit=15.0
+2024-08-25 05:14:52,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=34341.333333333336, ans=0.2
+2024-08-25 05:14:58,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=34394.666666666664, ans=0.125
+2024-08-25 05:15:04,443 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=34394.666666666664, ans=0.125
+2024-08-25 05:15:08,045 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=34448.0, ans=0.125
+2024-08-25 05:15:32,915 INFO [train.py:1114] (1/4) Epoch 3, batch 1500, loss[loss=0.3389, simple_loss=0.3682, pruned_loss=0.1135, ctc_loss=0.2063, over 19575.00 frames. ], tot_loss[loss=0.3388, simple_loss=0.3556, pruned_loss=0.1172, ctc_loss=0.2193, over 3862253.01 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:15:35,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=34554.666666666664, ans=0.0
+2024-08-25 05:15:36,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=34554.666666666664, ans=0.0
+2024-08-25 05:16:03,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=34661.333333333336, ans=0.125
+2024-08-25 05:16:37,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=34661.333333333336, ans=0.0
+2024-08-25 05:16:38,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=34661.333333333336, ans=0.0033344927536231887
+2024-08-25 05:16:43,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=34714.666666666664, ans=0.125
+2024-08-25 05:16:51,238 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.151e+02 2.498e+02 3.151e+02 6.810e+02, threshold=4.996e+02, percent-clipped=2.0
+2024-08-25 05:20:00,626 INFO [train.py:1114] (1/4) Epoch 3, batch 1550, loss[loss=0.3801, simple_loss=0.3857, pruned_loss=0.1386, ctc_loss=0.243, over 19587.00 frames. ], tot_loss[loss=0.3391, simple_loss=0.3556, pruned_loss=0.1174, ctc_loss=0.2197, over 3847577.24 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 16.0
+2024-08-25 05:20:00,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=34821.333333333336, ans=0.125
+2024-08-25 05:20:22,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=34874.666666666664, ans=0.125
+2024-08-25 05:21:04,631 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.29 vs. limit=15.0
+2024-08-25 05:21:09,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=34981.333333333336, ans=0.125
+2024-08-25 05:22:04,976 INFO [train.py:1114] (1/4) Epoch 3, batch 1600, loss[loss=0.3829, simple_loss=0.3918, pruned_loss=0.1348, ctc_loss=0.2609, over 19839.00 frames. ], tot_loss[loss=0.3393, simple_loss=0.3555, pruned_loss=0.1175, ctc_loss=0.2202, over 3835820.56 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 05:22:15,613 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.36 vs. limit=10.0
+2024-08-25 05:22:52,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=35194.666666666664, ans=0.125
+2024-08-25 05:23:08,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=35248.0, ans=0.125
+2024-08-25 05:23:22,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=35248.0, ans=0.125
+2024-08-25 05:23:43,082 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.193e+02 2.529e+02 3.233e+02 6.645e+02, threshold=5.059e+02, percent-clipped=2.0
+2024-08-25 05:24:22,997 INFO [train.py:1114] (1/4) Epoch 3, batch 1650, loss[loss=0.308, simple_loss=0.3429, pruned_loss=0.1005, ctc_loss=0.1802, over 19638.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.3546, pruned_loss=0.1168, ctc_loss=0.2191, over 3831935.54 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 05:24:31,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=35354.666666666664, ans=0.0
+2024-08-25 05:24:56,380 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=35408.0, ans=0.0
+2024-08-25 05:25:13,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=35408.0, ans=0.003172173913043479
+2024-08-25 05:25:57,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=35514.666666666664, ans=0.125
+2024-08-25 05:26:16,035 INFO [train.py:1114] (1/4) Epoch 3, batch 1700, loss[loss=0.3137, simple_loss=0.3283, pruned_loss=0.1088, ctc_loss=0.2042, over 19649.00 frames. ], tot_loss[loss=0.3367, simple_loss=0.3539, pruned_loss=0.1162, ctc_loss=0.2179, over 3846531.43 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:26:16,679 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.32 vs. limit=15.0
+2024-08-25 05:27:02,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=35781.333333333336, ans=0.1
+2024-08-25 05:27:10,189 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.342e+02 2.819e+02 3.429e+02 5.215e+02, threshold=5.637e+02, percent-clipped=1.0
+2024-08-25 05:27:18,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=35834.666666666664, ans=0.0030794202898550734
+2024-08-25 05:27:23,550 INFO [train.py:1114] (1/4) Epoch 3, batch 1750, loss[loss=0.3062, simple_loss=0.3232, pruned_loss=0.1052, ctc_loss=0.1969, over 19681.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3523, pruned_loss=0.1155, ctc_loss=0.2166, over 3851205.57 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:27:29,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=35888.0, ans=0.125
+2024-08-25 05:27:32,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=35888.0, ans=0.1
+2024-08-25 05:29:19,796 INFO [train.py:1114] (1/4) Epoch 3, batch 1800, loss[loss=0.3074, simple_loss=0.3337, pruned_loss=0.1029, ctc_loss=0.1883, over 19609.00 frames. ], tot_loss[loss=0.3352, simple_loss=0.3526, pruned_loss=0.1155, ctc_loss=0.217, over 3853268.25 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:31:26,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=36208.0, ans=0.0
+2024-08-25 05:31:43,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=36261.333333333336, ans=0.125
+2024-08-25 05:31:45,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=36261.333333333336, ans=0.125
+2024-08-25 05:31:53,588 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.83 vs. limit=15.0
+2024-08-25 05:31:58,633 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.106e+02 2.466e+02 3.299e+02 1.077e+03, threshold=4.933e+02, percent-clipped=1.0
+2024-08-25 05:32:01,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=36368.0, ans=0.125
+2024-08-25 05:32:06,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=36368.0, ans=0.002963478260869565
+2024-08-25 05:32:11,696 INFO [train.py:1114] (1/4) Epoch 3, batch 1850, loss[loss=0.3196, simple_loss=0.3492, pruned_loss=0.1058, ctc_loss=0.1964, over 19608.00 frames. ], tot_loss[loss=0.3338, simple_loss=0.3515, pruned_loss=0.1149, ctc_loss=0.2157, over 3856041.44 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:32:12,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=36421.333333333336, ans=0.2
+2024-08-25 05:32:14,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=36421.333333333336, ans=0.125
+2024-08-25 05:32:16,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=36421.333333333336, ans=0.025
+2024-08-25 05:32:30,137 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.71 vs. limit=6.0
+2024-08-25 05:32:30,800 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:32:35,680 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.64 vs. limit=22.5
+2024-08-25 05:32:47,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=36528.0, ans=0.125
+2024-08-25 05:32:48,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=36581.333333333336, ans=0.1
+2024-08-25 05:32:53,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36581.333333333336, ans=0.1
+2024-08-25 05:33:12,867 INFO [train.py:1114] (1/4) Epoch 3, batch 1900, loss[loss=0.3154, simple_loss=0.3534, pruned_loss=0.0995, ctc_loss=0.196, over 19659.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.3522, pruned_loss=0.1151, ctc_loss=0.2162, over 3861295.52 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 16.0
+2024-08-25 05:33:37,995 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.59 vs. limit=10.0
+2024-08-25 05:33:38,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 05:33:43,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=36794.666666666664, ans=0.2
+2024-08-25 05:33:52,869 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=36794.666666666664, ans=0.0
+2024-08-25 05:33:59,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=36848.0, ans=0.2
+2024-08-25 05:34:01,015 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff2.min_abs, batch_count=36848.0, ans=0.1
+2024-08-25 05:34:01,370 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.69 vs. limit=15.0
+2024-08-25 05:34:05,255 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.260e+02 2.560e+02 3.105e+02 5.689e+02, threshold=5.120e+02, percent-clipped=2.0
+2024-08-25 05:34:49,853 INFO [train.py:1114] (1/4) Epoch 3, batch 1950, loss[loss=0.3271, simple_loss=0.3395, pruned_loss=0.1143, ctc_loss=0.215, over 19591.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3533, pruned_loss=0.1154, ctc_loss=0.2163, over 3870141.84 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 16.0
+2024-08-25 05:36:26,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=37008.0, ans=0.1
+2024-08-25 05:36:27,380 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=37008.0, ans=0.1
+2024-08-25 05:36:32,543 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=3.872e+00
+2024-08-25 05:36:41,013 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.69 vs. limit=15.0
+2024-08-25 05:36:42,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=37114.666666666664, ans=0.0
+2024-08-25 05:36:44,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=37114.666666666664, ans=0.125
+2024-08-25 05:37:09,005 INFO [train.py:1114] (1/4) Epoch 3, batch 2000, loss[loss=0.3134, simple_loss=0.3242, pruned_loss=0.1097, ctc_loss=0.2081, over 19661.00 frames. ], tot_loss[loss=0.3358, simple_loss=0.3538, pruned_loss=0.1155, ctc_loss=0.2166, over 3854345.99 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 05:37:17,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=37221.333333333336, ans=0.95
+2024-08-25 05:37:18,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=37221.333333333336, ans=0.125
+2024-08-25 05:37:31,183 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.54 vs. limit=15.0
+2024-08-25 05:37:35,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=37328.0, ans=0.125
+2024-08-25 05:37:36,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=37328.0, ans=0.125
+2024-08-25 05:37:45,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=37381.333333333336, ans=0.125
+2024-08-25 05:37:55,629 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=37381.333333333336, ans=0.04949747468305833
+2024-08-25 05:38:02,425 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.243e+02 2.650e+02 3.292e+02 1.299e+03, threshold=5.300e+02, percent-clipped=6.0
+2024-08-25 05:38:13,922 INFO [train.py:1114] (1/4) Epoch 3, batch 2050, loss[loss=0.3235, simple_loss=0.3324, pruned_loss=0.1144, ctc_loss=0.2144, over 19725.00 frames. ], tot_loss[loss=0.3339, simple_loss=0.3523, pruned_loss=0.1147, ctc_loss=0.2155, over 3850800.98 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:38:17,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=37488.0, ans=0.125
+2024-08-25 05:38:30,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=37541.333333333336, ans=0.125
+2024-08-25 05:38:31,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37541.333333333336, ans=0.1
+2024-08-25 05:38:47,289 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=37594.666666666664, ans=0.125
+2024-08-25 05:38:59,376 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.47 vs. limit=15.0
+2024-08-25 05:39:40,791 INFO [train.py:1114] (1/4) Epoch 3, batch 2100, loss[loss=0.2848, simple_loss=0.3249, pruned_loss=0.08779, ctc_loss=0.1727, over 19758.00 frames. ], tot_loss[loss=0.3333, simple_loss=0.3518, pruned_loss=0.1143, ctc_loss=0.215, over 3857825.56 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:39:49,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=37754.666666666664, ans=0.125
+2024-08-25 05:39:55,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=37754.666666666664, ans=0.2
+2024-08-25 05:40:05,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=37808.0, ans=0.0
+2024-08-25 05:40:13,352 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.60 vs. limit=22.5
+2024-08-25 05:40:45,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=37861.333333333336, ans=0.0
+2024-08-25 05:40:50,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=37914.666666666664, ans=0.05
+2024-08-25 05:40:53,535 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.73 vs. limit=22.5
+2024-08-25 05:40:54,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=37914.666666666664, ans=0.125
+2024-08-25 05:40:58,550 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.072e+02 2.352e+02 2.718e+02 4.903e+02, threshold=4.703e+02, percent-clipped=0.0
+2024-08-25 05:41:08,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=37968.0, ans=0.0
+2024-08-25 05:41:10,087 INFO [train.py:1114] (1/4) Epoch 3, batch 2150, loss[loss=0.3358, simple_loss=0.3533, pruned_loss=0.1157, ctc_loss=0.2173, over 19594.00 frames. ], tot_loss[loss=0.3317, simple_loss=0.3504, pruned_loss=0.1137, ctc_loss=0.214, over 3868883.36 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 05:41:28,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=38074.666666666664, ans=0.125
+2024-08-25 05:41:31,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=38074.666666666664, ans=0.125
+2024-08-25 05:41:44,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=38128.0, ans=0.07
+2024-08-25 05:41:54,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=38181.333333333336, ans=0.2
+2024-08-25 05:41:55,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=38181.333333333336, ans=0.125
+2024-08-25 05:42:24,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=38181.333333333336, ans=0.025
+2024-08-25 05:42:25,978 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=38234.666666666664, ans=0.125
+2024-08-25 05:42:31,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38234.666666666664, ans=0.1
+2024-08-25 05:42:42,869 INFO [train.py:1114] (1/4) Epoch 3, batch 2200, loss[loss=0.351, simple_loss=0.364, pruned_loss=0.122, ctc_loss=0.2347, over 19580.00 frames. ], tot_loss[loss=0.3322, simple_loss=0.3509, pruned_loss=0.1139, ctc_loss=0.2141, over 3868042.63 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:42:43,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=38288.0, ans=0.0025460869565217398
+2024-08-25 05:42:54,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=38288.0, ans=0.0025460869565217398
+2024-08-25 05:43:02,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=38341.333333333336, ans=0.125
+2024-08-25 05:43:02,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=38341.333333333336, ans=0.125
+2024-08-25 05:43:34,307 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.197e+02 2.629e+02 2.994e+02 6.107e+02, threshold=5.259e+02, percent-clipped=1.0
+2024-08-25 05:43:51,061 INFO [train.py:1114] (1/4) Epoch 3, batch 2250, loss[loss=0.2887, simple_loss=0.3364, pruned_loss=0.08739, ctc_loss=0.1655, over 19603.00 frames. ], tot_loss[loss=0.3319, simple_loss=0.351, pruned_loss=0.1137, ctc_loss=0.2133, over 3868149.79 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:44:20,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=38608.0, ans=0.125
+2024-08-25 05:44:24,528 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=38608.0, ans=0.125
+2024-08-25 05:44:26,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=38608.0, ans=0.2
+2024-08-25 05:44:29,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=38608.0, ans=0.125
+2024-08-25 05:44:44,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=38661.333333333336, ans=0.125
+2024-08-25 05:45:07,821 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.66 vs. limit=15.0
+2024-08-25 05:45:10,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=38768.0, ans=0.125
+2024-08-25 05:45:15,053 INFO [train.py:1114] (1/4) Epoch 3, batch 2300, loss[loss=0.3051, simple_loss=0.3227, pruned_loss=0.1036, ctc_loss=0.2011, over 19527.00 frames. ], tot_loss[loss=0.3309, simple_loss=0.3496, pruned_loss=0.1135, ctc_loss=0.2129, over 3862623.77 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:45:19,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38821.333333333336, ans=0.1
+2024-08-25 05:45:22,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=38821.333333333336, ans=0.125
+2024-08-25 05:46:07,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=38874.666666666664, ans=0.125
+2024-08-25 05:46:11,364 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=38928.0, ans=0.1
+2024-08-25 05:46:32,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=21.44 vs. limit=22.5
+2024-08-25 05:47:15,683 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.233e+02 2.542e+02 3.133e+02 7.552e+02, threshold=5.083e+02, percent-clipped=3.0
+2024-08-25 05:47:16,978 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:47:17,137 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=39034.666666666664, ans=0.125
+2024-08-25 05:47:27,928 INFO [train.py:1114] (1/4) Epoch 3, batch 2350, loss[loss=0.3878, simple_loss=0.3906, pruned_loss=0.1407, ctc_loss=0.2589, over 19678.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3486, pruned_loss=0.1125, ctc_loss=0.2113, over 3864981.09 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:48:09,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=39248.0, ans=0.125
+2024-08-25 05:48:10,353 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.06 vs. limit=15.0
+2024-08-25 05:48:24,944 INFO [train.py:1114] (1/4) Epoch 3, batch 2400, loss[loss=0.3473, simple_loss=0.3691, pruned_loss=0.1186, ctc_loss=0.2211, over 19316.00 frames. ], tot_loss[loss=0.3333, simple_loss=0.3518, pruned_loss=0.1145, ctc_loss=0.2144, over 3859318.23 frames. ], batch size: 71, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 05:48:25,590 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.55 vs. limit=15.0
+2024-08-25 05:48:27,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=39354.666666666664, ans=0.125
+2024-08-25 05:48:35,216 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.97 vs. limit=15.0
+2024-08-25 05:48:51,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=39461.333333333336, ans=0.025
+2024-08-25 05:49:10,300 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.241e+02 2.672e+02 3.161e+02 5.607e+02, threshold=5.344e+02, percent-clipped=4.0
+2024-08-25 05:49:11,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=39568.0, ans=0.125
+2024-08-25 05:49:26,435 INFO [train.py:1114] (1/4) Epoch 3, batch 2450, loss[loss=0.4262, simple_loss=0.3958, pruned_loss=0.166, ctc_loss=0.3114, over 13715.00 frames. ], tot_loss[loss=0.3435, simple_loss=0.3575, pruned_loss=0.1198, ctc_loss=0.2244, over 3731134.73 frames. ], batch size: 140, lr: 3.53e-02, grad_scale: 32.0
+2024-08-25 05:49:26,812 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=9.79 vs. limit=12.0
+2024-08-25 05:50:05,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=39781.333333333336, ans=0.2
+2024-08-25 05:50:49,424 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.74 vs. limit=22.5
+2024-08-25 05:51:05,715 INFO [train.py:1114] (1/4) Epoch 4, batch 0, loss[loss=0.3387, simple_loss=0.3474, pruned_loss=0.1199, ctc_loss=0.2254, over 19425.00 frames. ], tot_loss[loss=0.3387, simple_loss=0.3474, pruned_loss=0.1199, ctc_loss=0.2254, over 19425.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:51:05,715 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 05:51:35,402 INFO [train.py:1146] (1/4) Epoch 4, validation: loss=0.2629, simple_loss=0.3337, pruned_loss=0.07032, ctc_loss=0.1284, over 944034.00 frames.
+2024-08-25 05:51:35,403 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 05:51:40,981 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.51 vs. limit=15.0
+2024-08-25 05:52:02,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.08 vs. limit=15.0
+2024-08-25 05:52:21,109 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=39989.333333333336, ans=0.025
+2024-08-25 05:52:22,232 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=39989.333333333336, ans=0.125
+2024-08-25 05:52:29,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=40042.666666666664, ans=0.1
+2024-08-25 05:52:38,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=40042.666666666664, ans=0.2
+2024-08-25 05:52:41,492 INFO [train.py:1114] (1/4) Epoch 4, batch 50, loss[loss=0.2678, simple_loss=0.3018, pruned_loss=0.08433, ctc_loss=0.1629, over 19702.00 frames. ], tot_loss[loss=0.3396, simple_loss=0.356, pruned_loss=0.1173, ctc_loss=0.2216, over 845181.07 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:52:47,061 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.147e+02 2.483e+02 2.920e+02 4.932e+02, threshold=4.967e+02, percent-clipped=0.0
+2024-08-25 05:52:56,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=40096.0, ans=0.1
+2024-08-25 05:53:07,075 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.05 vs. limit=15.0
+2024-08-25 05:53:18,560 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=40149.333333333336, ans=0.025
+2024-08-25 05:53:38,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=40202.666666666664, ans=0.0
+2024-08-25 05:53:41,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=40256.0, ans=0.125
+2024-08-25 05:53:54,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=40256.0, ans=0.125
+2024-08-25 05:54:04,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=40309.333333333336, ans=0.025
+2024-08-25 05:54:08,128 INFO [train.py:1114] (1/4) Epoch 4, batch 100, loss[loss=0.3207, simple_loss=0.3454, pruned_loss=0.1066, ctc_loss=0.2069, over 19702.00 frames. ], tot_loss[loss=0.3349, simple_loss=0.354, pruned_loss=0.1147, ctc_loss=0.2159, over 1499126.13 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-25 05:54:14,758 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.09 vs. limit=10.0
+2024-08-25 05:54:32,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=40362.666666666664, ans=0.125
+2024-08-25 05:54:59,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=40416.0, ans=0.025
+2024-08-25 05:55:45,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=40576.0, ans=0.125
+2024-08-25 05:55:55,478 INFO [train.py:1114] (1/4) Epoch 4, batch 150, loss[loss=0.284, simple_loss=0.3066, pruned_loss=0.09457, ctc_loss=0.1804, over 19700.00 frames. ], tot_loss[loss=0.3288, simple_loss=0.3492, pruned_loss=0.112, ctc_loss=0.2107, over 2026958.37 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:55:56,484 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.033e+02 2.286e+02 2.661e+02 4.118e+02, threshold=4.571e+02, percent-clipped=0.0
+2024-08-25 05:56:07,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=40682.666666666664, ans=0.125
+2024-08-25 05:56:11,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=40682.666666666664, ans=0.0
+2024-08-25 05:56:45,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=40789.333333333336, ans=0.1
+2024-08-25 05:56:58,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=40842.666666666664, ans=0.1
+2024-08-25 05:57:04,741 INFO [train.py:1114] (1/4) Epoch 4, batch 200, loss[loss=0.3522, simple_loss=0.3642, pruned_loss=0.1256, ctc_loss=0.2223, over 18345.00 frames. ], tot_loss[loss=0.3251, simple_loss=0.3469, pruned_loss=0.1102, ctc_loss=0.2073, over 2434725.20 frames. ], batch size: 85, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:57:25,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.86 vs. limit=22.5
+2024-08-25 05:57:31,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_na.min_abs, batch_count=40949.333333333336, ans=0.02
+2024-08-25 05:57:32,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=40949.333333333336, ans=0.07
+2024-08-25 05:57:36,525 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-25 05:57:39,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=40949.333333333336, ans=0.2
+2024-08-25 05:57:48,926 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:58:29,444 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=41056.0, ans=0.0019443478260869566
+2024-08-25 05:58:36,809 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.66 vs. limit=15.0
+2024-08-25 05:58:55,858 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.30 vs. limit=15.0
+2024-08-25 05:59:03,057 INFO [train.py:1114] (1/4) Epoch 4, batch 250, loss[loss=0.3613, simple_loss=0.3702, pruned_loss=0.1279, ctc_loss=0.2416, over 19353.00 frames. ], tot_loss[loss=0.3259, simple_loss=0.3474, pruned_loss=0.1106, ctc_loss=0.2079, over 2754787.26 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 05:59:03,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=41162.666666666664, ans=0.125
+2024-08-25 05:59:04,099 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.098e+02 2.387e+02 2.939e+02 4.251e+02, threshold=4.774e+02, percent-clipped=0.0
+2024-08-25 05:59:26,825 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=5.62 vs. limit=15.0
+2024-08-25 05:59:54,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=41322.666666666664, ans=0.125
+2024-08-25 06:00:14,168 INFO [train.py:1114] (1/4) Epoch 4, batch 300, loss[loss=0.3251, simple_loss=0.3484, pruned_loss=0.1089, ctc_loss=0.21, over 19508.00 frames. ], tot_loss[loss=0.3248, simple_loss=0.3467, pruned_loss=0.11, ctc_loss=0.207, over 3000076.96 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 06:00:34,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=41482.666666666664, ans=0.125
+2024-08-25 06:00:37,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-25 06:00:57,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=41536.0, ans=0.0
+2024-08-25 06:01:08,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=41589.333333333336, ans=0.125
+2024-08-25 06:01:21,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=41642.666666666664, ans=0.07
+2024-08-25 06:01:23,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=41642.666666666664, ans=0.125
+2024-08-25 06:01:36,618 INFO [train.py:1114] (1/4) Epoch 4, batch 350, loss[loss=0.2773, simple_loss=0.308, pruned_loss=0.08938, ctc_loss=0.1698, over 19757.00 frames. ], tot_loss[loss=0.3239, simple_loss=0.3463, pruned_loss=0.1096, ctc_loss=0.2059, over 3188636.47 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:01:37,789 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.143e+02 2.517e+02 2.887e+02 6.595e+02, threshold=5.034e+02, percent-clipped=1.0
+2024-08-25 06:01:44,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=41696.0, ans=0.0
+2024-08-25 06:02:07,773 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.06 vs. limit=22.5
+2024-08-25 06:02:14,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=41856.0, ans=0.0
+2024-08-25 06:02:21,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=41856.0, ans=0.125
+2024-08-25 06:02:38,783 INFO [train.py:1114] (1/4) Epoch 4, batch 400, loss[loss=0.3326, simple_loss=0.3554, pruned_loss=0.1116, ctc_loss=0.2165, over 19504.00 frames. ], tot_loss[loss=0.3232, simple_loss=0.3461, pruned_loss=0.1091, ctc_loss=0.2052, over 3340991.85 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:02:39,001 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:02:49,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=41962.666666666664, ans=0.125
+2024-08-25 06:03:24,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=42122.666666666664, ans=0.125
+2024-08-25 06:03:46,754 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=42122.666666666664, ans=0.125
+2024-08-25 06:04:04,054 INFO [train.py:1114] (1/4) Epoch 4, batch 450, loss[loss=0.3925, simple_loss=0.3969, pruned_loss=0.1406, ctc_loss=0.2672, over 19623.00 frames. ], tot_loss[loss=0.3241, simple_loss=0.3465, pruned_loss=0.1097, ctc_loss=0.2057, over 3449441.54 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:04:05,483 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=6.143e-02
+2024-08-25 06:04:06,526 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.107e+02 2.479e+02 2.897e+02 5.564e+02, threshold=4.958e+02, percent-clipped=2.0
+2024-08-25 06:04:08,010 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:04:11,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=42229.333333333336, ans=0.125
+2024-08-25 06:04:19,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=42282.666666666664, ans=0.125
+2024-08-25 06:04:21,208 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.02 vs. limit=12.0
+2024-08-25 06:04:21,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=42282.666666666664, ans=0.125
+2024-08-25 06:04:57,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=42389.333333333336, ans=0.05
+2024-08-25 06:05:03,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42389.333333333336, ans=0.1
+2024-08-25 06:05:32,236 INFO [train.py:1114] (1/4) Epoch 4, batch 500, loss[loss=0.3338, simple_loss=0.3595, pruned_loss=0.1121, ctc_loss=0.2102, over 19692.00 frames. ], tot_loss[loss=0.3218, simple_loss=0.3448, pruned_loss=0.1086, ctc_loss=0.204, over 3545718.72 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:05:50,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=42549.333333333336, ans=0.0
+2024-08-25 06:05:52,706 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.11 vs. limit=15.0
+2024-08-25 06:06:40,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=42762.666666666664, ans=0.0
+2024-08-25 06:06:41,081 INFO [train.py:1114] (1/4) Epoch 4, batch 550, loss[loss=0.3327, simple_loss=0.355, pruned_loss=0.1132, ctc_loss=0.2098, over 19214.00 frames. ], tot_loss[loss=0.3224, simple_loss=0.3451, pruned_loss=0.1089, ctc_loss=0.2043, over 3606988.10 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:06:44,779 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.027e+02 2.416e+02 2.881e+02 5.051e+02, threshold=4.833e+02, percent-clipped=1.0
+2024-08-25 06:07:01,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.whiten.whitening_limit, batch_count=42816.0, ans=12.0
+2024-08-25 06:07:18,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=42869.333333333336, ans=0.125
+2024-08-25 06:07:50,594 INFO [train.py:1114] (1/4) Epoch 4, batch 600, loss[loss=0.3629, simple_loss=0.378, pruned_loss=0.127, ctc_loss=0.2346, over 19407.00 frames. ], tot_loss[loss=0.3222, simple_loss=0.345, pruned_loss=0.1088, ctc_loss=0.2042, over 3664793.90 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:07:54,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=43029.333333333336, ans=0.125
+2024-08-25 06:07:57,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=43029.333333333336, ans=0.2
+2024-08-25 06:07:58,465 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.71 vs. limit=22.5
+2024-08-25 06:08:01,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=43082.666666666664, ans=0.1
+2024-08-25 06:08:06,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=43082.666666666664, ans=0.125
+2024-08-25 06:08:12,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.85 vs. limit=10.0
+2024-08-25 06:08:26,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=43189.333333333336, ans=0.2
+2024-08-25 06:08:44,203 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.07 vs. limit=22.5
+2024-08-25 06:08:59,031 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.17 vs. limit=15.0
+2024-08-25 06:09:00,687 INFO [train.py:1114] (1/4) Epoch 4, batch 650, loss[loss=0.2809, simple_loss=0.3247, pruned_loss=0.08583, ctc_loss=0.1638, over 19790.00 frames. ], tot_loss[loss=0.3207, simple_loss=0.3439, pruned_loss=0.1082, ctc_loss=0.2027, over 3715390.33 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 16.0
+2024-08-25 06:09:15,857 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.140e+02 2.544e+02 3.023e+02 7.017e+02, threshold=5.088e+02, percent-clipped=9.0
+2024-08-25 06:09:18,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=43296.0, ans=0.125
+2024-08-25 06:09:37,566 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.85 vs. limit=10.0
+2024-08-25 06:09:50,507 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.25 vs. limit=15.0
+2024-08-25 06:09:51,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=43402.666666666664, ans=0.125
+2024-08-25 06:09:59,436 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=43456.0, ans=0.125
+2024-08-25 06:10:09,209 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.14 vs. limit=15.0
+2024-08-25 06:10:12,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=43509.333333333336, ans=0.125
+2024-08-25 06:10:18,927 INFO [train.py:1114] (1/4) Epoch 4, batch 700, loss[loss=0.2577, simple_loss=0.3078, pruned_loss=0.07501, ctc_loss=0.1437, over 19727.00 frames. ], tot_loss[loss=0.3203, simple_loss=0.3442, pruned_loss=0.1078, ctc_loss=0.2021, over 3747440.16 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:10:39,098 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.14 vs. limit=12.0
+2024-08-25 06:10:39,924 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=43616.0, ans=0.025
+2024-08-25 06:10:53,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=43669.333333333336, ans=0.025
+2024-08-25 06:11:04,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=43722.666666666664, ans=0.125
+2024-08-25 06:11:04,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=43722.666666666664, ans=0.125
+2024-08-25 06:11:06,788 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.49 vs. limit=22.5
+2024-08-25 06:11:20,009 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.55 vs. limit=22.5
+2024-08-25 06:11:23,835 INFO [train.py:1114] (1/4) Epoch 4, batch 750, loss[loss=0.3383, simple_loss=0.3627, pruned_loss=0.1139, ctc_loss=0.2157, over 19483.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3432, pruned_loss=0.1072, ctc_loss=0.2011, over 3772264.34 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:11:28,683 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.141e+02 2.481e+02 2.931e+02 4.472e+02, threshold=4.962e+02, percent-clipped=0.0
+2024-08-25 06:11:49,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=43882.666666666664, ans=0.125
+2024-08-25 06:12:09,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=43989.333333333336, ans=10.0
+2024-08-25 06:12:16,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=44042.666666666664, ans=0.5
+2024-08-25 06:12:29,316 INFO [train.py:1114] (1/4) Epoch 4, batch 800, loss[loss=0.3329, simple_loss=0.3394, pruned_loss=0.1198, ctc_loss=0.2173, over 19810.00 frames. ], tot_loss[loss=0.3187, simple_loss=0.3428, pruned_loss=0.1071, ctc_loss=0.2009, over 3794731.81 frames. ], batch size: 49, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:12:56,022 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.26 vs. limit=15.0
+2024-08-25 06:13:03,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=44256.0, ans=0.125
+2024-08-25 06:13:03,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=44256.0, ans=0.125
+2024-08-25 06:13:22,765 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.11 vs. limit=15.0
+2024-08-25 06:13:27,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=44362.666666666664, ans=0.125
+2024-08-25 06:13:28,033 INFO [train.py:1114] (1/4) Epoch 4, batch 850, loss[loss=0.3459, simple_loss=0.3698, pruned_loss=0.1169, ctc_loss=0.2209, over 19670.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3418, pruned_loss=0.1064, ctc_loss=0.1996, over 3815241.96 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:13:31,254 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.074e+02 2.402e+02 2.888e+02 5.555e+02, threshold=4.804e+02, percent-clipped=1.0
+2024-08-25 06:13:33,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=44362.666666666664, ans=0.5
+2024-08-25 06:13:39,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=44416.0, ans=0.0
+2024-08-25 06:13:41,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=44416.0, ans=0.125
+2024-08-25 06:13:47,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_na.min_abs, batch_count=44416.0, ans=0.02
+2024-08-25 06:13:53,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=44469.333333333336, ans=0.125
+2024-08-25 06:13:57,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=44469.333333333336, ans=0.2
+2024-08-25 06:14:00,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=44469.333333333336, ans=0.125
+2024-08-25 06:14:09,296 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.46 vs. limit=22.5
+2024-08-25 06:14:13,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=44522.666666666664, ans=0.125
+2024-08-25 06:14:16,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=44576.0, ans=0.125
+2024-08-25 06:14:21,161 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.89 vs. limit=15.0
+2024-08-25 06:14:30,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=44576.0, ans=0.1
+2024-08-25 06:14:32,266 INFO [train.py:1114] (1/4) Epoch 4, batch 900, loss[loss=0.3033, simple_loss=0.3253, pruned_loss=0.1027, ctc_loss=0.1897, over 19799.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3429, pruned_loss=0.1073, ctc_loss=0.2012, over 3819422.89 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:14:36,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=44629.333333333336, ans=0.125
+2024-08-25 06:14:37,666 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.75 vs. limit=22.5
+2024-08-25 06:15:01,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=44736.0, ans=0.125
+2024-08-25 06:15:02,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=44736.0, ans=15.0
+2024-08-25 06:15:32,385 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.08 vs. limit=15.0
+2024-08-25 06:15:38,553 INFO [train.py:1114] (1/4) Epoch 4, batch 950, loss[loss=0.3037, simple_loss=0.3243, pruned_loss=0.1047, ctc_loss=0.1844, over 19496.00 frames. ], tot_loss[loss=0.3205, simple_loss=0.3439, pruned_loss=0.108, ctc_loss=0.2028, over 3821051.77 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:15:41,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=44896.0, ans=0.0
+2024-08-25 06:15:42,142 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.101e+02 2.364e+02 2.735e+02 6.196e+02, threshold=4.728e+02, percent-clipped=2.0
+2024-08-25 06:15:44,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=44896.0, ans=0.125
+2024-08-25 06:15:59,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=44949.333333333336, ans=0.09899494936611666
+2024-08-25 06:16:30,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=45109.333333333336, ans=0.125
+2024-08-25 06:16:35,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=45109.333333333336, ans=0.125
+2024-08-25 06:16:42,344 INFO [train.py:1114] (1/4) Epoch 4, batch 1000, loss[loss=0.2576, simple_loss=0.3126, pruned_loss=0.073, ctc_loss=0.1419, over 19857.00 frames. ], tot_loss[loss=0.3214, simple_loss=0.3447, pruned_loss=0.1083, ctc_loss=0.2034, over 3817432.80 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 32.0
+2024-08-25 06:17:03,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=45162.666666666664, ans=0.125
+2024-08-25 06:17:55,465 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:18:10,596 INFO [train.py:1114] (1/4) Epoch 4, batch 1050, loss[loss=0.3207, simple_loss=0.3523, pruned_loss=0.1046, ctc_loss=0.1998, over 19835.00 frames. ], tot_loss[loss=0.3191, simple_loss=0.3431, pruned_loss=0.1073, ctc_loss=0.2013, over 3823387.51 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-25 06:18:11,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.86 vs. limit=15.0
+2024-08-25 06:18:26,176 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.982e+02 2.200e+02 2.634e+02 5.388e+02, threshold=4.401e+02, percent-clipped=1.0
+2024-08-25 06:19:03,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=45589.333333333336, ans=0.07
+2024-08-25 06:19:09,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=45589.333333333336, ans=0.0009588405797101435
+2024-08-25 06:19:36,311 INFO [train.py:1114] (1/4) Epoch 4, batch 1100, loss[loss=0.3076, simple_loss=0.3421, pruned_loss=0.09954, ctc_loss=0.1849, over 19585.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3423, pruned_loss=0.1069, ctc_loss=0.2008, over 3829412.18 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:19:47,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=45696.0, ans=0.025
+2024-08-25 06:19:48,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=45696.0, ans=0.125
+2024-08-25 06:19:58,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.82 vs. limit=15.0
+2024-08-25 06:20:07,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45749.333333333336, ans=0.1
+2024-08-25 06:20:19,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=45802.666666666664, ans=0.2
+2024-08-25 06:20:39,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=45909.333333333336, ans=0.125
+2024-08-25 06:20:52,238 INFO [train.py:1114] (1/4) Epoch 4, batch 1150, loss[loss=0.3218, simple_loss=0.3428, pruned_loss=0.1106, ctc_loss=0.199, over 19587.00 frames. ], tot_loss[loss=0.3186, simple_loss=0.3426, pruned_loss=0.1071, ctc_loss=0.2011, over 3827714.63 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:20:57,036 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.122e+02 2.390e+02 2.706e+02 4.199e+02, threshold=4.779e+02, percent-clipped=0.0
+2024-08-25 06:21:12,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=46016.0, ans=0.125
+2024-08-25 06:21:12,887 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=46016.0, ans=0.125
+2024-08-25 06:21:27,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=46069.333333333336, ans=0.125
+2024-08-25 06:21:51,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=46176.0, ans=0.0
+2024-08-25 06:21:59,986 INFO [train.py:1114] (1/4) Epoch 4, batch 1200, loss[loss=0.3435, simple_loss=0.3644, pruned_loss=0.115, ctc_loss=0.2319, over 19854.00 frames. ], tot_loss[loss=0.3196, simple_loss=0.3437, pruned_loss=0.1074, ctc_loss=0.2016, over 3824320.42 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:22:17,167 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.11 vs. limit=15.0
+2024-08-25 06:22:21,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=46282.666666666664, ans=0.125
+2024-08-25 06:22:22,572 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:22:30,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=46336.0, ans=0.0
+2024-08-25 06:22:33,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=46336.0, ans=0.125
+2024-08-25 06:22:53,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=46389.333333333336, ans=0.000784927536231883
+2024-08-25 06:23:10,618 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=1.742e-02
+2024-08-25 06:23:12,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=46442.666666666664, ans=0.0007733333333333325
+2024-08-25 06:23:19,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=46442.666666666664, ans=0.125
+2024-08-25 06:23:21,319 INFO [train.py:1114] (1/4) Epoch 4, batch 1250, loss[loss=0.3121, simple_loss=0.3505, pruned_loss=0.09988, ctc_loss=0.1849, over 19492.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3432, pruned_loss=0.1064, ctc_loss=0.1996, over 3842481.00 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:23:21,706 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=46496.0, ans=0.0
+2024-08-25 06:23:26,214 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.962e+02 2.225e+02 2.468e+02 3.508e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 06:23:30,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=46496.0, ans=0.2
+2024-08-25 06:24:10,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=46602.666666666664, ans=0.2
+2024-08-25 06:24:21,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=46656.0, ans=0.125
+2024-08-25 06:24:35,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=46709.333333333336, ans=0.125
+2024-08-25 06:24:35,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=46709.333333333336, ans=0.125
+2024-08-25 06:24:36,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=46709.333333333336, ans=0.2
+2024-08-25 06:24:48,929 INFO [train.py:1114] (1/4) Epoch 4, batch 1300, loss[loss=0.3811, simple_loss=0.3816, pruned_loss=0.1388, ctc_loss=0.2573, over 18872.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3426, pruned_loss=0.1064, ctc_loss=0.1999, over 3847173.41 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:25:52,881 INFO [train.py:1114] (1/4) Epoch 4, batch 1350, loss[loss=0.2746, simple_loss=0.3174, pruned_loss=0.08496, ctc_loss=0.1545, over 19767.00 frames. ], tot_loss[loss=0.3165, simple_loss=0.342, pruned_loss=0.1058, ctc_loss=0.1987, over 3856426.63 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:25:54,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=47029.333333333336, ans=0.0006457971014492744
+2024-08-25 06:26:07,743 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.269e+02 2.560e+02 3.229e+02 4.886e+02, threshold=5.120e+02, percent-clipped=5.0
+2024-08-25 06:26:31,945 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=47082.666666666664, ans=0.1
+2024-08-25 06:26:35,776 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.97 vs. limit=15.0
+2024-08-25 06:26:43,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=47082.666666666664, ans=0.1
+2024-08-25 06:26:45,708 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.87 vs. limit=15.0
+2024-08-25 06:26:47,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=47136.0, ans=0.0006226086956521732
+2024-08-25 06:27:14,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=47242.666666666664, ans=0.125
+2024-08-25 06:27:20,739 INFO [train.py:1114] (1/4) Epoch 4, batch 1400, loss[loss=0.2741, simple_loss=0.2977, pruned_loss=0.08987, ctc_loss=0.1766, over 19673.00 frames. ], tot_loss[loss=0.3164, simple_loss=0.3417, pruned_loss=0.1058, ctc_loss=0.1988, over 3863554.18 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:27:38,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=47296.0, ans=0.125
+2024-08-25 06:27:48,304 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=47349.333333333336, ans=0.0
+2024-08-25 06:28:01,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=47402.666666666664, ans=0.125
+2024-08-25 06:28:06,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=47402.666666666664, ans=0.0
+2024-08-25 06:28:43,704 INFO [train.py:1114] (1/4) Epoch 4, batch 1450, loss[loss=0.3652, simple_loss=0.3752, pruned_loss=0.1297, ctc_loss=0.2396, over 19649.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3422, pruned_loss=0.106, ctc_loss=0.199, over 3859954.86 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:28:48,583 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.026e+02 2.327e+02 2.659e+02 4.329e+02, threshold=4.654e+02, percent-clipped=0.0
+2024-08-25 06:28:48,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=47562.666666666664, ans=0.125
+2024-08-25 06:28:56,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=47616.0, ans=0.0
+2024-08-25 06:29:06,293 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=47616.0, ans=0.125
+2024-08-25 06:29:09,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=47669.333333333336, ans=0.0
+2024-08-25 06:29:10,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=47669.333333333336, ans=0.025
+2024-08-25 06:29:34,700 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=25.30 vs. limit=15.0
+2024-08-25 06:29:44,348 INFO [train.py:1114] (1/4) Epoch 4, batch 1500, loss[loss=0.324, simple_loss=0.3589, pruned_loss=0.1048, ctc_loss=0.1986, over 19579.00 frames. ], tot_loss[loss=0.3168, simple_loss=0.3423, pruned_loss=0.1059, ctc_loss=0.1989, over 3860060.18 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:29:47,996 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:30:09,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=47936.0, ans=0.0
+2024-08-25 06:30:24,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=47989.333333333336, ans=0.025
+2024-08-25 06:30:27,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=47989.333333333336, ans=0.125
+2024-08-25 06:30:37,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=48042.666666666664, ans=0.025
+2024-08-25 06:31:30,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=48042.666666666664, ans=0.2
+2024-08-25 06:31:38,007 INFO [train.py:1114] (1/4) Epoch 4, batch 1550, loss[loss=0.2917, simple_loss=0.335, pruned_loss=0.09083, ctc_loss=0.1667, over 19613.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3422, pruned_loss=0.1061, ctc_loss=0.1992, over 3844874.58 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:31:49,988 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.013e+02 2.262e+02 2.770e+02 1.090e+03, threshold=4.525e+02, percent-clipped=1.0
+2024-08-25 06:32:11,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=48149.333333333336, ans=0.2
+2024-08-25 06:32:33,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=48256.0, ans=0.1
+2024-08-25 06:32:41,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48256.0, ans=0.1
+2024-08-25 06:33:26,257 INFO [train.py:1114] (1/4) Epoch 4, batch 1600, loss[loss=0.3121, simple_loss=0.3455, pruned_loss=0.1009, ctc_loss=0.1926, over 19836.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3419, pruned_loss=0.1062, ctc_loss=0.1993, over 3834288.08 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:33:29,926 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:33:54,647 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=48416.0, ans=0.125
+2024-08-25 06:34:00,413 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.24 vs. limit=15.0
+2024-08-25 06:34:03,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=48469.333333333336, ans=0.125
+2024-08-25 06:34:04,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=48469.333333333336, ans=0.0
+2024-08-25 06:34:05,059 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.36 vs. limit=15.0
+2024-08-25 06:34:42,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=48522.666666666664, ans=0.125
+2024-08-25 06:34:54,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=48576.0, ans=0.125
+2024-08-25 06:35:09,916 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:35:15,008 INFO [train.py:1114] (1/4) Epoch 4, batch 1650, loss[loss=0.3516, simple_loss=0.3646, pruned_loss=0.1229, ctc_loss=0.2317, over 19651.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.341, pruned_loss=0.1057, ctc_loss=0.1987, over 3829513.74 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:35:16,893 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.74 vs. limit=15.0
+2024-08-25 06:35:21,183 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.079e+02 2.506e+02 2.996e+02 5.422e+02, threshold=5.011e+02, percent-clipped=2.0
+2024-08-25 06:35:40,423 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.02 vs. limit=10.0
+2024-08-25 06:36:37,429 INFO [train.py:1114] (1/4) Epoch 4, batch 1700, loss[loss=0.2579, simple_loss=0.2941, pruned_loss=0.0796, ctc_loss=0.1563, over 19694.00 frames. ], tot_loss[loss=0.3156, simple_loss=0.3409, pruned_loss=0.1055, ctc_loss=0.1984, over 3843496.50 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-25 06:36:52,615 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.59 vs. limit=12.0
+2024-08-25 06:37:38,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=49056.0, ans=0.00020521739130434716
+2024-08-25 06:37:45,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=49056.0, ans=0.0
+2024-08-25 06:38:16,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=49109.333333333336, ans=10.0
+2024-08-25 06:38:25,884 INFO [train.py:1114] (1/4) Epoch 4, batch 1750, loss[loss=0.2528, simple_loss=0.2921, pruned_loss=0.0768, ctc_loss=0.1496, over 19641.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3397, pruned_loss=0.1044, ctc_loss=0.1965, over 3848065.76 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:38:33,082 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 1.987e+02 2.278e+02 2.713e+02 5.908e+02, threshold=4.555e+02, percent-clipped=1.0
+2024-08-25 06:38:40,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=49216.0, ans=0.125
+2024-08-25 06:38:59,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=49269.333333333336, ans=0.0
+2024-08-25 06:39:01,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=49269.333333333336, ans=0.025
+2024-08-25 06:39:02,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.55 vs. limit=22.5
+2024-08-25 06:39:06,350 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=49322.666666666664, ans=0.000147246376811596
+2024-08-25 06:39:06,797 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.76 vs. limit=15.0
+2024-08-25 06:39:10,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=49322.666666666664, ans=0.0
+2024-08-25 06:39:11,483 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.35 vs. limit=15.0
+2024-08-25 06:39:31,715 INFO [train.py:1114] (1/4) Epoch 4, batch 1800, loss[loss=0.3176, simple_loss=0.349, pruned_loss=0.1031, ctc_loss=0.2001, over 19620.00 frames. ], tot_loss[loss=0.3135, simple_loss=0.3397, pruned_loss=0.1044, ctc_loss=0.1964, over 3849760.61 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:40:31,518 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.51 vs. limit=22.5
+2024-08-25 06:40:48,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=49642.666666666664, ans=0.125
+2024-08-25 06:40:54,710 INFO [train.py:1114] (1/4) Epoch 4, batch 1850, loss[loss=0.3193, simple_loss=0.3476, pruned_loss=0.1051, ctc_loss=0.202, over 19583.00 frames. ], tot_loss[loss=0.3144, simple_loss=0.3402, pruned_loss=0.1049, ctc_loss=0.197, over 3853486.93 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:40:59,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=49696.0, ans=0.0
+2024-08-25 06:41:01,661 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.149e+02 2.307e+02 2.574e+02 4.619e+02, threshold=4.614e+02, percent-clipped=1.0
+2024-08-25 06:41:15,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=49749.333333333336, ans=0.2
+2024-08-25 06:41:39,353 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=14.36 vs. limit=15.0
+2024-08-25 06:41:47,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=49909.333333333336, ans=0.0
+2024-08-25 06:41:50,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=49909.333333333336, ans=0.125
+2024-08-25 06:41:54,680 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=49909.333333333336, ans=0.0
+2024-08-25 06:41:59,170 INFO [train.py:1114] (1/4) Epoch 4, batch 1900, loss[loss=0.2898, simple_loss=0.3314, pruned_loss=0.08876, ctc_loss=0.177, over 19639.00 frames. ], tot_loss[loss=0.3148, simple_loss=0.341, pruned_loss=0.105, ctc_loss=0.197, over 3858891.13 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:42:32,894 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.80 vs. limit=22.5
+2024-08-25 06:43:14,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=50122.666666666664, ans=0.09899494936611666
+2024-08-25 06:43:37,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=50176.0, ans=0.09899494936611666
+2024-08-25 06:43:39,883 INFO [train.py:1114] (1/4) Epoch 4, batch 1950, loss[loss=0.2621, simple_loss=0.3093, pruned_loss=0.07667, ctc_loss=0.1539, over 19581.00 frames. ], tot_loss[loss=0.3148, simple_loss=0.3417, pruned_loss=0.1046, ctc_loss=0.1965, over 3868493.40 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:43:41,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=50229.333333333336, ans=0.09899494936611666
+2024-08-25 06:43:45,593 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.065e+02 2.259e+02 2.635e+02 4.732e+02, threshold=4.517e+02, percent-clipped=1.0
+2024-08-25 06:44:00,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.46 vs. limit=15.0
+2024-08-25 06:44:01,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=50336.0, ans=0.025
+2024-08-25 06:44:18,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=50389.333333333336, ans=0.125
+2024-08-25 06:44:19,700 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.01 vs. limit=10.0
+2024-08-25 06:44:48,804 INFO [train.py:1114] (1/4) Epoch 4, batch 2000, loss[loss=0.2666, simple_loss=0.2958, pruned_loss=0.08521, ctc_loss=0.1672, over 19608.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.3423, pruned_loss=0.1053, ctc_loss=0.1976, over 3853087.84 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:45:44,388 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.52 vs. limit=15.0
+2024-08-25 06:45:54,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=50602.666666666664, ans=0.125
+2024-08-25 06:46:13,596 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:46:32,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=50709.333333333336, ans=0.07
+2024-08-25 06:46:34,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=50762.666666666664, ans=0.0
+2024-08-25 06:46:35,025 INFO [train.py:1114] (1/4) Epoch 4, batch 2050, loss[loss=0.2484, simple_loss=0.2912, pruned_loss=0.07412, ctc_loss=0.1435, over 19738.00 frames. ], tot_loss[loss=0.3154, simple_loss=0.3412, pruned_loss=0.1052, ctc_loss=0.1976, over 3850142.93 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:46:36,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=50762.666666666664, ans=0.2
+2024-08-25 06:46:45,622 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.046e+02 2.338e+02 2.720e+02 4.537e+02, threshold=4.675e+02, percent-clipped=1.0
+2024-08-25 06:46:51,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=50816.0, ans=0.125
+2024-08-25 06:47:18,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=50922.666666666664, ans=0.125
+2024-08-25 06:47:23,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=50922.666666666664, ans=0.1
+2024-08-25 06:47:27,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=50922.666666666664, ans=0.125
+2024-08-25 06:47:32,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=50976.0, ans=0.125
+2024-08-25 06:47:37,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=50976.0, ans=0.5
+2024-08-25 06:47:43,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=50976.0, ans=0.0
+2024-08-25 06:47:47,556 INFO [train.py:1114] (1/4) Epoch 4, batch 2100, loss[loss=0.279, simple_loss=0.3226, pruned_loss=0.08636, ctc_loss=0.1565, over 19799.00 frames. ], tot_loss[loss=0.3141, simple_loss=0.3404, pruned_loss=0.1046, ctc_loss=0.1962, over 3857068.53 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:48:02,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51029.333333333336, ans=0.1
+2024-08-25 06:48:38,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=51082.666666666664, ans=0.125
+2024-08-25 06:48:43,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=51136.0, ans=0.125
+2024-08-25 06:48:45,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=51136.0, ans=0.125
+2024-08-25 06:49:09,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=51189.333333333336, ans=0.2
+2024-08-25 06:49:31,476 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=51242.666666666664, ans=0.5
+2024-08-25 06:49:32,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=51242.666666666664, ans=0.1
+2024-08-25 06:49:33,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=51242.666666666664, ans=0.125
+2024-08-25 06:49:45,527 INFO [train.py:1114] (1/4) Epoch 4, batch 2150, loss[loss=0.3019, simple_loss=0.3292, pruned_loss=0.1001, ctc_loss=0.186, over 19577.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3388, pruned_loss=0.1037, ctc_loss=0.1945, over 3867589.37 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:49:54,450 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.035e+02 2.305e+02 2.639e+02 4.596e+02, threshold=4.610e+02, percent-clipped=0.0
+2024-08-25 06:50:06,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=51296.0, ans=0.0
+2024-08-25 06:50:12,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=51349.333333333336, ans=0.0
+2024-08-25 06:50:20,008 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.69 vs. limit=10.0
+2024-08-25 06:51:15,390 INFO [train.py:1114] (1/4) Epoch 4, batch 2200, loss[loss=0.3026, simple_loss=0.3466, pruned_loss=0.09274, ctc_loss=0.1829, over 19581.00 frames. ], tot_loss[loss=0.3113, simple_loss=0.3385, pruned_loss=0.1033, ctc_loss=0.1936, over 3866382.53 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:51:25,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=51616.0, ans=0.2
+2024-08-25 06:51:33,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51616.0, ans=0.1
+2024-08-25 06:51:33,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=51616.0, ans=0.0
+2024-08-25 06:51:36,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=51616.0, ans=0.125
+2024-08-25 06:51:52,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=51669.333333333336, ans=0.125
+2024-08-25 06:51:53,193 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=51669.333333333336, ans=0.2
+2024-08-25 06:51:53,633 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=15.0
+2024-08-25 06:51:59,239 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.37 vs. limit=22.5
+2024-08-25 06:52:24,904 INFO [train.py:1114] (1/4) Epoch 4, batch 2250, loss[loss=0.3152, simple_loss=0.3494, pruned_loss=0.1036, ctc_loss=0.1843, over 19607.00 frames. ], tot_loss[loss=0.3124, simple_loss=0.339, pruned_loss=0.104, ctc_loss=0.1947, over 3866626.85 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:52:26,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=51829.333333333336, ans=0.1
+2024-08-25 06:52:27,600 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.32 vs. limit=22.5
+2024-08-25 06:52:32,000 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.164e+02 2.622e+02 3.263e+02 6.940e+02, threshold=5.245e+02, percent-clipped=2.0
+2024-08-25 06:52:41,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=51882.666666666664, ans=0.125
+2024-08-25 06:52:50,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=51936.0, ans=0.125
+2024-08-25 06:53:16,188 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.01 vs. limit=15.0
+2024-08-25 06:53:30,935 INFO [train.py:1114] (1/4) Epoch 4, batch 2300, loss[loss=0.3195, simple_loss=0.3357, pruned_loss=0.1102, ctc_loss=0.2072, over 19509.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3383, pruned_loss=0.1039, ctc_loss=0.1945, over 3861051.19 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:53:41,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=52096.0, ans=0.125
+2024-08-25 06:53:45,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=52149.333333333336, ans=0.125
+2024-08-25 06:53:45,990 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.56 vs. limit=6.0
+2024-08-25 06:53:50,742 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=52149.333333333336, ans=0.125
+2024-08-25 06:53:54,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=52149.333333333336, ans=0.025
+2024-08-25 06:54:03,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=52202.666666666664, ans=0.025
+2024-08-25 06:54:08,198 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.23 vs. limit=15.0
+2024-08-25 06:54:28,485 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=52309.333333333336, ans=0.0
+2024-08-25 06:54:53,354 INFO [train.py:1114] (1/4) Epoch 4, batch 2350, loss[loss=0.3222, simple_loss=0.3487, pruned_loss=0.1089, ctc_loss=0.1951, over 19643.00 frames. ], tot_loss[loss=0.3109, simple_loss=0.3377, pruned_loss=0.1033, ctc_loss=0.1937, over 3864614.61 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 06:54:56,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=52362.666666666664, ans=0.125
+2024-08-25 06:54:58,711 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.121e+02 2.497e+02 3.048e+02 4.745e+02, threshold=4.995e+02, percent-clipped=0.0
+2024-08-25 06:55:01,104 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=1.041e-02
+2024-08-25 06:55:11,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=52416.0, ans=0.125
+2024-08-25 07:03:05,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=52576.0, ans=0.0
+2024-08-25 07:06:48,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52576.0, ans=0.1
+2024-08-25 07:07:21,817 INFO [train.py:1114] (1/4) Epoch 4, batch 2400, loss[loss=0.3551, simple_loss=0.3699, pruned_loss=0.1242, ctc_loss=0.2297, over 19266.00 frames. ], tot_loss[loss=0.3133, simple_loss=0.3401, pruned_loss=0.1042, ctc_loss=0.1954, over 3858684.65 frames. ], batch size: 71, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 07:07:47,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52629.333333333336, ans=0.1
+2024-08-25 07:08:18,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=52629.333333333336, ans=0.0
+2024-08-25 07:10:22,076 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.74 vs. limit=15.0
+2024-08-25 07:34:58,795 INFO [train.py:1114] (1/4) Epoch 4, batch 2450, loss[loss=0.4118, simple_loss=0.3863, pruned_loss=0.1578, ctc_loss=0.3041, over 13884.00 frames. ], tot_loss[loss=0.3219, simple_loss=0.3452, pruned_loss=0.1086, ctc_loss=0.2037, over 3734050.30 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-25 07:36:27,111 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.096e+02 2.355e+02 2.735e+02 5.246e+02, threshold=4.710e+02, percent-clipped=1.0
+2024-08-25 07:43:07,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=53002.666666666664, ans=0.1
+2024-08-25 07:43:10,380 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.39 vs. limit=15.0
+2024-08-25 07:44:31,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=53056.0, ans=0.125
+2024-08-25 07:46:30,880 INFO [train.py:1114] (1/4) Epoch 5, batch 0, loss[loss=0.2997, simple_loss=0.3171, pruned_loss=0.1029, ctc_loss=0.1908, over 19837.00 frames. ], tot_loss[loss=0.2997, simple_loss=0.3171, pruned_loss=0.1029, ctc_loss=0.1908, over 19837.00 frames. ], batch size: 49, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 07:46:30,881 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 07:49:02,116 INFO [train.py:1146] (1/4) Epoch 5, validation: loss=0.2543, simple_loss=0.3259, pruned_loss=0.06691, ctc_loss=0.1221, over 944034.00 frames.
+2024-08-25 07:49:02,117 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 07:50:55,240 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.14 vs. limit=15.0
+2024-08-25 07:52:00,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=53157.333333333336, ans=15.0
+2024-08-25 07:54:37,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=53157.333333333336, ans=0.0
+2024-08-25 07:57:02,485 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.71 vs. limit=15.0
+2024-08-25 07:58:12,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=53210.666666666664, ans=0.0
+2024-08-25 08:01:56,939 INFO [train.py:1114] (1/4) Epoch 5, batch 50, loss[loss=0.2747, simple_loss=0.3072, pruned_loss=0.08795, ctc_loss=0.1657, over 19700.00 frames. ], tot_loss[loss=0.3157, simple_loss=0.3415, pruned_loss=0.1053, ctc_loss=0.1982, over 844585.17 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 08:03:39,813 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.27 vs. limit=15.0
+2024-08-25 08:03:51,540 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 1.984e+02 2.202e+02 2.522e+02 4.045e+02, threshold=4.404e+02, percent-clipped=0.0
+2024-08-25 08:06:10,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=53530.666666666664, ans=0.0
+2024-08-25 08:06:48,826 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=10.63 vs. limit=15.0
+2024-08-25 08:07:22,860 INFO [train.py:1114] (1/4) Epoch 5, batch 100, loss[loss=0.3123, simple_loss=0.3322, pruned_loss=0.1045, ctc_loss=0.2083, over 19728.00 frames. ], tot_loss[loss=0.3155, simple_loss=0.3423, pruned_loss=0.1048, ctc_loss=0.1981, over 1499416.85 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:08:16,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=53690.666666666664, ans=0.0
+2024-08-25 08:08:25,544 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.54 vs. limit=15.0
+2024-08-25 08:08:37,688 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.88 vs. limit=22.5
+2024-08-25 08:08:43,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.whiten.whitening_limit, batch_count=53744.0, ans=12.0
+2024-08-25 08:08:50,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=53744.0, ans=0.04949747468305833
+2024-08-25 08:09:05,962 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=53797.333333333336, ans=0.1
+2024-08-25 08:09:45,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=53797.333333333336, ans=0.125
+2024-08-25 08:10:03,743 INFO [train.py:1114] (1/4) Epoch 5, batch 150, loss[loss=0.2441, simple_loss=0.2901, pruned_loss=0.07109, ctc_loss=0.1399, over 19717.00 frames. ], tot_loss[loss=0.3115, simple_loss=0.3393, pruned_loss=0.103, ctc_loss=0.1943, over 2027995.75 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:10:08,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=53904.0, ans=0.125
+2024-08-25 08:10:27,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=53904.0, ans=0.125
+2024-08-25 08:10:37,441 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.17 vs. limit=15.0
+2024-08-25 08:10:40,323 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.115e+02 2.389e+02 2.764e+02 4.531e+02, threshold=4.777e+02, percent-clipped=1.0
+2024-08-25 08:11:10,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54010.666666666664, ans=0.1
+2024-08-25 08:11:10,302 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.79 vs. limit=22.5
+2024-08-25 08:11:11,261 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=54010.666666666664, ans=0.2
+2024-08-25 08:11:24,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=54064.0, ans=0.125
+2024-08-25 08:11:47,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=54117.333333333336, ans=0.2
+2024-08-25 08:12:01,136 INFO [train.py:1114] (1/4) Epoch 5, batch 200, loss[loss=0.3474, simple_loss=0.3674, pruned_loss=0.1172, ctc_loss=0.2323, over 18206.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3361, pruned_loss=0.1007, ctc_loss=0.1902, over 2435865.93 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:13:52,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=54170.666666666664, ans=0.0
+2024-08-25 08:15:10,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=54330.666666666664, ans=0.025
+2024-08-25 08:15:57,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=54437.333333333336, ans=0.0
+2024-08-25 08:15:58,938 INFO [train.py:1114] (1/4) Epoch 5, batch 250, loss[loss=0.3264, simple_loss=0.3502, pruned_loss=0.1113, ctc_loss=0.2001, over 19452.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.3357, pruned_loss=0.1006, ctc_loss=0.1892, over 2756215.66 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:16:47,930 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 1.969e+02 2.164e+02 2.373e+02 3.326e+02, threshold=4.328e+02, percent-clipped=0.0
+2024-08-25 08:16:51,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=54544.0, ans=0.125
+2024-08-25 08:17:00,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=54544.0, ans=0.2
+2024-08-25 08:17:18,969 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.62 vs. limit=15.0
+2024-08-25 08:17:20,856 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=54650.666666666664, ans=0.125
+2024-08-25 08:17:25,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=54650.666666666664, ans=0.0
+2024-08-25 08:17:25,917 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.27 vs. limit=22.5
+2024-08-25 08:17:32,561 INFO [train.py:1114] (1/4) Epoch 5, batch 300, loss[loss=0.307, simple_loss=0.3434, pruned_loss=0.09964, ctc_loss=0.1783, over 19520.00 frames. ], tot_loss[loss=0.304, simple_loss=0.334, pruned_loss=0.09959, ctc_loss=0.1872, over 3000581.82 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:17:42,253 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.46 vs. limit=22.5
+2024-08-25 08:17:47,396 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.59 vs. limit=12.0
+2024-08-25 08:17:56,686 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=6.89 vs. limit=12.0
+2024-08-25 08:18:29,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=54917.333333333336, ans=0.0
+2024-08-25 08:18:35,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=54917.333333333336, ans=0.0
+2024-08-25 08:18:36,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=54917.333333333336, ans=0.125
+2024-08-25 08:18:38,528 INFO [train.py:1114] (1/4) Epoch 5, batch 350, loss[loss=0.3063, simple_loss=0.3273, pruned_loss=0.1037, ctc_loss=0.1948, over 19774.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.335, pruned_loss=0.1001, ctc_loss=0.1882, over 3190486.70 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 16.0
+2024-08-25 08:18:54,776 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.52 vs. limit=10.0
+2024-08-25 08:19:02,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=55024.0, ans=0.0
+2024-08-25 08:19:10,797 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.967e+02 2.265e+02 2.794e+02 4.039e+02, threshold=4.529e+02, percent-clipped=0.0
+2024-08-25 08:19:42,961 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.53 vs. limit=15.0
+2024-08-25 08:19:43,137 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.23 vs. limit=22.5
+2024-08-25 08:19:51,984 INFO [train.py:1114] (1/4) Epoch 5, batch 400, loss[loss=0.2861, simple_loss=0.3294, pruned_loss=0.08761, ctc_loss=0.1692, over 19505.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3349, pruned_loss=0.09999, ctc_loss=0.1881, over 3341696.15 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-25 08:19:59,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=55237.333333333336, ans=0.125
+2024-08-25 08:20:03,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=55290.666666666664, ans=0.125
+2024-08-25 08:20:15,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=55290.666666666664, ans=0.2
+2024-08-25 08:20:20,551 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=55344.0, ans=0.0
+2024-08-25 08:20:40,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=55397.333333333336, ans=0.2
+2024-08-25 08:20:50,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=55397.333333333336, ans=0.125
+2024-08-25 08:21:19,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=55450.666666666664, ans=0.015
+2024-08-25 08:21:19,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=55450.666666666664, ans=0.0
+2024-08-25 08:21:27,045 INFO [train.py:1114] (1/4) Epoch 5, batch 450, loss[loss=0.2821, simple_loss=0.3289, pruned_loss=0.08455, ctc_loss=0.1653, over 19626.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.334, pruned_loss=0.09922, ctc_loss=0.1867, over 3450532.18 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:21:31,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=55504.0, ans=0.025
+2024-08-25 08:21:45,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=55557.333333333336, ans=0.125
+2024-08-25 08:21:47,866 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.008e+02 2.249e+02 2.774e+02 4.428e+02, threshold=4.498e+02, percent-clipped=0.0
+2024-08-25 08:21:58,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=55610.666666666664, ans=0.125
+2024-08-25 08:22:03,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=55664.0, ans=0.1
+2024-08-25 08:22:04,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=55664.0, ans=0.125
+2024-08-25 08:22:26,714 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=55717.333333333336, ans=0.125
+2024-08-25 08:22:42,209 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.01 vs. limit=15.0
+2024-08-25 08:22:44,524 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.03 vs. limit=15.0
+2024-08-25 08:22:58,212 INFO [train.py:1114] (1/4) Epoch 5, batch 500, loss[loss=0.3221, simple_loss=0.355, pruned_loss=0.1061, ctc_loss=0.1923, over 19671.00 frames. ], tot_loss[loss=0.3006, simple_loss=0.3318, pruned_loss=0.09794, ctc_loss=0.1839, over 3545615.28 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:23:24,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=55824.0, ans=0.125
+2024-08-25 08:23:26,191 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.18 vs. limit=22.5
+2024-08-25 08:23:30,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=55824.0, ans=0.125
+2024-08-25 08:23:40,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=55824.0, ans=0.0
+2024-08-25 08:23:57,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=55930.666666666664, ans=10.0
+2024-08-25 08:24:00,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55930.666666666664, ans=0.1
+2024-08-25 08:24:16,341 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.26 vs. limit=15.0
+2024-08-25 08:24:21,961 INFO [train.py:1114] (1/4) Epoch 5, batch 550, loss[loss=0.3362, simple_loss=0.3657, pruned_loss=0.1122, ctc_loss=0.2058, over 19310.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3324, pruned_loss=0.09841, ctc_loss=0.1849, over 3607774.45 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:24:24,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=56037.333333333336, ans=0.0
+2024-08-25 08:24:30,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=56037.333333333336, ans=0.125
+2024-08-25 08:24:47,071 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 1.991e+02 2.247e+02 2.867e+02 6.260e+02, threshold=4.494e+02, percent-clipped=1.0
+2024-08-25 08:24:58,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=56144.0, ans=0.2
+2024-08-25 08:25:00,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=56197.333333333336, ans=0.125
+2024-08-25 08:25:13,945 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=56197.333333333336, ans=0.2
+2024-08-25 08:25:25,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=56250.666666666664, ans=0.125
+2024-08-25 08:25:37,706 INFO [train.py:1114] (1/4) Epoch 5, batch 600, loss[loss=0.3366, simple_loss=0.3555, pruned_loss=0.1159, ctc_loss=0.2143, over 19452.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3326, pruned_loss=0.09811, ctc_loss=0.1845, over 3664908.07 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:25:37,841 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=56304.0, ans=0.025
+2024-08-25 08:25:48,249 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=56304.0, ans=0.0
+2024-08-25 08:25:59,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=56357.333333333336, ans=0.2
+2024-08-25 08:26:20,349 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.18 vs. limit=15.0
+2024-08-25 08:26:33,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=56517.333333333336, ans=0.125
+2024-08-25 08:26:33,716 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.22 vs. limit=15.0
+2024-08-25 08:26:45,252 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.29 vs. limit=12.0
+2024-08-25 08:26:47,458 INFO [train.py:1114] (1/4) Epoch 5, batch 650, loss[loss=0.2969, simple_loss=0.328, pruned_loss=0.09665, ctc_loss=0.1814, over 19773.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3319, pruned_loss=0.09771, ctc_loss=0.1837, over 3716151.69 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:27:13,350 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.957e+02 2.352e+02 2.685e+02 4.359e+02, threshold=4.704e+02, percent-clipped=0.0
+2024-08-25 08:27:25,261 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:27:51,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=56784.0, ans=0.1
+2024-08-25 08:28:10,104 INFO [train.py:1114] (1/4) Epoch 5, batch 700, loss[loss=0.3208, simple_loss=0.3356, pruned_loss=0.1111, ctc_loss=0.2096, over 19703.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3326, pruned_loss=0.09846, ctc_loss=0.1849, over 3748123.76 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:28:22,557 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=56837.333333333336, ans=0.2
+2024-08-25 08:29:06,493 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.36 vs. limit=12.0
+2024-08-25 08:29:14,810 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.68 vs. limit=15.0
+2024-08-25 08:29:25,008 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.86 vs. limit=15.0
+2024-08-25 08:29:36,995 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.12 vs. limit=22.5
+2024-08-25 08:29:41,359 INFO [train.py:1114] (1/4) Epoch 5, batch 750, loss[loss=0.3078, simple_loss=0.3377, pruned_loss=0.1015, ctc_loss=0.1872, over 19491.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.332, pruned_loss=0.09801, ctc_loss=0.1841, over 3774347.55 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:30:03,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=57104.0, ans=0.125
+2024-08-25 08:30:08,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=57104.0, ans=0.125
+2024-08-25 08:30:20,553 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=57157.333333333336, ans=0.2
+2024-08-25 08:30:28,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=57157.333333333336, ans=0.125
+2024-08-25 08:30:38,646 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.09 vs. limit=15.0
+2024-08-25 08:30:40,359 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.099e+02 2.472e+02 3.181e+02 5.803e+02, threshold=4.945e+02, percent-clipped=2.0
+2024-08-25 08:31:06,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=57210.666666666664, ans=0.09899494936611666
+2024-08-25 08:31:07,397 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:32:01,792 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.44 vs. limit=22.5
+2024-08-25 08:32:05,738 INFO [train.py:1114] (1/4) Epoch 5, batch 800, loss[loss=0.2599, simple_loss=0.3026, pruned_loss=0.07906, ctc_loss=0.1477, over 19818.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3315, pruned_loss=0.09781, ctc_loss=0.1835, over 3795939.85 frames. ], batch size: 49, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:32:36,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=57424.0, ans=0.125
+2024-08-25 08:32:41,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=57424.0, ans=0.1
+2024-08-25 08:33:37,745 INFO [train.py:1114] (1/4) Epoch 5, batch 850, loss[loss=0.316, simple_loss=0.3517, pruned_loss=0.1017, ctc_loss=0.1923, over 19645.00 frames. ], tot_loss[loss=0.299, simple_loss=0.3308, pruned_loss=0.09714, ctc_loss=0.1824, over 3814806.93 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:34:26,554 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.963e+02 2.197e+02 2.544e+02 4.330e+02, threshold=4.395e+02, percent-clipped=0.0
+2024-08-25 08:34:26,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=57690.666666666664, ans=0.125
+2024-08-25 08:34:55,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=57797.333333333336, ans=0.025
+2024-08-25 08:35:08,269 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=57850.666666666664, ans=0.025
+2024-08-25 08:35:17,361 INFO [train.py:1114] (1/4) Epoch 5, batch 900, loss[loss=0.2814, simple_loss=0.3141, pruned_loss=0.08989, ctc_loss=0.1725, over 19813.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3316, pruned_loss=0.09782, ctc_loss=0.1838, over 3819785.67 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:35:28,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=57904.0, ans=0.05
+2024-08-25 08:35:45,668 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.46 vs. limit=15.0
+2024-08-25 08:36:06,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=58064.0, ans=22.5
+2024-08-25 08:36:28,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58117.333333333336, ans=0.1
+2024-08-25 08:36:30,015 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=58117.333333333336, ans=0.125
+2024-08-25 08:36:30,831 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.15 vs. limit=6.0
+2024-08-25 08:36:37,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=58117.333333333336, ans=0.2
+2024-08-25 08:36:41,309 INFO [train.py:1114] (1/4) Epoch 5, batch 950, loss[loss=0.3122, simple_loss=0.3373, pruned_loss=0.1034, ctc_loss=0.2005, over 19489.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3316, pruned_loss=0.09777, ctc_loss=0.1838, over 3821498.10 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:37:02,452 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.021e+02 2.236e+02 2.607e+02 6.234e+02, threshold=4.471e+02, percent-clipped=1.0
+2024-08-25 08:37:14,493 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.84 vs. limit=15.0
+2024-08-25 08:37:38,723 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.45 vs. limit=12.0
+2024-08-25 08:37:49,067 INFO [train.py:1114] (1/4) Epoch 5, batch 1000, loss[loss=0.2729, simple_loss=0.3089, pruned_loss=0.08621, ctc_loss=0.1612, over 19869.00 frames. ], tot_loss[loss=0.3015, simple_loss=0.3324, pruned_loss=0.09831, ctc_loss=0.1848, over 3818493.34 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:37:50,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=58437.333333333336, ans=0.0
+2024-08-25 08:38:24,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=58490.666666666664, ans=0.2
+2024-08-25 08:38:51,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=58544.0, ans=0.0
+2024-08-25 08:39:01,631 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=58597.333333333336, ans=0.2
+2024-08-25 08:39:17,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=58650.666666666664, ans=0.0
+2024-08-25 08:39:18,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=58704.0, ans=0.025
+2024-08-25 08:39:20,331 INFO [train.py:1114] (1/4) Epoch 5, batch 1050, loss[loss=0.2895, simple_loss=0.3369, pruned_loss=0.08841, ctc_loss=0.1634, over 19830.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3315, pruned_loss=0.09774, ctc_loss=0.1839, over 3824036.28 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:39:30,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=58757.333333333336, ans=0.125
+2024-08-25 08:39:36,796 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.14 vs. limit=15.0
+2024-08-25 08:39:39,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=58757.333333333336, ans=0.125
+2024-08-25 08:39:41,241 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.929e+02 2.228e+02 2.594e+02 4.447e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 08:40:09,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=58864.0, ans=0.125
+2024-08-25 08:40:24,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=58864.0, ans=0.2
+2024-08-25 08:40:35,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.96 vs. limit=15.0
+2024-08-25 08:40:42,203 INFO [train.py:1114] (1/4) Epoch 5, batch 1100, loss[loss=0.2777, simple_loss=0.3122, pruned_loss=0.0876, ctc_loss=0.1702, over 19580.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.3315, pruned_loss=0.09767, ctc_loss=0.1839, over 3831028.62 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:40:51,896 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.93 vs. limit=6.0
+2024-08-25 08:40:54,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=58970.666666666664, ans=0.0
+2024-08-25 08:40:56,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=58970.666666666664, ans=0.125
+2024-08-25 08:41:19,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=59077.333333333336, ans=0.2
+2024-08-25 08:41:22,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59077.333333333336, ans=0.1
+2024-08-25 08:41:33,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=59130.666666666664, ans=0.025
+2024-08-25 08:41:54,672 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.38 vs. limit=15.0
+2024-08-25 08:42:06,695 INFO [train.py:1114] (1/4) Epoch 5, batch 1150, loss[loss=0.2645, simple_loss=0.3135, pruned_loss=0.07846, ctc_loss=0.1462, over 19596.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3313, pruned_loss=0.09745, ctc_loss=0.1835, over 3830538.20 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:42:17,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=59237.333333333336, ans=0.09899494936611666
+2024-08-25 08:42:21,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=59237.333333333336, ans=0.125
+2024-08-25 08:42:38,150 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.022e+02 2.244e+02 2.636e+02 4.087e+02, threshold=4.489e+02, percent-clipped=0.0
+2024-08-25 08:42:38,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=59290.666666666664, ans=0.125
+2024-08-25 08:43:17,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=59397.333333333336, ans=0.025
+2024-08-25 08:43:23,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=59450.666666666664, ans=0.125
+2024-08-25 08:43:33,331 INFO [train.py:1114] (1/4) Epoch 5, batch 1200, loss[loss=0.2956, simple_loss=0.3356, pruned_loss=0.09288, ctc_loss=0.1745, over 19832.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.3325, pruned_loss=0.09822, ctc_loss=0.1847, over 3824495.26 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:43:34,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=59504.0, ans=0.125
+2024-08-25 08:43:42,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=59504.0, ans=0.0
+2024-08-25 08:43:56,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.63 vs. limit=22.5
+2024-08-25 08:44:00,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=59557.333333333336, ans=0.0
+2024-08-25 08:44:15,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=59610.666666666664, ans=0.025
+2024-08-25 08:44:49,245 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=59717.333333333336, ans=0.0
+2024-08-25 08:44:50,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59717.333333333336, ans=0.1
+2024-08-25 08:44:52,823 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.96 vs. limit=15.0
+2024-08-25 08:44:55,319 INFO [train.py:1114] (1/4) Epoch 5, batch 1250, loss[loss=0.3266, simple_loss=0.3507, pruned_loss=0.1118, ctc_loss=0.1971, over 19512.00 frames. ], tot_loss[loss=0.3006, simple_loss=0.3325, pruned_loss=0.09765, ctc_loss=0.1836, over 3842699.00 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:44:56,556 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=59770.666666666664, ans=0.125
+2024-08-25 08:45:01,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59770.666666666664, ans=0.1
+2024-08-25 08:45:21,211 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 1.906e+02 2.098e+02 2.362e+02 4.005e+02, threshold=4.196e+02, percent-clipped=0.0
+2024-08-25 08:45:41,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=59930.666666666664, ans=0.0
+2024-08-25 08:45:46,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=59930.666666666664, ans=0.0
+2024-08-25 08:45:55,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=59984.0, ans=0.07
+2024-08-25 08:45:56,292 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.40 vs. limit=15.0
+2024-08-25 08:46:03,572 INFO [train.py:1114] (1/4) Epoch 5, batch 1300, loss[loss=0.331, simple_loss=0.3491, pruned_loss=0.1148, ctc_loss=0.2081, over 18800.00 frames. ], tot_loss[loss=0.2984, simple_loss=0.3308, pruned_loss=0.09662, ctc_loss=0.1817, over 3846418.41 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:46:16,561 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=60037.333333333336, ans=0.0
+2024-08-25 08:46:22,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=60037.333333333336, ans=0.125
+2024-08-25 08:46:38,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=60090.666666666664, ans=0.125
+2024-08-25 08:46:43,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=60144.0, ans=0.125
+2024-08-25 08:46:59,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=60197.333333333336, ans=0.125
+2024-08-25 08:47:27,092 INFO [train.py:1114] (1/4) Epoch 5, batch 1350, loss[loss=0.2784, simple_loss=0.3267, pruned_loss=0.08333, ctc_loss=0.1584, over 19761.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3298, pruned_loss=0.09577, ctc_loss=0.1802, over 3857363.12 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:47:27,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=60304.0, ans=0.125
+2024-08-25 08:47:36,197 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.16 vs. limit=15.0
+2024-08-25 08:47:47,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=60357.333333333336, ans=0.1
+2024-08-25 08:47:52,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=60357.333333333336, ans=0.125
+2024-08-25 08:48:06,343 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.950e+02 2.204e+02 2.621e+02 4.331e+02, threshold=4.409e+02, percent-clipped=1.0
+2024-08-25 08:48:23,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=60410.666666666664, ans=0.0
+2024-08-25 08:49:03,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=60517.333333333336, ans=0.125
+2024-08-25 08:49:14,319 INFO [train.py:1114] (1/4) Epoch 5, batch 1400, loss[loss=0.238, simple_loss=0.2796, pruned_loss=0.07164, ctc_loss=0.1327, over 19679.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3295, pruned_loss=0.09558, ctc_loss=0.1797, over 3863772.33 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:49:50,633 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.10 vs. limit=12.0
+2024-08-25 08:49:57,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=60730.666666666664, ans=0.125
+2024-08-25 08:55:49,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=60784.0, ans=0.1
+2024-08-25 09:01:11,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=60784.0, ans=0.125
+2024-08-25 09:01:57,437 INFO [train.py:1114] (1/4) Epoch 5, batch 1450, loss[loss=0.3356, simple_loss=0.3626, pruned_loss=0.1121, ctc_loss=0.2113, over 19674.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.3309, pruned_loss=0.09647, ctc_loss=0.1813, over 3862302.65 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 09:04:56,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=60837.333333333336, ans=0.2
+2024-08-25 09:05:53,386 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.25 vs. limit=15.0
+2024-08-25 09:08:29,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60890.666666666664, ans=0.1
+2024-08-25 09:11:44,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=60890.666666666664, ans=0.125
+2024-08-25 09:14:29,264 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 1.942e+02 2.164e+02 2.480e+02 4.633e+02, threshold=4.329e+02, percent-clipped=1.0
+2024-08-25 09:36:13,470 INFO [train.py:1114] (1/4) Epoch 5, batch 1500, loss[loss=0.3141, simple_loss=0.3526, pruned_loss=0.1005, ctc_loss=0.1865, over 19569.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3319, pruned_loss=0.09729, ctc_loss=0.1827, over 3862689.19 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-25 09:42:53,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=61104.0, ans=0.125
+2024-08-25 09:42:54,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=61104.0, ans=0.0
+2024-08-25 09:54:06,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=61210.666666666664, ans=0.125
+2024-08-25 09:55:08,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=61264.0, ans=0.125
+2024-08-25 09:56:05,528 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:06:44,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=61370.666666666664, ans=0.125
+2024-08-25 10:06:52,303 INFO [train.py:1114] (1/4) Epoch 5, batch 1550, loss[loss=0.3129, simple_loss=0.3472, pruned_loss=0.1022, ctc_loss=0.1853, over 19639.00 frames. ], tot_loss[loss=0.2997, simple_loss=0.3317, pruned_loss=0.09724, ctc_loss=0.1828, over 3847007.99 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
+2024-08-25 10:10:30,098 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.67 vs. limit=12.0
+2024-08-25 10:11:27,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=61424.0, ans=0.025
+2024-08-25 10:14:47,416 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.971e+02 2.260e+02 2.611e+02 5.554e+02, threshold=4.519e+02, percent-clipped=3.0
+2024-08-25 10:17:42,564 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.83 vs. limit=15.0
+2024-08-25 10:19:29,535 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=61530.666666666664, ans=0.1
+2024-08-25 10:28:13,717 INFO [train.py:1114] (1/4) Epoch 5, batch 1600, loss[loss=0.3595, simple_loss=0.3729, pruned_loss=0.1269, ctc_loss=0.2309, over 19845.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.332, pruned_loss=0.09756, ctc_loss=0.1835, over 3836328.59 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:28:15,432 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.60 vs. limit=15.0
+2024-08-25 10:28:35,614 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=61637.333333333336, ans=0.0
+2024-08-25 10:31:22,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=61690.666666666664, ans=0.125
+2024-08-25 10:31:23,102 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.06 vs. limit=15.0
+2024-08-25 10:39:31,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.28 vs. limit=6.0
+2024-08-25 10:39:56,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=61850.666666666664, ans=0.125
+2024-08-25 10:40:45,721 INFO [train.py:1114] (1/4) Epoch 5, batch 1650, loss[loss=0.3105, simple_loss=0.3417, pruned_loss=0.1019, ctc_loss=0.1888, over 19665.00 frames. ], tot_loss[loss=0.2984, simple_loss=0.3308, pruned_loss=0.09671, ctc_loss=0.1817, over 3833090.56 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:41:02,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=61904.0, ans=0.125
+2024-08-25 10:42:04,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=61957.333333333336, ans=0.125
+2024-08-25 10:43:04,215 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.985e+02 2.336e+02 2.616e+02 4.728e+02, threshold=4.672e+02, percent-clipped=1.0
+2024-08-25 10:43:35,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=62010.666666666664, ans=0.125
+2024-08-25 10:43:36,451 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:44:28,249 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=62064.0, ans=0.125
+2024-08-25 10:45:50,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=62117.333333333336, ans=0.1
+2024-08-25 10:45:57,300 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.83 vs. limit=22.5
+2024-08-25 10:46:09,418 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.53 vs. limit=15.0
+2024-08-25 10:46:43,734 INFO [train.py:1114] (1/4) Epoch 5, batch 1700, loss[loss=0.2547, simple_loss=0.2837, pruned_loss=0.08221, ctc_loss=0.1532, over 19689.00 frames. ], tot_loss[loss=0.2978, simple_loss=0.3303, pruned_loss=0.09647, ctc_loss=0.1808, over 3847837.05 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:47:39,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=62170.666666666664, ans=0.0
+2024-08-25 10:48:07,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=62224.0, ans=0.0
+2024-08-25 10:48:07,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=62224.0, ans=0.0
+2024-08-25 10:48:59,145 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=2.801e+00
+2024-08-25 10:50:08,889 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.58 vs. limit=15.0
+2024-08-25 10:50:54,978 INFO [train.py:1114] (1/4) Epoch 5, batch 1750, loss[loss=0.2693, simple_loss=0.2987, pruned_loss=0.08654, ctc_loss=0.1672, over 19702.00 frames. ], tot_loss[loss=0.2969, simple_loss=0.3298, pruned_loss=0.09604, ctc_loss=0.1801, over 3852457.68 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:51:42,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=62490.666666666664, ans=0.0
+2024-08-25 10:53:52,981 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.010e+02 2.326e+02 2.972e+02 6.446e+02, threshold=4.653e+02, percent-clipped=3.0
+2024-08-25 10:53:53,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=62544.0, ans=0.2
+2024-08-25 10:53:53,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=62544.0, ans=0.125
+2024-08-25 10:55:56,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=62597.333333333336, ans=0.1
+2024-08-25 10:56:33,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=62650.666666666664, ans=0.0
+2024-08-25 10:57:11,524 INFO [train.py:1114] (1/4) Epoch 5, batch 1800, loss[loss=0.318, simple_loss=0.3501, pruned_loss=0.1048, ctc_loss=0.1911, over 19609.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3301, pruned_loss=0.09632, ctc_loss=0.1808, over 3853524.62 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:57:11,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=62704.0, ans=0.0
+2024-08-25 10:57:55,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=62757.333333333336, ans=0.125
+2024-08-25 10:58:03,794 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.06 vs. limit=15.0
+2024-08-25 10:58:03,906 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.87 vs. limit=15.0
+2024-08-25 10:58:39,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=62864.0, ans=0.125
+2024-08-25 10:59:06,170 INFO [train.py:1114] (1/4) Epoch 5, batch 1850, loss[loss=0.3189, simple_loss=0.3508, pruned_loss=0.1043, ctc_loss=0.196, over 19587.00 frames. ], tot_loss[loss=0.2956, simple_loss=0.3291, pruned_loss=0.09527, ctc_loss=0.1789, over 3856995.29 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 10:59:08,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=62970.666666666664, ans=0.125
+2024-08-25 10:59:23,260 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=63024.0, ans=0.025
+2024-08-25 10:59:32,455 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.044e+02 2.314e+02 2.820e+02 4.474e+02, threshold=4.628e+02, percent-clipped=0.0
+2024-08-25 10:59:33,201 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.33 vs. limit=15.0
+2024-08-25 10:59:33,236 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.83 vs. limit=15.0
+2024-08-25 10:59:39,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=63077.333333333336, ans=0.0
+2024-08-25 10:59:50,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=63130.666666666664, ans=0.125
+2024-08-25 11:00:03,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=63184.0, ans=0.2
+2024-08-25 11:00:05,584 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63184.0, ans=0.1
+2024-08-25 11:00:08,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63184.0, ans=0.1
+2024-08-25 11:00:13,587 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.53 vs. limit=12.0
+2024-08-25 11:00:20,619 INFO [train.py:1114] (1/4) Epoch 5, batch 1900, loss[loss=0.2659, simple_loss=0.3229, pruned_loss=0.0733, ctc_loss=0.1556, over 19659.00 frames. ], tot_loss[loss=0.2952, simple_loss=0.3291, pruned_loss=0.09503, ctc_loss=0.1784, over 3862274.50 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:00:20,886 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=63237.333333333336, ans=0.125
+2024-08-25 11:00:42,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=63290.666666666664, ans=0.125
+2024-08-25 11:01:56,409 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=63397.333333333336, ans=0.0
+2024-08-25 11:02:04,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=63450.666666666664, ans=22.5
+2024-08-25 11:02:12,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=63450.666666666664, ans=0.0
+2024-08-25 11:02:12,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=63450.666666666664, ans=0.0
+2024-08-25 11:02:18,690 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=63504.0, ans=0.125
+2024-08-25 11:02:34,471 INFO [train.py:1114] (1/4) Epoch 5, batch 1950, loss[loss=0.276, simple_loss=0.3185, pruned_loss=0.08386, ctc_loss=0.1643, over 19601.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.3306, pruned_loss=0.09511, ctc_loss=0.1786, over 3870831.22 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:03:02,393 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=63557.333333333336, ans=0.125
+2024-08-25 11:03:16,681 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.932e+02 2.130e+02 2.461e+02 4.838e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 11:03:19,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=63610.666666666664, ans=0.0
+2024-08-25 11:03:47,281 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.44 vs. limit=15.0
+2024-08-25 11:03:58,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=63664.0, ans=0.0
+2024-08-25 11:04:02,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=63717.333333333336, ans=0.0
+2024-08-25 11:04:14,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=63717.333333333336, ans=0.125
+2024-08-25 11:04:18,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.87 vs. limit=6.0
+2024-08-25 11:04:36,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=63770.666666666664, ans=0.125
+2024-08-25 11:04:37,798 INFO [train.py:1114] (1/4) Epoch 5, batch 2000, loss[loss=0.2531, simple_loss=0.2908, pruned_loss=0.07707, ctc_loss=0.153, over 19666.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.332, pruned_loss=0.09618, ctc_loss=0.1802, over 3854746.18 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:04:51,584 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=63824.0, ans=0.2
+2024-08-25 11:04:57,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=63824.0, ans=0.125
+2024-08-25 11:04:59,495 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.83 vs. limit=15.0
+2024-08-25 11:05:16,904 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.28 vs. limit=15.0
+2024-08-25 11:05:41,037 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=63930.666666666664, ans=0.05
+2024-08-25 11:06:08,373 INFO [train.py:1114] (1/4) Epoch 5, batch 2050, loss[loss=0.2582, simple_loss=0.2964, pruned_loss=0.07919, ctc_loss=0.1541, over 19741.00 frames. ], tot_loss[loss=0.2976, simple_loss=0.331, pruned_loss=0.09604, ctc_loss=0.1802, over 3850981.22 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:06:15,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=64037.333333333336, ans=0.125
+2024-08-25 11:06:17,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=64037.333333333336, ans=0.1
+2024-08-25 11:06:29,151 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.037e+02 2.272e+02 2.892e+02 6.343e+02, threshold=4.544e+02, percent-clipped=1.0
+2024-08-25 11:07:05,010 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.47 vs. limit=15.0
+2024-08-25 11:07:10,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=64197.333333333336, ans=0.125
+2024-08-25 11:07:12,589 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.47 vs. limit=15.0
+2024-08-25 11:07:16,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=64250.666666666664, ans=0.125
+2024-08-25 11:07:27,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=64250.666666666664, ans=0.125
+2024-08-25 11:07:48,387 INFO [train.py:1114] (1/4) Epoch 5, batch 2100, loss[loss=0.3108, simple_loss=0.3371, pruned_loss=0.1035, ctc_loss=0.1937, over 19764.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3303, pruned_loss=0.09572, ctc_loss=0.1797, over 3858018.39 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:08:29,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=64410.666666666664, ans=0.0
+2024-08-25 11:08:56,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=64517.333333333336, ans=0.0
+2024-08-25 11:09:20,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64570.666666666664, ans=0.1
+2024-08-25 11:09:21,090 INFO [train.py:1114] (1/4) Epoch 5, batch 2150, loss[loss=0.2553, simple_loss=0.3013, pruned_loss=0.07592, ctc_loss=0.1437, over 19602.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.3294, pruned_loss=0.09531, ctc_loss=0.1788, over 3868961.66 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:09:43,547 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=64624.0, ans=0.0
+2024-08-25 11:09:44,519 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.041e+02 2.279e+02 2.689e+02 3.624e+02, threshold=4.557e+02, percent-clipped=0.0
+2024-08-25 11:10:00,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=64677.333333333336, ans=0.125
+2024-08-25 11:10:07,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=64730.666666666664, ans=0.125
+2024-08-25 11:10:17,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=64730.666666666664, ans=0.125
+2024-08-25 11:10:34,037 INFO [train.py:1114] (1/4) Epoch 5, batch 2200, loss[loss=0.3222, simple_loss=0.3449, pruned_loss=0.1088, ctc_loss=0.2048, over 19577.00 frames. ], tot_loss[loss=0.2953, simple_loss=0.329, pruned_loss=0.0951, ctc_loss=0.1785, over 3867474.71 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:10:39,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=64837.333333333336, ans=0.0
+2024-08-25 11:10:44,841 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=64890.666666666664, ans=0.125
+2024-08-25 11:11:06,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=64997.333333333336, ans=0.125
+2024-08-25 11:11:29,236 INFO [train.py:1114] (1/4) Epoch 5, batch 2250, loss[loss=0.3015, simple_loss=0.3496, pruned_loss=0.09211, ctc_loss=0.1729, over 19617.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3285, pruned_loss=0.09442, ctc_loss=0.1772, over 3868277.11 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:11:42,760 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=5.68 vs. limit=15.0
+2024-08-25 11:11:51,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=65157.333333333336, ans=0.0
+2024-08-25 11:11:51,987 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.180e+02 2.514e+02 3.003e+02 5.559e+02, threshold=5.029e+02, percent-clipped=2.0
+2024-08-25 11:12:38,219 INFO [train.py:1114] (1/4) Epoch 5, batch 2300, loss[loss=0.2875, simple_loss=0.3194, pruned_loss=0.09271, ctc_loss=0.1753, over 19508.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3278, pruned_loss=0.0947, ctc_loss=0.1775, over 3861687.49 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:12:52,825 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.81 vs. limit=15.0
+2024-08-25 11:12:53,929 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.73 vs. limit=15.0
+2024-08-25 11:13:22,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=65530.666666666664, ans=0.1
+2024-08-25 11:13:22,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=65530.666666666664, ans=0.2
+2024-08-25 11:13:32,891 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=65584.0, ans=10.0
+2024-08-25 11:13:50,077 INFO [train.py:1114] (1/4) Epoch 5, batch 2350, loss[loss=0.2953, simple_loss=0.3422, pruned_loss=0.09061, ctc_loss=0.168, over 19686.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3274, pruned_loss=0.09452, ctc_loss=0.1771, over 3864002.50 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:13:50,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=65637.33333333333, ans=0.125
+2024-08-25 11:13:50,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=65637.33333333333, ans=15.0
+2024-08-25 11:13:57,162 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=65637.33333333333, ans=0.1
+2024-08-25 11:14:00,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=65637.33333333333, ans=0.09899494936611666
+2024-08-25 11:14:30,549 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=65690.66666666667, ans=0.025
+2024-08-25 11:14:31,465 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.936e+02 2.303e+02 2.820e+02 4.151e+02, threshold=4.606e+02, percent-clipped=0.0
+2024-08-25 11:14:48,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=65744.0, ans=0.2
+2024-08-25 11:15:23,113 INFO [train.py:1114] (1/4) Epoch 5, batch 2400, loss[loss=0.3147, simple_loss=0.3418, pruned_loss=0.105, ctc_loss=0.1938, over 19270.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3306, pruned_loss=0.0962, ctc_loss=0.1801, over 3858281.04 frames. ], batch size: 71, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:15:54,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=65957.33333333333, ans=0.125
+2024-08-25 11:16:16,576 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=66064.0, ans=0.125
+2024-08-25 11:16:17,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=66064.0, ans=0.125
+2024-08-25 11:16:21,117 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:16:52,285 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.86 vs. limit=15.0
+2024-08-25 11:16:53,488 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.19 vs. limit=15.0
+2024-08-25 11:16:56,804 INFO [train.py:1114] (1/4) Epoch 5, batch 2450, loss[loss=0.3539, simple_loss=0.3547, pruned_loss=0.1294, ctc_loss=0.2356, over 13139.00 frames. ], tot_loss[loss=0.3058, simple_loss=0.3354, pruned_loss=0.1005, ctc_loss=0.1883, over 3727883.52 frames. ], batch size: 140, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:16:59,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=66170.66666666667, ans=0.0
+2024-08-25 11:17:07,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=66224.0, ans=0.125
+2024-08-25 11:17:43,156 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.021e+02 2.221e+02 2.524e+02 3.558e+02, threshold=4.443e+02, percent-clipped=0.0
+2024-08-25 11:17:44,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=66277.33333333333, ans=0.0
+2024-08-25 11:17:45,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=66277.33333333333, ans=0.125
+2024-08-25 11:19:28,340 INFO [train.py:1114] (1/4) Epoch 6, batch 0, loss[loss=0.2869, simple_loss=0.3232, pruned_loss=0.09127, ctc_loss=0.1703, over 19399.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3232, pruned_loss=0.09127, ctc_loss=0.1703, over 19399.00 frames. ], batch size: 48, lr: 2.45e-02, grad_scale: 32.0
+2024-08-25 11:19:28,341 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 11:20:29,253 INFO [train.py:1146] (1/4) Epoch 6, validation: loss=0.2388, simple_loss=0.3147, pruned_loss=0.05993, ctc_loss=0.1076, over 944034.00 frames.
+2024-08-25 11:20:29,254 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 11:20:29,652 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.41 vs. limit=15.0
+2024-08-25 11:20:45,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=66432.0, ans=0.0
+2024-08-25 11:21:17,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=66538.66666666667, ans=0.125
+2024-08-25 11:21:21,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-25 11:21:23,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-25 11:21:56,939 INFO [train.py:1114] (1/4) Epoch 6, batch 50, loss[loss=0.2498, simple_loss=0.2922, pruned_loss=0.07478, ctc_loss=0.1444, over 19715.00 frames. ], tot_loss[loss=0.2999, simple_loss=0.333, pruned_loss=0.09682, ctc_loss=0.183, over 844837.63 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:22:15,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=66698.66666666667, ans=0.125
+2024-08-25 11:22:36,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=66752.0, ans=0.0
+2024-08-25 11:22:50,724 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 1.959e+02 2.174e+02 2.569e+02 5.460e+02, threshold=4.347e+02, percent-clipped=1.0
+2024-08-25 11:23:00,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=66805.33333333333, ans=0.125
+2024-08-25 11:23:09,438 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.10 vs. limit=12.0
+2024-08-25 11:23:10,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66858.66666666667, ans=0.1
+2024-08-25 11:23:16,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=66858.66666666667, ans=0.2
+2024-08-25 11:23:18,882 INFO [train.py:1114] (1/4) Epoch 6, batch 100, loss[loss=0.2911, simple_loss=0.3221, pruned_loss=0.09482, ctc_loss=0.1763, over 19702.00 frames. ], tot_loss[loss=0.2983, simple_loss=0.3325, pruned_loss=0.09586, ctc_loss=0.1809, over 1498587.70 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:24:13,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=67125.33333333333, ans=0.125
+2024-08-25 11:24:15,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=67125.33333333333, ans=0.07
+2024-08-25 11:24:21,818 INFO [train.py:1114] (1/4) Epoch 6, batch 150, loss[loss=0.2595, simple_loss=0.2932, pruned_loss=0.08255, ctc_loss=0.1518, over 19679.00 frames. ], tot_loss[loss=0.2932, simple_loss=0.3286, pruned_loss=0.09365, ctc_loss=0.1762, over 2027875.39 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:25:04,954 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.947e+02 2.172e+02 2.650e+02 4.091e+02, threshold=4.343e+02, percent-clipped=0.0
+2024-08-25 11:25:21,749 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:25:34,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=67445.33333333333, ans=0.0
+2024-08-25 11:25:35,912 INFO [train.py:1114] (1/4) Epoch 6, batch 200, loss[loss=0.3294, simple_loss=0.3437, pruned_loss=0.1127, ctc_loss=0.2242, over 18471.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3264, pruned_loss=0.09263, ctc_loss=0.1744, over 2436571.14 frames. ], batch size: 86, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:25:43,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=67445.33333333333, ans=0.125
+2024-08-25 11:25:45,755 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.36 vs. limit=15.0
+2024-08-25 11:26:16,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=67552.0, ans=10.0
+2024-08-25 11:26:41,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=67605.33333333333, ans=0.2
+2024-08-25 11:26:54,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=67605.33333333333, ans=0.04949747468305833
+2024-08-25 11:26:57,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=67605.33333333333, ans=0.125
+2024-08-25 11:27:19,502 INFO [train.py:1114] (1/4) Epoch 6, batch 250, loss[loss=0.3111, simple_loss=0.3406, pruned_loss=0.1019, ctc_loss=0.1947, over 19431.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3247, pruned_loss=0.09139, ctc_loss=0.1722, over 2756537.75 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:27:36,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=67712.0, ans=0.125
+2024-08-25 11:28:16,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=67818.66666666667, ans=0.1
+2024-08-25 11:28:18,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=67818.66666666667, ans=0.1
+2024-08-25 11:28:23,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=67818.66666666667, ans=10.0
+2024-08-25 11:28:35,177 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.54 vs. limit=22.5
+2024-08-25 11:28:36,845 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 1.900e+02 2.111e+02 2.483e+02 4.707e+02, threshold=4.222e+02, percent-clipped=1.0
+2024-08-25 11:29:09,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=67872.0, ans=0.2
+2024-08-25 11:29:16,627 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.57 vs. limit=15.0
+2024-08-25 11:29:20,015 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=67925.33333333333, ans=0.125
+2024-08-25 11:29:38,956 INFO [train.py:1114] (1/4) Epoch 6, batch 300, loss[loss=0.2959, simple_loss=0.3315, pruned_loss=0.09571, ctc_loss=0.1721, over 19497.00 frames. ], tot_loss[loss=0.2865, simple_loss=0.3236, pruned_loss=0.0906, ctc_loss=0.1705, over 3001602.54 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:29:43,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67978.66666666667, ans=0.1
+2024-08-25 11:30:56,105 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.36 vs. limit=15.0
+2024-08-25 11:30:57,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=68138.66666666667, ans=0.125
+2024-08-25 11:31:00,393 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=68138.66666666667, ans=0.07
+2024-08-25 11:31:28,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=68192.0, ans=0.125
+2024-08-25 11:31:29,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=68192.0, ans=0.0
+2024-08-25 11:31:39,928 INFO [train.py:1114] (1/4) Epoch 6, batch 350, loss[loss=0.2316, simple_loss=0.2785, pruned_loss=0.06675, ctc_loss=0.1283, over 19761.00 frames. ], tot_loss[loss=0.2885, simple_loss=0.3251, pruned_loss=0.09149, ctc_loss=0.1722, over 3191085.11 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:32:35,301 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.039e+02 2.360e+02 2.872e+02 5.301e+02, threshold=4.720e+02, percent-clipped=2.0
+2024-08-25 11:32:47,025 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:33:02,559 INFO [train.py:1114] (1/4) Epoch 6, batch 400, loss[loss=0.2875, simple_loss=0.3306, pruned_loss=0.08845, ctc_loss=0.1687, over 19485.00 frames. ], tot_loss[loss=0.2871, simple_loss=0.3243, pruned_loss=0.09077, ctc_loss=0.171, over 3342801.21 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:33:02,816 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=68512.0, ans=0.0
+2024-08-25 11:33:15,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=68565.33333333333, ans=0.0
+2024-08-25 11:33:49,746 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.48 vs. limit=12.0
+2024-08-25 11:33:54,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=68672.0, ans=0.1
+2024-08-25 11:34:10,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=68725.33333333333, ans=0.0
+2024-08-25 11:34:11,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=68725.33333333333, ans=0.1
+2024-08-25 11:34:13,402 INFO [train.py:1114] (1/4) Epoch 6, batch 450, loss[loss=0.2572, simple_loss=0.3079, pruned_loss=0.07515, ctc_loss=0.1403, over 19624.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.3241, pruned_loss=0.0905, ctc_loss=0.1706, over 3451744.00 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:34:32,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=68832.0, ans=0.2
+2024-08-25 11:34:41,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=68885.33333333333, ans=0.2
+2024-08-25 11:34:49,661 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.969e+02 2.191e+02 2.793e+02 4.218e+02, threshold=4.382e+02, percent-clipped=0.0
+2024-08-25 11:34:54,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten.whitening_limit, batch_count=68938.66666666667, ans=15.0
+2024-08-25 11:34:59,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=68992.0, ans=0.1
+2024-08-25 11:35:09,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=69045.33333333333, ans=0.125
+2024-08-25 11:35:10,578 INFO [train.py:1114] (1/4) Epoch 6, batch 500, loss[loss=0.3109, simple_loss=0.342, pruned_loss=0.1016, ctc_loss=0.1916, over 19656.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3236, pruned_loss=0.09051, ctc_loss=0.1702, over 3546592.17 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:35:23,928 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.44 vs. limit=22.5
+2024-08-25 11:35:26,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=69098.66666666667, ans=0.125
+2024-08-25 11:36:10,426 INFO [train.py:1114] (1/4) Epoch 6, batch 550, loss[loss=0.296, simple_loss=0.3354, pruned_loss=0.09359, ctc_loss=0.1734, over 19238.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.3231, pruned_loss=0.09018, ctc_loss=0.1696, over 3609361.26 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:36:46,540 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.100e+02 2.439e+02 2.966e+02 5.259e+02, threshold=4.878e+02, percent-clipped=1.0
+2024-08-25 11:37:16,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=69525.33333333333, ans=0.0
+2024-08-25 11:37:18,166 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.00 vs. limit=15.0
+2024-08-25 11:37:28,780 INFO [train.py:1114] (1/4) Epoch 6, batch 600, loss[loss=0.3186, simple_loss=0.3479, pruned_loss=0.1059, ctc_loss=0.1936, over 19413.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3231, pruned_loss=0.08981, ctc_loss=0.1688, over 3666238.52 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:37:43,376 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.89 vs. limit=15.0
+2024-08-25 11:37:49,653 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.35 vs. limit=12.0
+2024-08-25 11:37:58,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=69685.33333333333, ans=0.125
+2024-08-25 11:38:00,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=69685.33333333333, ans=0.125
+2024-08-25 11:38:46,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=69792.0, ans=0.2
+2024-08-25 11:38:56,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=69792.0, ans=0.2
+2024-08-25 11:38:58,902 INFO [train.py:1114] (1/4) Epoch 6, batch 650, loss[loss=0.2774, simple_loss=0.3159, pruned_loss=0.08728, ctc_loss=0.1609, over 19769.00 frames. ], tot_loss[loss=0.2832, simple_loss=0.3215, pruned_loss=0.08898, ctc_loss=0.1674, over 3717015.54 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:39:09,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=69898.66666666667, ans=0.1
+2024-08-25 11:39:50,467 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.931e+02 2.137e+02 2.425e+02 3.711e+02, threshold=4.274e+02, percent-clipped=0.0
+2024-08-25 11:40:15,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=70112.0, ans=0.2
+2024-08-25 11:40:16,194 INFO [train.py:1114] (1/4) Epoch 6, batch 700, loss[loss=0.2688, simple_loss=0.3073, pruned_loss=0.08189, ctc_loss=0.1663, over 19712.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3214, pruned_loss=0.0888, ctc_loss=0.1671, over 3749297.31 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:40:18,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.52 vs. limit=12.0
+2024-08-25 11:40:19,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=70112.0, ans=0.125
+2024-08-25 11:40:24,833 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.75 vs. limit=15.0
+2024-08-25 11:40:42,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff2.min_abs, batch_count=70165.33333333333, ans=0.1
+2024-08-25 11:41:04,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=70165.33333333333, ans=0.0
+2024-08-25 11:41:13,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=70218.66666666667, ans=0.0
+2024-08-25 11:41:57,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.51 vs. limit=22.5
+2024-08-25 11:42:12,744 INFO [train.py:1114] (1/4) Epoch 6, batch 750, loss[loss=0.2784, simple_loss=0.3254, pruned_loss=0.0842, ctc_loss=0.1573, over 19507.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.3218, pruned_loss=0.08904, ctc_loss=0.1675, over 3775115.39 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:42:14,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=70378.66666666667, ans=0.125
+2024-08-25 11:42:17,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=70378.66666666667, ans=0.1
+2024-08-25 11:42:32,169 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.60 vs. limit=12.0
+2024-08-25 11:43:09,526 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.022e+02 2.297e+02 2.693e+02 4.652e+02, threshold=4.594e+02, percent-clipped=2.0
+2024-08-25 11:43:14,619 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.62 vs. limit=15.0
+2024-08-25 11:43:34,906 INFO [train.py:1114] (1/4) Epoch 6, batch 800, loss[loss=0.2598, simple_loss=0.2965, pruned_loss=0.07996, ctc_loss=0.1579, over 19422.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3218, pruned_loss=0.08913, ctc_loss=0.1675, over 3795598.18 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:43:45,835 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.40 vs. limit=6.0
+2024-08-25 11:43:48,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=70698.66666666667, ans=0.125
+2024-08-25 11:44:33,343 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.20 vs. limit=15.0
+2024-08-25 11:44:46,463 INFO [train.py:1114] (1/4) Epoch 6, batch 850, loss[loss=0.3207, simple_loss=0.3587, pruned_loss=0.1016, ctc_loss=0.1987, over 19650.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3212, pruned_loss=0.08851, ctc_loss=0.1663, over 3815002.64 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:45:37,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=71018.66666666667, ans=0.05
+2024-08-25 11:45:39,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=71018.66666666667, ans=0.125
+2024-08-25 11:45:45,589 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=71072.0, ans=0.0
+2024-08-25 11:45:46,248 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.893e+02 2.077e+02 2.374e+02 4.075e+02, threshold=4.154e+02, percent-clipped=0.0
+2024-08-25 11:46:03,740 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.32 vs. limit=12.0
+2024-08-25 11:46:07,493 INFO [train.py:1114] (1/4) Epoch 6, batch 900, loss[loss=0.2943, simple_loss=0.3215, pruned_loss=0.09785, ctc_loss=0.1785, over 19409.00 frames. ], tot_loss[loss=0.2836, simple_loss=0.3218, pruned_loss=0.08922, ctc_loss=0.1673, over 3819354.15 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 16.0
+2024-08-25 11:46:09,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=71178.66666666667, ans=0.125
+2024-08-25 11:46:21,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=71232.0, ans=0.04949747468305833
+2024-08-25 11:46:32,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=71232.0, ans=0.0
+2024-08-25 11:47:16,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71392.0, ans=0.1
+2024-08-25 11:47:21,583 INFO [train.py:1114] (1/4) Epoch 6, batch 950, loss[loss=0.2855, simple_loss=0.3095, pruned_loss=0.09544, ctc_loss=0.1765, over 19493.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.322, pruned_loss=0.08934, ctc_loss=0.1678, over 3821090.00 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:47:29,295 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:48:12,634 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=71552.0, ans=0.0
+2024-08-25 11:48:12,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71552.0, ans=0.1
+2024-08-25 11:48:21,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=71605.33333333333, ans=0.125
+2024-08-25 11:48:23,514 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.900e+02 2.167e+02 2.553e+02 4.088e+02, threshold=4.334e+02, percent-clipped=0.0
+2024-08-25 11:48:42,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71658.66666666667, ans=0.1
+2024-08-25 11:49:03,406 INFO [train.py:1114] (1/4) Epoch 6, batch 1000, loss[loss=0.2622, simple_loss=0.3052, pruned_loss=0.07931, ctc_loss=0.1516, over 19838.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3235, pruned_loss=0.09039, ctc_loss=0.1697, over 3815904.45 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:49:04,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff3.min_abs, batch_count=71712.0, ans=0.2
+2024-08-25 11:49:13,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=71712.0, ans=0.125
+2024-08-25 11:49:14,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=71712.0, ans=0.125
+2024-08-25 11:49:14,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=71712.0, ans=0.125
+2024-08-25 11:49:43,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=71818.66666666667, ans=0.125
+2024-08-25 11:49:51,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=71818.66666666667, ans=0.125
+2024-08-25 11:50:06,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=71872.0, ans=0.125
+2024-08-25 11:50:18,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=71872.0, ans=0.0
+2024-08-25 11:50:57,802 INFO [train.py:1114] (1/4) Epoch 6, batch 1050, loss[loss=0.3094, simple_loss=0.3434, pruned_loss=0.09873, ctc_loss=0.1951, over 19835.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3226, pruned_loss=0.09031, ctc_loss=0.1697, over 3822457.66 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:51:08,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=71978.66666666667, ans=0.0
+2024-08-25 11:51:09,235 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.46 vs. limit=22.5
+2024-08-25 11:51:12,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=72032.0, ans=0.2
+2024-08-25 11:51:14,759 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:51:24,476 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=72032.0, ans=0.025
+2024-08-25 11:51:32,497 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.10 vs. limit=15.0
+2024-08-25 11:52:00,137 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.944e+02 2.201e+02 2.550e+02 3.957e+02, threshold=4.403e+02, percent-clipped=0.0
+2024-08-25 11:52:25,574 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=72138.66666666667, ans=0.125
+2024-08-25 11:52:28,046 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.20 vs. limit=12.0
+2024-08-25 11:52:48,884 INFO [train.py:1114] (1/4) Epoch 6, batch 1100, loss[loss=0.2905, simple_loss=0.3208, pruned_loss=0.09312, ctc_loss=0.1847, over 19591.00 frames. ], tot_loss[loss=0.2845, simple_loss=0.3221, pruned_loss=0.08977, ctc_loss=0.1687, over 3829725.50 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:52:56,213 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72245.33333333333, ans=0.1
+2024-08-25 11:52:56,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=72245.33333333333, ans=0.125
+2024-08-25 11:52:59,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=72245.33333333333, ans=0.0
+2024-08-25 11:52:59,772 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.46 vs. limit=15.0
+2024-08-25 11:53:26,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=72352.0, ans=0.0
+2024-08-25 11:53:58,641 INFO [train.py:1114] (1/4) Epoch 6, batch 1150, loss[loss=0.2686, simple_loss=0.3134, pruned_loss=0.0816, ctc_loss=0.1514, over 19583.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3218, pruned_loss=0.08992, ctc_loss=0.1687, over 3827941.44 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:54:22,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=72565.33333333333, ans=0.07
+2024-08-25 11:54:35,540 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=72618.66666666667, ans=0.125
+2024-08-25 11:54:41,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=72672.0, ans=0.0
+2024-08-25 11:54:43,439 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.952e+02 2.194e+02 2.505e+02 4.680e+02, threshold=4.387e+02, percent-clipped=1.0
+2024-08-25 11:54:50,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=72672.0, ans=0.0
+2024-08-25 11:54:54,794 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=72725.33333333333, ans=0.125
+2024-08-25 11:55:00,501 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.55 vs. limit=5.0
+2024-08-25 11:55:04,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=72725.33333333333, ans=0.0
+2024-08-25 11:55:11,873 INFO [train.py:1114] (1/4) Epoch 6, batch 1200, loss[loss=0.3016, simple_loss=0.3494, pruned_loss=0.0913, ctc_loss=0.1778, over 19837.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3232, pruned_loss=0.09048, ctc_loss=0.17, over 3823993.67 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:56:22,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=72938.66666666667, ans=0.0
+2024-08-25 11:56:25,416 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.03 vs. limit=12.0
+2024-08-25 11:56:26,781 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.47 vs. limit=15.0
+2024-08-25 11:56:31,860 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.04 vs. limit=10.0
+2024-08-25 11:56:43,211 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.37 vs. limit=10.0
+2024-08-25 11:56:55,076 INFO [train.py:1114] (1/4) Epoch 6, batch 1250, loss[loss=0.2973, simple_loss=0.34, pruned_loss=0.09391, ctc_loss=0.1671, over 19520.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3232, pruned_loss=0.09003, ctc_loss=0.1691, over 3841866.82 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:56:58,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=73045.33333333333, ans=0.125
+2024-08-25 11:57:05,382 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.57 vs. limit=15.0
+2024-08-25 11:57:15,820 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.77 vs. limit=15.0
+2024-08-25 11:57:28,738 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=73098.66666666667, ans=0.125
+2024-08-25 11:57:53,286 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=73152.0, ans=0.025
+2024-08-25 11:58:02,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=73205.33333333333, ans=0.2
+2024-08-25 11:58:03,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=73205.33333333333, ans=0.0
+2024-08-25 11:58:13,319 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.073e+02 2.305e+02 2.660e+02 4.224e+02, threshold=4.609e+02, percent-clipped=0.0
+2024-08-25 11:58:19,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=73205.33333333333, ans=0.2
+2024-08-25 11:58:32,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=73258.66666666667, ans=0.125
+2024-08-25 11:58:43,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=73258.66666666667, ans=0.2
+2024-08-25 11:58:46,696 INFO [train.py:1114] (1/4) Epoch 6, batch 1300, loss[loss=0.3262, simple_loss=0.3529, pruned_loss=0.1092, ctc_loss=0.2027, over 18887.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3221, pruned_loss=0.08924, ctc_loss=0.1675, over 3845848.26 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:58:47,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=73312.0, ans=0.0
+2024-08-25 11:59:00,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=73312.0, ans=0.1
+2024-08-25 11:59:23,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=73365.33333333333, ans=0.1
+2024-08-25 11:59:28,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=73365.33333333333, ans=0.025
+2024-08-25 11:59:41,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=73418.66666666667, ans=0.125
+2024-08-25 11:59:50,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=73418.66666666667, ans=0.125
+2024-08-25 11:59:52,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=73472.0, ans=0.125
+2024-08-25 12:00:12,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=73525.33333333333, ans=0.0
+2024-08-25 12:00:19,977 INFO [train.py:1114] (1/4) Epoch 6, batch 1350, loss[loss=0.2887, simple_loss=0.3256, pruned_loss=0.09096, ctc_loss=0.1746, over 19757.00 frames. ], tot_loss[loss=0.2825, simple_loss=0.3211, pruned_loss=0.08866, ctc_loss=0.1665, over 3856087.46 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 12:01:05,000 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.025e+02 2.295e+02 2.579e+02 4.133e+02, threshold=4.590e+02, percent-clipped=0.0
+2024-08-25 12:01:05,216 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.479e-01
+2024-08-25 12:01:12,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73738.66666666667, ans=0.1
+2024-08-25 12:01:14,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=73738.66666666667, ans=0.0
+2024-08-25 12:01:30,913 INFO [train.py:1114] (1/4) Epoch 6, batch 1400, loss[loss=0.2709, simple_loss=0.3002, pruned_loss=0.08866, ctc_loss=0.1606, over 19654.00 frames. ], tot_loss[loss=0.2822, simple_loss=0.3209, pruned_loss=0.08852, ctc_loss=0.1663, over 3863972.67 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:02:05,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73898.66666666667, ans=0.1
+2024-08-25 12:02:08,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=73898.66666666667, ans=0.0
+2024-08-25 12:02:52,164 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:02:52,963 INFO [train.py:1114] (1/4) Epoch 6, batch 1450, loss[loss=0.2845, simple_loss=0.3316, pruned_loss=0.08719, ctc_loss=0.1576, over 19687.00 frames. ], tot_loss[loss=0.2825, simple_loss=0.3214, pruned_loss=0.08853, ctc_loss=0.1664, over 3862438.35 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:02:54,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=74112.0, ans=0.035
+2024-08-25 12:02:55,961 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.70 vs. limit=22.5
+2024-08-25 12:03:04,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=74165.33333333333, ans=0.125
+2024-08-25 12:03:39,760 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=74218.66666666667, ans=0.0
+2024-08-25 12:03:48,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=74218.66666666667, ans=0.2
+2024-08-25 12:03:53,270 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 1.998e+02 2.330e+02 2.811e+02 4.670e+02, threshold=4.661e+02, percent-clipped=1.0
+2024-08-25 12:04:03,408 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.94 vs. limit=15.0
+2024-08-25 12:04:04,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=74325.33333333333, ans=0.125
+2024-08-25 12:04:10,304 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.38 vs. limit=22.5
+2024-08-25 12:04:25,535 INFO [train.py:1114] (1/4) Epoch 6, batch 1500, loss[loss=0.2811, simple_loss=0.3184, pruned_loss=0.0883, ctc_loss=0.168, over 19592.00 frames. ], tot_loss[loss=0.2822, simple_loss=0.3214, pruned_loss=0.08834, ctc_loss=0.166, over 3862805.82 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:04:42,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=74432.0, ans=0.125
+2024-08-25 12:04:43,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=74432.0, ans=0.125
+2024-08-25 12:04:59,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=74485.33333333333, ans=0.0
+2024-08-25 12:04:59,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=74485.33333333333, ans=0.125
+2024-08-25 12:05:51,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.90 vs. limit=15.0
+2024-08-25 12:06:01,373 INFO [train.py:1114] (1/4) Epoch 6, batch 1550, loss[loss=0.2939, simple_loss=0.3352, pruned_loss=0.09201, ctc_loss=0.1713, over 19611.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3218, pruned_loss=0.08878, ctc_loss=0.1668, over 3848623.69 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:06:01,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=74645.33333333333, ans=0.0
+2024-08-25 12:06:24,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=74752.0, ans=0.2
+2024-08-25 12:06:37,903 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.061e+02 2.512e+02 3.027e+02 4.789e+02, threshold=5.024e+02, percent-clipped=1.0
+2024-08-25 12:06:45,540 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.39 vs. limit=15.0
+2024-08-25 12:06:46,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=74858.66666666667, ans=0.025
+2024-08-25 12:07:01,756 INFO [train.py:1114] (1/4) Epoch 6, batch 1600, loss[loss=0.2881, simple_loss=0.333, pruned_loss=0.08872, ctc_loss=0.1642, over 19862.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3205, pruned_loss=0.08814, ctc_loss=0.1656, over 3838342.69 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:07:03,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=74912.0, ans=0.0
+2024-08-25 12:07:14,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74965.33333333333, ans=0.1
+2024-08-25 12:07:17,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74965.33333333333, ans=0.1
+2024-08-25 12:07:23,357 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.67 vs. limit=10.0
+2024-08-25 12:07:45,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=75072.0, ans=0.125
+2024-08-25 12:07:48,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=75125.33333333333, ans=0.125
+2024-08-25 12:07:56,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff2.min_abs, batch_count=75125.33333333333, ans=0.1
+2024-08-25 12:08:00,980 INFO [train.py:1114] (1/4) Epoch 6, batch 1650, loss[loss=0.3019, simple_loss=0.3406, pruned_loss=0.09614, ctc_loss=0.1773, over 19633.00 frames. ], tot_loss[loss=0.2813, simple_loss=0.3204, pruned_loss=0.08808, ctc_loss=0.1653, over 3835386.23 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:08:02,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=75178.66666666667, ans=0.0
+2024-08-25 12:08:11,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=75232.0, ans=0.1
+2024-08-25 12:08:18,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=75232.0, ans=0.125
+2024-08-25 12:08:26,592 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=75285.33333333333, ans=0.0
+2024-08-25 12:08:37,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=75338.66666666667, ans=0.125
+2024-08-25 12:08:37,757 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.893e+02 2.381e+02 2.784e+02 7.281e+02, threshold=4.762e+02, percent-clipped=1.0
+2024-08-25 12:08:45,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=75338.66666666667, ans=0.125
+2024-08-25 12:08:52,556 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:08:56,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=75392.0, ans=0.125
+2024-08-25 12:09:00,114 INFO [train.py:1114] (1/4) Epoch 6, batch 1700, loss[loss=0.2241, simple_loss=0.2694, pruned_loss=0.06461, ctc_loss=0.1238, over 19660.00 frames. ], tot_loss[loss=0.2807, simple_loss=0.3202, pruned_loss=0.0877, ctc_loss=0.1647, over 3848577.47 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-25 12:09:06,814 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.62 vs. limit=15.0
+2024-08-25 12:09:25,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=75552.0, ans=0.125
+2024-08-25 12:09:28,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=75552.0, ans=0.125
+2024-08-25 12:09:31,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75552.0, ans=0.1
+2024-08-25 12:09:55,906 INFO [train.py:1114] (1/4) Epoch 6, batch 1750, loss[loss=0.236, simple_loss=0.2838, pruned_loss=0.06793, ctc_loss=0.1307, over 19642.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3197, pruned_loss=0.08766, ctc_loss=0.1645, over 3853401.56 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 16.0
+2024-08-25 12:09:59,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=75712.0, ans=0.2
+2024-08-25 12:10:03,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=75712.0, ans=0.125
+2024-08-25 12:10:03,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=75712.0, ans=0.0
+2024-08-25 12:10:25,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75818.66666666667, ans=0.1
+2024-08-25 12:10:29,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=75872.0, ans=0.0
+2024-08-25 12:10:32,749 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.890e+02 2.130e+02 2.587e+02 4.262e+02, threshold=4.260e+02, percent-clipped=0.0
+2024-08-25 12:10:50,356 INFO [train.py:1114] (1/4) Epoch 6, batch 1800, loss[loss=0.2852, simple_loss=0.3298, pruned_loss=0.08791, ctc_loss=0.1621, over 19621.00 frames. ], tot_loss[loss=0.2821, simple_loss=0.3208, pruned_loss=0.08845, ctc_loss=0.1659, over 3854754.97 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 8.0
+2024-08-25 12:10:52,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=75978.66666666667, ans=0.0
+2024-08-25 12:11:22,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=76138.66666666667, ans=0.1
+2024-08-25 12:11:36,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=76192.0, ans=0.125
+2024-08-25 12:11:44,875 INFO [train.py:1114] (1/4) Epoch 6, batch 1850, loss[loss=0.2582, simple_loss=0.3079, pruned_loss=0.07531, ctc_loss=0.1447, over 19601.00 frames. ], tot_loss[loss=0.2814, simple_loss=0.3203, pruned_loss=0.0882, ctc_loss=0.1653, over 3857229.03 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:11:45,455 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.02 vs. limit=15.0
+2024-08-25 12:11:46,627 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.55 vs. limit=15.0
+2024-08-25 12:11:49,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=76245.33333333333, ans=0.07
+2024-08-25 12:11:49,608 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76245.33333333333, ans=0.1
+2024-08-25 12:12:06,753 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=76352.0, ans=0.025
+2024-08-25 12:12:06,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=76352.0, ans=0.0
+2024-08-25 12:12:20,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=76405.33333333333, ans=0.125
+2024-08-25 12:12:22,231 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.994e+02 2.285e+02 2.712e+02 4.413e+02, threshold=4.569e+02, percent-clipped=2.0
+2024-08-25 12:12:43,370 INFO [train.py:1114] (1/4) Epoch 6, batch 1900, loss[loss=0.2715, simple_loss=0.3216, pruned_loss=0.08023, ctc_loss=0.1524, over 19651.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3212, pruned_loss=0.08857, ctc_loss=0.1661, over 3861251.94 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:12:45,853 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=76512.0, ans=0.2
+2024-08-25 12:12:54,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=76565.33333333333, ans=0.2
+2024-08-25 12:12:54,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=76565.33333333333, ans=0.0
+2024-08-25 12:12:54,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=76565.33333333333, ans=0.2
+2024-08-25 12:12:54,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=76565.33333333333, ans=0.125
+2024-08-25 12:13:02,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=76565.33333333333, ans=0.125
+2024-08-25 12:13:09,242 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.60 vs. limit=10.0
+2024-08-25 12:13:25,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=76672.0, ans=0.125
+2024-08-25 12:13:30,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=76725.33333333333, ans=0.0
+2024-08-25 12:13:40,517 INFO [train.py:1114] (1/4) Epoch 6, batch 1950, loss[loss=0.261, simple_loss=0.3035, pruned_loss=0.07966, ctc_loss=0.1476, over 19585.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3223, pruned_loss=0.08896, ctc_loss=0.1668, over 3870447.06 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:13:45,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76778.66666666667, ans=0.1
+2024-08-25 12:14:05,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=76885.33333333333, ans=0.0
+2024-08-25 12:14:18,626 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 1.890e+02 2.137e+02 2.349e+02 3.743e+02, threshold=4.275e+02, percent-clipped=0.0
+2024-08-25 12:14:28,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=76992.0, ans=0.125
+2024-08-25 12:14:35,993 INFO [train.py:1114] (1/4) Epoch 6, batch 2000, loss[loss=0.2677, simple_loss=0.3018, pruned_loss=0.08605, ctc_loss=0.1536, over 19686.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3225, pruned_loss=0.08916, ctc_loss=0.1669, over 3854966.03 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:14:59,405 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:15:26,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=77258.66666666667, ans=0.1
+2024-08-25 12:15:27,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=77258.66666666667, ans=0.125
+2024-08-25 12:15:30,080 INFO [train.py:1114] (1/4) Epoch 6, batch 2050, loss[loss=0.2082, simple_loss=0.2619, pruned_loss=0.05527, ctc_loss=0.1098, over 19698.00 frames. ], tot_loss[loss=0.2816, simple_loss=0.3207, pruned_loss=0.0882, ctc_loss=0.1652, over 3851842.29 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:15:44,150 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.44 vs. limit=15.0
+2024-08-25 12:15:49,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=77365.33333333333, ans=0.025
+2024-08-25 12:15:50,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=77365.33333333333, ans=0.0
+2024-08-25 12:15:53,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=77365.33333333333, ans=0.2
+2024-08-25 12:16:04,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=77418.66666666667, ans=0.0
+2024-08-25 12:16:04,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=77418.66666666667, ans=0.125
+2024-08-25 12:16:06,100 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:16:13,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=77472.0, ans=0.125
+2024-08-25 12:16:14,693 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.955e+02 2.380e+02 2.986e+02 1.021e+03, threshold=4.760e+02, percent-clipped=7.0
+2024-08-25 12:16:20,539 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.01 vs. limit=15.0
+2024-08-25 12:16:25,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=77525.33333333333, ans=0.125
+2024-08-25 12:16:32,225 INFO [train.py:1114] (1/4) Epoch 6, batch 2100, loss[loss=0.3168, simple_loss=0.3425, pruned_loss=0.1053, ctc_loss=0.2016, over 19786.00 frames. ], tot_loss[loss=0.2796, simple_loss=0.3195, pruned_loss=0.08717, ctc_loss=0.1633, over 3857919.47 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:16:54,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=77685.33333333333, ans=0.0
+2024-08-25 12:16:55,576 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=77685.33333333333, ans=0.2
+2024-08-25 12:17:03,396 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:17:14,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=77738.66666666667, ans=0.1
+2024-08-25 12:17:23,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=77792.0, ans=0.125
+2024-08-25 12:17:28,076 INFO [train.py:1114] (1/4) Epoch 6, batch 2150, loss[loss=0.257, simple_loss=0.3059, pruned_loss=0.07625, ctc_loss=0.1392, over 19583.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3185, pruned_loss=0.08653, ctc_loss=0.1619, over 3869040.43 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:17:33,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=77845.33333333333, ans=0.2
+2024-08-25 12:17:43,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=77898.66666666667, ans=0.0
+2024-08-25 12:18:04,789 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.43 vs. limit=12.0
+2024-08-25 12:18:07,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=77952.0, ans=0.0
+2024-08-25 12:18:15,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=78005.33333333333, ans=0.1
+2024-08-25 12:18:19,497 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.877e+02 2.258e+02 2.799e+02 6.726e+02, threshold=4.515e+02, percent-clipped=2.0
+2024-08-25 12:18:19,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=78005.33333333333, ans=0.125
+2024-08-25 12:18:25,172 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:19:06,055 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=78112.0, ans=0.0
+2024-08-25 12:19:06,973 INFO [train.py:1114] (1/4) Epoch 6, batch 2200, loss[loss=0.2829, simple_loss=0.3266, pruned_loss=0.08565, ctc_loss=0.1694, over 19604.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3187, pruned_loss=0.08683, ctc_loss=0.1628, over 3868027.54 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:19:17,046 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=2.522e-03
+2024-08-25 12:19:28,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=78218.66666666667, ans=0.125
+2024-08-25 12:19:45,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=78272.0, ans=0.2
+2024-08-25 12:20:02,390 INFO [train.py:1114] (1/4) Epoch 6, batch 2250, loss[loss=0.2947, simple_loss=0.3354, pruned_loss=0.09168, ctc_loss=0.1765, over 19604.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3192, pruned_loss=0.08704, ctc_loss=0.1633, over 3868605.32 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:20:16,799 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.49 vs. limit=15.0
+2024-08-25 12:20:18,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=78432.0, ans=0.2
+2024-08-25 12:20:30,213 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=78485.33333333333, ans=0.125
+2024-08-25 12:20:38,631 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.005e+02 2.234e+02 2.581e+02 4.325e+02, threshold=4.468e+02, percent-clipped=0.0
+2024-08-25 12:20:52,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=78592.0, ans=0.2
+2024-08-25 12:20:56,310 INFO [train.py:1114] (1/4) Epoch 6, batch 2300, loss[loss=0.2316, simple_loss=0.2837, pruned_loss=0.06592, ctc_loss=0.1193, over 19509.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3179, pruned_loss=0.0869, ctc_loss=0.1627, over 3862070.77 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:21:01,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=78645.33333333333, ans=0.0
+2024-08-25 12:21:14,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=78698.66666666667, ans=0.125
+2024-08-25 12:21:21,152 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.92 vs. limit=15.0
+2024-08-25 12:21:23,041 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:21:33,030 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=78805.33333333333, ans=0.125
+2024-08-25 12:21:46,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=78858.66666666667, ans=0.125
+2024-08-25 12:21:52,656 INFO [train.py:1114] (1/4) Epoch 6, batch 2350, loss[loss=0.2723, simple_loss=0.3204, pruned_loss=0.08259, ctc_loss=0.1476, over 19696.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3181, pruned_loss=0.08685, ctc_loss=0.1625, over 3864413.14 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:22:28,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=79072.0, ans=0.04949747468305833
+2024-08-25 12:22:30,284 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 2.097e+02 2.553e+02 3.084e+02 6.792e+02, threshold=5.106e+02, percent-clipped=2.0
+2024-08-25 12:22:32,056 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.20 vs. limit=15.0
+2024-08-25 12:22:41,617 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=79125.33333333333, ans=0.0
+2024-08-25 12:22:43,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=79125.33333333333, ans=0.025
+2024-08-25 12:22:47,955 INFO [train.py:1114] (1/4) Epoch 6, batch 2400, loss[loss=0.3048, simple_loss=0.337, pruned_loss=0.09917, ctc_loss=0.1857, over 19418.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3209, pruned_loss=0.08816, ctc_loss=0.1647, over 3858376.52 frames. ], batch size: 67, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:22:58,890 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.87 vs. limit=22.5
+2024-08-25 12:23:00,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.24 vs. limit=15.0
+2024-08-25 12:23:06,116 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.40 vs. limit=15.0
+2024-08-25 12:23:07,124 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.78 vs. limit=15.0
+2024-08-25 12:23:32,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=79338.66666666667, ans=0.125
+2024-08-25 12:23:44,103 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.20 vs. limit=15.0
+2024-08-25 12:23:45,698 INFO [train.py:1114] (1/4) Epoch 6, batch 2450, loss[loss=0.3661, simple_loss=0.3674, pruned_loss=0.1329, ctc_loss=0.2476, over 13027.00 frames. ], tot_loss[loss=0.29, simple_loss=0.326, pruned_loss=0.09241, ctc_loss=0.173, over 3730840.69 frames. ], batch size: 140, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:23:50,737 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.97 vs. limit=6.0
+2024-08-25 12:24:16,722 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.36 vs. limit=15.0
+2024-08-25 12:25:42,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=79498.66666666667, ans=0.125
+2024-08-25 12:27:14,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=79552.0, ans=0.125
+2024-08-25 12:28:01,648 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.056e+02 2.291e+02 2.526e+02 5.572e+02, threshold=4.582e+02, percent-clipped=1.0
+2024-08-25 12:29:27,604 INFO [train.py:1114] (1/4) Epoch 7, batch 0, loss[loss=0.2804, simple_loss=0.3119, pruned_loss=0.09087, ctc_loss=0.1677, over 19404.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3119, pruned_loss=0.09087, ctc_loss=0.1677, over 19404.00 frames. ], batch size: 48, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:29:27,605 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 12:29:44,285 INFO [train.py:1146] (1/4) Epoch 7, validation: loss=0.2269, simple_loss=0.307, pruned_loss=0.05393, ctc_loss=0.0975, over 944034.00 frames.
+2024-08-25 12:29:44,285 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 12:29:47,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=79658.66666666667, ans=0.125
+2024-08-25 12:31:09,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=79765.33333333333, ans=0.1
+2024-08-25 12:31:41,390 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=79818.66666666667, ans=0.1
+2024-08-25 12:31:41,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=79818.66666666667, ans=0.0
+2024-08-25 12:33:04,672 INFO [train.py:1114] (1/4) Epoch 7, batch 50, loss[loss=0.2531, simple_loss=0.2944, pruned_loss=0.07757, ctc_loss=0.1413, over 19704.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.3232, pruned_loss=0.08922, ctc_loss=0.1693, over 845399.71 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:33:56,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=80085.33333333333, ans=0.2
+2024-08-25 12:34:17,264 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.999e+02 2.246e+02 2.808e+02 5.514e+02, threshold=4.492e+02, percent-clipped=3.0
+2024-08-25 12:34:24,293 INFO [train.py:1114] (1/4) Epoch 7, batch 100, loss[loss=0.2355, simple_loss=0.2914, pruned_loss=0.06565, ctc_loss=0.1206, over 19722.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3226, pruned_loss=0.08785, ctc_loss=0.1661, over 1499399.80 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:34:34,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=80192.0, ans=0.04949747468305833
+2024-08-25 12:35:11,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=80405.33333333333, ans=0.0
+2024-08-25 12:35:21,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=80405.33333333333, ans=0.1
+2024-08-25 12:35:21,392 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.91 vs. limit=15.0
+2024-08-25 12:35:23,295 INFO [train.py:1114] (1/4) Epoch 7, batch 150, loss[loss=0.2494, simple_loss=0.2951, pruned_loss=0.07369, ctc_loss=0.1406, over 19722.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3188, pruned_loss=0.08537, ctc_loss=0.1615, over 2027591.77 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:35:30,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=80458.66666666667, ans=0.125
+2024-08-25 12:35:50,255 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.32 vs. limit=22.5
+2024-08-25 12:36:05,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=80618.66666666667, ans=0.04949747468305833
+2024-08-25 12:36:09,083 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.17 vs. limit=15.0
+2024-08-25 12:36:18,832 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.959e+02 2.217e+02 2.953e+02 5.735e+02, threshold=4.434e+02, percent-clipped=2.0
+2024-08-25 12:36:26,006 INFO [train.py:1114] (1/4) Epoch 7, batch 200, loss[loss=0.2939, simple_loss=0.3357, pruned_loss=0.09189, ctc_loss=0.1707, over 18201.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3162, pruned_loss=0.08379, ctc_loss=0.1585, over 2435660.66 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:36:30,751 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:36:57,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80832.0, ans=0.1
+2024-08-25 12:37:00,811 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=80885.33333333333, ans=0.125
+2024-08-25 12:37:02,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80885.33333333333, ans=0.1
+2024-08-25 12:37:07,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=80885.33333333333, ans=0.1
+2024-08-25 12:37:22,871 INFO [train.py:1114] (1/4) Epoch 7, batch 250, loss[loss=0.3101, simple_loss=0.341, pruned_loss=0.1018, ctc_loss=0.189, over 19374.00 frames. ], tot_loss[loss=0.274, simple_loss=0.3159, pruned_loss=0.08417, ctc_loss=0.1591, over 2757003.56 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:37:23,218 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:37:44,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=81045.33333333333, ans=0.0
+2024-08-25 12:38:07,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=81152.0, ans=0.125
+2024-08-25 12:38:14,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=81205.33333333333, ans=0.125
+2024-08-25 12:38:16,686 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.901e+02 2.294e+02 2.833e+02 4.254e+02, threshold=4.587e+02, percent-clipped=0.0
+2024-08-25 12:38:17,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=81205.33333333333, ans=0.125
+2024-08-25 12:38:18,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=81205.33333333333, ans=0.0
+2024-08-25 12:38:22,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=81258.66666666667, ans=0.125
+2024-08-25 12:38:23,341 INFO [train.py:1114] (1/4) Epoch 7, batch 300, loss[loss=0.2854, simple_loss=0.3251, pruned_loss=0.08797, ctc_loss=0.1744, over 19528.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.315, pruned_loss=0.08336, ctc_loss=0.1578, over 3002026.24 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:38:43,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=81312.0, ans=0.125
+2024-08-25 12:39:06,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=81418.66666666667, ans=0.04949747468305833
+2024-08-25 12:39:52,667 INFO [train.py:1114] (1/4) Epoch 7, batch 350, loss[loss=0.276, simple_loss=0.304, pruned_loss=0.08996, ctc_loss=0.1702, over 19747.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3168, pruned_loss=0.08488, ctc_loss=0.1605, over 3191705.25 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:40:21,750 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.32 vs. limit=22.5
+2024-08-25 12:40:35,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81685.33333333333, ans=0.1
+2024-08-25 12:40:43,962 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.980e+02 2.268e+02 2.810e+02 5.782e+02, threshold=4.535e+02, percent-clipped=1.0
+2024-08-25 12:40:50,663 INFO [train.py:1114] (1/4) Epoch 7, batch 400, loss[loss=0.2924, simple_loss=0.3373, pruned_loss=0.08992, ctc_loss=0.1689, over 19508.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3158, pruned_loss=0.08389, ctc_loss=0.1585, over 3343929.88 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:40:53,278 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=81792.0, ans=0.2
+2024-08-25 12:41:01,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-25 12:41:19,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=81898.66666666667, ans=0.125
+2024-08-25 12:41:21,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=81898.66666666667, ans=0.125
+2024-08-25 12:41:30,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=81952.0, ans=0.025
+2024-08-25 12:41:52,328 INFO [train.py:1114] (1/4) Epoch 7, batch 450, loss[loss=0.2488, simple_loss=0.3105, pruned_loss=0.06808, ctc_loss=0.1276, over 19623.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3157, pruned_loss=0.08397, ctc_loss=0.1585, over 3450438.20 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:42:28,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_na.min_abs, batch_count=82218.66666666667, ans=0.02
+2024-08-25 12:42:29,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=82218.66666666667, ans=0.05
+2024-08-25 12:42:30,550 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=82218.66666666667, ans=0.2
+2024-08-25 12:42:30,561 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82218.66666666667, ans=0.1
+2024-08-25 12:42:33,363 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.43 vs. limit=15.0
+2024-08-25 12:42:33,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=82218.66666666667, ans=0.07
+2024-08-25 12:42:43,142 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 1.947e+02 2.448e+02 2.960e+02 4.262e+02, threshold=4.896e+02, percent-clipped=0.0
+2024-08-25 12:42:43,928 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.73 vs. limit=22.5
+2024-08-25 12:42:47,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=82272.0, ans=0.125
+2024-08-25 12:42:52,045 INFO [train.py:1114] (1/4) Epoch 7, batch 500, loss[loss=0.3005, simple_loss=0.3423, pruned_loss=0.09426, ctc_loss=0.1752, over 19678.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.3149, pruned_loss=0.08346, ctc_loss=0.1572, over 3547137.89 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:42:52,611 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.95 vs. limit=15.0
+2024-08-25 12:42:55,285 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82325.33333333333, ans=0.1
+2024-08-25 12:42:57,587 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=82325.33333333333, ans=0.125
+2024-08-25 12:42:58,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=82325.33333333333, ans=0.1
+2024-08-25 12:43:03,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=82325.33333333333, ans=0.0
+2024-08-25 12:43:09,421 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.68 vs. limit=22.5
+2024-08-25 12:43:31,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=82485.33333333333, ans=0.125
+2024-08-25 12:43:34,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=82485.33333333333, ans=0.04949747468305833
+2024-08-25 12:43:41,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=82538.66666666667, ans=0.0
+2024-08-25 12:43:51,841 INFO [train.py:1114] (1/4) Epoch 7, batch 550, loss[loss=0.3222, simple_loss=0.3509, pruned_loss=0.1065, ctc_loss=0.2011, over 19309.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3145, pruned_loss=0.08327, ctc_loss=0.1565, over 3609288.66 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:43:53,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=82592.0, ans=0.125
+2024-08-25 12:43:58,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=82592.0, ans=0.2
+2024-08-25 12:44:44,958 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.000e+02 2.364e+02 2.910e+02 5.356e+02, threshold=4.728e+02, percent-clipped=1.0
+2024-08-25 12:44:52,597 INFO [train.py:1114] (1/4) Epoch 7, batch 600, loss[loss=0.3204, simple_loss=0.347, pruned_loss=0.106, ctc_loss=0.2045, over 19404.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3148, pruned_loss=0.08341, ctc_loss=0.1567, over 3666929.36 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-25 12:44:56,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=82858.66666666667, ans=0.125
+2024-08-25 12:44:58,339 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.74 vs. limit=15.0
+2024-08-25 12:45:07,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=82912.0, ans=0.125
+2024-08-25 12:45:10,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=82912.0, ans=0.0
+2024-08-25 12:45:20,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.52 vs. limit=15.0
+2024-08-25 12:45:25,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=82965.33333333333, ans=0.125
+2024-08-25 12:45:41,029 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.23 vs. limit=15.0
+2024-08-25 12:45:51,129 INFO [train.py:1114] (1/4) Epoch 7, batch 650, loss[loss=0.2626, simple_loss=0.3086, pruned_loss=0.07932, ctc_loss=0.1449, over 19777.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3134, pruned_loss=0.08258, ctc_loss=0.1552, over 3716872.42 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:45:59,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=83125.33333333333, ans=0.125
+2024-08-25 12:46:11,918 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.68 vs. limit=15.0
+2024-08-25 12:46:47,172 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.844e+02 2.004e+02 2.285e+02 4.065e+02, threshold=4.009e+02, percent-clipped=0.0
+2024-08-25 12:46:52,901 INFO [train.py:1114] (1/4) Epoch 7, batch 700, loss[loss=0.2953, simple_loss=0.3257, pruned_loss=0.0965, ctc_loss=0.1797, over 19715.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3143, pruned_loss=0.08327, ctc_loss=0.1563, over 3748590.20 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:47:11,823 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=83445.33333333333, ans=0.125
+2024-08-25 12:47:18,191 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.03 vs. limit=10.0
+2024-08-25 12:47:25,895 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=83552.0, ans=0.125
+2024-08-25 12:47:46,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=83605.33333333333, ans=0.0
+2024-08-25 12:47:46,649 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.31 vs. limit=12.0
+2024-08-25 12:47:47,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=83605.33333333333, ans=0.125
+2024-08-25 12:47:49,553 INFO [train.py:1114] (1/4) Epoch 7, batch 750, loss[loss=0.2642, simple_loss=0.3114, pruned_loss=0.07869, ctc_loss=0.149, over 19489.00 frames. ], tot_loss[loss=0.2705, simple_loss=0.3136, pruned_loss=0.08271, ctc_loss=0.1551, over 3773990.72 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:48:03,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1.whitening_limit, batch_count=83712.0, ans=10.0
+2024-08-25 12:48:27,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=83818.66666666667, ans=0.125
+2024-08-25 12:48:45,007 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 1.885e+02 2.166e+02 2.690e+02 4.534e+02, threshold=4.331e+02, percent-clipped=3.0
+2024-08-25 12:48:49,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=83925.33333333333, ans=0.125
+2024-08-25 12:48:50,695 INFO [train.py:1114] (1/4) Epoch 7, batch 800, loss[loss=0.2378, simple_loss=0.2864, pruned_loss=0.06771, ctc_loss=0.1344, over 19412.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3133, pruned_loss=0.08254, ctc_loss=0.1547, over 3794781.94 frames. ], batch size: 48, lr: 2.10e-02, grad_scale: 32.0
+2024-08-25 12:48:59,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=83925.33333333333, ans=0.125
+2024-08-25 12:49:13,688 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=84032.0, ans=0.125
+2024-08-25 12:49:17,162 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=84032.0, ans=0.125
+2024-08-25 12:49:40,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=84138.66666666667, ans=0.07
+2024-08-25 12:49:51,365 INFO [train.py:1114] (1/4) Epoch 7, batch 850, loss[loss=0.2729, simple_loss=0.3229, pruned_loss=0.08135, ctc_loss=0.1503, over 19653.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3124, pruned_loss=0.08185, ctc_loss=0.1537, over 3814384.82 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:49:51,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=84192.0, ans=0.125
+2024-08-25 12:49:51,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=84192.0, ans=0.1
+2024-08-25 12:49:54,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=84192.0, ans=0.125
+2024-08-25 12:49:56,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=84192.0, ans=0.0
+2024-08-25 12:50:21,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=84298.66666666667, ans=0.5
+2024-08-25 12:50:31,314 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=6.0
+2024-08-25 12:50:43,490 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 1.946e+02 2.270e+02 2.825e+02 4.143e+02, threshold=4.540e+02, percent-clipped=0.0
+2024-08-25 12:50:49,138 INFO [train.py:1114] (1/4) Epoch 7, batch 900, loss[loss=0.2349, simple_loss=0.2832, pruned_loss=0.06809, ctc_loss=0.1258, over 19808.00 frames. ], tot_loss[loss=0.2705, simple_loss=0.3135, pruned_loss=0.08276, ctc_loss=0.1551, over 3818924.23 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:51:00,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.91 vs. limit=15.0
+2024-08-25 12:51:02,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=84458.66666666667, ans=0.125
+2024-08-25 12:51:05,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=84458.66666666667, ans=0.125
+2024-08-25 12:51:08,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=84458.66666666667, ans=0.125
+2024-08-25 12:51:21,890 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.44 vs. limit=15.0
+2024-08-25 12:51:47,172 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.50 vs. limit=15.0
+2024-08-25 12:51:58,929 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.93 vs. limit=15.0
+2024-08-25 12:52:05,341 INFO [train.py:1114] (1/4) Epoch 7, batch 950, loss[loss=0.2285, simple_loss=0.2785, pruned_loss=0.06449, ctc_loss=0.1237, over 19496.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3142, pruned_loss=0.08333, ctc_loss=0.1564, over 3820132.42 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 16.0
+2024-08-25 12:52:20,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=84778.66666666667, ans=0.125
+2024-08-25 12:52:59,188 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.065e+02 2.373e+02 2.949e+02 1.128e+03, threshold=4.746e+02, percent-clipped=6.0
+2024-08-25 12:53:00,530 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=84938.66666666667, ans=0.0
+2024-08-25 12:53:05,274 INFO [train.py:1114] (1/4) Epoch 7, batch 1000, loss[loss=0.2663, simple_loss=0.3103, pruned_loss=0.08027, ctc_loss=0.1542, over 19875.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3156, pruned_loss=0.08391, ctc_loss=0.1578, over 3815457.84 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:53:22,941 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.10 vs. limit=15.0
+2024-08-25 12:53:25,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=85045.33333333333, ans=0.025
+2024-08-25 12:53:38,642 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.25 vs. limit=22.5
+2024-08-25 12:53:45,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=85152.0, ans=0.125
+2024-08-25 12:53:51,360 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=85152.0, ans=0.05
+2024-08-25 12:53:57,413 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=85205.33333333333, ans=0.125
+2024-08-25 12:54:04,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=85258.66666666667, ans=15.0
+2024-08-25 12:54:05,089 INFO [train.py:1114] (1/4) Epoch 7, batch 1050, loss[loss=0.2865, simple_loss=0.3322, pruned_loss=0.08727, ctc_loss=0.1656, over 19844.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3149, pruned_loss=0.0836, ctc_loss=0.1571, over 3823090.38 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:54:15,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=85312.0, ans=0.125
+2024-08-25 12:54:41,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-25 12:54:55,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=85472.0, ans=0.1
+2024-08-25 12:55:01,654 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 1.918e+02 2.325e+02 2.776e+02 4.591e+02, threshold=4.650e+02, percent-clipped=1.0
+2024-08-25 12:55:02,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=85472.0, ans=0.0
+2024-08-25 12:55:05,413 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=85525.33333333333, ans=0.125
+2024-08-25 12:55:06,549 INFO [train.py:1114] (1/4) Epoch 7, batch 1100, loss[loss=0.2829, simple_loss=0.3202, pruned_loss=0.09011, ctc_loss=0.1636, over 19591.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3145, pruned_loss=0.08304, ctc_loss=0.1562, over 3830140.20 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:55:16,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=85525.33333333333, ans=0.2
+2024-08-25 12:55:24,755 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.94 vs. limit=15.0
+2024-08-25 12:55:26,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=85578.66666666667, ans=0.125
+2024-08-25 12:55:30,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=85632.0, ans=0.125
+2024-08-25 12:55:41,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=85685.33333333333, ans=0.0
+2024-08-25 12:55:52,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=85685.33333333333, ans=0.0
+2024-08-25 12:56:05,760 INFO [train.py:1114] (1/4) Epoch 7, batch 1150, loss[loss=0.2733, simple_loss=0.3126, pruned_loss=0.08446, ctc_loss=0.1629, over 19582.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3141, pruned_loss=0.08298, ctc_loss=0.1561, over 3828421.18 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:56:05,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=85792.0, ans=0.125
+2024-08-25 12:56:08,397 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:56:15,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=85792.0, ans=0.125
+2024-08-25 12:56:24,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=85845.33333333333, ans=0.025
+2024-08-25 12:56:36,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85898.66666666667, ans=0.1
+2024-08-25 12:57:02,979 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.959e+02 2.167e+02 2.666e+02 4.946e+02, threshold=4.335e+02, percent-clipped=2.0
+2024-08-25 12:57:03,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=86005.33333333333, ans=0.125
+2024-08-25 12:57:07,688 INFO [train.py:1114] (1/4) Epoch 7, batch 1200, loss[loss=0.2661, simple_loss=0.3192, pruned_loss=0.07789, ctc_loss=0.1428, over 19830.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3146, pruned_loss=0.08322, ctc_loss=0.1566, over 3823710.99 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:57:21,789 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.69 vs. limit=10.0
+2024-08-25 12:57:30,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=86165.33333333333, ans=0.025
+2024-08-25 12:57:31,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.80 vs. limit=15.0
+2024-08-25 12:57:33,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=86165.33333333333, ans=0.0
+2024-08-25 12:58:05,923 INFO [train.py:1114] (1/4) Epoch 7, batch 1250, loss[loss=0.2911, simple_loss=0.3397, pruned_loss=0.08802, ctc_loss=0.166, over 19523.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3145, pruned_loss=0.08271, ctc_loss=0.1557, over 3842663.98 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:58:06,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=86325.33333333333, ans=0.035
+2024-08-25 12:58:14,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=86325.33333333333, ans=10.0
+2024-08-25 12:58:29,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=86378.66666666667, ans=0.125
+2024-08-25 12:58:34,082 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:58:44,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=86485.33333333333, ans=0.125
+2024-08-25 12:58:52,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=86485.33333333333, ans=0.2
+2024-08-25 12:59:02,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=86538.66666666667, ans=0.07
+2024-08-25 12:59:02,856 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.964e+02 2.304e+02 2.729e+02 5.465e+02, threshold=4.608e+02, percent-clipped=2.0
+2024-08-25 12:59:06,939 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.31 vs. limit=6.0
+2024-08-25 12:59:07,513 INFO [train.py:1114] (1/4) Epoch 7, batch 1300, loss[loss=0.2964, simple_loss=0.3364, pruned_loss=0.0932, ctc_loss=0.1752, over 18921.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3134, pruned_loss=0.08205, ctc_loss=0.1544, over 3846659.20 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:59:20,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-25 12:59:26,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=86645.33333333333, ans=0.0
+2024-08-25 12:59:38,969 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.56 vs. limit=15.0
+2024-08-25 13:00:02,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=86805.33333333333, ans=0.0
+2024-08-25 13:00:03,504 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=86805.33333333333, ans=0.125
+2024-08-25 13:00:04,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=86805.33333333333, ans=0.09899494936611666
+2024-08-25 13:00:07,958 INFO [train.py:1114] (1/4) Epoch 7, batch 1350, loss[loss=0.2597, simple_loss=0.3104, pruned_loss=0.07583, ctc_loss=0.1436, over 19786.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3126, pruned_loss=0.08147, ctc_loss=0.1529, over 3858723.75 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 13:00:20,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=86912.0, ans=0.125
+2024-08-25 13:00:46,473 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.73 vs. limit=10.0
+2024-08-25 13:01:58,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=87072.0, ans=0.2
+2024-08-25 13:01:59,607 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.935e+02 2.309e+02 3.009e+02 4.449e+02, threshold=4.618e+02, percent-clipped=0.0
+2024-08-25 13:02:04,195 INFO [train.py:1114] (1/4) Epoch 7, batch 1400, loss[loss=0.1956, simple_loss=0.2526, pruned_loss=0.04965, ctc_loss=0.09842, over 19649.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3118, pruned_loss=0.08118, ctc_loss=0.1526, over 3865468.57 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-25 13:02:05,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-25 13:02:06,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-25 13:02:25,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=87178.66666666667, ans=0.0
+2024-08-25 13:02:32,570 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.82 vs. limit=10.0
+2024-08-25 13:02:51,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=87285.33333333333, ans=0.07
+2024-08-25 13:03:05,400 INFO [train.py:1114] (1/4) Epoch 7, batch 1450, loss[loss=0.2777, simple_loss=0.3265, pruned_loss=0.08398, ctc_loss=0.1522, over 19663.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.3127, pruned_loss=0.08156, ctc_loss=0.1534, over 3863342.27 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:03:18,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=87445.33333333333, ans=0.0
+2024-08-25 13:03:22,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=87445.33333333333, ans=0.1
+2024-08-25 13:03:36,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=87498.66666666667, ans=0.09899494936611666
+2024-08-25 13:04:29,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=87552.0, ans=0.0
+2024-08-25 13:04:32,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=87552.0, ans=0.2
+2024-08-25 13:04:46,556 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.015e+02 2.285e+02 2.716e+02 4.465e+02, threshold=4.569e+02, percent-clipped=0.0
+2024-08-25 13:04:50,185 INFO [train.py:1114] (1/4) Epoch 7, batch 1500, loss[loss=0.2528, simple_loss=0.3041, pruned_loss=0.07368, ctc_loss=0.1355, over 19582.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.313, pruned_loss=0.0815, ctc_loss=0.1534, over 3862746.88 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:05:02,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=87712.0, ans=0.0
+2024-08-25 13:05:25,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=87765.33333333333, ans=0.125
+2024-08-25 13:05:30,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=87765.33333333333, ans=0.0
+2024-08-25 13:05:33,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=87818.66666666667, ans=0.1
+2024-08-25 13:05:39,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=87818.66666666667, ans=0.09899494936611666
+2024-08-25 13:05:49,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=87872.0, ans=0.125
+2024-08-25 13:05:51,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=87872.0, ans=0.0
+2024-08-25 13:05:57,428 INFO [train.py:1114] (1/4) Epoch 7, batch 1550, loss[loss=0.2902, simple_loss=0.3303, pruned_loss=0.09178, ctc_loss=0.1663, over 19607.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3132, pruned_loss=0.08173, ctc_loss=0.1539, over 3848878.45 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:06:01,328 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:06:11,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=87978.66666666667, ans=0.0
+2024-08-25 13:06:52,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=88138.66666666667, ans=0.0
+2024-08-25 13:06:55,901 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.880e+02 2.225e+02 2.757e+02 4.141e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 13:07:00,958 INFO [train.py:1114] (1/4) Epoch 7, batch 1600, loss[loss=0.2942, simple_loss=0.3341, pruned_loss=0.09276, ctc_loss=0.172, over 19853.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.3129, pruned_loss=0.08142, ctc_loss=0.1535, over 3837900.68 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:07:21,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=88245.33333333333, ans=0.125
+2024-08-25 13:07:41,883 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=88352.0, ans=0.1
+2024-08-25 13:07:53,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=88405.33333333333, ans=0.025
+2024-08-25 13:07:56,794 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=88405.33333333333, ans=0.0
+2024-08-25 13:07:58,840 INFO [train.py:1114] (1/4) Epoch 7, batch 1650, loss[loss=0.2529, simple_loss=0.3055, pruned_loss=0.07369, ctc_loss=0.1321, over 19670.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3128, pruned_loss=0.08169, ctc_loss=0.1537, over 3834353.21 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:08:17,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=88512.0, ans=0.125
+2024-08-25 13:08:24,544 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.23 vs. limit=15.0
+2024-08-25 13:08:32,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=88565.33333333333, ans=0.125
+2024-08-25 13:08:36,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=88618.66666666667, ans=0.125
+2024-08-25 13:08:42,037 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=88618.66666666667, ans=0.0
+2024-08-25 13:08:54,963 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.917e+02 2.131e+02 2.729e+02 4.248e+02, threshold=4.261e+02, percent-clipped=0.0
+2024-08-25 13:08:58,383 INFO [train.py:1114] (1/4) Epoch 7, batch 1700, loss[loss=0.234, simple_loss=0.2778, pruned_loss=0.0696, ctc_loss=0.1276, over 19698.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.312, pruned_loss=0.08079, ctc_loss=0.1518, over 3848276.44 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:09:05,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=88725.33333333333, ans=0.2
+2024-08-25 13:09:19,778 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.87 vs. limit=22.5
+2024-08-25 13:09:30,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=88832.0, ans=0.125
+2024-08-25 13:09:35,106 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=88885.33333333333, ans=0.1
+2024-08-25 13:09:45,773 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.64 vs. limit=15.0
+2024-08-25 13:09:46,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=88938.66666666667, ans=0.125
+2024-08-25 13:09:55,053 INFO [train.py:1114] (1/4) Epoch 7, batch 1750, loss[loss=0.2239, simple_loss=0.2707, pruned_loss=0.06378, ctc_loss=0.1237, over 19625.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3118, pruned_loss=0.08103, ctc_loss=0.1525, over 3852421.63 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:16:07,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=89045.33333333333, ans=0.2
+2024-08-25 13:16:07,808 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.24 vs. limit=15.0
+2024-08-25 13:17:21,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=89098.66666666667, ans=0.125
+2024-08-25 13:25:00,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=89152.0, ans=0.1
+2024-08-25 13:29:44,217 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.972e+02 2.344e+02 2.828e+02 4.449e+02, threshold=4.688e+02, percent-clipped=1.0
+2024-08-25 13:29:47,698 INFO [train.py:1114] (1/4) Epoch 7, batch 1800, loss[loss=0.2834, simple_loss=0.327, pruned_loss=0.08713, ctc_loss=0.1638, over 19609.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3119, pruned_loss=0.08102, ctc_loss=0.1523, over 3853282.55 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:31:06,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=89258.66666666667, ans=0.0
+2024-08-25 13:31:14,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=21.58 vs. limit=22.5
+2024-08-25 13:38:20,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.61 vs. limit=10.0
+2024-08-25 13:38:36,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=89418.66666666667, ans=0.1
+2024-08-25 13:38:37,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=89418.66666666667, ans=0.025
+2024-08-25 13:38:42,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=89418.66666666667, ans=0.2
+2024-08-25 13:39:43,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=89472.0, ans=0.125
+2024-08-25 13:40:34,832 INFO [train.py:1114] (1/4) Epoch 7, batch 1850, loss[loss=0.272, simple_loss=0.324, pruned_loss=0.07912, ctc_loss=0.1546, over 19570.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3121, pruned_loss=0.08125, ctc_loss=0.1527, over 3857561.30 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:43:28,639 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.41 vs. limit=15.0
+2024-08-25 13:43:38,665 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.24 vs. limit=12.0
+2024-08-25 13:43:46,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=89738.66666666667, ans=0.0
+2024-08-25 13:44:01,309 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.852e+02 2.070e+02 2.397e+02 4.608e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-25 13:44:07,982 INFO [train.py:1114] (1/4) Epoch 7, batch 1900, loss[loss=0.2413, simple_loss=0.3042, pruned_loss=0.06451, ctc_loss=0.1236, over 19656.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3133, pruned_loss=0.08165, ctc_loss=0.1537, over 3862690.98 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:44:18,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=89792.0, ans=0.125
+2024-08-25 13:44:24,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89792.0, ans=0.1
+2024-08-25 13:44:28,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=89845.33333333333, ans=0.2
+2024-08-25 13:44:29,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=89845.33333333333, ans=0.125
+2024-08-25 13:44:29,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=89845.33333333333, ans=0.07
+2024-08-25 13:44:53,116 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:45:05,813 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=89952.0, ans=0.125
+2024-08-25 13:45:19,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=89952.0, ans=0.0
+2024-08-25 13:45:28,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90005.33333333333, ans=0.1
+2024-08-25 13:45:41,187 INFO [train.py:1114] (1/4) Epoch 7, batch 1950, loss[loss=0.2557, simple_loss=0.2998, pruned_loss=0.07757, ctc_loss=0.1413, over 19577.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3134, pruned_loss=0.08118, ctc_loss=0.1527, over 3871580.56 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 16.0
+2024-08-25 13:45:54,319 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.29 vs. limit=12.0
+2024-08-25 13:45:55,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=90112.0, ans=0.125
+2024-08-25 13:46:02,201 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.01 vs. limit=15.0
+2024-08-25 13:46:42,773 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 1.896e+02 2.177e+02 2.703e+02 3.964e+02, threshold=4.354e+02, percent-clipped=0.0
+2024-08-25 13:46:45,052 INFO [train.py:1114] (1/4) Epoch 7, batch 2000, loss[loss=0.2374, simple_loss=0.2886, pruned_loss=0.0681, ctc_loss=0.1251, over 19656.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3146, pruned_loss=0.0821, ctc_loss=0.1545, over 3855939.06 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:46:50,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=90325.33333333333, ans=10.0
+2024-08-25 13:47:10,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=90432.0, ans=0.125
+2024-08-25 13:47:14,887 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.74 vs. limit=10.0
+2024-08-25 13:47:24,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=90485.33333333333, ans=0.0
+2024-08-25 13:47:41,004 INFO [train.py:1114] (1/4) Epoch 7, batch 2050, loss[loss=0.2333, simple_loss=0.2847, pruned_loss=0.06615, ctc_loss=0.1239, over 19726.00 frames. ], tot_loss[loss=0.2695, simple_loss=0.3134, pruned_loss=0.08187, ctc_loss=0.1544, over 3852162.00 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:47:56,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=90645.33333333333, ans=0.0
+2024-08-25 13:47:57,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff2.min_abs, batch_count=90645.33333333333, ans=0.1
+2024-08-25 13:48:00,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=90645.33333333333, ans=0.0
+2024-08-25 13:48:13,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=90698.66666666667, ans=0.125
+2024-08-25 13:48:23,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=90752.0, ans=0.125
+2024-08-25 13:48:36,339 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.053e+02 2.413e+02 3.017e+02 5.203e+02, threshold=4.827e+02, percent-clipped=2.0
+2024-08-25 13:48:38,576 INFO [train.py:1114] (1/4) Epoch 7, batch 2100, loss[loss=0.2623, simple_loss=0.306, pruned_loss=0.07879, ctc_loss=0.1526, over 19754.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3124, pruned_loss=0.08093, ctc_loss=0.1527, over 3858882.17 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:49:02,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.57 vs. limit=12.0
+2024-08-25 13:49:20,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=91018.66666666667, ans=0.0
+2024-08-25 13:49:24,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=91018.66666666667, ans=0.025
+2024-08-25 13:49:30,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=91072.0, ans=0.2
+2024-08-25 13:49:34,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=91072.0, ans=0.125
+2024-08-25 13:49:42,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=91125.33333333333, ans=0.125
+2024-08-25 13:49:43,245 INFO [train.py:1114] (1/4) Epoch 7, batch 2150, loss[loss=0.2573, simple_loss=0.2995, pruned_loss=0.07831, ctc_loss=0.1461, over 19584.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3114, pruned_loss=0.08061, ctc_loss=0.1519, over 3869609.55 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:49:55,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-25 13:49:56,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-25 13:49:58,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-25 13:49:58,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-25 13:50:01,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91178.66666666667, ans=0.1
+2024-08-25 13:50:03,476 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-25 13:50:12,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=91232.0, ans=0.125
+2024-08-25 13:50:20,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=91285.33333333333, ans=0.125
+2024-08-25 13:50:33,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_ff3.min_abs, batch_count=91338.66666666667, ans=0.2
+2024-08-25 13:50:36,445 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 1.920e+02 2.200e+02 2.924e+02 5.090e+02, threshold=4.400e+02, percent-clipped=1.0
+2024-08-25 13:50:38,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=91392.0, ans=0.0
+2024-08-25 13:50:39,128 INFO [train.py:1114] (1/4) Epoch 7, batch 2200, loss[loss=0.2893, simple_loss=0.3313, pruned_loss=0.09015, ctc_loss=0.1677, over 19580.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3116, pruned_loss=0.08061, ctc_loss=0.1518, over 3868263.55 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-25 13:50:58,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91445.33333333333, ans=0.1
+2024-08-25 13:51:00,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=91498.66666666667, ans=0.0
+2024-08-25 13:51:09,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=91498.66666666667, ans=0.1
+2024-08-25 13:51:16,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=91552.0, ans=0.0
+2024-08-25 13:51:22,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=91552.0, ans=0.125
+2024-08-25 13:51:34,995 INFO [train.py:1114] (1/4) Epoch 7, batch 2250, loss[loss=0.2456, simple_loss=0.306, pruned_loss=0.06734, ctc_loss=0.1264, over 19603.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3119, pruned_loss=0.08085, ctc_loss=0.1521, over 3867162.72 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:51:39,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.61 vs. limit=10.0
+2024-08-25 13:51:44,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=91712.0, ans=0.125
+2024-08-25 13:52:01,547 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.52 vs. limit=6.0
+2024-08-25 13:52:05,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=91765.33333333333, ans=0.125
+2024-08-25 13:52:14,497 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=91818.66666666667, ans=0.0
+2024-08-25 13:52:28,393 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.146e+02 2.677e+02 3.204e+02 4.930e+02, threshold=5.354e+02, percent-clipped=3.0
+2024-08-25 13:52:29,557 INFO [train.py:1114] (1/4) Epoch 7, batch 2300, loss[loss=0.2404, simple_loss=0.2894, pruned_loss=0.07054, ctc_loss=0.1257, over 19505.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3113, pruned_loss=0.08071, ctc_loss=0.1518, over 3861012.41 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:52:33,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=91925.33333333333, ans=0.2
+2024-08-25 13:52:37,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=91925.33333333333, ans=0.0
+2024-08-25 13:52:47,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.73 vs. limit=10.0
+2024-08-25 13:52:57,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=92032.0, ans=0.1
+2024-08-25 13:53:05,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=92085.33333333333, ans=0.1
+2024-08-25 13:53:18,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=92138.66666666667, ans=0.025
+2024-08-25 13:53:23,070 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:53:25,163 INFO [train.py:1114] (1/4) Epoch 7, batch 2350, loss[loss=0.2673, simple_loss=0.3228, pruned_loss=0.07795, ctc_loss=0.14, over 19666.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3113, pruned_loss=0.08074, ctc_loss=0.1515, over 3863615.91 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:53:35,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=92245.33333333333, ans=0.0
+2024-08-25 13:53:51,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-25 13:53:54,709 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-25 13:54:10,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=92405.33333333333, ans=0.2
+2024-08-25 13:54:18,217 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.985e+02 2.336e+02 2.802e+02 4.974e+02, threshold=4.671e+02, percent-clipped=0.0
+2024-08-25 13:54:19,273 INFO [train.py:1114] (1/4) Epoch 7, batch 2400, loss[loss=0.3083, simple_loss=0.3479, pruned_loss=0.0984, ctc_loss=0.1796, over 19295.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3129, pruned_loss=0.0815, ctc_loss=0.1526, over 3857314.72 frames. ], batch size: 71, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:54:28,777 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.38 vs. limit=15.0
+2024-08-25 13:54:44,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=92565.33333333333, ans=0.1
+2024-08-25 13:55:47,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=92565.33333333333, ans=0.1
+2024-08-25 13:55:52,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=92618.66666666667, ans=0.125
+2024-08-25 13:56:02,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=92672.0, ans=0.0
+2024-08-25 13:56:10,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=92672.0, ans=0.09899494936611666
+2024-08-25 13:56:13,545 INFO [train.py:1114] (1/4) Epoch 7, batch 2450, loss[loss=0.3927, simple_loss=0.3779, pruned_loss=0.1476, ctc_loss=0.2807, over 13294.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3181, pruned_loss=0.08608, ctc_loss=0.1616, over 3729139.51 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:56:18,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=92725.33333333333, ans=0.125
+2024-08-25 13:56:19,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=92725.33333333333, ans=0.2
+2024-08-25 13:56:19,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=92725.33333333333, ans=0.0
+2024-08-25 13:56:42,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=92778.66666666667, ans=0.125
+2024-08-25 13:56:42,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=92778.66666666667, ans=0.125
+2024-08-25 13:57:01,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=92885.33333333333, ans=0.0
+2024-08-25 13:57:54,269 INFO [train.py:1114] (1/4) Epoch 8, batch 0, loss[loss=0.246, simple_loss=0.2955, pruned_loss=0.07066, ctc_loss=0.1379, over 19413.00 frames. ], tot_loss[loss=0.246, simple_loss=0.2955, pruned_loss=0.07066, ctc_loss=0.1379, over 19413.00 frames. ], batch size: 48, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 13:57:54,270 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 13:59:56,290 INFO [train.py:1146] (1/4) Epoch 8, validation: loss=0.2171, simple_loss=0.2997, pruned_loss=0.04948, ctc_loss=0.08904, over 944034.00 frames.
+2024-08-25 13:59:56,291 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 13:59:56,762 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.49 vs. limit=15.0
+2024-08-25 14:00:05,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=92933.33333333333, ans=0.0
+2024-08-25 14:01:03,637 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.158e+02 2.483e+02 2.902e+02 5.180e+02, threshold=4.965e+02, percent-clipped=2.0
+2024-08-25 14:01:42,491 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.71 vs. limit=15.0
+2024-08-25 14:02:04,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=93146.66666666667, ans=0.0
+2024-08-25 14:02:06,443 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=93146.66666666667, ans=0.1
+2024-08-25 14:02:13,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=93146.66666666667, ans=0.0
+2024-08-25 14:02:17,064 INFO [train.py:1114] (1/4) Epoch 8, batch 50, loss[loss=0.2386, simple_loss=0.2844, pruned_loss=0.06973, ctc_loss=0.1332, over 19725.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3178, pruned_loss=0.08489, ctc_loss=0.1606, over 845284.01 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:02:34,064 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.17 vs. limit=15.0
+2024-08-25 14:02:43,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=93306.66666666667, ans=0.125
+2024-08-25 14:03:01,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=93360.0, ans=0.125
+2024-08-25 14:04:49,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=93413.33333333333, ans=0.0
+2024-08-25 14:05:00,921 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=93413.33333333333, ans=0.0
+2024-08-25 14:05:03,232 INFO [train.py:1114] (1/4) Epoch 8, batch 100, loss[loss=0.2546, simple_loss=0.3013, pruned_loss=0.07476, ctc_loss=0.1462, over 19716.00 frames. ], tot_loss[loss=0.2704, simple_loss=0.3157, pruned_loss=0.08176, ctc_loss=0.1541, over 1500494.30 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:05:14,933 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.910e+02 2.219e+02 2.660e+02 5.043e+02, threshold=4.439e+02, percent-clipped=1.0
+2024-08-25 14:05:16,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=93520.0, ans=0.0
+2024-08-25 14:05:44,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=93626.66666666667, ans=0.125
+2024-08-25 14:05:48,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=93626.66666666667, ans=0.04949747468305833
+2024-08-25 14:05:55,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=93680.0, ans=0.2
+2024-08-25 14:07:16,378 INFO [train.py:1114] (1/4) Epoch 8, batch 150, loss[loss=0.2213, simple_loss=0.2755, pruned_loss=0.06106, ctc_loss=0.1126, over 19719.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3109, pruned_loss=0.0794, ctc_loss=0.1488, over 2028940.94 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:07:29,176 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.34 vs. limit=15.0
+2024-08-25 14:07:31,714 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.09 vs. limit=15.0
+2024-08-25 14:08:14,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=93786.66666666667, ans=0.0
+2024-08-25 14:08:14,753 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=93786.66666666667, ans=0.5
+2024-08-25 14:09:07,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=93840.0, ans=0.1
+2024-08-25 14:09:23,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=93893.33333333333, ans=0.125
+2024-08-25 14:09:27,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=93946.66666666667, ans=0.0
+2024-08-25 14:10:14,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.31 vs. limit=15.0
+2024-08-25 14:10:16,267 INFO [train.py:1114] (1/4) Epoch 8, batch 200, loss[loss=0.2691, simple_loss=0.3119, pruned_loss=0.08168, ctc_loss=0.1574, over 18206.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3076, pruned_loss=0.07724, ctc_loss=0.1451, over 2436792.66 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:10:22,287 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=94000.0, ans=0.2
+2024-08-25 14:10:29,222 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.854e+02 2.093e+02 2.544e+02 5.078e+02, threshold=4.187e+02, percent-clipped=1.0
+2024-08-25 14:10:33,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=94053.33333333333, ans=0.125
+2024-08-25 14:10:46,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=94106.66666666667, ans=0.125
+2024-08-25 14:10:59,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=94160.0, ans=0.2
+2024-08-25 14:11:07,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=94213.33333333333, ans=0.0
+2024-08-25 14:11:17,845 INFO [train.py:1114] (1/4) Epoch 8, batch 250, loss[loss=0.2827, simple_loss=0.3221, pruned_loss=0.08986, ctc_loss=0.1586, over 19411.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3082, pruned_loss=0.078, ctc_loss=0.1465, over 2756344.82 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:11:31,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=94266.66666666667, ans=0.125
+2024-08-25 14:12:28,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=94320.0, ans=0.125
+2024-08-25 14:12:51,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=94426.66666666667, ans=0.2
+2024-08-25 14:12:58,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=94426.66666666667, ans=0.0
+2024-08-25 14:13:02,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=94480.0, ans=0.0
+2024-08-25 14:13:13,476 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.33 vs. limit=15.0
+2024-08-25 14:13:16,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=94480.0, ans=0.0
+2024-08-25 14:13:21,907 INFO [train.py:1114] (1/4) Epoch 8, batch 300, loss[loss=0.268, simple_loss=0.3168, pruned_loss=0.07985, ctc_loss=0.149, over 19536.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3064, pruned_loss=0.07692, ctc_loss=0.1443, over 3000549.85 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:13:33,347 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 1.987e+02 2.340e+02 3.022e+02 6.047e+02, threshold=4.681e+02, percent-clipped=9.0
+2024-08-25 14:13:44,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=94640.0, ans=0.1
+2024-08-25 14:13:45,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=94640.0, ans=0.125
+2024-08-25 14:13:49,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=94640.0, ans=0.1
+2024-08-25 14:14:31,945 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=94693.33333333333, ans=0.125
+2024-08-25 14:14:52,108 INFO [train.py:1114] (1/4) Epoch 8, batch 350, loss[loss=0.2446, simple_loss=0.2863, pruned_loss=0.07317, ctc_loss=0.1411, over 19761.00 frames. ], tot_loss[loss=0.2598, simple_loss=0.3075, pruned_loss=0.07712, ctc_loss=0.1448, over 3191442.03 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:16:35,417 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.51 vs. limit=10.0
+2024-08-25 14:16:38,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95013.33333333333, ans=0.1
+2024-08-25 14:16:45,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=95013.33333333333, ans=0.1
+2024-08-25 14:16:50,820 INFO [train.py:1114] (1/4) Epoch 8, batch 400, loss[loss=0.2767, simple_loss=0.3185, pruned_loss=0.08633, ctc_loss=0.1556, over 19491.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3066, pruned_loss=0.07653, ctc_loss=0.1438, over 3343449.44 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:17:03,866 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.019e+02 2.528e+02 3.132e+02 5.852e+02, threshold=5.056e+02, percent-clipped=7.0
+2024-08-25 14:17:09,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=95120.0, ans=0.125
+2024-08-25 14:17:41,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=95280.0, ans=0.2
+2024-08-25 14:17:50,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=95280.0, ans=0.125
+2024-08-25 14:18:38,389 INFO [train.py:1114] (1/4) Epoch 8, batch 450, loss[loss=0.2598, simple_loss=0.3246, pruned_loss=0.07021, ctc_loss=0.1364, over 19623.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3067, pruned_loss=0.07688, ctc_loss=0.1444, over 3451718.36 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:18:42,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95333.33333333333, ans=0.1
+2024-08-25 14:18:57,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=95386.66666666667, ans=0.125
+2024-08-25 14:18:57,929 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.83 vs. limit=15.0
+2024-08-25 14:19:31,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=95546.66666666667, ans=0.125
+2024-08-25 14:19:39,036 INFO [train.py:1114] (1/4) Epoch 8, batch 500, loss[loss=0.2579, simple_loss=0.3156, pruned_loss=0.07288, ctc_loss=0.1362, over 19638.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3053, pruned_loss=0.07618, ctc_loss=0.1432, over 3547664.10 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:19:52,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=95600.0, ans=0.2
+2024-08-25 14:21:37,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=95600.0, ans=0.1
+2024-08-25 14:21:37,854 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.13 vs. limit=15.0
+2024-08-25 14:21:42,069 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.925e+02 2.242e+02 2.655e+02 4.786e+02, threshold=4.483e+02, percent-clipped=0.0
+2024-08-25 14:21:58,768 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.47 vs. limit=12.0
+2024-08-25 14:22:05,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=95760.0, ans=0.125
+2024-08-25 14:22:34,086 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=95813.33333333333, ans=0.125
+2024-08-25 14:22:36,073 INFO [train.py:1114] (1/4) Epoch 8, batch 550, loss[loss=0.2695, simple_loss=0.3166, pruned_loss=0.08083, ctc_loss=0.1519, over 19289.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3063, pruned_loss=0.07673, ctc_loss=0.1441, over 3609336.19 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:22:45,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=95866.66666666667, ans=0.1
+2024-08-25 14:24:05,563 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=95973.33333333333, ans=0.2
+2024-08-25 14:25:43,163 INFO [train.py:1114] (1/4) Epoch 8, batch 600, loss[loss=0.2668, simple_loss=0.3224, pruned_loss=0.07763, ctc_loss=0.1397, over 19378.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.3068, pruned_loss=0.07691, ctc_loss=0.1445, over 3666215.50 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:25:54,325 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 1.975e+02 2.461e+02 2.998e+02 6.685e+02, threshold=4.922e+02, percent-clipped=2.0
+2024-08-25 14:26:01,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=96186.66666666667, ans=0.125
+2024-08-25 14:26:24,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=96293.33333333333, ans=0.125
+2024-08-25 14:26:31,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=96293.33333333333, ans=0.2
+2024-08-25 14:27:51,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=96346.66666666667, ans=0.2
+2024-08-25 14:29:18,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96346.66666666667, ans=0.1
+2024-08-25 14:29:20,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=96346.66666666667, ans=0.025
+2024-08-25 14:29:23,560 INFO [train.py:1114] (1/4) Epoch 8, batch 650, loss[loss=0.2582, simple_loss=0.3045, pruned_loss=0.07511, ctc_loss=0.1541, over 19759.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3062, pruned_loss=0.07656, ctc_loss=0.1443, over 3716209.91 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:29:23,692 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:29:36,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=96453.33333333333, ans=0.0
+2024-08-25 14:29:42,483 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.25 vs. limit=10.0
+2024-08-25 14:29:46,684 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=96453.33333333333, ans=10.0
+2024-08-25 14:30:03,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=96560.0, ans=0.0
+2024-08-25 14:30:07,731 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.75 vs. limit=22.5
+2024-08-25 14:30:52,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=96560.0, ans=0.0
+2024-08-25 14:31:06,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=96613.33333333333, ans=0.125
+2024-08-25 14:31:08,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=96613.33333333333, ans=0.125
+2024-08-25 14:31:24,402 INFO [train.py:1114] (1/4) Epoch 8, batch 700, loss[loss=0.2306, simple_loss=0.2872, pruned_loss=0.06264, ctc_loss=0.1218, over 19719.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3066, pruned_loss=0.0766, ctc_loss=0.1443, over 3749112.39 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:31:25,680 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.97 vs. limit=15.0
+2024-08-25 14:31:36,079 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 1.952e+02 2.228e+02 2.907e+02 4.140e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 14:31:54,117 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.29 vs. limit=6.0
+2024-08-25 14:31:54,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=96773.33333333333, ans=0.125
+2024-08-25 14:32:00,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=96773.33333333333, ans=0.125
+2024-08-25 14:32:04,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=96773.33333333333, ans=0.125
+2024-08-25 14:32:27,585 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.18 vs. limit=15.0
+2024-08-25 14:32:35,086 INFO [train.py:1114] (1/4) Epoch 8, batch 750, loss[loss=0.2512, simple_loss=0.3118, pruned_loss=0.06927, ctc_loss=0.1299, over 19493.00 frames. ], tot_loss[loss=0.2583, simple_loss=0.3062, pruned_loss=0.07648, ctc_loss=0.1438, over 3774857.95 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:32:44,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=96933.33333333333, ans=0.0
+2024-08-25 14:32:52,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=96986.66666666667, ans=0.0
+2024-08-25 14:33:15,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=97040.0, ans=0.125
+2024-08-25 14:33:22,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=97093.33333333333, ans=0.125
+2024-08-25 14:33:39,200 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.02 vs. limit=15.0
+2024-08-25 14:33:45,421 INFO [train.py:1114] (1/4) Epoch 8, batch 800, loss[loss=0.2205, simple_loss=0.2722, pruned_loss=0.06213, ctc_loss=0.1112, over 19804.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3061, pruned_loss=0.07637, ctc_loss=0.1434, over 3796328.98 frames. ], batch size: 49, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:34:27,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=97200.0, ans=0.125
+2024-08-25 14:34:31,437 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.07 vs. limit=15.0
+2024-08-25 14:34:33,562 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.39 vs. limit=15.0
+2024-08-25 14:34:35,085 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 1.855e+02 2.176e+02 2.933e+02 4.905e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-25 14:35:22,239 INFO [train.py:1114] (1/4) Epoch 8, batch 850, loss[loss=0.2519, simple_loss=0.3111, pruned_loss=0.06966, ctc_loss=0.1337, over 19646.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.3055, pruned_loss=0.07587, ctc_loss=0.1423, over 3815639.42 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:35:26,937 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=97466.66666666667, ans=0.1
+2024-08-25 14:35:37,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=97520.0, ans=0.95
+2024-08-25 14:36:00,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=97626.66666666667, ans=0.2
+2024-08-25 14:36:01,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=97626.66666666667, ans=0.125
+2024-08-25 14:36:02,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=97626.66666666667, ans=0.025
+2024-08-25 14:36:13,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97680.0, ans=0.1
+2024-08-25 14:36:19,793 INFO [train.py:1114] (1/4) Epoch 8, batch 900, loss[loss=0.2583, simple_loss=0.2945, pruned_loss=0.08169, ctc_loss=0.1465, over 19428.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3063, pruned_loss=0.07658, ctc_loss=0.1435, over 3820034.18 frames. ], batch size: 48, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:38:24,996 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.98 vs. limit=15.0
+2024-08-25 14:38:28,622 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.79 vs. limit=15.0
+2024-08-25 14:38:28,755 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.54 vs. limit=15.0
+2024-08-25 14:38:30,474 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 1.935e+02 2.327e+02 2.780e+02 5.034e+02, threshold=4.654e+02, percent-clipped=2.0
+2024-08-25 14:38:30,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_na.min_abs, batch_count=97786.66666666667, ans=0.02
+2024-08-25 14:38:35,004 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=97786.66666666667, ans=0.125
+2024-08-25 14:38:39,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97786.66666666667, ans=0.1
+2024-08-25 14:38:50,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=97840.0, ans=0.0
+2024-08-25 14:39:10,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=97946.66666666667, ans=0.1
+2024-08-25 14:39:56,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=97946.66666666667, ans=0.125
+2024-08-25 14:40:01,371 INFO [train.py:1114] (1/4) Epoch 8, batch 950, loss[loss=0.2532, simple_loss=0.2885, pruned_loss=0.07917, ctc_loss=0.149, over 19506.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3069, pruned_loss=0.07704, ctc_loss=0.1443, over 3821119.37 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:40:17,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=98053.33333333333, ans=0.125
+2024-08-25 14:40:22,522 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=98053.33333333333, ans=0.025
+2024-08-25 14:41:20,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=98106.66666666667, ans=0.0
+2024-08-25 14:41:31,277 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=98160.0, ans=0.2
+2024-08-25 14:42:08,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=98160.0, ans=0.125
+2024-08-25 14:43:17,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=98213.33333333333, ans=0.0
+2024-08-25 14:43:17,673 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.38 vs. limit=22.5
+2024-08-25 14:43:26,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=98213.33333333333, ans=10.0
+2024-08-25 14:43:29,286 INFO [train.py:1114] (1/4) Epoch 8, batch 1000, loss[loss=0.2476, simple_loss=0.2964, pruned_loss=0.07145, ctc_loss=0.1397, over 19838.00 frames. ], tot_loss[loss=0.2617, simple_loss=0.3087, pruned_loss=0.0781, ctc_loss=0.1464, over 3817448.78 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:43:30,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=98266.66666666667, ans=0.125
+2024-08-25 14:43:39,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=98320.0, ans=0.04949747468305833
+2024-08-25 14:43:47,360 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.014e+02 2.465e+02 3.304e+02 4.205e+02, threshold=4.930e+02, percent-clipped=0.0
+2024-08-25 14:43:50,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=98320.0, ans=0.0
+2024-08-25 14:43:52,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=98320.0, ans=0.125
+2024-08-25 14:46:05,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=98320.0, ans=0.125
+2024-08-25 14:46:32,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=98480.0, ans=0.05
+2024-08-25 14:46:35,501 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.59 vs. limit=22.5
+2024-08-25 14:46:37,978 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=98480.0, ans=0.125
+2024-08-25 14:46:44,411 INFO [train.py:1114] (1/4) Epoch 8, batch 1050, loss[loss=0.285, simple_loss=0.3254, pruned_loss=0.08865, ctc_loss=0.1684, over 19833.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3077, pruned_loss=0.07767, ctc_loss=0.1458, over 3823813.01 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:46:45,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=98533.33333333333, ans=0.0
+2024-08-25 14:46:49,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=98533.33333333333, ans=0.125
+2024-08-25 14:47:37,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=98746.66666666667, ans=0.0
+2024-08-25 14:47:44,591 INFO [train.py:1114] (1/4) Epoch 8, batch 1100, loss[loss=0.2333, simple_loss=0.2866, pruned_loss=0.06527, ctc_loss=0.1237, over 19576.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.3069, pruned_loss=0.07688, ctc_loss=0.1444, over 3831686.92 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:48:13,768 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.814e+02 2.071e+02 2.620e+02 3.682e+02, threshold=4.142e+02, percent-clipped=0.0
+2024-08-25 14:48:29,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=98853.33333333333, ans=0.125
+2024-08-25 14:48:40,366 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=98906.66666666667, ans=0.2
+2024-08-25 14:49:01,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=98906.66666666667, ans=0.125
+2024-08-25 14:49:02,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=98906.66666666667, ans=0.125
+2024-08-25 14:49:15,561 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=98960.0, ans=0.2
+2024-08-25 14:49:20,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=98960.0, ans=0.125
+2024-08-25 14:50:00,860 INFO [train.py:1114] (1/4) Epoch 8, batch 1150, loss[loss=0.254, simple_loss=0.2986, pruned_loss=0.07656, ctc_loss=0.141, over 19597.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.3078, pruned_loss=0.07768, ctc_loss=0.146, over 3830909.21 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:51:09,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=99120.0, ans=0.05
+2024-08-25 14:51:26,402 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.83 vs. limit=6.0
+2024-08-25 14:51:58,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=99173.33333333333, ans=0.0
+2024-08-25 14:52:04,106 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=99226.66666666667, ans=0.2
+2024-08-25 14:52:51,781 INFO [train.py:1114] (1/4) Epoch 8, batch 1200, loss[loss=0.2694, simple_loss=0.3202, pruned_loss=0.07986, ctc_loss=0.1474, over 19842.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3079, pruned_loss=0.07736, ctc_loss=0.1455, over 3824781.15 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:53:01,562 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99333.33333333333, ans=0.1
+2024-08-25 14:53:06,252 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.879e+02 2.149e+02 2.634e+02 4.011e+02, threshold=4.298e+02, percent-clipped=0.0
+2024-08-25 14:53:32,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=99493.33333333333, ans=0.0
+2024-08-25 14:53:48,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=99546.66666666667, ans=0.125
+2024-08-25 14:53:50,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=99546.66666666667, ans=0.125
+2024-08-25 14:53:52,342 INFO [train.py:1114] (1/4) Epoch 8, batch 1250, loss[loss=0.3024, simple_loss=0.3363, pruned_loss=0.09866, ctc_loss=0.1778, over 19526.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3083, pruned_loss=0.07715, ctc_loss=0.1453, over 3843129.06 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:54:05,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=99653.33333333333, ans=0.1
+2024-08-25 14:54:06,869 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=99653.33333333333, ans=0.0
+2024-08-25 14:54:11,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=99653.33333333333, ans=0.125
+2024-08-25 14:54:40,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=99706.66666666667, ans=0.07
+2024-08-25 14:55:38,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=99706.66666666667, ans=0.2
+2024-08-25 14:55:45,373 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:55:50,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=99760.0, ans=0.125
+2024-08-25 14:55:54,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=99813.33333333333, ans=0.0
+2024-08-25 14:56:05,545 INFO [train.py:1114] (1/4) Epoch 8, batch 1300, loss[loss=0.2712, simple_loss=0.3122, pruned_loss=0.08421, ctc_loss=0.1542, over 18890.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3064, pruned_loss=0.07615, ctc_loss=0.1436, over 3846658.49 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:56:08,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=99866.66666666667, ans=0.0
+2024-08-25 14:56:17,013 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.809e+02 2.147e+02 2.747e+02 4.726e+02, threshold=4.293e+02, percent-clipped=4.0
+2024-08-25 14:58:09,048 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:58:23,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100080.0, ans=0.1
+2024-08-25 14:58:53,773 INFO [train.py:1114] (1/4) Epoch 8, batch 1350, loss[loss=0.2668, simple_loss=0.3198, pruned_loss=0.07724, ctc_loss=0.1484, over 19792.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3066, pruned_loss=0.07639, ctc_loss=0.144, over 3858031.17 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:58:58,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100133.33333333333, ans=0.125
+2024-08-25 14:59:41,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=100346.66666666667, ans=0.95
+2024-08-25 14:59:51,903 INFO [train.py:1114] (1/4) Epoch 8, batch 1400, loss[loss=0.2218, simple_loss=0.2748, pruned_loss=0.06047, ctc_loss=0.1196, over 19658.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3061, pruned_loss=0.07595, ctc_loss=0.1429, over 3865207.46 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:59:53,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=100400.0, ans=0.125
+2024-08-25 14:59:54,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=100400.0, ans=0.0
+2024-08-25 14:59:54,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=100400.0, ans=0.2
+2024-08-25 15:00:00,539 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.65 vs. limit=22.5
+2024-08-25 15:00:03,300 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.018e+02 2.600e+02 3.300e+02 7.375e+02, threshold=5.199e+02, percent-clipped=11.0
+2024-08-25 15:00:55,881 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=100666.66666666667, ans=0.125
+2024-08-25 15:00:56,747 INFO [train.py:1114] (1/4) Epoch 8, batch 1450, loss[loss=0.2663, simple_loss=0.318, pruned_loss=0.07862, ctc_loss=0.1433, over 19693.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3072, pruned_loss=0.07683, ctc_loss=0.1443, over 3863108.04 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:00:56,925 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=100666.66666666667, ans=0.2
+2024-08-25 15:01:29,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=100773.33333333333, ans=0.125
+2024-08-25 15:01:46,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100880.0, ans=0.125
+2024-08-25 15:02:32,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=100880.0, ans=0.0
+2024-08-25 15:03:16,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=100933.33333333333, ans=0.2
+2024-08-25 15:03:17,701 INFO [train.py:1114] (1/4) Epoch 8, batch 1500, loss[loss=0.2619, simple_loss=0.315, pruned_loss=0.07554, ctc_loss=0.1445, over 19590.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.307, pruned_loss=0.07651, ctc_loss=0.1439, over 3862434.42 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:05:19,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=100933.33333333333, ans=0.125
+2024-08-25 15:05:24,436 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.972e+02 2.271e+02 2.845e+02 5.404e+02, threshold=4.542e+02, percent-clipped=1.0
+2024-08-25 15:05:34,562 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.40 vs. limit=15.0
+2024-08-25 15:08:04,576 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=101040.0, ans=0.2
+2024-08-25 15:08:38,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=101040.0, ans=0.125
+2024-08-25 15:09:15,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=101093.33333333333, ans=0.2
+2024-08-25 15:09:36,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=101093.33333333333, ans=0.125
+2024-08-25 15:10:18,896 INFO [train.py:1114] (1/4) Epoch 8, batch 1550, loss[loss=0.277, simple_loss=0.3226, pruned_loss=0.08466, ctc_loss=0.1551, over 19627.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3077, pruned_loss=0.07722, ctc_loss=0.1451, over 3846907.76 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:12:28,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=101253.33333333333, ans=0.025
+2024-08-25 15:13:44,336 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=101360.0, ans=0.125
+2024-08-25 15:14:03,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=101413.33333333333, ans=0.0
+2024-08-25 15:14:11,856 INFO [train.py:1114] (1/4) Epoch 8, batch 1600, loss[loss=0.2509, simple_loss=0.3122, pruned_loss=0.06816, ctc_loss=0.1329, over 19828.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3076, pruned_loss=0.07727, ctc_loss=0.1454, over 3836701.43 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:14:12,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=101466.66666666667, ans=0.125
+2024-08-25 15:14:21,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=101466.66666666667, ans=0.1
+2024-08-25 15:14:31,972 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.915e+02 2.222e+02 2.696e+02 4.640e+02, threshold=4.444e+02, percent-clipped=1.0
+2024-08-25 15:14:59,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=101573.33333333333, ans=0.125
+2024-08-25 15:15:01,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=101573.33333333333, ans=0.05
+2024-08-25 15:15:07,695 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=101626.66666666667, ans=0.0
+2024-08-25 15:15:22,497 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=101680.0, ans=0.0
+2024-08-25 15:15:30,476 INFO [train.py:1114] (1/4) Epoch 8, batch 1650, loss[loss=0.2637, simple_loss=0.3161, pruned_loss=0.07666, ctc_loss=0.1448, over 19665.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3073, pruned_loss=0.07731, ctc_loss=0.1455, over 3832380.28 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:16:01,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=101840.0, ans=0.025
+2024-08-25 15:16:14,082 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.64 vs. limit=10.0
+2024-08-25 15:16:24,118 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=101946.66666666667, ans=0.125
+2024-08-25 15:16:28,203 INFO [train.py:1114] (1/4) Epoch 8, batch 1700, loss[loss=0.1987, simple_loss=0.2532, pruned_loss=0.05151, ctc_loss=0.1031, over 19678.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3063, pruned_loss=0.07623, ctc_loss=0.1435, over 3846502.58 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:16:29,532 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:16:40,740 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.920e+02 2.237e+02 2.711e+02 4.644e+02, threshold=4.474e+02, percent-clipped=2.0
+2024-08-25 15:16:57,030 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102106.66666666667, ans=0.1
+2024-08-25 15:17:27,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=102213.33333333333, ans=0.125
+2024-08-25 15:17:29,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=102213.33333333333, ans=0.04949747468305833
+2024-08-25 15:17:39,501 INFO [train.py:1114] (1/4) Epoch 8, batch 1750, loss[loss=0.2305, simple_loss=0.27, pruned_loss=0.06913, ctc_loss=0.1317, over 19665.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3062, pruned_loss=0.07639, ctc_loss=0.1436, over 3850806.49 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:18:05,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=102320.0, ans=0.125
+2024-08-25 15:18:10,193 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.33 vs. limit=10.0
+2024-08-25 15:19:54,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102373.33333333333, ans=0.1
+2024-08-25 15:20:22,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=102480.0, ans=0.0
+2024-08-25 15:20:25,838 INFO [train.py:1114] (1/4) Epoch 8, batch 1800, loss[loss=0.2799, simple_loss=0.3252, pruned_loss=0.08406, ctc_loss=0.1664, over 19613.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3071, pruned_loss=0.07707, ctc_loss=0.1447, over 3852697.43 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:20:29,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102533.33333333333, ans=0.125
+2024-08-25 15:20:37,811 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 1.874e+02 2.230e+02 2.859e+02 4.439e+02, threshold=4.460e+02, percent-clipped=0.0
+2024-08-25 15:23:31,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=102586.66666666667, ans=0.0
+2024-08-25 15:23:44,706 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102640.0, ans=0.125
+2024-08-25 15:24:50,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.18 vs. limit=12.0
+2024-08-25 15:26:49,366 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-25 15:28:59,117 INFO [train.py:1114] (1/4) Epoch 8, batch 1850, loss[loss=0.2837, simple_loss=0.3303, pruned_loss=0.0869, ctc_loss=0.1583, over 19586.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3066, pruned_loss=0.07665, ctc_loss=0.144, over 3856138.23 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:29:15,780 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.29 vs. limit=15.0
+2024-08-25 15:29:30,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.70 vs. limit=12.0
+2024-08-25 15:29:34,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=102960.0, ans=0.025
+2024-08-25 15:29:37,374 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.08 vs. limit=22.5
+2024-08-25 15:30:32,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=103013.33333333333, ans=0.2
+2024-08-25 15:30:33,005 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=103013.33333333333, ans=0.125
+2024-08-25 15:32:36,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=103013.33333333333, ans=0.125
+2024-08-25 15:32:38,667 INFO [train.py:1114] (1/4) Epoch 8, batch 1900, loss[loss=0.2615, simple_loss=0.322, pruned_loss=0.07241, ctc_loss=0.1404, over 19658.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3066, pruned_loss=0.07634, ctc_loss=0.1435, over 3860755.12 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:32:40,329 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.92 vs. limit=15.0
+2024-08-25 15:32:52,960 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.872e+02 2.139e+02 2.618e+02 5.849e+02, threshold=4.279e+02, percent-clipped=4.0
+2024-08-25 15:33:09,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=103173.33333333333, ans=0.125
+2024-08-25 15:33:18,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=103226.66666666667, ans=0.1
+2024-08-25 15:33:18,895 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=103226.66666666667, ans=0.0
+2024-08-25 15:33:25,838 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:33:26,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=103280.0, ans=0.0
+2024-08-25 15:33:32,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=103280.0, ans=0.125
+2024-08-25 15:33:37,635 INFO [train.py:1114] (1/4) Epoch 8, batch 1950, loss[loss=0.274, simple_loss=0.3115, pruned_loss=0.08684, ctc_loss=0.1571, over 19599.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3075, pruned_loss=0.07615, ctc_loss=0.1431, over 3870260.74 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:33:48,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103333.33333333333, ans=0.125
+2024-08-25 15:33:53,261 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.88 vs. limit=15.0
+2024-08-25 15:34:23,750 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.89 vs. limit=15.0
+2024-08-25 15:34:39,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=103546.66666666667, ans=0.0
+2024-08-25 15:34:42,890 INFO [train.py:1114] (1/4) Epoch 8, batch 2000, loss[loss=0.2047, simple_loss=0.2581, pruned_loss=0.05481, ctc_loss=0.104, over 19638.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3077, pruned_loss=0.07637, ctc_loss=0.1434, over 3855069.17 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 32.0
+2024-08-25 15:34:55,660 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 2.022e+02 2.450e+02 4.734e+02, threshold=4.043e+02, percent-clipped=1.0
+2024-08-25 15:35:22,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=103760.0, ans=0.0
+2024-08-25 15:35:38,626 INFO [train.py:1114] (1/4) Epoch 8, batch 2050, loss[loss=0.2293, simple_loss=0.2852, pruned_loss=0.06334, ctc_loss=0.1168, over 19708.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3071, pruned_loss=0.07648, ctc_loss=0.1436, over 3850588.02 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:35:41,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=103866.66666666667, ans=0.0
+2024-08-25 15:35:44,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=103866.66666666667, ans=0.125
+2024-08-25 15:35:48,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103920.0, ans=0.1
+2024-08-25 15:35:53,721 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=103920.0, ans=0.07
+2024-08-25 15:35:58,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103920.0, ans=0.125
+2024-08-25 15:36:29,308 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.00 vs. limit=15.0
+2024-08-25 15:36:32,753 INFO [train.py:1114] (1/4) Epoch 8, batch 2100, loss[loss=0.2195, simple_loss=0.2864, pruned_loss=0.05501, ctc_loss=0.1065, over 19768.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3057, pruned_loss=0.0754, ctc_loss=0.1416, over 3856941.52 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:36:39,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104133.33333333333, ans=0.1
+2024-08-25 15:36:44,887 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.055e+02 2.348e+02 2.987e+02 4.948e+02, threshold=4.695e+02, percent-clipped=5.0
+2024-08-25 15:36:50,842 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.70 vs. limit=15.0
+2024-08-25 15:36:57,270 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:37:03,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104240.0, ans=0.0
+2024-08-25 15:37:22,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=104346.66666666667, ans=0.125
+2024-08-25 15:37:27,172 INFO [train.py:1114] (1/4) Epoch 8, batch 2150, loss[loss=0.2449, simple_loss=0.2962, pruned_loss=0.06997, ctc_loss=0.1344, over 19573.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3045, pruned_loss=0.07472, ctc_loss=0.1406, over 3867606.77 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:37:28,843 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.42 vs. limit=12.0
+2024-08-25 15:38:18,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=104613.33333333333, ans=0.0
+2024-08-25 15:38:21,073 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:38:23,247 INFO [train.py:1114] (1/4) Epoch 8, batch 2200, loss[loss=0.2547, simple_loss=0.3103, pruned_loss=0.07287, ctc_loss=0.1332, over 19584.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3045, pruned_loss=0.0749, ctc_loss=0.141, over 3866375.89 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:38:26,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.63 vs. limit=15.0
+2024-08-25 15:38:35,667 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.961e+02 2.280e+02 3.038e+02 5.675e+02, threshold=4.560e+02, percent-clipped=2.0
+2024-08-25 15:39:08,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=104880.0, ans=0.0
+2024-08-25 15:39:13,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=104880.0, ans=0.09899494936611666
+2024-08-25 15:39:19,052 INFO [train.py:1114] (1/4) Epoch 8, batch 2250, loss[loss=0.2414, simple_loss=0.2983, pruned_loss=0.06669, ctc_loss=0.1279, over 19622.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.305, pruned_loss=0.07518, ctc_loss=0.1412, over 3866484.83 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:39:33,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=104986.66666666667, ans=0.0
+2024-08-25 15:39:36,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=104986.66666666667, ans=0.0
+2024-08-25 15:39:44,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=105040.0, ans=0.125
+2024-08-25 15:40:04,721 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=105146.66666666667, ans=0.2
+2024-08-25 15:40:14,489 INFO [train.py:1114] (1/4) Epoch 8, batch 2300, loss[loss=0.2074, simple_loss=0.2735, pruned_loss=0.05141, ctc_loss=0.09601, over 19506.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.304, pruned_loss=0.07509, ctc_loss=0.141, over 3861007.67 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:40:21,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=105200.0, ans=0.025
+2024-08-25 15:40:28,025 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.907e+02 2.167e+02 2.593e+02 4.976e+02, threshold=4.335e+02, percent-clipped=1.0
+2024-08-25 15:40:55,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=105360.0, ans=0.0
+2024-08-25 15:41:03,985 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.44 vs. limit=15.0
+2024-08-25 15:41:11,091 INFO [train.py:1114] (1/4) Epoch 8, batch 2350, loss[loss=0.2948, simple_loss=0.3328, pruned_loss=0.09276, ctc_loss=0.1784, over 19670.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3041, pruned_loss=0.07519, ctc_loss=0.1411, over 3864515.36 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:41:23,260 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=105520.0, ans=0.0
+2024-08-25 15:41:32,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-25 15:41:37,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=105573.33333333333, ans=0.025
+2024-08-25 15:41:37,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105573.33333333333, ans=0.1
+2024-08-25 15:42:06,002 INFO [train.py:1114] (1/4) Epoch 8, batch 2400, loss[loss=0.246, simple_loss=0.2989, pruned_loss=0.07061, ctc_loss=0.1299, over 19309.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.306, pruned_loss=0.07575, ctc_loss=0.1422, over 3859355.61 frames. ], batch size: 71, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:42:17,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=105786.66666666667, ans=0.0
+2024-08-25 15:42:18,059 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.983e+02 2.255e+02 2.870e+02 5.067e+02, threshold=4.510e+02, percent-clipped=2.0
+2024-08-25 15:42:21,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105786.66666666667, ans=0.125
+2024-08-25 15:42:28,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.54 vs. limit=22.5
+2024-08-25 15:42:52,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=105946.66666666667, ans=0.125
+2024-08-25 15:42:54,452 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.99 vs. limit=10.0
+2024-08-25 15:43:01,711 INFO [train.py:1114] (1/4) Epoch 8, batch 2450, loss[loss=0.3259, simple_loss=0.3425, pruned_loss=0.1111, ctc_loss=0.2176, over 13681.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3115, pruned_loss=0.08031, ctc_loss=0.1511, over 3729889.19 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:43:07,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=106000.0, ans=0.125
+2024-08-25 15:43:09,200 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=106000.0, ans=0.015
+2024-08-25 15:43:11,707 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=106053.33333333333, ans=0.125
+2024-08-25 15:43:28,270 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.56 vs. limit=15.0
+2024-08-25 15:43:35,000 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.25 vs. limit=6.0
+2024-08-25 15:43:37,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=106160.0, ans=0.0
+2024-08-25 15:43:38,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=106160.0, ans=0.0
+2024-08-25 15:43:38,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=106160.0, ans=0.125
+2024-08-25 15:43:40,063 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.07 vs. limit=15.0
+2024-08-25 15:44:31,277 INFO [train.py:1114] (1/4) Epoch 9, batch 0, loss[loss=0.2654, simple_loss=0.3057, pruned_loss=0.08225, ctc_loss=0.1513, over 19801.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3057, pruned_loss=0.08225, ctc_loss=0.1513, over 19801.00 frames. ], batch size: 49, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:44:31,278 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 15:44:49,823 INFO [train.py:1146] (1/4) Epoch 9, validation: loss=0.21, simple_loss=0.2947, pruned_loss=0.04621, ctc_loss=0.08206, over 944034.00 frames.
+2024-08-25 15:44:49,823 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 15:44:55,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=106208.0, ans=0.125
+2024-08-25 15:45:14,958 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.59 vs. limit=15.0
+2024-08-25 15:45:15,527 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.154e+02 2.510e+02 2.953e+02 5.707e+02, threshold=5.019e+02, percent-clipped=2.0
+2024-08-25 15:46:09,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=106314.66666666667, ans=0.025
+2024-08-25 15:46:32,383 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff2.min_abs, batch_count=106421.33333333333, ans=0.1
+2024-08-25 15:46:36,875 INFO [train.py:1114] (1/4) Epoch 9, batch 50, loss[loss=0.2392, simple_loss=0.2857, pruned_loss=0.0697, ctc_loss=0.1332, over 19686.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3095, pruned_loss=0.07777, ctc_loss=0.1472, over 845285.85 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:46:41,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=106474.66666666667, ans=0.2
+2024-08-25 15:46:43,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106474.66666666667, ans=0.1
+2024-08-25 15:46:46,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=106474.66666666667, ans=0.2
+2024-08-25 15:46:55,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=106528.0, ans=0.025
+2024-08-25 15:46:59,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=106581.33333333333, ans=0.04949747468305833
+2024-08-25 15:47:02,442 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.83 vs. limit=15.0
+2024-08-25 15:47:27,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=106634.66666666667, ans=0.125
+2024-08-25 15:47:33,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=106688.0, ans=0.0
+2024-08-25 15:47:44,289 INFO [train.py:1114] (1/4) Epoch 9, batch 100, loss[loss=0.2289, simple_loss=0.2801, pruned_loss=0.06495, ctc_loss=0.1194, over 19705.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3082, pruned_loss=0.07651, ctc_loss=0.1437, over 1499557.75 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:47:44,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=106741.33333333333, ans=0.125
+2024-08-25 15:47:51,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=106741.33333333333, ans=0.2
+2024-08-25 15:48:09,484 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.842e+02 2.163e+02 2.785e+02 4.838e+02, threshold=4.326e+02, percent-clipped=0.0
+2024-08-25 15:48:10,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=106848.0, ans=0.2
+2024-08-25 15:48:41,459 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=107008.0, ans=0.125
+2024-08-25 15:48:41,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=107008.0, ans=0.2
+2024-08-25 15:48:42,144 INFO [train.py:1114] (1/4) Epoch 9, batch 150, loss[loss=0.2183, simple_loss=0.2755, pruned_loss=0.05778, ctc_loss=0.1136, over 19740.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3053, pruned_loss=0.07502, ctc_loss=0.1411, over 2028994.73 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:48:48,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=107008.0, ans=0.125
+2024-08-25 15:48:55,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=107061.33333333333, ans=0.0
+2024-08-25 15:49:02,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=107061.33333333333, ans=0.125
+2024-08-25 15:49:03,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107061.33333333333, ans=0.1
+2024-08-25 15:49:04,672 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=5.060e-03
+2024-08-25 15:49:04,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=107061.33333333333, ans=0.125
+2024-08-25 15:49:18,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=107168.0, ans=0.125
+2024-08-25 15:49:23,364 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.27 vs. limit=15.0
+2024-08-25 15:49:34,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=107221.33333333333, ans=0.2
+2024-08-25 15:49:41,043 INFO [train.py:1114] (1/4) Epoch 9, batch 200, loss[loss=0.2935, simple_loss=0.3268, pruned_loss=0.0939, ctc_loss=0.1809, over 18126.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3029, pruned_loss=0.07325, ctc_loss=0.1378, over 2436455.84 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:49:43,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=107274.66666666667, ans=0.0
+2024-08-25 15:49:47,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=107274.66666666667, ans=0.025
+2024-08-25 15:50:00,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=107328.0, ans=0.125
+2024-08-25 15:50:03,408 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.21 vs. limit=15.0
+2024-08-25 15:50:06,165 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.799e+02 2.039e+02 2.617e+02 5.282e+02, threshold=4.078e+02, percent-clipped=1.0
+2024-08-25 15:51:02,163 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.11 vs. limit=15.0
+2024-08-25 15:51:11,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=107488.0, ans=0.95
+2024-08-25 15:51:17,119 INFO [train.py:1114] (1/4) Epoch 9, batch 250, loss[loss=0.2388, simple_loss=0.298, pruned_loss=0.0661, ctc_loss=0.1186, over 19416.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3031, pruned_loss=0.0735, ctc_loss=0.138, over 2756445.89 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:51:18,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107541.33333333333, ans=0.1
+2024-08-25 15:51:39,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-08-25 15:51:42,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=107648.0, ans=0.125
+2024-08-25 15:51:54,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=107701.33333333333, ans=0.125
+2024-08-25 15:51:54,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=107701.33333333333, ans=0.125
+2024-08-25 15:52:15,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-25 15:52:18,771 INFO [train.py:1114] (1/4) Epoch 9, batch 300, loss[loss=0.2977, simple_loss=0.3352, pruned_loss=0.0955, ctc_loss=0.1732, over 19512.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3032, pruned_loss=0.07355, ctc_loss=0.138, over 3000901.66 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:52:24,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=107808.0, ans=0.05
+2024-08-25 15:52:24,719 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=107808.0, ans=0.0
+2024-08-25 15:52:26,709 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.05 vs. limit=10.0
+2024-08-25 15:52:31,560 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=107861.33333333333, ans=0.0
+2024-08-25 15:52:32,846 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-08-25 15:52:38,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107861.33333333333, ans=0.125
+2024-08-25 15:52:40,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=107861.33333333333, ans=0.0
+2024-08-25 15:52:47,052 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 1.831e+02 2.248e+02 2.885e+02 5.251e+02, threshold=4.495e+02, percent-clipped=2.0
+2024-08-25 15:52:50,267 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-25 15:52:57,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-25 15:53:02,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=107968.0, ans=0.5
+2024-08-25 15:53:18,334 INFO [train.py:1114] (1/4) Epoch 9, batch 350, loss[loss=0.2022, simple_loss=0.2609, pruned_loss=0.05194, ctc_loss=0.09918, over 19739.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3038, pruned_loss=0.074, ctc_loss=0.1389, over 3191307.58 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:53:18,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=108074.66666666667, ans=0.0
+2024-08-25 15:53:26,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=108074.66666666667, ans=0.07
+2024-08-25 15:53:39,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=108128.0, ans=0.125
+2024-08-25 15:53:54,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=108234.66666666667, ans=0.0
+2024-08-25 15:53:56,191 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=108234.66666666667, ans=0.2
+2024-08-25 15:53:56,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=108234.66666666667, ans=0.125
+2024-08-25 15:54:04,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=108288.0, ans=0.0
+2024-08-25 15:54:14,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.55 vs. limit=12.0
+2024-08-25 15:54:14,907 INFO [train.py:1114] (1/4) Epoch 9, batch 400, loss[loss=0.2737, simple_loss=0.3192, pruned_loss=0.08289, ctc_loss=0.156, over 19488.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3034, pruned_loss=0.07378, ctc_loss=0.1387, over 3343094.27 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:54:43,459 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.039e+02 2.514e+02 3.062e+02 4.428e+02, threshold=5.028e+02, percent-clipped=0.0
+2024-08-25 15:54:43,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=108448.0, ans=0.0
+2024-08-25 15:55:10,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=108554.66666666667, ans=0.2
+2024-08-25 15:55:16,267 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:55:18,509 INFO [train.py:1114] (1/4) Epoch 9, batch 450, loss[loss=0.2339, simple_loss=0.2964, pruned_loss=0.06145, ctc_loss=0.1211, over 19607.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.303, pruned_loss=0.07341, ctc_loss=0.1377, over 3449872.69 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:55:38,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108661.33333333333, ans=0.125
+2024-08-25 15:58:58,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=108821.33333333333, ans=0.125
+2024-08-25 15:59:00,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108821.33333333333, ans=0.125
+2024-08-25 15:59:07,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=108821.33333333333, ans=0.0
+2024-08-25 15:59:11,154 INFO [train.py:1114] (1/4) Epoch 9, batch 500, loss[loss=0.2624, simple_loss=0.3172, pruned_loss=0.07408, ctc_loss=0.1488, over 19651.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3017, pruned_loss=0.07239, ctc_loss=0.136, over 3546527.31 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:59:14,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=108874.66666666667, ans=0.025
+2024-08-25 15:59:20,665 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=108874.66666666667, ans=0.0
+2024-08-25 15:59:30,022 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:59:37,505 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.839e+02 2.298e+02 3.023e+02 4.931e+02, threshold=4.596e+02, percent-clipped=0.0
+2024-08-25 15:59:42,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=108981.33333333333, ans=0.125
+2024-08-25 15:59:46,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109034.66666666667, ans=0.1
+2024-08-25 15:59:48,794 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.47 vs. limit=10.0
+2024-08-25 16:00:08,629 INFO [train.py:1114] (1/4) Epoch 9, batch 550, loss[loss=0.2878, simple_loss=0.3341, pruned_loss=0.08796, ctc_loss=0.164, over 19302.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3019, pruned_loss=0.07262, ctc_loss=0.1364, over 3609589.52 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:00:15,227 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.64 vs. limit=22.5
+2024-08-25 16:00:32,653 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.90 vs. limit=15.0
+2024-08-25 16:00:42,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=109248.0, ans=22.5
+2024-08-25 16:00:44,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=109301.33333333333, ans=0.2
+2024-08-25 16:00:47,199 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.61 vs. limit=6.0
+2024-08-25 16:01:12,449 INFO [train.py:1114] (1/4) Epoch 9, batch 600, loss[loss=0.2614, simple_loss=0.3091, pruned_loss=0.07841, ctc_loss=0.1425, over 19431.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3017, pruned_loss=0.07246, ctc_loss=0.1362, over 3666994.55 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:01:29,379 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.63 vs. limit=15.0
+2024-08-25 16:01:44,300 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.91 vs. limit=15.0
+2024-08-25 16:01:51,491 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.960e+02 2.208e+02 2.721e+02 5.490e+02, threshold=4.416e+02, percent-clipped=2.0
+2024-08-25 16:01:52,830 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=109514.66666666667, ans=0.0
+2024-08-25 16:02:24,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=109568.0, ans=0.125
+2024-08-25 16:02:27,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=109568.0, ans=0.025
+2024-08-25 16:02:37,824 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.34 vs. limit=15.0
+2024-08-25 16:02:47,616 INFO [train.py:1114] (1/4) Epoch 9, batch 650, loss[loss=0.2303, simple_loss=0.2919, pruned_loss=0.06099, ctc_loss=0.117, over 19762.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3011, pruned_loss=0.07211, ctc_loss=0.1357, over 3717055.50 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:02:48,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=109674.66666666667, ans=0.0
+2024-08-25 16:03:47,849 INFO [train.py:1114] (1/4) Epoch 9, batch 700, loss[loss=0.2317, simple_loss=0.2908, pruned_loss=0.06299, ctc_loss=0.1163, over 19718.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3019, pruned_loss=0.07255, ctc_loss=0.1365, over 3748739.61 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:03:50,711 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.69 vs. limit=15.0
+2024-08-25 16:03:53,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=109941.33333333333, ans=0.125
+2024-08-25 16:04:03,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109994.66666666667, ans=0.1
+2024-08-25 16:04:10,817 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=15.12 vs. limit=22.5
+2024-08-25 16:04:14,378 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 1.949e+02 2.382e+02 2.859e+02 4.618e+02, threshold=4.764e+02, percent-clipped=1.0
+2024-08-25 16:04:22,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110101.33333333333, ans=0.0
+2024-08-25 16:04:37,581 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.00 vs. limit=22.5
+2024-08-25 16:04:41,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=110154.66666666667, ans=0.125
+2024-08-25 16:04:44,748 INFO [train.py:1114] (1/4) Epoch 9, batch 750, loss[loss=0.2462, simple_loss=0.3002, pruned_loss=0.07074, ctc_loss=0.1265, over 19491.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3006, pruned_loss=0.07191, ctc_loss=0.1351, over 3774634.43 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:04:59,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110261.33333333333, ans=0.1
+2024-08-25 16:05:20,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110314.66666666667, ans=0.1
+2024-08-25 16:05:23,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.86 vs. limit=15.0
+2024-08-25 16:05:31,846 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=110368.0, ans=0.125
+2024-08-25 16:05:44,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=110421.33333333333, ans=0.025
+2024-08-25 16:05:47,597 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.17 vs. limit=15.0
+2024-08-25 16:05:48,070 INFO [train.py:1114] (1/4) Epoch 9, batch 800, loss[loss=0.2709, simple_loss=0.2978, pruned_loss=0.08998, ctc_loss=0.1601, over 19420.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3003, pruned_loss=0.07183, ctc_loss=0.1349, over 3795830.93 frames. ], batch size: 48, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:06:00,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=110528.0, ans=0.125
+2024-08-25 16:06:11,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=110581.33333333333, ans=0.05
+2024-08-25 16:06:14,966 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.861e+02 2.104e+02 2.558e+02 4.618e+02, threshold=4.207e+02, percent-clipped=0.0
+2024-08-25 16:06:28,561 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=110634.66666666667, ans=0.125
+2024-08-25 16:06:28,935 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.06 vs. limit=22.5
+2024-08-25 16:06:31,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=110634.66666666667, ans=0.025
+2024-08-25 16:06:47,179 INFO [train.py:1114] (1/4) Epoch 9, batch 850, loss[loss=0.2594, simple_loss=0.3096, pruned_loss=0.07626, ctc_loss=0.1416, over 19651.00 frames. ], tot_loss[loss=0.25, simple_loss=0.301, pruned_loss=0.07228, ctc_loss=0.136, over 3815143.33 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:06:50,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=110741.33333333333, ans=0.0
+2024-08-25 16:07:08,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110794.66666666667, ans=0.1
+2024-08-25 16:07:09,304 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=110848.0, ans=0.125
+2024-08-25 16:07:21,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=110901.33333333333, ans=0.125
+2024-08-25 16:08:42,724 INFO [train.py:1114] (1/4) Epoch 9, batch 900, loss[loss=0.2403, simple_loss=0.2827, pruned_loss=0.07241, ctc_loss=0.1327, over 19821.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3011, pruned_loss=0.07271, ctc_loss=0.1366, over 3820006.16 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:09:12,335 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.982e+02 2.328e+02 2.784e+02 5.806e+02, threshold=4.657e+02, percent-clipped=1.0
+2024-08-25 16:09:47,298 INFO [train.py:1114] (1/4) Epoch 9, batch 950, loss[loss=0.2206, simple_loss=0.2814, pruned_loss=0.05783, ctc_loss=0.1105, over 19494.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3014, pruned_loss=0.07302, ctc_loss=0.1374, over 3822352.50 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:10:14,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111381.33333333333, ans=0.1
+2024-08-25 16:10:34,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=111488.0, ans=0.0
+2024-08-25 16:10:38,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=111488.0, ans=0.125
+2024-08-25 16:10:43,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111488.0, ans=0.1
+2024-08-25 16:10:45,137 INFO [train.py:1114] (1/4) Epoch 9, batch 1000, loss[loss=0.2708, simple_loss=0.3142, pruned_loss=0.08277, ctc_loss=0.1546, over 19843.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3027, pruned_loss=0.07379, ctc_loss=0.1385, over 3816887.10 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:11:09,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=111648.0, ans=0.125
+2024-08-25 16:11:13,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.864e+02 2.156e+02 2.793e+02 4.751e+02, threshold=4.311e+02, percent-clipped=1.0
+2024-08-25 16:11:38,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111754.66666666667, ans=0.125
+2024-08-25 16:11:45,640 INFO [train.py:1114] (1/4) Epoch 9, batch 1050, loss[loss=0.2487, simple_loss=0.3096, pruned_loss=0.06659, ctc_loss=0.1368, over 19826.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3026, pruned_loss=0.07415, ctc_loss=0.1395, over 3823253.42 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:11:53,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=111808.0, ans=0.0
+2024-08-25 16:12:02,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=111861.33333333333, ans=0.2
+2024-08-25 16:12:17,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=111914.66666666667, ans=0.0
+2024-08-25 16:12:23,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=111968.0, ans=0.0
+2024-08-25 16:12:23,994 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=111968.0, ans=0.025
+2024-08-25 16:12:26,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111968.0, ans=0.1
+2024-08-25 16:12:51,803 INFO [train.py:1114] (1/4) Epoch 9, batch 1100, loss[loss=0.2276, simple_loss=0.2856, pruned_loss=0.06084, ctc_loss=0.1197, over 19600.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3016, pruned_loss=0.07332, ctc_loss=0.1381, over 3830916.43 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:13:03,151 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.88 vs. limit=22.5
+2024-08-25 16:13:19,826 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 1.820e+02 2.090e+02 2.645e+02 4.523e+02, threshold=4.179e+02, percent-clipped=2.0
+2024-08-25 16:13:37,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=112234.66666666667, ans=0.2
+2024-08-25 16:13:48,994 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=112288.0, ans=0.025
+2024-08-25 16:13:50,954 INFO [train.py:1114] (1/4) Epoch 9, batch 1150, loss[loss=0.2117, simple_loss=0.2806, pruned_loss=0.05205, ctc_loss=0.09674, over 19561.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3014, pruned_loss=0.07318, ctc_loss=0.1379, over 3829625.20 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:13:57,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=112341.33333333333, ans=0.2
+2024-08-25 16:14:12,459 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=112394.66666666667, ans=0.0
+2024-08-25 16:14:19,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112448.0, ans=0.125
+2024-08-25 16:14:42,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=112554.66666666667, ans=0.125
+2024-08-25 16:14:44,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=112554.66666666667, ans=0.0
+2024-08-25 16:14:51,113 INFO [train.py:1114] (1/4) Epoch 9, batch 1200, loss[loss=0.2432, simple_loss=0.3057, pruned_loss=0.06484, ctc_loss=0.1275, over 19844.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3017, pruned_loss=0.07308, ctc_loss=0.1378, over 3825238.18 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:15:53,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=112714.66666666667, ans=22.5
+2024-08-25 16:15:59,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=112714.66666666667, ans=0.0
+2024-08-25 16:16:01,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=112714.66666666667, ans=0.125
+2024-08-25 16:16:05,752 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.875e+02 2.166e+02 2.598e+02 4.323e+02, threshold=4.331e+02, percent-clipped=2.0
+2024-08-25 16:16:17,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=112768.0, ans=0.125
+2024-08-25 16:16:18,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=112768.0, ans=0.125
+2024-08-25 16:16:21,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=112768.0, ans=0.09899494936611666
+2024-08-25 16:16:22,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=112768.0, ans=0.0
+2024-08-25 16:16:39,520 INFO [train.py:1114] (1/4) Epoch 9, batch 1250, loss[loss=0.2428, simple_loss=0.3024, pruned_loss=0.06553, ctc_loss=0.1302, over 19502.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3014, pruned_loss=0.07238, ctc_loss=0.1365, over 3844066.73 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:16:43,613 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.01 vs. limit=15.0
+2024-08-25 16:17:34,483 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113088.0, ans=0.1
+2024-08-25 16:17:40,919 INFO [train.py:1114] (1/4) Epoch 9, batch 1300, loss[loss=0.2666, simple_loss=0.3186, pruned_loss=0.07834, ctc_loss=0.1451, over 18755.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3003, pruned_loss=0.07192, ctc_loss=0.1356, over 3847285.77 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:17:48,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113141.33333333333, ans=0.125
+2024-08-25 16:17:55,481 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.81 vs. limit=10.0
+2024-08-25 16:18:04,485 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.67 vs. limit=15.0
+2024-08-25 16:18:05,568 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.29 vs. limit=12.0
+2024-08-25 16:18:08,518 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 1.959e+02 2.315e+02 2.984e+02 4.812e+02, threshold=4.630e+02, percent-clipped=1.0
+2024-08-25 16:18:13,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=113248.0, ans=0.125
+2024-08-25 16:18:15,039 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.68 vs. limit=10.0
+2024-08-25 16:18:30,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=113354.66666666667, ans=0.2
+2024-08-25 16:18:37,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=113354.66666666667, ans=0.125
+2024-08-25 16:18:39,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=113354.66666666667, ans=0.125
+2024-08-25 16:18:42,118 INFO [train.py:1114] (1/4) Epoch 9, batch 1350, loss[loss=0.2575, simple_loss=0.3113, pruned_loss=0.07477, ctc_loss=0.1354, over 19776.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3002, pruned_loss=0.07177, ctc_loss=0.1352, over 3858500.78 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:18:49,261 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=113408.0, ans=0.0
+2024-08-25 16:19:00,304 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.90 vs. limit=22.5
+2024-08-25 16:19:03,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113461.33333333333, ans=0.1
+2024-08-25 16:19:15,421 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.37 vs. limit=15.0
+2024-08-25 16:19:21,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=113568.0, ans=0.2
+2024-08-25 16:19:28,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=113621.33333333333, ans=0.2
+2024-08-25 16:19:32,391 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.25 vs. limit=15.0
+2024-08-25 16:19:38,873 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=113674.66666666667, ans=0.2
+2024-08-25 16:19:40,005 INFO [train.py:1114] (1/4) Epoch 9, batch 1400, loss[loss=0.2267, simple_loss=0.2724, pruned_loss=0.06722, ctc_loss=0.1163, over 19683.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3, pruned_loss=0.07185, ctc_loss=0.1353, over 3865413.39 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:19:49,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=113674.66666666667, ans=0.0
+2024-08-25 16:20:07,559 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.860e+02 2.127e+02 2.545e+02 4.134e+02, threshold=4.253e+02, percent-clipped=0.0
+2024-08-25 16:20:09,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=113781.33333333333, ans=0.05
+2024-08-25 16:20:27,260 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:20:32,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=113888.0, ans=0.5
+2024-08-25 16:20:43,001 INFO [train.py:1114] (1/4) Epoch 9, batch 1450, loss[loss=0.2587, simple_loss=0.3196, pruned_loss=0.07284, ctc_loss=0.1306, over 19683.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3009, pruned_loss=0.07212, ctc_loss=0.1355, over 3862231.39 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:21:02,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=113994.66666666667, ans=0.125
+2024-08-25 16:21:25,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=114101.33333333333, ans=0.125
+2024-08-25 16:21:26,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=114101.33333333333, ans=0.125
+2024-08-25 16:21:45,884 INFO [train.py:1114] (1/4) Epoch 9, batch 1500, loss[loss=0.237, simple_loss=0.3039, pruned_loss=0.06191, ctc_loss=0.1157, over 19577.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3017, pruned_loss=0.0722, ctc_loss=0.1356, over 3862086.48 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:21:46,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=114208.0, ans=0.5
+2024-08-25 16:22:15,428 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.928e+02 2.180e+02 2.740e+02 4.350e+02, threshold=4.360e+02, percent-clipped=2.0
+2024-08-25 16:22:37,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=114421.33333333333, ans=0.125
+2024-08-25 16:22:42,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=114421.33333333333, ans=0.025
+2024-08-25 16:22:45,650 INFO [train.py:1114] (1/4) Epoch 9, batch 1550, loss[loss=0.2578, simple_loss=0.3118, pruned_loss=0.07365, ctc_loss=0.1412, over 19601.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3021, pruned_loss=0.07263, ctc_loss=0.1364, over 3845598.20 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-25 16:22:47,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=114474.66666666667, ans=0.1
+2024-08-25 16:22:53,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=114474.66666666667, ans=0.025
+2024-08-25 16:23:03,563 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.65 vs. limit=22.5
+2024-08-25 16:23:06,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=114528.0, ans=0.0
+2024-08-25 16:23:08,087 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.38 vs. limit=15.0
+2024-08-25 16:23:21,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=114634.66666666667, ans=0.2
+2024-08-25 16:23:27,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-25 16:23:40,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=114688.0, ans=0.125
+2024-08-25 16:23:47,226 INFO [train.py:1114] (1/4) Epoch 9, batch 1600, loss[loss=0.2453, simple_loss=0.3024, pruned_loss=0.06816, ctc_loss=0.1299, over 19845.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.302, pruned_loss=0.07258, ctc_loss=0.1365, over 3835446.47 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-25 16:23:57,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=114741.33333333333, ans=0.0
+2024-08-25 16:24:04,220 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.13 vs. limit=15.0
+2024-08-25 16:24:06,536 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.74 vs. limit=15.0
+2024-08-25 16:24:16,807 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.930e+02 2.504e+02 3.084e+02 5.673e+02, threshold=5.009e+02, percent-clipped=4.0
+2024-08-25 16:24:27,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=114901.33333333333, ans=0.125
+2024-08-25 16:24:27,945 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.77 vs. limit=10.0
+2024-08-25 16:24:32,424 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.90 vs. limit=6.0
+2024-08-25 16:24:46,351 INFO [train.py:1114] (1/4) Epoch 9, batch 1650, loss[loss=0.2183, simple_loss=0.2869, pruned_loss=0.05438, ctc_loss=0.1021, over 19634.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3025, pruned_loss=0.07308, ctc_loss=0.1374, over 3831847.39 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:25:08,162 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=115114.66666666667, ans=0.2
+2024-08-25 16:25:20,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=115114.66666666667, ans=0.2
+2024-08-25 16:25:22,857 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.74 vs. limit=15.0
+2024-08-25 16:25:29,480 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:25:30,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=115168.0, ans=0.1
+2024-08-25 16:25:39,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=115221.33333333333, ans=0.125
+2024-08-25 16:25:40,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=115221.33333333333, ans=0.95
+2024-08-25 16:25:41,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=115221.33333333333, ans=0.09899494936611666
+2024-08-25 16:25:45,147 INFO [train.py:1114] (1/4) Epoch 9, batch 1700, loss[loss=0.2428, simple_loss=0.2858, pruned_loss=0.07353, ctc_loss=0.1317, over 19663.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3017, pruned_loss=0.07225, ctc_loss=0.1358, over 3846475.32 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:25:59,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115328.0, ans=0.1
+2024-08-25 16:26:05,117 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.70 vs. limit=22.5
+2024-08-25 16:26:10,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115381.33333333333, ans=0.1
+2024-08-25 16:26:11,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=115381.33333333333, ans=0.125
+2024-08-25 16:26:13,053 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.773e+02 1.969e+02 2.283e+02 4.673e+02, threshold=3.938e+02, percent-clipped=0.0
+2024-08-25 16:26:25,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=115434.66666666667, ans=0.125
+2024-08-25 16:26:41,722 INFO [train.py:1114] (1/4) Epoch 9, batch 1750, loss[loss=0.19, simple_loss=0.2488, pruned_loss=0.04818, ctc_loss=0.08713, over 19655.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3008, pruned_loss=0.07202, ctc_loss=0.1353, over 3851275.88 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:26:47,474 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.03 vs. limit=10.0
+2024-08-25 16:28:05,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=115754.66666666667, ans=0.0
+2024-08-25 16:28:06,518 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.55 vs. limit=15.0
+2024-08-25 16:28:12,437 INFO [train.py:1114] (1/4) Epoch 9, batch 1800, loss[loss=0.2444, simple_loss=0.3069, pruned_loss=0.0664, ctc_loss=0.1231, over 19616.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3013, pruned_loss=0.07239, ctc_loss=0.1359, over 3855165.41 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:28:23,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=115808.0, ans=0.025
+2024-08-25 16:28:46,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=115914.66666666667, ans=0.125
+2024-08-25 16:28:48,999 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 1.840e+02 2.097e+02 2.711e+02 4.220e+02, threshold=4.193e+02, percent-clipped=2.0
+2024-08-25 16:29:16,782 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:29:17,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=116021.33333333333, ans=0.1
+2024-08-25 16:29:25,117 INFO [train.py:1114] (1/4) Epoch 9, batch 1850, loss[loss=0.2755, simple_loss=0.3211, pruned_loss=0.08298, ctc_loss=0.16, over 19587.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3005, pruned_loss=0.07193, ctc_loss=0.135, over 3857559.76 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:29:29,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=116074.66666666667, ans=0.125
+2024-08-25 16:29:35,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=116128.0, ans=0.125
+2024-08-25 16:29:42,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=116128.0, ans=0.125
+2024-08-25 16:30:18,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=116128.0, ans=0.125
+2024-08-25 16:30:20,335 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.86 vs. limit=22.5
+2024-08-25 16:30:37,269 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=116234.66666666667, ans=0.125
+2024-08-25 16:30:42,100 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=116234.66666666667, ans=0.025
+2024-08-25 16:30:52,566 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.51 vs. limit=10.0
+2024-08-25 16:30:56,366 INFO [train.py:1114] (1/4) Epoch 9, batch 1900, loss[loss=0.2302, simple_loss=0.2956, pruned_loss=0.05962, ctc_loss=0.114, over 19648.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3011, pruned_loss=0.07188, ctc_loss=0.1348, over 3862589.14 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:30:58,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=116341.33333333333, ans=0.2
+2024-08-25 16:30:59,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=116341.33333333333, ans=0.125
+2024-08-25 16:32:00,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=116341.33333333333, ans=0.025
+2024-08-25 16:32:18,083 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.48 vs. limit=15.0
+2024-08-25 16:32:21,937 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.810e+02 2.075e+02 2.674e+02 4.757e+02, threshold=4.150e+02, percent-clipped=3.0
+2024-08-25 16:32:28,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=116501.33333333333, ans=0.0
+2024-08-25 16:33:06,104 INFO [train.py:1114] (1/4) Epoch 9, batch 1950, loss[loss=0.2129, simple_loss=0.2767, pruned_loss=0.05359, ctc_loss=0.1046, over 19604.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3022, pruned_loss=0.07224, ctc_loss=0.1356, over 3871366.78 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:33:15,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=116608.0, ans=10.0
+2024-08-25 16:33:19,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=116661.33333333333, ans=0.125
+2024-08-25 16:33:23,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=116661.33333333333, ans=0.125
+2024-08-25 16:33:29,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=116714.66666666667, ans=0.125
+2024-08-25 16:33:32,222 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.26 vs. limit=12.0
+2024-08-25 16:33:46,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=116768.0, ans=0.2
+2024-08-25 16:33:46,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=116768.0, ans=0.125
+2024-08-25 16:33:58,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=116821.33333333333, ans=0.1
+2024-08-25 16:34:02,732 INFO [train.py:1114] (1/4) Epoch 9, batch 2000, loss[loss=0.2129, simple_loss=0.2698, pruned_loss=0.05707, ctc_loss=0.105, over 19628.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3026, pruned_loss=0.07235, ctc_loss=0.1359, over 3855299.30 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:34:25,709 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=116981.33333333333, ans=0.0
+2024-08-25 16:34:30,973 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 1.787e+02 2.122e+02 2.673e+02 5.196e+02, threshold=4.245e+02, percent-clipped=10.0
+2024-08-25 16:34:34,428 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=116981.33333333333, ans=0.1
+2024-08-25 16:34:39,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=117034.66666666667, ans=0.0
+2024-08-25 16:34:43,529 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.70 vs. limit=10.0
+2024-08-25 16:34:46,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=117034.66666666667, ans=0.1
+2024-08-25 16:34:47,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=117088.0, ans=0.0
+2024-08-25 16:34:47,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=117088.0, ans=0.04949747468305833
+2024-08-25 16:34:52,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=117088.0, ans=0.2
+2024-08-25 16:34:59,590 INFO [train.py:1114] (1/4) Epoch 9, batch 2050, loss[loss=0.2023, simple_loss=0.2571, pruned_loss=0.05367, ctc_loss=0.1005, over 19729.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3019, pruned_loss=0.07227, ctc_loss=0.1359, over 3851579.32 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:35:39,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=117301.33333333333, ans=0.125
+2024-08-25 16:35:43,038 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.11 vs. limit=22.5
+2024-08-25 16:36:57,817 INFO [train.py:1114] (1/4) Epoch 9, batch 2100, loss[loss=0.2674, simple_loss=0.3169, pruned_loss=0.07894, ctc_loss=0.1498, over 19762.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3013, pruned_loss=0.072, ctc_loss=0.1353, over 3857799.30 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:37:00,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=117408.0, ans=0.125
+2024-08-25 16:37:38,955 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.824e+02 2.012e+02 2.446e+02 4.504e+02, threshold=4.025e+02, percent-clipped=2.0
+2024-08-25 16:37:43,079 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=10.00 vs. limit=15.0
+2024-08-25 16:37:43,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=117568.0, ans=0.125
+2024-08-25 16:37:47,057 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.28 vs. limit=15.0
+2024-08-25 16:37:50,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=117568.0, ans=0.125
+2024-08-25 16:37:57,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=117621.33333333333, ans=0.125
+2024-08-25 16:38:01,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=117621.33333333333, ans=0.125
+2024-08-25 16:38:02,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=117621.33333333333, ans=0.025
+2024-08-25 16:38:04,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=117621.33333333333, ans=10.0
+2024-08-25 16:38:05,188 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.12 vs. limit=15.0
+2024-08-25 16:38:06,800 INFO [train.py:1114] (1/4) Epoch 9, batch 2150, loss[loss=0.2221, simple_loss=0.2786, pruned_loss=0.06116, ctc_loss=0.1085, over 19567.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.2998, pruned_loss=0.07105, ctc_loss=0.1334, over 3867762.36 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:38:12,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=117674.66666666667, ans=0.125
+2024-08-25 16:38:21,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=117728.0, ans=0.125
+2024-08-25 16:38:24,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=117728.0, ans=22.5
+2024-08-25 16:38:25,796 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=117728.0, ans=0.2
+2024-08-25 16:38:32,511 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.66 vs. limit=10.0
+2024-08-25 16:39:02,677 INFO [train.py:1114] (1/4) Epoch 9, batch 2200, loss[loss=0.2625, simple_loss=0.3215, pruned_loss=0.07289, ctc_loss=0.144, over 19601.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3, pruned_loss=0.07126, ctc_loss=0.1336, over 3866602.78 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:39:10,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=117941.33333333333, ans=0.5
+2024-08-25 16:39:19,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=117994.66666666667, ans=0.125
+2024-08-25 16:39:24,109 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.18 vs. limit=15.0
+2024-08-25 16:39:30,922 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.840e+02 2.263e+02 2.882e+02 6.553e+02, threshold=4.526e+02, percent-clipped=9.0
+2024-08-25 16:39:30,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=118048.0, ans=0.125
+2024-08-25 16:39:35,702 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.92 vs. limit=15.0
+2024-08-25 16:39:44,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=118101.33333333333, ans=0.125
+2024-08-25 16:39:48,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=118154.66666666667, ans=0.125
+2024-08-25 16:39:49,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=118154.66666666667, ans=0.2
+2024-08-25 16:39:59,169 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.49 vs. limit=15.0
+2024-08-25 16:39:59,973 INFO [train.py:1114] (1/4) Epoch 9, batch 2250, loss[loss=0.2663, simple_loss=0.3161, pruned_loss=0.0779, ctc_loss=0.1521, over 19614.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.2998, pruned_loss=0.07104, ctc_loss=0.1334, over 3867023.90 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:40:00,343 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.77 vs. limit=22.5
+2024-08-25 16:40:12,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=118261.33333333333, ans=0.125
+2024-08-25 16:40:20,939 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.58 vs. limit=15.0
+2024-08-25 16:40:26,577 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.05 vs. limit=15.0
+2024-08-25 16:40:30,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=118314.66666666667, ans=10.0
+2024-08-25 16:40:46,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=118421.33333333333, ans=0.0
+2024-08-25 16:40:53,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=118474.66666666667, ans=0.125
+2024-08-25 16:40:54,819 INFO [train.py:1114] (1/4) Epoch 9, batch 2300, loss[loss=0.236, simple_loss=0.2865, pruned_loss=0.06772, ctc_loss=0.1255, over 19496.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.2986, pruned_loss=0.07092, ctc_loss=0.1332, over 3860668.95 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:41:24,912 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.864e+02 2.265e+02 3.023e+02 5.230e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 16:41:42,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=118688.0, ans=0.2
+2024-08-25 16:41:51,067 INFO [train.py:1114] (1/4) Epoch 9, batch 2350, loss[loss=0.2722, simple_loss=0.3179, pruned_loss=0.083, ctc_loss=0.1515, over 19669.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.2986, pruned_loss=0.07112, ctc_loss=0.1335, over 3863748.78 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:41:59,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=118741.33333333333, ans=0.125
+2024-08-25 16:42:00,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=118741.33333333333, ans=0.125
+2024-08-25 16:42:13,997 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.24 vs. limit=22.5
+2024-08-25 16:42:50,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=118954.66666666667, ans=0.0
+2024-08-25 16:43:01,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=119008.0, ans=0.1
+2024-08-25 16:43:01,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=119008.0, ans=0.125
+2024-08-25 16:43:02,772 INFO [train.py:1114] (1/4) Epoch 9, batch 2400, loss[loss=0.2995, simple_loss=0.3378, pruned_loss=0.0943, ctc_loss=0.1814, over 19240.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.301, pruned_loss=0.07205, ctc_loss=0.1353, over 3857864.72 frames. ], batch size: 71, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:43:08,424 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.07 vs. limit=10.0
+2024-08-25 16:43:32,522 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.930e+02 2.301e+02 2.799e+02 4.768e+02, threshold=4.601e+02, percent-clipped=1.0
+2024-08-25 16:43:33,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=119114.66666666667, ans=0.025
+2024-08-25 16:43:40,224 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=119168.0, ans=0.2
+2024-08-25 16:43:59,357 INFO [train.py:1114] (1/4) Epoch 9, batch 2450, loss[loss=0.3035, simple_loss=0.3287, pruned_loss=0.101, ctc_loss=0.1905, over 13701.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3052, pruned_loss=0.07535, ctc_loss=0.1417, over 3731728.38 frames. ], batch size: 141, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:44:02,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.50 vs. limit=15.0
+2024-08-25 16:44:15,624 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.86 vs. limit=6.0
+2024-08-25 16:44:24,390 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.30 vs. limit=22.5
+2024-08-25 16:44:27,733 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.96 vs. limit=15.0
+2024-08-25 16:45:25,699 INFO [train.py:1114] (1/4) Epoch 10, batch 0, loss[loss=0.2229, simple_loss=0.2741, pruned_loss=0.06217, ctc_loss=0.1185, over 19818.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2741, pruned_loss=0.06217, ctc_loss=0.1185, over 19818.00 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 32.0
+2024-08-25 16:45:25,699 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 16:45:33,814 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.6490, 4.1308, 2.4382, 1.9018], device='cuda:1')
+2024-08-25 16:46:37,100 INFO [train.py:1146] (1/4) Epoch 10, validation: loss=0.2041, simple_loss=0.2903, pruned_loss=0.04356, ctc_loss=0.07708, over 944034.00 frames.
+2024-08-25 16:46:37,101 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 16:46:37,989 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.05 vs. limit=22.5
+2024-08-25 16:46:41,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=119482.66666666667, ans=0.0
+2024-08-25 16:47:12,492 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.82 vs. limit=15.0
+2024-08-25 16:47:17,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=119642.66666666667, ans=0.025
+2024-08-25 16:47:46,597 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 1.955e+02 2.116e+02 2.362e+02 4.652e+02, threshold=4.231e+02, percent-clipped=1.0
+2024-08-25 16:48:28,303 INFO [train.py:1114] (1/4) Epoch 10, batch 50, loss[loss=0.2105, simple_loss=0.274, pruned_loss=0.05325, ctc_loss=0.1014, over 19716.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3022, pruned_loss=0.0727, ctc_loss=0.1375, over 843671.78 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:48:57,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=119749.33333333333, ans=0.125
+2024-08-25 16:50:42,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=119909.33333333333, ans=0.025
+2024-08-25 16:50:49,240 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=119909.33333333333, ans=0.125
+2024-08-25 16:51:13,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=119962.66666666667, ans=0.0
+2024-08-25 16:51:24,525 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=119962.66666666667, ans=0.125
+2024-08-25 16:51:25,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=119962.66666666667, ans=0.0
+2024-08-25 16:52:34,142 INFO [train.py:1114] (1/4) Epoch 10, batch 100, loss[loss=0.2142, simple_loss=0.2754, pruned_loss=0.05569, ctc_loss=0.1039, over 19715.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3037, pruned_loss=0.07275, ctc_loss=0.1374, over 1499199.10 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:53:09,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=120069.33333333333, ans=0.025
+2024-08-25 16:53:36,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=120176.0, ans=0.5
+2024-08-25 16:53:42,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=120176.0, ans=0.125
+2024-08-25 16:53:46,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.27 vs. limit=22.5
+2024-08-25 16:53:47,835 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 1.798e+02 2.253e+02 2.860e+02 4.134e+02, threshold=4.507e+02, percent-clipped=0.0
+2024-08-25 16:54:30,607 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.43 vs. limit=15.0
+2024-08-25 16:54:33,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=120229.33333333333, ans=0.125
+2024-08-25 16:54:47,470 INFO [train.py:1114] (1/4) Epoch 10, batch 150, loss[loss=0.2182, simple_loss=0.2682, pruned_loss=0.06154, ctc_loss=0.1127, over 19685.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3003, pruned_loss=0.07088, ctc_loss=0.1335, over 2028038.53 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:54:49,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-25 16:54:56,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=120282.66666666667, ans=0.0
+2024-08-25 16:54:58,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-25 16:54:58,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=120282.66666666667, ans=0.2
+2024-08-25 16:55:23,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120389.33333333333, ans=0.125
+2024-08-25 16:55:40,802 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.05 vs. limit=15.0
+2024-08-25 16:55:52,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=120496.0, ans=0.125
+2024-08-25 16:56:01,811 INFO [train.py:1114] (1/4) Epoch 10, batch 200, loss[loss=0.2619, simple_loss=0.316, pruned_loss=0.07478, ctc_loss=0.1453, over 18236.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.2986, pruned_loss=0.06962, ctc_loss=0.1311, over 2435602.70 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:56:11,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=120549.33333333333, ans=0.125
+2024-08-25 16:56:20,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=120602.66666666667, ans=0.0
+2024-08-25 16:57:32,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=120656.0, ans=0.0
+2024-08-25 16:57:54,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=120656.0, ans=0.125
+2024-08-25 16:57:56,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=120656.0, ans=0.1
+2024-08-25 16:57:58,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.28 vs. limit=22.5
+2024-08-25 16:57:59,392 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.24 vs. limit=15.0
+2024-08-25 16:58:07,754 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.824e+02 2.064e+02 2.548e+02 6.143e+02, threshold=4.128e+02, percent-clipped=2.0
+2024-08-25 16:58:30,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=120816.0, ans=0.125
+2024-08-25 16:58:32,999 INFO [train.py:1114] (1/4) Epoch 10, batch 250, loss[loss=0.278, simple_loss=0.32, pruned_loss=0.08522, ctc_loss=0.1638, over 19382.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.2987, pruned_loss=0.06987, ctc_loss=0.1314, over 2755365.69 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:58:34,439 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=120816.0, ans=0.125
+2024-08-25 16:59:04,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=120869.33333333333, ans=0.0
+2024-08-25 16:59:05,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-25 16:59:42,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=120922.66666666667, ans=0.125
+2024-08-25 17:00:08,737 INFO [train.py:1114] (1/4) Epoch 10, batch 300, loss[loss=0.2551, simple_loss=0.3098, pruned_loss=0.07359, ctc_loss=0.1329, over 19524.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2972, pruned_loss=0.06877, ctc_loss=0.1295, over 3000066.51 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:00:10,163 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.82 vs. limit=15.0
+2024-08-25 17:00:24,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=121136.0, ans=0.125
+2024-08-25 17:00:42,563 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=121189.33333333333, ans=0.125
+2024-08-25 17:00:43,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=121189.33333333333, ans=0.0
+2024-08-25 17:00:52,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=121242.66666666667, ans=0.0
+2024-08-25 17:01:01,167 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.908e+02 2.186e+02 2.769e+02 4.118e+02, threshold=4.372e+02, percent-clipped=0.0
+2024-08-25 17:01:01,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=121242.66666666667, ans=0.0
+2024-08-25 17:01:10,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=121296.0, ans=0.2
+2024-08-25 17:02:40,277 INFO [train.py:1114] (1/4) Epoch 10, batch 350, loss[loss=0.237, simple_loss=0.2795, pruned_loss=0.07122, ctc_loss=0.1303, over 19801.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2975, pruned_loss=0.06914, ctc_loss=0.1299, over 3189437.63 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:02:51,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=121402.66666666667, ans=0.2
+2024-08-25 17:02:55,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=121402.66666666667, ans=0.2
+2024-08-25 17:03:22,378 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=121509.33333333333, ans=0.09899494936611666
+2024-08-25 17:03:40,547 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=121562.66666666667, ans=0.2
+2024-08-25 17:03:42,423 INFO [train.py:1114] (1/4) Epoch 10, batch 400, loss[loss=0.2558, simple_loss=0.3063, pruned_loss=0.07464, ctc_loss=0.1398, over 19497.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2966, pruned_loss=0.06872, ctc_loss=0.1291, over 3341910.85 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:03:42,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=121616.0, ans=0.0
+2024-08-25 17:03:44,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=121616.0, ans=0.025
+2024-08-25 17:03:49,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=121616.0, ans=0.125
+2024-08-25 17:04:00,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=121669.33333333333, ans=0.0
+2024-08-25 17:04:24,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=121776.0, ans=0.125
+2024-08-25 17:04:30,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=121776.0, ans=0.2
+2024-08-25 17:04:33,757 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 1.874e+02 2.151e+02 2.761e+02 4.102e+02, threshold=4.302e+02, percent-clipped=0.0
+2024-08-25 17:04:35,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=121776.0, ans=0.0
+2024-08-25 17:04:50,467 INFO [train.py:1114] (1/4) Epoch 10, batch 450, loss[loss=0.22, simple_loss=0.2902, pruned_loss=0.05444, ctc_loss=0.1021, over 19622.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.2965, pruned_loss=0.06871, ctc_loss=0.1289, over 3450128.20 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:07:20,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=121936.0, ans=0.2
+2024-08-25 17:08:26,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=122042.66666666667, ans=0.125
+2024-08-25 17:09:02,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=122096.0, ans=0.0
+2024-08-25 17:09:04,039 INFO [train.py:1114] (1/4) Epoch 10, batch 500, loss[loss=0.2701, simple_loss=0.32, pruned_loss=0.08149, ctc_loss=0.1432, over 19674.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2965, pruned_loss=0.06884, ctc_loss=0.1292, over 3545386.11 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:09:21,037 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=122149.33333333333, ans=0.1
+2024-08-25 17:09:33,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=122202.66666666667, ans=0.125
+2024-08-25 17:09:57,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=122256.0, ans=0.0
+2024-08-25 17:10:35,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=122309.33333333333, ans=0.5
+2024-08-25 17:10:36,234 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.797e+02 2.290e+02 2.870e+02 3.920e+02, threshold=4.579e+02, percent-clipped=0.0
+2024-08-25 17:10:37,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=122309.33333333333, ans=0.0
+2024-08-25 17:10:46,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-25 17:10:51,436 INFO [train.py:1114] (1/4) Epoch 10, batch 550, loss[loss=0.2679, simple_loss=0.3077, pruned_loss=0.0832, ctc_loss=0.1541, over 19232.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2967, pruned_loss=0.069, ctc_loss=0.1293, over 3607483.00 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:11:02,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=122416.0, ans=0.125
+2024-08-25 17:11:55,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=122469.33333333333, ans=0.2
+2024-08-25 17:11:57,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=122469.33333333333, ans=0.2
+2024-08-25 17:13:39,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=122522.66666666667, ans=0.125
+2024-08-25 17:13:50,632 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:13:59,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=122576.0, ans=0.125
+2024-08-25 17:14:10,536 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=122629.33333333333, ans=0.125
+2024-08-25 17:14:11,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=122629.33333333333, ans=0.0
+2024-08-25 17:14:20,771 INFO [train.py:1114] (1/4) Epoch 10, batch 600, loss[loss=0.2591, simple_loss=0.3094, pruned_loss=0.07619, ctc_loss=0.141, over 19386.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2971, pruned_loss=0.06919, ctc_loss=0.1296, over 3665325.29 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:14:32,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=122736.0, ans=0.1
+2024-08-25 17:14:33,905 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.41 vs. limit=15.0
+2024-08-25 17:14:50,614 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.41 vs. limit=6.0
+2024-08-25 17:14:59,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=122842.66666666667, ans=0.125
+2024-08-25 17:15:07,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=122842.66666666667, ans=0.0
+2024-08-25 17:15:08,643 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 2.061e+02 2.496e+02 4.365e+02, threshold=4.122e+02, percent-clipped=0.0
+2024-08-25 17:15:20,720 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.56 vs. limit=15.0
+2024-08-25 17:15:23,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=122949.33333333333, ans=0.125
+2024-08-25 17:15:24,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.39 vs. limit=10.0
+2024-08-25 17:15:24,790 INFO [train.py:1114] (1/4) Epoch 10, batch 650, loss[loss=0.2314, simple_loss=0.2901, pruned_loss=0.06289, ctc_loss=0.1171, over 19773.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2966, pruned_loss=0.06878, ctc_loss=0.1292, over 3716018.99 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:15:25,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=122949.33333333333, ans=0.2
+2024-08-25 17:15:36,651 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=123002.66666666667, ans=0.125
+2024-08-25 17:15:41,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=123002.66666666667, ans=0.035
+2024-08-25 17:15:42,707 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.44 vs. limit=12.0
+2024-08-25 17:15:43,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=123002.66666666667, ans=0.125
+2024-08-25 17:15:45,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=123002.66666666667, ans=0.1
+2024-08-25 17:15:54,354 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.04 vs. limit=10.0
+2024-08-25 17:15:59,167 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.53 vs. limit=6.0
+2024-08-25 17:16:00,447 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=10.07 vs. limit=15.0
+2024-08-25 17:16:34,659 INFO [train.py:1114] (1/4) Epoch 10, batch 700, loss[loss=0.2321, simple_loss=0.2855, pruned_loss=0.06492, ctc_loss=0.1221, over 19726.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2966, pruned_loss=0.06843, ctc_loss=0.1285, over 3747424.30 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:17:56,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=123322.66666666667, ans=0.125
+2024-08-25 17:18:04,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=123376.0, ans=0.125
+2024-08-25 17:18:09,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=123376.0, ans=0.2
+2024-08-25 17:18:13,483 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 1.934e+02 2.276e+02 3.026e+02 5.626e+02, threshold=4.552e+02, percent-clipped=3.0
+2024-08-25 17:18:17,285 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=123429.33333333333, ans=0.0
+2024-08-25 17:18:28,248 INFO [train.py:1114] (1/4) Epoch 10, batch 750, loss[loss=0.2353, simple_loss=0.2904, pruned_loss=0.06547, ctc_loss=0.1229, over 19519.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2958, pruned_loss=0.068, ctc_loss=0.1276, over 3773650.40 frames. ], batch size: 54, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:18:31,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=123482.66666666667, ans=0.125
+2024-08-25 17:18:35,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=123482.66666666667, ans=0.025
+2024-08-25 17:18:54,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=123589.33333333333, ans=0.125
+2024-08-25 17:19:09,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=123642.66666666667, ans=0.125
+2024-08-25 17:19:18,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=123642.66666666667, ans=0.125
+2024-08-25 17:19:28,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=123696.0, ans=0.0
+2024-08-25 17:19:32,713 INFO [train.py:1114] (1/4) Epoch 10, batch 800, loss[loss=0.2082, simple_loss=0.2606, pruned_loss=0.05684, ctc_loss=0.1051, over 19818.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2953, pruned_loss=0.06783, ctc_loss=0.1272, over 3795006.20 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:00,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=123802.66666666667, ans=0.125
+2024-08-25 17:20:07,574 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=123802.66666666667, ans=0.2
+2024-08-25 17:20:19,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=123856.0, ans=0.125
+2024-08-25 17:20:21,950 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.73 vs. limit=15.0
+2024-08-25 17:20:24,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=123909.33333333333, ans=0.125
+2024-08-25 17:20:33,020 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.887e+02 2.136e+02 2.736e+02 3.984e+02, threshold=4.273e+02, percent-clipped=0.0
+2024-08-25 17:20:42,220 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=123962.66666666667, ans=0.125
+2024-08-25 17:20:47,943 INFO [train.py:1114] (1/4) Epoch 10, batch 850, loss[loss=0.2518, simple_loss=0.3066, pruned_loss=0.07221, ctc_loss=0.1314, over 19645.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.2954, pruned_loss=0.06787, ctc_loss=0.1273, over 3814424.14 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:49,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=124016.0, ans=0.1
+2024-08-25 17:20:59,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=124069.33333333333, ans=0.0
+2024-08-25 17:21:04,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=124069.33333333333, ans=0.125
+2024-08-25 17:21:20,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=124122.66666666667, ans=0.0
+2024-08-25 17:22:12,208 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.24 vs. limit=15.0
+2024-08-25 17:22:13,184 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:22:26,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=124229.33333333333, ans=0.125
+2024-08-25 17:22:28,575 INFO [train.py:1114] (1/4) Epoch 10, batch 900, loss[loss=0.2091, simple_loss=0.2682, pruned_loss=0.05457, ctc_loss=0.1021, over 19420.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2954, pruned_loss=0.06815, ctc_loss=0.1277, over 3819291.77 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:22:31,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=124282.66666666667, ans=0.1
+2024-08-25 17:22:51,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=124389.33333333333, ans=0.0
+2024-08-25 17:23:13,945 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.854e+02 2.167e+02 2.763e+02 5.395e+02, threshold=4.333e+02, percent-clipped=2.0
+2024-08-25 17:23:30,292 INFO [train.py:1114] (1/4) Epoch 10, batch 950, loss[loss=0.2282, simple_loss=0.28, pruned_loss=0.0645, ctc_loss=0.1188, over 19481.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2956, pruned_loss=0.06811, ctc_loss=0.128, over 3822031.47 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:23:30,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=124549.33333333333, ans=0.2
+2024-08-25 17:23:45,483 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=124602.66666666667, ans=0.125
+2024-08-25 17:24:09,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=124709.33333333333, ans=0.0
+2024-08-25 17:24:12,316 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-25 17:24:21,153 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.15 vs. limit=6.0
+2024-08-25 17:24:32,280 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=5.051e-03
+2024-08-25 17:24:34,415 INFO [train.py:1114] (1/4) Epoch 10, batch 1000, loss[loss=0.2264, simple_loss=0.2794, pruned_loss=0.06252, ctc_loss=0.121, over 19854.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2964, pruned_loss=0.06878, ctc_loss=0.1294, over 3817081.22 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:24:44,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=124816.0, ans=0.0
+2024-08-25 17:25:06,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=124922.66666666667, ans=0.1
+2024-08-25 17:25:11,543 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.28 vs. limit=15.0
+2024-08-25 17:25:18,038 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 1.797e+02 2.069e+02 2.553e+02 4.130e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-25 17:25:20,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=125029.33333333333, ans=0.125
+2024-08-25 17:25:24,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=125029.33333333333, ans=0.125
+2024-08-25 17:25:28,839 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff2.min_abs, batch_count=125029.33333333333, ans=0.1
+2024-08-25 17:25:33,285 INFO [train.py:1114] (1/4) Epoch 10, batch 1050, loss[loss=0.2983, simple_loss=0.3448, pruned_loss=0.09171, ctc_loss=0.1711, over 19837.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.296, pruned_loss=0.06877, ctc_loss=0.1294, over 3823053.40 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:26:12,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=125242.66666666667, ans=0.125
+2024-08-25 17:26:15,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=125242.66666666667, ans=0.125
+2024-08-25 17:26:17,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=125242.66666666667, ans=0.0
+2024-08-25 17:26:24,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=125296.0, ans=0.2
+2024-08-25 17:26:32,047 INFO [train.py:1114] (1/4) Epoch 10, batch 1100, loss[loss=0.2461, simple_loss=0.2965, pruned_loss=0.07154, ctc_loss=0.1316, over 19580.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2952, pruned_loss=0.0683, ctc_loss=0.1285, over 3829407.05 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:26:45,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=125349.33333333333, ans=0.0
+2024-08-25 17:26:46,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=125402.66666666667, ans=0.2
+2024-08-25 17:26:49,984 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.95 vs. limit=15.0
+2024-08-25 17:27:18,171 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.787e+02 2.060e+02 2.560e+02 4.808e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 17:27:18,406 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:27:32,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=125616.0, ans=0.125
+2024-08-25 17:27:33,340 INFO [train.py:1114] (1/4) Epoch 10, batch 1150, loss[loss=0.2285, simple_loss=0.2857, pruned_loss=0.06201, ctc_loss=0.1181, over 19601.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.296, pruned_loss=0.06892, ctc_loss=0.1295, over 3829021.94 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:28:06,568 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.68 vs. limit=10.0
+2024-08-25 17:28:08,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=125722.66666666667, ans=0.125
+2024-08-25 17:28:21,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=125776.0, ans=0.025
+2024-08-25 17:28:25,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=125776.0, ans=0.0
+2024-08-25 17:28:27,945 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.34 vs. limit=15.0
+2024-08-25 17:28:44,405 INFO [train.py:1114] (1/4) Epoch 10, batch 1200, loss[loss=0.2733, simple_loss=0.322, pruned_loss=0.08155, ctc_loss=0.1539, over 19845.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2969, pruned_loss=0.06934, ctc_loss=0.1303, over 3823979.26 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:29:01,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=125936.0, ans=0.0
+2024-08-25 17:29:11,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=125989.33333333333, ans=0.125
+2024-08-25 17:29:12,175 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.43 vs. limit=15.0
+2024-08-25 17:29:16,278 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=125989.33333333333, ans=0.0
+2024-08-25 17:29:22,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=126042.66666666667, ans=0.2
+2024-08-25 17:29:29,218 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=126042.66666666667, ans=0.125
+2024-08-25 17:29:30,099 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.823e+02 2.047e+02 2.358e+02 4.051e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 17:29:32,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=126096.0, ans=0.125
+2024-08-25 17:29:45,829 INFO [train.py:1114] (1/4) Epoch 10, batch 1250, loss[loss=0.2401, simple_loss=0.3031, pruned_loss=0.06496, ctc_loss=0.118, over 19506.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.2968, pruned_loss=0.06858, ctc_loss=0.1291, over 3841312.74 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:30:16,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=126256.0, ans=15.0
+2024-08-25 17:30:33,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=126256.0, ans=0.2
+2024-08-25 17:30:44,584 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=126309.33333333333, ans=0.0
+2024-08-25 17:30:59,772 INFO [train.py:1114] (1/4) Epoch 10, batch 1300, loss[loss=0.2753, simple_loss=0.3181, pruned_loss=0.08284, ctc_loss=0.1672, over 18932.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.2959, pruned_loss=0.06819, ctc_loss=0.1285, over 3844627.38 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:31:18,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=126416.0, ans=0.1
+2024-08-25 17:31:35,261 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=126416.0, ans=0.125
+2024-08-25 17:31:36,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=126469.33333333333, ans=0.2
+2024-08-25 17:31:37,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=126469.33333333333, ans=0.125
+2024-08-25 17:32:13,113 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.900e+02 2.303e+02 2.970e+02 5.096e+02, threshold=4.606e+02, percent-clipped=7.0
+2024-08-25 17:32:28,195 INFO [train.py:1114] (1/4) Epoch 10, batch 1350, loss[loss=0.2751, simple_loss=0.3231, pruned_loss=0.08164, ctc_loss=0.1597, over 19782.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2956, pruned_loss=0.06801, ctc_loss=0.1279, over 3855221.60 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:32:38,714 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=126736.0, ans=0.125
+2024-08-25 17:32:54,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=126789.33333333333, ans=0.0
+2024-08-25 17:32:57,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=126789.33333333333, ans=0.2
+2024-08-25 17:33:06,011 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=18.94 vs. limit=15.0
+2024-08-25 17:33:08,320 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.70 vs. limit=15.0
+2024-08-25 17:33:13,752 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.39 vs. limit=15.0
+2024-08-25 17:33:26,056 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.41 vs. limit=15.0
+2024-08-25 17:33:30,484 INFO [train.py:1114] (1/4) Epoch 10, batch 1400, loss[loss=0.2103, simple_loss=0.2589, pruned_loss=0.05847, ctc_loss=0.1119, over 19658.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2951, pruned_loss=0.0678, ctc_loss=0.1275, over 3861749.60 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:33:35,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=126949.33333333333, ans=0.125
+2024-08-25 17:34:03,914 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.94 vs. limit=15.0
+2024-08-25 17:34:33,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=127109.33333333333, ans=0.0
+2024-08-25 17:34:34,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127109.33333333333, ans=0.1
+2024-08-25 17:34:42,447 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.856e+02 2.167e+02 2.631e+02 4.500e+02, threshold=4.335e+02, percent-clipped=0.0
+2024-08-25 17:34:43,793 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=127109.33333333333, ans=0.2
+2024-08-25 17:34:55,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=127162.66666666667, ans=0.025
+2024-08-25 17:34:56,031 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.50 vs. limit=15.0
+2024-08-25 17:35:02,134 INFO [train.py:1114] (1/4) Epoch 10, batch 1450, loss[loss=0.2321, simple_loss=0.3034, pruned_loss=0.05878, ctc_loss=0.108, over 19707.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2959, pruned_loss=0.06806, ctc_loss=0.1278, over 3860715.75 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:35:14,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=127269.33333333333, ans=0.5
+2024-08-25 17:35:20,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=127269.33333333333, ans=0.125
+2024-08-25 17:35:26,477 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=127322.66666666667, ans=0.125
+2024-08-25 17:35:33,967 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.35 vs. limit=15.0
+2024-08-25 17:35:38,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127376.0, ans=0.1
+2024-08-25 17:36:02,111 INFO [train.py:1114] (1/4) Epoch 10, batch 1500, loss[loss=0.2482, simple_loss=0.3115, pruned_loss=0.06753, ctc_loss=0.1246, over 19599.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2965, pruned_loss=0.0683, ctc_loss=0.1283, over 3861030.36 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:36:07,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=127482.66666666667, ans=0.025
+2024-08-25 17:36:07,819 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.15 vs. limit=15.0
+2024-08-25 17:36:11,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=127482.66666666667, ans=0.0
+2024-08-25 17:36:13,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=127536.0, ans=0.025
+2024-08-25 17:36:17,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=127536.0, ans=0.0
+2024-08-25 17:36:23,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=127536.0, ans=0.125
+2024-08-25 17:36:55,677 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 1.877e+02 2.186e+02 2.626e+02 4.478e+02, threshold=4.372e+02, percent-clipped=1.0
+2024-08-25 17:37:19,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127696.0, ans=0.1
+2024-08-25 17:37:24,240 INFO [train.py:1114] (1/4) Epoch 10, batch 1550, loss[loss=0.2678, simple_loss=0.3177, pruned_loss=0.07883, ctc_loss=0.1506, over 19591.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2966, pruned_loss=0.06844, ctc_loss=0.1289, over 3846715.28 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-25 17:37:31,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=127749.33333333333, ans=0.2
+2024-08-25 17:37:36,374 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.01 vs. limit=22.5
+2024-08-25 17:37:40,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=127802.66666666667, ans=0.95
+2024-08-25 17:37:47,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=127856.0, ans=0.04949747468305833
+2024-08-25 17:37:55,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=127856.0, ans=0.2
+2024-08-25 17:38:02,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=127856.0, ans=0.0
+2024-08-25 17:38:54,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=127909.33333333333, ans=0.0
+2024-08-25 17:39:39,004 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=127909.33333333333, ans=0.2
+2024-08-25 17:39:39,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=127909.33333333333, ans=0.2
+2024-08-25 17:39:51,755 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.29 vs. limit=22.5
+2024-08-25 17:41:06,797 INFO [train.py:1114] (1/4) Epoch 10, batch 1600, loss[loss=0.2487, simple_loss=0.3028, pruned_loss=0.07028, ctc_loss=0.1349, over 19843.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2972, pruned_loss=0.06905, ctc_loss=0.1302, over 3837124.92 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:42:54,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=128122.66666666667, ans=0.2
+2024-08-25 17:42:55,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=128122.66666666667, ans=0.0
+2024-08-25 17:42:57,612 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.55 vs. limit=5.0
+2024-08-25 17:43:24,241 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 1.849e+02 2.080e+02 2.733e+02 5.175e+02, threshold=4.161e+02, percent-clipped=4.0
+2024-08-25 17:43:25,961 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.68 vs. limit=15.0
+2024-08-25 17:43:52,646 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=128229.33333333333, ans=0.125
+2024-08-25 17:43:55,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=128229.33333333333, ans=0.0
+2024-08-25 17:44:00,797 INFO [train.py:1114] (1/4) Epoch 10, batch 1650, loss[loss=0.235, simple_loss=0.3015, pruned_loss=0.06035, ctc_loss=0.1193, over 19672.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2969, pruned_loss=0.06887, ctc_loss=0.1298, over 3833661.31 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:44:01,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=128282.66666666667, ans=0.0
+2024-08-25 17:44:13,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.29 vs. limit=12.0
+2024-08-25 17:44:37,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=128389.33333333333, ans=0.025
+2024-08-25 17:45:12,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=128496.0, ans=0.2
+2024-08-25 17:45:21,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=128549.33333333333, ans=0.125
+2024-08-25 17:45:46,331 INFO [train.py:1114] (1/4) Epoch 10, batch 1700, loss[loss=0.2091, simple_loss=0.2636, pruned_loss=0.05581, ctc_loss=0.1073, over 19655.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2962, pruned_loss=0.06837, ctc_loss=0.1286, over 3847897.17 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:45:51,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=128549.33333333333, ans=0.125
+2024-08-25 17:46:39,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=128602.66666666667, ans=0.125
+2024-08-25 17:47:00,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=128656.0, ans=0.0
+2024-08-25 17:47:11,338 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.773e+02 2.059e+02 2.527e+02 4.467e+02, threshold=4.119e+02, percent-clipped=1.0
+2024-08-25 17:48:11,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=128816.0, ans=0.025
+2024-08-25 17:48:12,458 INFO [train.py:1114] (1/4) Epoch 10, batch 1750, loss[loss=0.2173, simple_loss=0.2619, pruned_loss=0.06203, ctc_loss=0.1216, over 19670.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.296, pruned_loss=0.06853, ctc_loss=0.129, over 3851825.52 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:48:31,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=128869.33333333333, ans=0.025
+2024-08-25 17:48:42,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=128922.66666666667, ans=0.2
+2024-08-25 17:48:48,390 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=128976.0, ans=0.1
+2024-08-25 17:49:05,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=129029.33333333333, ans=0.125
+2024-08-25 17:49:11,938 INFO [train.py:1114] (1/4) Epoch 10, batch 1800, loss[loss=0.2361, simple_loss=0.2962, pruned_loss=0.06375, ctc_loss=0.1214, over 19616.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2962, pruned_loss=0.06847, ctc_loss=0.1287, over 3852281.16 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 17:49:16,500 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.52 vs. limit=10.0
+2024-08-25 17:49:16,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=129082.66666666667, ans=0.125
+2024-08-25 17:49:19,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=129082.66666666667, ans=0.125
+2024-08-25 17:49:22,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=129082.66666666667, ans=0.125
+2024-08-25 18:01:23,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=129189.33333333333, ans=0.125
+2024-08-25 18:11:17,736 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.930e+02 2.270e+02 3.115e+02 5.695e+02, threshold=4.540e+02, percent-clipped=10.0
+2024-08-25 18:13:37,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=129296.0, ans=0.0
+2024-08-25 18:14:47,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=129296.0, ans=0.125
+2024-08-25 18:19:59,304 INFO [train.py:1114] (1/4) Epoch 10, batch 1850, loss[loss=0.2254, simple_loss=0.2884, pruned_loss=0.05794, ctc_loss=0.1164, over 19591.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2953, pruned_loss=0.06822, ctc_loss=0.1279, over 3854580.27 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:28:36,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=129509.33333333333, ans=0.2
+2024-08-25 18:29:00,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=129509.33333333333, ans=15.0
+2024-08-25 18:32:37,432 INFO [train.py:1114] (1/4) Epoch 10, batch 1900, loss[loss=0.2592, simple_loss=0.3179, pruned_loss=0.07307, ctc_loss=0.136, over 19661.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.296, pruned_loss=0.06838, ctc_loss=0.1284, over 3860649.34 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:34:00,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=129616.0, ans=0.0
+2024-08-25 18:35:37,452 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.41 vs. limit=10.0
+2024-08-25 18:36:57,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=129776.0, ans=0.025
+2024-08-25 18:36:59,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=129776.0, ans=0.0
+2024-08-25 18:37:11,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=129776.0, ans=0.2
+2024-08-25 18:37:13,087 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.64 vs. limit=15.0
+2024-08-25 18:37:43,363 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.882e+02 2.156e+02 2.772e+02 4.689e+02, threshold=4.313e+02, percent-clipped=1.0
+2024-08-25 18:38:47,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=129829.33333333333, ans=0.025
+2024-08-25 18:38:48,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=129882.66666666667, ans=0.125
+2024-08-25 18:38:51,207 INFO [train.py:1114] (1/4) Epoch 10, batch 1950, loss[loss=0.2547, simple_loss=0.3037, pruned_loss=0.07466, ctc_loss=0.1412, over 19601.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.297, pruned_loss=0.06849, ctc_loss=0.1285, over 3869545.62 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:39:31,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=129882.66666666667, ans=0.125
+2024-08-25 18:39:32,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=129882.66666666667, ans=0.0
+2024-08-25 18:41:08,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=129989.33333333333, ans=10.0
+2024-08-25 18:41:23,045 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129989.33333333333, ans=0.1
+2024-08-25 18:41:31,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=129989.33333333333, ans=0.125
+2024-08-25 18:44:04,314 INFO [train.py:1114] (1/4) Epoch 10, batch 2000, loss[loss=0.2392, simple_loss=0.2839, pruned_loss=0.06995, ctc_loss=0.1362, over 19652.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2976, pruned_loss=0.06863, ctc_loss=0.1288, over 3854942.46 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:46:12,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=130256.0, ans=0.125
+2024-08-25 18:47:32,405 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.882e+02 2.262e+02 2.707e+02 4.864e+02, threshold=4.523e+02, percent-clipped=1.0
+2024-08-25 18:48:29,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=130362.66666666667, ans=0.125
+2024-08-25 18:48:39,777 INFO [train.py:1114] (1/4) Epoch 10, batch 2050, loss[loss=0.2379, simple_loss=0.2826, pruned_loss=0.06951, ctc_loss=0.1354, over 19689.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2968, pruned_loss=0.06852, ctc_loss=0.1287, over 3851297.82 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:48:39,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=130416.0, ans=0.0
+2024-08-25 18:50:32,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=130522.66666666667, ans=0.2
+2024-08-25 18:51:07,400 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.31 vs. limit=15.0
+2024-08-25 18:52:20,496 INFO [train.py:1114] (1/4) Epoch 10, batch 2100, loss[loss=0.254, simple_loss=0.3018, pruned_loss=0.0748, ctc_loss=0.1414, over 19773.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2957, pruned_loss=0.06764, ctc_loss=0.1273, over 3857717.85 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 16.0
+2024-08-25 18:52:24,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=130682.66666666667, ans=0.125
+2024-08-25 18:52:43,085 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.47 vs. limit=15.0
+2024-08-25 18:53:30,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=130789.33333333333, ans=0.1
+2024-08-25 18:53:31,504 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=130789.33333333333, ans=0.1
+2024-08-25 18:53:46,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=130842.66666666667, ans=0.125
+2024-08-25 18:53:51,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=130842.66666666667, ans=0.0
+2024-08-25 18:53:58,205 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.839e+02 2.296e+02 2.721e+02 6.154e+02, threshold=4.593e+02, percent-clipped=3.0
+2024-08-25 18:54:01,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=130896.0, ans=0.125
+2024-08-25 18:54:37,549 INFO [train.py:1114] (1/4) Epoch 10, batch 2150, loss[loss=0.2184, simple_loss=0.2745, pruned_loss=0.05858, ctc_loss=0.1127, over 19584.00 frames. ], tot_loss[loss=0.241, simple_loss=0.2954, pruned_loss=0.06777, ctc_loss=0.1275, over 3868234.63 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:55:07,830 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=131002.66666666667, ans=0.125
+2024-08-25 18:55:39,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=131056.0, ans=0.125
+2024-08-25 18:55:52,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=131109.33333333334, ans=0.0
+2024-08-25 18:56:32,988 INFO [train.py:1114] (1/4) Epoch 10, batch 2200, loss[loss=0.2446, simple_loss=0.3048, pruned_loss=0.06774, ctc_loss=0.1224, over 19586.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2949, pruned_loss=0.06724, ctc_loss=0.1268, over 3867086.24 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:56:42,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=131216.0, ans=0.2
+2024-08-25 18:57:12,714 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=131269.33333333334, ans=0.125
+2024-08-25 18:57:16,924 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=131322.66666666666, ans=0.2
+2024-08-25 18:57:32,410 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.85 vs. limit=5.0
+2024-08-25 18:57:51,458 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.773e+02 2.006e+02 2.540e+02 3.937e+02, threshold=4.013e+02, percent-clipped=0.0
+2024-08-25 18:57:57,013 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=6.0
+2024-08-25 18:58:00,612 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.61 vs. limit=10.0
+2024-08-25 18:58:07,778 INFO [train.py:1114] (1/4) Epoch 10, batch 2250, loss[loss=0.2676, simple_loss=0.3163, pruned_loss=0.07967, ctc_loss=0.1487, over 19612.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2954, pruned_loss=0.06746, ctc_loss=0.127, over 3866927.65 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:58:10,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=131482.66666666666, ans=0.0
+2024-08-25 18:58:17,086 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=131482.66666666666, ans=0.125
+2024-08-25 18:58:29,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=131536.0, ans=0.125
+2024-08-25 18:58:32,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=131589.33333333334, ans=0.2
+2024-08-25 18:58:32,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=131589.33333333334, ans=0.0
+2024-08-25 18:59:05,060 INFO [train.py:1114] (1/4) Epoch 10, batch 2300, loss[loss=0.2126, simple_loss=0.2733, pruned_loss=0.05439, ctc_loss=0.1077, over 19491.00 frames. ], tot_loss[loss=0.24, simple_loss=0.2947, pruned_loss=0.06733, ctc_loss=0.1266, over 3861842.14 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:59:09,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=131749.33333333334, ans=0.07
+2024-08-25 18:59:10,662 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 18:59:27,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.62 vs. limit=22.5
+2024-08-25 18:59:55,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=131909.33333333334, ans=0.125
+2024-08-25 19:00:00,732 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.815e+02 2.310e+02 2.961e+02 4.661e+02, threshold=4.621e+02, percent-clipped=5.0
+2024-08-25 19:00:03,086 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=131962.66666666666, ans=0.0
+2024-08-25 19:00:14,652 INFO [train.py:1114] (1/4) Epoch 10, batch 2350, loss[loss=0.2324, simple_loss=0.2927, pruned_loss=0.06164, ctc_loss=0.122, over 19659.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.2948, pruned_loss=0.06771, ctc_loss=0.1271, over 3863871.65 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:00:16,021 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=132016.0, ans=0.1
+2024-08-25 19:00:27,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=132069.33333333334, ans=0.125
+2024-08-25 19:00:35,524 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.95 vs. limit=10.0
+2024-08-25 19:00:37,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=132122.66666666666, ans=0.0
+2024-08-25 19:00:42,395 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.65 vs. limit=15.0
+2024-08-25 19:00:53,435 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.69 vs. limit=15.0
+2024-08-25 19:00:54,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=132176.0, ans=0.2
+2024-08-25 19:00:59,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=132229.33333333334, ans=0.025
+2024-08-25 19:01:00,049 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:01:01,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=132229.33333333334, ans=0.1
+2024-08-25 19:01:11,688 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.91 vs. limit=22.5
+2024-08-25 19:01:13,177 INFO [train.py:1114] (1/4) Epoch 10, batch 2400, loss[loss=0.2284, simple_loss=0.292, pruned_loss=0.06032, ctc_loss=0.1104, over 19163.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2961, pruned_loss=0.0679, ctc_loss=0.1274, over 3857574.56 frames. ], batch size: 71, lr: 1.46e-02, grad_scale: 32.0
+2024-08-25 19:01:15,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=132282.66666666666, ans=0.0
+2024-08-25 19:01:40,021 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.42 vs. limit=15.0
+2024-08-25 19:01:45,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132389.33333333334, ans=0.1
+2024-08-25 19:02:10,731 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.986e+02 2.279e+02 2.618e+02 8.799e+02, threshold=4.558e+02, percent-clipped=0.0
+2024-08-25 19:02:16,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=132496.0, ans=0.125
+2024-08-25 19:02:22,031 INFO [train.py:1114] (1/4) Epoch 10, batch 2450, loss[loss=0.3314, simple_loss=0.3434, pruned_loss=0.1161, ctc_loss=0.2181, over 13381.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.2999, pruned_loss=0.07092, ctc_loss=0.1333, over 3729699.44 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:02:28,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=132549.33333333334, ans=0.025
+2024-08-25 19:02:30,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=132549.33333333334, ans=0.0
+2024-08-25 19:02:48,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=132656.0, ans=0.125
+2024-08-25 19:04:28,599 INFO [train.py:1114] (1/4) Epoch 11, batch 0, loss[loss=0.2547, simple_loss=0.2951, pruned_loss=0.07833, ctc_loss=0.1444, over 19408.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.2951, pruned_loss=0.07833, ctc_loss=0.1444, over 19408.00 frames. ], batch size: 48, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:04:28,599 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 19:04:55,864 INFO [train.py:1146] (1/4) Epoch 11, validation: loss=0.2031, simple_loss=0.2887, pruned_loss=0.04339, ctc_loss=0.0768, over 944034.00 frames.
+2024-08-25 19:04:55,865 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 19:04:58,749 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.41 vs. limit=22.5
+2024-08-25 19:05:28,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132864.0, ans=0.1
+2024-08-25 19:05:29,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.38 vs. limit=22.5
+2024-08-25 19:05:42,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=132917.33333333334, ans=0.1
+2024-08-25 19:05:48,350 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=132917.33333333334, ans=0.07
+2024-08-25 19:06:02,240 INFO [train.py:1114] (1/4) Epoch 11, batch 50, loss[loss=0.2133, simple_loss=0.2663, pruned_loss=0.05749, ctc_loss=0.113, over 19716.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.2961, pruned_loss=0.06922, ctc_loss=0.1307, over 845449.42 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:06:03,361 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.050e+02 2.234e+02 2.552e+02 4.359e+02, threshold=4.468e+02, percent-clipped=1.0
+2024-08-25 19:06:09,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten.whitening_limit, batch_count=133024.0, ans=22.5
+2024-08-25 19:06:10,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=133024.0, ans=0.125
+2024-08-25 19:06:14,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=133024.0, ans=0.2
+2024-08-25 19:06:15,720 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.08 vs. limit=22.5
+2024-08-25 19:06:20,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=133077.33333333334, ans=0.0
+2024-08-25 19:06:26,021 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=133077.33333333334, ans=0.1
+2024-08-25 19:06:38,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=133130.66666666666, ans=0.125
+2024-08-25 19:06:39,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=133184.0, ans=0.0
+2024-08-25 19:06:40,080 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.71 vs. limit=22.5
+2024-08-25 19:06:44,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=133184.0, ans=0.125
+2024-08-25 19:07:26,793 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=133237.33333333334, ans=0.025
+2024-08-25 19:07:42,552 INFO [train.py:1114] (1/4) Epoch 11, batch 100, loss[loss=0.2158, simple_loss=0.282, pruned_loss=0.05434, ctc_loss=0.1023, over 19715.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2971, pruned_loss=0.06858, ctc_loss=0.129, over 1498424.97 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:07:45,029 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=133290.66666666666, ans=0.125
+2024-08-25 19:08:24,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=133450.66666666666, ans=0.0
+2024-08-25 19:08:26,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=133450.66666666666, ans=0.0
+2024-08-25 19:08:42,371 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.48 vs. limit=15.0
+2024-08-25 19:08:44,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=133450.66666666666, ans=0.1
+2024-08-25 19:08:59,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=133504.0, ans=0.125
+2024-08-25 19:09:00,386 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.99 vs. limit=15.0
+2024-08-25 19:09:10,085 INFO [train.py:1114] (1/4) Epoch 11, batch 150, loss[loss=0.2383, simple_loss=0.2836, pruned_loss=0.07108, ctc_loss=0.1271, over 19708.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2958, pruned_loss=0.06817, ctc_loss=0.1283, over 2026854.05 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:09:12,925 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.747e+02 2.015e+02 2.344e+02 3.708e+02, threshold=4.031e+02, percent-clipped=0.0
+2024-08-25 19:09:17,326 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.49 vs. limit=6.0
+2024-08-25 19:09:27,127 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=133610.66666666666, ans=0.0
+2024-08-25 19:09:48,923 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=133717.33333333334, ans=0.2
+2024-08-25 19:09:49,216 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.00 vs. limit=22.5
+2024-08-25 19:10:30,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=133770.66666666666, ans=0.125
+2024-08-25 19:10:34,570 INFO [train.py:1114] (1/4) Epoch 11, batch 200, loss[loss=0.2363, simple_loss=0.2972, pruned_loss=0.06337, ctc_loss=0.1214, over 18510.00 frames. ], tot_loss[loss=0.238, simple_loss=0.2931, pruned_loss=0.0664, ctc_loss=0.1251, over 2434621.63 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:10:35,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=133824.0, ans=0.0
+2024-08-25 19:11:14,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=133930.66666666666, ans=0.2
+2024-08-25 19:11:41,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=133984.0, ans=0.0
+2024-08-25 19:12:01,076 INFO [train.py:1114] (1/4) Epoch 11, batch 250, loss[loss=0.2616, simple_loss=0.3157, pruned_loss=0.07734, ctc_loss=0.1321, over 19382.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.2933, pruned_loss=0.06624, ctc_loss=0.1245, over 2754253.18 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:12:01,327 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=134090.66666666666, ans=0.1
+2024-08-25 19:12:02,124 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.716e+02 2.023e+02 2.469e+02 5.021e+02, threshold=4.046e+02, percent-clipped=3.0
+2024-08-25 19:12:29,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=134197.33333333334, ans=0.1
+2024-08-25 19:12:30,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=134197.33333333334, ans=0.09899494936611666
+2024-08-25 19:12:33,584 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=134197.33333333334, ans=0.0
+2024-08-25 19:12:36,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=134197.33333333334, ans=0.015
+2024-08-25 19:12:37,004 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=134197.33333333334, ans=0.125
+2024-08-25 19:12:37,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=134197.33333333334, ans=0.125
+2024-08-25 19:12:56,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=134304.0, ans=0.0
+2024-08-25 19:13:03,623 INFO [train.py:1114] (1/4) Epoch 11, batch 300, loss[loss=0.2452, simple_loss=0.2969, pruned_loss=0.07051, ctc_loss=0.1314, over 19510.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2925, pruned_loss=0.06553, ctc_loss=0.1232, over 3000172.55 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:13:03,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=134357.33333333334, ans=0.125
+2024-08-25 19:13:15,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=134357.33333333334, ans=0.2
+2024-08-25 19:13:53,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=134570.66666666666, ans=0.0
+2024-08-25 19:14:07,024 INFO [train.py:1114] (1/4) Epoch 11, batch 350, loss[loss=0.1997, simple_loss=0.2575, pruned_loss=0.05169, ctc_loss=0.09628, over 19724.00 frames. ], tot_loss[loss=0.237, simple_loss=0.293, pruned_loss=0.0658, ctc_loss=0.1234, over 3190539.03 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:14:08,113 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.838e+02 2.258e+02 2.898e+02 4.827e+02, threshold=4.516e+02, percent-clipped=2.0
+2024-08-25 19:14:43,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=134677.33333333334, ans=0.0
+2024-08-25 19:14:46,697 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.491e-01
+2024-08-25 19:15:15,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=134784.0, ans=0.125
+2024-08-25 19:15:47,923 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=134837.33333333334, ans=0.125
+2024-08-25 19:15:57,899 INFO [train.py:1114] (1/4) Epoch 11, batch 400, loss[loss=0.2351, simple_loss=0.2936, pruned_loss=0.06457, ctc_loss=0.1186, over 19513.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2927, pruned_loss=0.06543, ctc_loss=0.1232, over 3342563.84 frames. ], batch size: 54, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:16:06,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=134890.66666666666, ans=0.125
+2024-08-25 19:16:15,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=134944.0, ans=0.0
+2024-08-25 19:16:17,169 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.18 vs. limit=12.0
+2024-08-25 19:16:36,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=134997.33333333334, ans=0.1
+2024-08-25 19:16:40,395 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.95 vs. limit=6.0
+2024-08-25 19:16:41,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.49 vs. limit=12.0
+2024-08-25 19:16:42,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=134997.33333333334, ans=0.2
+2024-08-25 19:17:16,297 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.64 vs. limit=22.5
+2024-08-25 19:17:16,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=135104.0, ans=0.0
+2024-08-25 19:17:22,197 INFO [train.py:1114] (1/4) Epoch 11, batch 450, loss[loss=0.231, simple_loss=0.2928, pruned_loss=0.06168, ctc_loss=0.1145, over 19618.00 frames. ], tot_loss[loss=0.238, simple_loss=0.2934, pruned_loss=0.06633, ctc_loss=0.1248, over 3451323.42 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:17:31,724 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.841e+02 2.102e+02 2.681e+02 4.407e+02, threshold=4.204e+02, percent-clipped=0.0
+2024-08-25 19:18:04,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=135317.33333333334, ans=0.0
+2024-08-25 19:18:34,755 INFO [train.py:1114] (1/4) Epoch 11, batch 500, loss[loss=0.2569, simple_loss=0.3064, pruned_loss=0.07462, ctc_loss=0.1452, over 19714.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2924, pruned_loss=0.0658, ctc_loss=0.1238, over 3547378.58 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:18:36,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=135424.0, ans=0.0
+2024-08-25 19:19:12,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=135530.66666666666, ans=0.0
+2024-08-25 19:19:20,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=135530.66666666666, ans=0.0
+2024-08-25 19:19:33,602 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.56 vs. limit=15.0
+2024-08-25 19:19:48,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=135637.33333333334, ans=0.125
+2024-08-25 19:20:17,205 INFO [train.py:1114] (1/4) Epoch 11, batch 550, loss[loss=0.2685, simple_loss=0.3212, pruned_loss=0.07719, ctc_loss=0.1533, over 19374.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2925, pruned_loss=0.06582, ctc_loss=0.124, over 3609558.62 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:20:18,391 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.822e+02 2.069e+02 2.386e+02 4.149e+02, threshold=4.137e+02, percent-clipped=0.0
+2024-08-25 19:20:26,619 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.72 vs. limit=15.0
+2024-08-25 19:20:32,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-25 19:20:40,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=135744.0, ans=0.0
+2024-08-25 19:20:40,504 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=135744.0, ans=0.125
+2024-08-25 19:20:56,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=135797.33333333334, ans=0.0
+2024-08-25 19:21:09,695 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.24 vs. limit=6.0
+2024-08-25 19:21:19,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=135904.0, ans=0.0
+2024-08-25 19:21:28,055 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=135957.33333333334, ans=0.2
+2024-08-25 19:21:30,816 INFO [train.py:1114] (1/4) Epoch 11, batch 600, loss[loss=0.2788, simple_loss=0.3282, pruned_loss=0.08448, ctc_loss=0.1508, over 19334.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.2931, pruned_loss=0.06614, ctc_loss=0.1244, over 3665906.05 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:22:25,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=136010.66666666666, ans=0.2
+2024-08-25 19:22:26,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=136010.66666666666, ans=0.2
+2024-08-25 19:22:44,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=136064.0, ans=0.0
+2024-08-25 19:22:57,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=136117.33333333334, ans=0.0
+2024-08-25 19:23:10,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=136170.66666666666, ans=0.0
+2024-08-25 19:23:54,562 INFO [train.py:1114] (1/4) Epoch 11, batch 650, loss[loss=0.222, simple_loss=0.2936, pruned_loss=0.05525, ctc_loss=0.09964, over 19775.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2921, pruned_loss=0.06553, ctc_loss=0.123, over 3715925.17 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:23:55,633 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.913e+02 2.094e+02 2.738e+02 4.984e+02, threshold=4.187e+02, percent-clipped=5.0
+2024-08-25 19:23:58,441 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.16 vs. limit=22.5
+2024-08-25 19:24:08,412 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=14.27 vs. limit=15.0
+2024-08-25 19:24:27,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=136277.33333333334, ans=0.5
+2024-08-25 19:25:04,435 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.75 vs. limit=15.0
+2024-08-25 19:25:34,148 INFO [train.py:1114] (1/4) Epoch 11, batch 700, loss[loss=0.221, simple_loss=0.2774, pruned_loss=0.05971, ctc_loss=0.1129, over 19718.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2924, pruned_loss=0.06556, ctc_loss=0.1232, over 3747623.78 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:26:41,911 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.06 vs. limit=22.5
+2024-08-25 19:27:08,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=136597.33333333334, ans=0.0
+2024-08-25 19:27:38,370 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.06 vs. limit=15.0
+2024-08-25 19:27:47,162 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=136704.0, ans=0.1
+2024-08-25 19:28:07,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=136704.0, ans=0.0
+2024-08-25 19:28:09,983 INFO [train.py:1114] (1/4) Epoch 11, batch 750, loss[loss=0.2472, simple_loss=0.3048, pruned_loss=0.06943, ctc_loss=0.1267, over 19503.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2917, pruned_loss=0.06534, ctc_loss=0.1225, over 3774889.90 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:28:25,947 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.821e+02 2.028e+02 2.720e+02 4.524e+02, threshold=4.057e+02, percent-clipped=2.0
+2024-08-25 19:29:32,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=136864.0, ans=0.125
+2024-08-25 19:30:00,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=136917.33333333334, ans=0.025
+2024-08-25 19:30:15,273 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.66 vs. limit=15.0
+2024-08-25 19:30:52,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=136970.66666666666, ans=0.1
+2024-08-25 19:30:53,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=136970.66666666666, ans=0.125
+2024-08-25 19:30:55,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=136970.66666666666, ans=0.125
+2024-08-25 19:32:06,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=136970.66666666666, ans=0.125
+2024-08-25 19:32:08,680 INFO [train.py:1114] (1/4) Epoch 11, batch 800, loss[loss=0.2227, simple_loss=0.277, pruned_loss=0.06165, ctc_loss=0.1128, over 19782.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2922, pruned_loss=0.06554, ctc_loss=0.1231, over 3797096.71 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:32:08,923 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=137024.0, ans=0.0
+2024-08-25 19:32:24,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=137024.0, ans=0.035
+2024-08-25 19:32:54,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=137077.33333333334, ans=0.0
+2024-08-25 19:33:02,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.51 vs. limit=15.0
+2024-08-25 19:33:28,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137184.0, ans=0.1
+2024-08-25 19:33:49,189 INFO [train.py:1114] (1/4) Epoch 11, batch 850, loss[loss=0.2617, simple_loss=0.3119, pruned_loss=0.07766, ctc_loss=0.1403, over 19685.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.292, pruned_loss=0.06554, ctc_loss=0.1232, over 3816208.68 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:33:50,257 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.863e+02 2.065e+02 2.415e+02 4.305e+02, threshold=4.130e+02, percent-clipped=1.0
+2024-08-25 19:34:10,903 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.38 vs. limit=12.0
+2024-08-25 19:34:19,932 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.69 vs. limit=15.0
+2024-08-25 19:34:21,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=137397.33333333334, ans=0.125
+2024-08-25 19:34:30,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=137397.33333333334, ans=0.0
+2024-08-25 19:34:30,813 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=137397.33333333334, ans=0.125
+2024-08-25 19:34:31,871 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:34:33,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=137397.33333333334, ans=0.025
+2024-08-25 19:34:39,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=137450.66666666666, ans=0.0
+2024-08-25 19:34:49,049 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.56 vs. limit=12.0
+2024-08-25 19:35:04,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=137557.33333333334, ans=0.125
+2024-08-25 19:35:05,238 INFO [train.py:1114] (1/4) Epoch 11, batch 900, loss[loss=0.2189, simple_loss=0.2704, pruned_loss=0.06181, ctc_loss=0.1095, over 19413.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2921, pruned_loss=0.06587, ctc_loss=0.1238, over 3819692.85 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:35:09,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=137557.33333333334, ans=0.0
+2024-08-25 19:35:10,592 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:36:09,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137717.33333333334, ans=0.1
+2024-08-25 19:36:30,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_ff2.min_abs, batch_count=137770.66666666666, ans=0.1
+2024-08-25 19:37:18,561 INFO [train.py:1114] (1/4) Epoch 11, batch 950, loss[loss=0.2138, simple_loss=0.2739, pruned_loss=0.05619, ctc_loss=0.1032, over 19495.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2926, pruned_loss=0.06586, ctc_loss=0.1238, over 3821715.22 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:37:19,700 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.805e+02 2.081e+02 2.536e+02 4.211e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-25 19:38:02,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=137984.0, ans=0.125
+2024-08-25 19:38:06,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=137984.0, ans=0.125
+2024-08-25 19:38:33,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=138037.33333333334, ans=0.0
+2024-08-25 19:38:48,988 INFO [train.py:1114] (1/4) Epoch 11, batch 1000, loss[loss=0.2223, simple_loss=0.2878, pruned_loss=0.0568, ctc_loss=0.108, over 19861.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.2937, pruned_loss=0.06651, ctc_loss=0.1251, over 3818232.30 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:39:22,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=138144.0, ans=0.125
+2024-08-25 19:39:23,833 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.82 vs. limit=15.0
+2024-08-25 19:39:27,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=138197.33333333334, ans=0.0
+2024-08-25 19:40:14,844 INFO [train.py:1114] (1/4) Epoch 11, batch 1050, loss[loss=0.2418, simple_loss=0.2961, pruned_loss=0.06832, ctc_loss=0.1268, over 19836.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.2925, pruned_loss=0.06598, ctc_loss=0.1242, over 3825246.23 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:40:16,854 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.874e+02 2.329e+02 2.645e+02 4.211e+02, threshold=4.658e+02, percent-clipped=2.0
+2024-08-25 19:40:26,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=138410.66666666666, ans=10.0
+2024-08-25 19:40:33,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=138410.66666666666, ans=0.0
+2024-08-25 19:40:39,455 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=138464.0, ans=0.125
+2024-08-25 19:40:39,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=138464.0, ans=0.2
+2024-08-25 19:40:51,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=138464.0, ans=0.125
+2024-08-25 19:40:52,056 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.09 vs. limit=8.0
+2024-08-25 19:41:18,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=138570.66666666666, ans=0.09899494936611666
+2024-08-25 19:41:19,797 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.36 vs. limit=15.0
+2024-08-25 19:41:25,111 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=138624.0, ans=0.125
+2024-08-25 19:41:26,113 INFO [train.py:1114] (1/4) Epoch 11, batch 1100, loss[loss=0.2279, simple_loss=0.2865, pruned_loss=0.06054, ctc_loss=0.1207, over 19595.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.2919, pruned_loss=0.06523, ctc_loss=0.1228, over 3832824.15 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:42:15,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=138677.33333333334, ans=0.125
+2024-08-25 19:43:00,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=138730.66666666666, ans=0.125
+2024-08-25 19:43:16,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff3.min_abs, batch_count=138784.0, ans=0.2
+2024-08-25 19:43:18,823 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.86 vs. limit=15.0
+2024-08-25 19:43:35,986 INFO [train.py:1114] (1/4) Epoch 11, batch 1150, loss[loss=0.2074, simple_loss=0.2725, pruned_loss=0.05143, ctc_loss=0.09873, over 19607.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2916, pruned_loss=0.06515, ctc_loss=0.1226, over 3832353.99 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:43:37,195 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.797e+02 2.039e+02 2.453e+02 4.580e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-25 19:44:10,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=138997.33333333334, ans=0.2
+2024-08-25 19:44:39,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=139104.0, ans=0.1
+2024-08-25 19:44:41,837 INFO [train.py:1114] (1/4) Epoch 11, batch 1200, loss[loss=0.2178, simple_loss=0.2868, pruned_loss=0.05383, ctc_loss=0.1029, over 19837.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.2928, pruned_loss=0.06565, ctc_loss=0.1237, over 3827484.35 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:44:49,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=139157.33333333334, ans=0.125
+2024-08-25 19:44:50,419 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.74 vs. limit=15.0
+2024-08-25 19:45:09,700 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=139210.66666666666, ans=0.125
+2024-08-25 19:45:33,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=139264.0, ans=0.1
+2024-08-25 19:45:42,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=139317.33333333334, ans=0.0
+2024-08-25 19:46:00,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff2.min_abs, batch_count=139317.33333333334, ans=0.1
+2024-08-25 19:46:15,644 INFO [train.py:1114] (1/4) Epoch 11, batch 1250, loss[loss=0.237, simple_loss=0.299, pruned_loss=0.06306, ctc_loss=0.1221, over 19537.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2929, pruned_loss=0.06554, ctc_loss=0.1235, over 3845387.56 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:46:16,708 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.769e+02 1.992e+02 2.545e+02 3.633e+02, threshold=3.984e+02, percent-clipped=0.0
+2024-08-25 19:46:19,086 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=139424.0, ans=0.1
+2024-08-25 19:46:39,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=139424.0, ans=0.025
+2024-08-25 19:46:45,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=139477.33333333334, ans=0.125
+2024-08-25 19:46:57,651 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=139530.66666666666, ans=0.1
+2024-08-25 19:47:08,016 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.07 vs. limit=22.5
+2024-08-25 19:47:18,248 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.20 vs. limit=10.0
+2024-08-25 19:47:21,714 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.49 vs. limit=15.0
+2024-08-25 19:47:22,581 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:47:33,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=139637.33333333334, ans=0.09899494936611666
+2024-08-25 19:47:33,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=139637.33333333334, ans=0.04949747468305833
+2024-08-25 19:47:40,577 INFO [train.py:1114] (1/4) Epoch 11, batch 1300, loss[loss=0.2857, simple_loss=0.3315, pruned_loss=0.08714, ctc_loss=0.1641, over 18837.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.2924, pruned_loss=0.06525, ctc_loss=0.123, over 3847789.11 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:47:43,530 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.21 vs. limit=15.0
+2024-08-25 19:48:06,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=139797.33333333334, ans=0.125
+2024-08-25 19:48:59,248 INFO [train.py:1114] (1/4) Epoch 11, batch 1350, loss[loss=0.2201, simple_loss=0.2859, pruned_loss=0.05661, ctc_loss=0.1027, over 19754.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.2919, pruned_loss=0.0648, ctc_loss=0.122, over 3858244.04 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:48:59,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=139957.33333333334, ans=0.125
+2024-08-25 19:49:01,647 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.851e+02 2.124e+02 2.742e+02 4.665e+02, threshold=4.248e+02, percent-clipped=3.0
+2024-08-25 19:49:01,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=139957.33333333334, ans=0.07
+2024-08-25 19:49:10,247 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.73 vs. limit=15.0
+2024-08-25 19:49:17,413 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=140010.66666666666, ans=0.125
+2024-08-25 19:49:19,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=140010.66666666666, ans=0.0
+2024-08-25 19:49:56,215 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:50:07,172 INFO [train.py:1114] (1/4) Epoch 11, batch 1400, loss[loss=0.1681, simple_loss=0.2414, pruned_loss=0.03454, ctc_loss=0.06425, over 19659.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2915, pruned_loss=0.06448, ctc_loss=0.1214, over 3865075.90 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:50:16,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=140224.0, ans=0.0
+2024-08-25 19:50:17,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=140224.0, ans=0.0
+2024-08-25 19:50:38,314 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.59 vs. limit=15.0
+2024-08-25 19:51:08,371 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:51:18,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=140384.0, ans=0.125
+2024-08-25 19:51:32,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140437.33333333334, ans=0.125
+2024-08-25 19:51:42,651 INFO [train.py:1114] (1/4) Epoch 11, batch 1450, loss[loss=0.2292, simple_loss=0.2898, pruned_loss=0.06212, ctc_loss=0.111, over 19704.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2929, pruned_loss=0.06549, ctc_loss=0.1232, over 3862923.73 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:51:45,007 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.813e+02 2.052e+02 2.523e+02 4.896e+02, threshold=4.103e+02, percent-clipped=2.0
+2024-08-25 19:52:09,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=140544.0, ans=0.1
+2024-08-25 19:52:12,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=140544.0, ans=0.125
+2024-08-25 19:52:14,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140597.33333333334, ans=0.125
+2024-08-25 19:52:41,224 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140650.66666666666, ans=0.125
+2024-08-25 19:52:54,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=140704.0, ans=0.125
+2024-08-25 19:53:16,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=140704.0, ans=0.125
+2024-08-25 19:53:19,916 INFO [train.py:1114] (1/4) Epoch 11, batch 1500, loss[loss=0.2112, simple_loss=0.2836, pruned_loss=0.0503, ctc_loss=0.09548, over 19592.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.2926, pruned_loss=0.06504, ctc_loss=0.1225, over 3862884.47 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:53:25,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=140757.33333333334, ans=0.0
+2024-08-25 19:53:39,455 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.48 vs. limit=10.0
+2024-08-25 19:53:51,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=140810.66666666666, ans=0.2
+2024-08-25 19:53:58,925 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=140810.66666666666, ans=0.125
+2024-08-25 19:54:01,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=140810.66666666666, ans=0.1
+2024-08-25 19:54:07,961 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.29 vs. limit=15.0
+2024-08-25 19:54:17,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=140864.0, ans=0.125
+2024-08-25 19:54:28,002 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.38 vs. limit=6.0
+2024-08-25 19:54:31,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140917.33333333334, ans=0.125
+2024-08-25 19:54:38,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=140917.33333333334, ans=0.2
+2024-08-25 19:55:01,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=140970.66666666666, ans=0.0
+2024-08-25 19:55:07,086 INFO [train.py:1114] (1/4) Epoch 11, batch 1550, loss[loss=0.2338, simple_loss=0.2904, pruned_loss=0.06632, ctc_loss=0.1112, over 19614.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2918, pruned_loss=0.06466, ctc_loss=0.1217, over 3847926.47 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:55:10,259 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.30 vs. limit=15.0
+2024-08-25 19:55:10,756 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 1.804e+02 2.014e+02 2.422e+02 4.168e+02, threshold=4.028e+02, percent-clipped=1.0
+2024-08-25 19:56:25,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=141184.0, ans=0.015
+2024-08-25 19:56:57,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=141184.0, ans=0.0
+2024-08-25 19:57:16,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=141237.33333333334, ans=0.125
+2024-08-25 19:57:19,239 INFO [train.py:1114] (1/4) Epoch 11, batch 1600, loss[loss=0.2712, simple_loss=0.3232, pruned_loss=0.07889, ctc_loss=0.1536, over 19840.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2917, pruned_loss=0.06468, ctc_loss=0.1219, over 3837004.12 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:57:49,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=141290.66666666666, ans=0.95
+2024-08-25 19:58:40,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=141504.0, ans=0.1
+2024-08-25 19:59:27,400 INFO [train.py:1114] (1/4) Epoch 11, batch 1650, loss[loss=0.2221, simple_loss=0.2911, pruned_loss=0.05557, ctc_loss=0.1047, over 19656.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.291, pruned_loss=0.06447, ctc_loss=0.1214, over 3833068.39 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:59:29,887 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.768e+02 1.990e+02 2.303e+02 4.438e+02, threshold=3.979e+02, percent-clipped=2.0
+2024-08-25 19:59:46,932 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=141557.33333333334, ans=0.125
+2024-08-25 20:00:05,527 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=141610.66666666666, ans=0.0
+2024-08-25 20:00:23,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=141717.33333333334, ans=0.125
+2024-08-25 20:00:36,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=141717.33333333334, ans=0.125
+2024-08-25 20:01:17,966 INFO [train.py:1114] (1/4) Epoch 11, batch 1700, loss[loss=0.2108, simple_loss=0.2632, pruned_loss=0.05828, ctc_loss=0.1047, over 19652.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.2912, pruned_loss=0.06449, ctc_loss=0.1213, over 3847348.79 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:02:14,827 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.20 vs. limit=12.0
+2024-08-25 20:02:16,953 INFO [train.py:1114] (1/4) Epoch 11, batch 1750, loss[loss=0.2141, simple_loss=0.2611, pruned_loss=0.06121, ctc_loss=0.1117, over 19616.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2907, pruned_loss=0.06403, ctc_loss=0.1204, over 3851715.47 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:02:20,529 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.814e+02 2.107e+02 2.366e+02 3.890e+02, threshold=4.214e+02, percent-clipped=0.0
+2024-08-25 20:02:46,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=142144.0, ans=0.125
+2024-08-25 20:03:19,661 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.12 vs. limit=15.0
+2024-08-25 20:04:08,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=142304.0, ans=0.0
+2024-08-25 20:04:27,019 INFO [train.py:1114] (1/4) Epoch 11, batch 1800, loss[loss=0.2096, simple_loss=0.2846, pruned_loss=0.04834, ctc_loss=0.09483, over 19611.00 frames. ], tot_loss[loss=0.234, simple_loss=0.2909, pruned_loss=0.06439, ctc_loss=0.1209, over 3854080.64 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:04:27,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=142357.33333333334, ans=0.2
+2024-08-25 20:04:48,871 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.63 vs. limit=22.5
+2024-08-25 20:04:59,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=142410.66666666666, ans=0.1
+2024-08-25 20:05:03,645 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.78 vs. limit=6.0
+2024-08-25 20:05:18,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=142464.0, ans=0.2
+2024-08-25 20:06:15,201 INFO [train.py:1114] (1/4) Epoch 11, batch 1850, loss[loss=0.2437, simple_loss=0.3059, pruned_loss=0.06488, ctc_loss=0.1292, over 19581.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.2908, pruned_loss=0.0643, ctc_loss=0.1209, over 3856468.98 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:06:17,530 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=142624.0, ans=0.04949747468305833
+2024-08-25 20:06:18,513 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.849e+02 2.256e+02 2.966e+02 5.642e+02, threshold=4.511e+02, percent-clipped=6.0
+2024-08-25 20:06:28,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=142677.33333333334, ans=0.125
+2024-08-25 20:06:31,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=142677.33333333334, ans=0.0
+2024-08-25 20:06:47,163 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=142677.33333333334, ans=0.125
+2024-08-25 20:06:59,383 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.82 vs. limit=15.0
+2024-08-25 20:07:23,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=142784.0, ans=0.0
+2024-08-25 20:07:37,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=142837.33333333334, ans=0.0
+2024-08-25 20:07:51,852 INFO [train.py:1114] (1/4) Epoch 11, batch 1900, loss[loss=0.2222, simple_loss=0.292, pruned_loss=0.05551, ctc_loss=0.1033, over 19649.00 frames. ], tot_loss[loss=0.234, simple_loss=0.2912, pruned_loss=0.06427, ctc_loss=0.1208, over 3861115.76 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:08:05,893 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.99 vs. limit=15.0
+2024-08-25 20:08:12,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=142944.0, ans=0.5
+2024-08-25 20:51:12,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=143104.0, ans=0.1
+2024-08-25 20:55:30,008 INFO [train.py:1114] (1/4) Epoch 11, batch 1950, loss[loss=0.2146, simple_loss=0.2711, pruned_loss=0.05771, ctc_loss=0.1069, over 19586.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2927, pruned_loss=0.06465, ctc_loss=0.1217, over 3870338.24 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 21:03:39,811 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.850e+02 2.123e+02 2.695e+02 5.282e+02, threshold=4.246e+02, percent-clipped=2.0
+2024-08-25 21:05:40,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=143157.33333333334, ans=0.1
+2024-08-25 21:09:35,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=143157.33333333334, ans=0.0
+2024-08-25 21:27:04,846 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=143264.0, ans=0.125
+2024-08-25 21:32:12,829 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.99 vs. limit=15.0
+2024-08-25 21:32:13,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=143317.33333333334, ans=0.0
+2024-08-25 21:46:38,196 INFO [train.py:1114] (1/4) Epoch 11, batch 2000, loss[loss=0.2072, simple_loss=0.2582, pruned_loss=0.05636, ctc_loss=0.1088, over 19632.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2937, pruned_loss=0.06518, ctc_loss=0.1229, over 3855225.69 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 21:50:29,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=143424.0, ans=0.125
+2024-08-25 21:55:36,784 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 22:08:03,307 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.87 vs. limit=15.0
+2024-08-25 22:19:42,816 INFO [train.py:1114] (1/4) Epoch 11, batch 2050, loss[loss=0.2012, simple_loss=0.2613, pruned_loss=0.05097, ctc_loss=0.09813, over 19721.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2925, pruned_loss=0.06476, ctc_loss=0.1219, over 3850917.53 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:20:13,485 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.838e+02 2.216e+02 2.724e+02 4.008e+02, threshold=4.432e+02, percent-clipped=0.0
+2024-08-25 22:20:42,887 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=143690.66666666666, ans=0.125
+2024-08-25 22:28:13,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=143850.66666666666, ans=0.125
+2024-08-25 22:32:18,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-25 22:32:23,593 INFO [train.py:1114] (1/4) Epoch 11, batch 2100, loss[loss=0.2193, simple_loss=0.2869, pruned_loss=0.05539, ctc_loss=0.1022, over 19785.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2915, pruned_loss=0.06448, ctc_loss=0.1213, over 3857044.90 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:34:54,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=144010.66666666666, ans=0.0
+2024-08-25 22:36:41,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=144064.0, ans=0.125
+2024-08-25 22:37:14,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=144117.33333333334, ans=0.125
+2024-08-25 22:38:06,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=144170.66666666666, ans=0.125
+2024-08-25 22:38:36,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=144170.66666666666, ans=0.2
+2024-08-25 22:39:07,553 INFO [train.py:1114] (1/4) Epoch 11, batch 2150, loss[loss=0.2224, simple_loss=0.2758, pruned_loss=0.06149, ctc_loss=0.1151, over 19581.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2903, pruned_loss=0.06405, ctc_loss=0.1204, over 3867527.11 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:39:51,935 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 1.804e+02 2.068e+02 2.942e+02 5.639e+02, threshold=4.136e+02, percent-clipped=4.0
+2024-08-25 22:40:48,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=144277.33333333334, ans=0.0
+2024-08-25 22:41:09,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=144277.33333333334, ans=0.025
+2024-08-25 22:41:30,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=144330.66666666666, ans=0.0
+2024-08-25 22:42:48,251 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.48 vs. limit=12.0
+2024-08-25 22:43:20,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=144384.0, ans=0.025
+2024-08-25 22:44:02,549 INFO [train.py:1114] (1/4) Epoch 11, batch 2200, loss[loss=0.2648, simple_loss=0.3198, pruned_loss=0.07659, ctc_loss=0.1417, over 19593.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2903, pruned_loss=0.06431, ctc_loss=0.1209, over 3867221.50 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:44:05,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=144490.66666666666, ans=0.0
+2024-08-25 22:44:27,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=144490.66666666666, ans=0.0
+2024-08-25 22:45:41,702 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=7.853e-03
+2024-08-25 22:47:26,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=144650.66666666666, ans=0.0
+2024-08-25 22:48:53,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=144704.0, ans=0.125
+2024-08-25 22:49:00,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=144704.0, ans=0.1
+2024-08-25 22:49:02,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=144757.33333333334, ans=0.95
+2024-08-25 22:49:03,018 INFO [train.py:1114] (1/4) Epoch 11, batch 2250, loss[loss=0.2433, simple_loss=0.3067, pruned_loss=0.06566, ctc_loss=0.1217, over 19604.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.2906, pruned_loss=0.0644, ctc_loss=0.1211, over 3866737.60 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:49:09,610 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.818e+02 2.110e+02 2.782e+02 6.628e+02, threshold=4.220e+02, percent-clipped=3.0
+2024-08-25 22:49:19,945 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=144757.33333333334, ans=0.0
+2024-08-25 22:49:55,007 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.00 vs. limit=15.0
+2024-08-25 22:50:09,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=144917.33333333334, ans=0.125
+2024-08-25 22:50:32,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=144970.66666666666, ans=0.1
+2024-08-25 22:50:36,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=144970.66666666666, ans=0.125
+2024-08-25 22:50:46,947 INFO [train.py:1114] (1/4) Epoch 11, batch 2300, loss[loss=0.2255, simple_loss=0.2826, pruned_loss=0.06112, ctc_loss=0.1155, over 19489.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.2894, pruned_loss=0.0641, ctc_loss=0.1204, over 3861277.75 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:51:27,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=145077.33333333334, ans=0.07
+2024-08-25 22:52:02,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=145130.66666666666, ans=0.1
+2024-08-25 22:52:38,596 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.36 vs. limit=15.0
+2024-08-25 22:52:55,272 INFO [train.py:1114] (1/4) Epoch 11, batch 2350, loss[loss=0.2606, simple_loss=0.3145, pruned_loss=0.07513, ctc_loss=0.1413, over 19640.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2894, pruned_loss=0.06397, ctc_loss=0.12, over 3863502.55 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:53:01,237 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.317e+02 1.788e+02 2.141e+02 2.380e+02 3.835e+02, threshold=4.282e+02, percent-clipped=0.0
+2024-08-25 22:53:05,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=145290.66666666666, ans=0.125
+2024-08-25 22:53:05,468 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.86 vs. limit=15.0
+2024-08-25 22:53:27,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=145344.0, ans=0.0
+2024-08-25 22:53:40,887 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=145397.33333333334, ans=0.125
+2024-08-25 22:53:56,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=145450.66666666666, ans=0.0
+2024-08-25 22:54:01,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=145450.66666666666, ans=0.0
+2024-08-25 22:54:05,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=145450.66666666666, ans=0.2
+2024-08-25 22:54:16,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=145504.0, ans=0.125
+2024-08-25 22:54:26,001 INFO [train.py:1114] (1/4) Epoch 11, batch 2400, loss[loss=0.2354, simple_loss=0.2986, pruned_loss=0.06222, ctc_loss=0.1195, over 19286.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.292, pruned_loss=0.065, ctc_loss=0.1216, over 3858062.37 frames. ], batch size: 71, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:54:28,766 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.57 vs. limit=22.5
+2024-08-25 22:54:34,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=145557.33333333334, ans=0.2
+2024-08-25 22:54:40,289 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.27 vs. limit=6.0
+2024-08-25 22:54:55,115 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.33 vs. limit=22.5
+2024-08-25 22:54:56,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=145664.0, ans=0.025
+2024-08-25 22:54:59,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145664.0, ans=0.1
+2024-08-25 22:55:17,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145717.33333333334, ans=0.1
+2024-08-25 22:55:44,077 INFO [train.py:1114] (1/4) Epoch 11, batch 2450, loss[loss=0.3245, simple_loss=0.3395, pruned_loss=0.1126, ctc_loss=0.2106, over 13684.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2967, pruned_loss=0.06861, ctc_loss=0.1288, over 3727836.25 frames. ], batch size: 141, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:56:00,762 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.910e+02 2.208e+02 2.594e+02 5.356e+02, threshold=4.415e+02, percent-clipped=1.0
+2024-08-25 22:56:01,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145824.0, ans=0.1
+2024-08-25 22:56:35,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=145877.33333333334, ans=0.125
+2024-08-25 22:57:15,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=145984.0, ans=0.125
+2024-08-25 22:57:16,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=145984.0, ans=0.125
+2024-08-25 22:58:44,108 INFO [train.py:1114] (1/4) Epoch 12, batch 0, loss[loss=0.2626, simple_loss=0.2967, pruned_loss=0.08263, ctc_loss=0.158, over 19403.00 frames. ], tot_loss[loss=0.2626, simple_loss=0.2967, pruned_loss=0.08263, ctc_loss=0.158, over 19403.00 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 22:58:44,109 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 22:59:47,573 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.0449, 2.7830, 3.2282, 2.5431], device='cuda:1')
+2024-08-25 23:00:02,929 INFO [train.py:1146] (1/4) Epoch 12, validation: loss=0.1972, simple_loss=0.2841, pruned_loss=0.04086, ctc_loss=0.07109, over 944034.00 frames.
+2024-08-25 23:00:02,930 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-25 23:00:39,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=146138.66666666666, ans=0.0
+2024-08-25 23:00:58,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=146245.33333333334, ans=0.125
+2024-08-25 23:01:08,426 INFO [train.py:1114] (1/4) Epoch 12, batch 50, loss[loss=0.1951, simple_loss=0.2596, pruned_loss=0.04782, ctc_loss=0.0871, over 19725.00 frames. ], tot_loss[loss=0.239, simple_loss=0.2941, pruned_loss=0.06656, ctc_loss=0.1273, over 845355.73 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:01:16,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=146298.66666666666, ans=0.09899494936611666
+2024-08-25 23:01:19,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=146298.66666666666, ans=0.0
+2024-08-25 23:01:20,907 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.82 vs. limit=15.0
+2024-08-25 23:01:27,720 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.810e+02 2.073e+02 2.436e+02 4.057e+02, threshold=4.147e+02, percent-clipped=0.0
+2024-08-25 23:01:27,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=146352.0, ans=0.0
+2024-08-25 23:01:37,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=146405.33333333334, ans=0.125
+2024-08-25 23:01:38,982 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.58 vs. limit=22.5
+2024-08-25 23:01:45,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=146458.66666666666, ans=0.0
+2024-08-25 23:01:49,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=146458.66666666666, ans=0.1
+2024-08-25 23:01:53,837 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.72 vs. limit=6.0
+2024-08-25 23:01:56,886 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:02:20,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=146512.0, ans=0.2
+2024-08-25 23:02:22,998 INFO [train.py:1114] (1/4) Epoch 12, batch 100, loss[loss=0.2007, simple_loss=0.2679, pruned_loss=0.04797, ctc_loss=0.09366, over 19713.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.2952, pruned_loss=0.06632, ctc_loss=0.1261, over 1498997.21 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:02:31,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=146565.33333333334, ans=10.0
+2024-08-25 23:02:34,491 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-25 23:02:35,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=146565.33333333334, ans=0.125
+2024-08-25 23:02:40,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=146565.33333333334, ans=0.125
+2024-08-25 23:03:03,437 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=8.07 vs. limit=15.0
+2024-08-25 23:03:05,764 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.32 vs. limit=15.0
+2024-08-25 23:03:39,238 INFO [train.py:1114] (1/4) Epoch 12, batch 150, loss[loss=0.2033, simple_loss=0.2577, pruned_loss=0.05344, ctc_loss=0.1051, over 19728.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.2913, pruned_loss=0.06415, ctc_loss=0.1214, over 2027965.43 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:04:09,867 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.659e+02 1.880e+02 2.314e+02 3.650e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-25 23:04:15,863 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.93 vs. limit=12.0
+2024-08-25 23:04:19,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=146938.66666666666, ans=0.1
+2024-08-25 23:05:06,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=147098.66666666666, ans=0.0
+2024-08-25 23:05:07,036 INFO [train.py:1114] (1/4) Epoch 12, batch 200, loss[loss=0.2589, simple_loss=0.3152, pruned_loss=0.07364, ctc_loss=0.1383, over 18078.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2895, pruned_loss=0.06304, ctc_loss=0.1193, over 2435354.71 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:05:59,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=147152.0, ans=0.125
+2024-08-25 23:06:14,182 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.15 vs. limit=12.0
+2024-08-25 23:06:18,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=147205.33333333334, ans=0.0
+2024-08-25 23:06:42,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=147258.66666666666, ans=0.025
+2024-08-25 23:06:54,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=147312.0, ans=0.125
+2024-08-25 23:07:01,821 INFO [train.py:1114] (1/4) Epoch 12, batch 250, loss[loss=0.2372, simple_loss=0.2966, pruned_loss=0.06511, ctc_loss=0.1186, over 19469.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2886, pruned_loss=0.06259, ctc_loss=0.118, over 2757226.13 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:07:22,628 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 1.825e+02 2.154e+02 2.499e+02 3.884e+02, threshold=4.307e+02, percent-clipped=2.0
+2024-08-25 23:07:25,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=147418.66666666666, ans=0.125
+2024-08-25 23:07:26,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=147418.66666666666, ans=0.1
+2024-08-25 23:07:32,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=147472.0, ans=0.125
+2024-08-25 23:07:45,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=147525.33333333334, ans=0.0
+2024-08-25 23:07:52,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=147525.33333333334, ans=0.125
+2024-08-25 23:08:03,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=147578.66666666666, ans=0.2
+2024-08-25 23:08:07,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten.whitening_limit, batch_count=147578.66666666666, ans=15.0
+2024-08-25 23:08:07,423 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=7.50 vs. limit=15.0
+2024-08-25 23:08:10,577 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
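+
+The `WithLoss` lines attach a diagnostic penalty to self-attention weights and log its accumulated sum; `loss-sum=0.000e+00` indicates the weights stayed in the benign regime. The penalty form in the sketch below is an illustrative assumption, not the formula used by `scaling.py`.
+
+```python
+import torch
+
+def attention_weights_with_loss(attn_weights: torch.Tensor,
+                                name: str) -> torch.Tensor:
+    """Hedged sketch: pass the weights through unchanged while logging an
+    auxiliary penalty (zero unless some weight leaves the assumed safe
+    range); the threshold of 1.0 is illustrative."""
+    penalty = torch.relu(attn_weights.abs() - 1.0).sum()
+    print(f"WithLoss: name={name}, loss-sum={penalty.item():.3e}")
+    return attn_weights + 0.0 * penalty  # keeps the penalty in the graph
+```
+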
+2024-08-25 23:08:13,950 INFO [train.py:1114] (1/4) Epoch 12, batch 300, loss[loss=0.2628, simple_loss=0.3183, pruned_loss=0.0747, ctc_loss=0.1447, over 19513.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2881, pruned_loss=0.0625, ctc_loss=0.1178, over 3000839.16 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:08:32,553 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=147685.33333333334, ans=0.125
+2024-08-25 23:08:39,836 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.40 vs. limit=10.0
+2024-08-25 23:08:53,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=147792.0, ans=0.05
+2024-08-25 23:08:58,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=147792.0, ans=0.125
+2024-08-25 23:09:11,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=147845.33333333334, ans=0.125
+2024-08-25 23:09:11,883 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=147845.33333333334, ans=0.0
+2024-08-25 23:09:17,473 INFO [train.py:1114] (1/4) Epoch 12, batch 350, loss[loss=0.2265, simple_loss=0.2813, pruned_loss=0.06278, ctc_loss=0.1154, over 19757.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.2886, pruned_loss=0.06257, ctc_loss=0.118, over 3190720.75 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:09:18,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=147898.66666666666, ans=0.125
+2024-08-25 23:09:36,448 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.749e+02 2.047e+02 2.740e+02 4.170e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 23:09:45,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=148005.33333333334, ans=0.125
+2024-08-25 23:09:55,253 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.38 vs. limit=15.0
+2024-08-25 23:10:01,327 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=148058.66666666666, ans=0.125
+2024-08-25 23:10:10,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=148058.66666666666, ans=0.125
+2024-08-25 23:10:25,926 INFO [train.py:1114] (1/4) Epoch 12, batch 400, loss[loss=0.2158, simple_loss=0.2805, pruned_loss=0.05521, ctc_loss=0.1018, over 19497.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2884, pruned_loss=0.06242, ctc_loss=0.1176, over 3342696.82 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:11:02,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=148272.0, ans=0.2
+2024-08-25 23:11:18,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=148325.33333333334, ans=0.025
+2024-08-25 23:11:23,591 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.37 vs. limit=15.0
+2024-08-25 23:12:02,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=148378.66666666666, ans=0.125
+2024-08-25 23:12:04,218 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.27 vs. limit=12.0
+2024-08-25 23:12:05,894 INFO [train.py:1114] (1/4) Epoch 12, batch 450, loss[loss=0.2311, simple_loss=0.301, pruned_loss=0.05845, ctc_loss=0.1109, over 19609.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2889, pruned_loss=0.06295, ctc_loss=0.1184, over 3451399.95 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:12:12,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=148432.0, ans=0.125
+2024-08-25 23:12:17,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=148432.0, ans=0.0
+2024-08-25 23:12:21,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=148485.33333333334, ans=0.2
+2024-08-25 23:12:28,364 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.830e+02 2.201e+02 2.765e+02 4.484e+02, threshold=4.403e+02, percent-clipped=1.0
+2024-08-25 23:12:52,304 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=148592.0, ans=0.025
+2024-08-25 23:13:05,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=148645.33333333334, ans=0.0
+2024-08-25 23:13:21,163 INFO [train.py:1114] (1/4) Epoch 12, batch 500, loss[loss=0.2387, simple_loss=0.2971, pruned_loss=0.06522, ctc_loss=0.1247, over 19695.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2877, pruned_loss=0.06242, ctc_loss=0.1172, over 3547732.00 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:13:54,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=148752.0, ans=15.0
+2024-08-25 23:14:28,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=148858.66666666666, ans=0.125
+2024-08-25 23:14:42,345 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.09 vs. limit=6.0
+2024-08-25 23:14:50,654 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=148912.0, ans=0.125
+2024-08-25 23:14:54,899 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=148912.0, ans=6.0
+2024-08-25 23:14:54,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=148912.0, ans=22.5
+2024-08-25 23:14:56,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=148912.0, ans=10.0
+2024-08-25 23:14:59,255 INFO [train.py:1114] (1/4) Epoch 12, batch 550, loss[loss=0.2819, simple_loss=0.3281, pruned_loss=0.08656, ctc_loss=0.1565, over 19247.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2883, pruned_loss=0.06262, ctc_loss=0.1176, over 3608680.84 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:15:13,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=148965.33333333334, ans=0.125
+2024-08-25 23:15:16,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=148965.33333333334, ans=0.125
+2024-08-25 23:15:36,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=149018.66666666666, ans=0.0
+2024-08-25 23:15:42,253 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.692e+02 2.049e+02 2.499e+02 4.022e+02, threshold=4.098e+02, percent-clipped=0.0
+2024-08-25 23:15:58,695 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.50 vs. limit=15.0
+2024-08-25 23:16:06,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=149072.0, ans=0.025
+2024-08-25 23:16:47,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=149178.66666666666, ans=0.0
+2024-08-25 23:16:54,724 INFO [train.py:1114] (1/4) Epoch 12, batch 600, loss[loss=0.2456, simple_loss=0.3091, pruned_loss=0.06765, ctc_loss=0.1169, over 19360.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.2883, pruned_loss=0.0623, ctc_loss=0.1169, over 3665956.24 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:17:02,059 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.99 vs. limit=6.0
+2024-08-25 23:17:11,535 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:17:14,373 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.59 vs. limit=10.0
+2024-08-25 23:18:47,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=149338.66666666666, ans=0.125
+2024-08-25 23:19:03,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=149392.0, ans=0.1
+2024-08-25 23:19:07,058 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.91 vs. limit=15.0
+2024-08-25 23:19:21,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=149498.66666666666, ans=0.0
+2024-08-25 23:19:22,620 INFO [train.py:1114] (1/4) Epoch 12, batch 650, loss[loss=0.2555, simple_loss=0.3076, pruned_loss=0.07371, ctc_loss=0.1398, over 19754.00 frames. ], tot_loss[loss=0.229, simple_loss=0.2877, pruned_loss=0.06191, ctc_loss=0.1164, over 3716283.73 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:19:31,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=149498.66666666666, ans=0.125
+2024-08-25 23:19:35,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=149498.66666666666, ans=0.125
+2024-08-25 23:19:43,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=149552.0, ans=0.0
+2024-08-25 23:19:48,490 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.911e+02 2.346e+02 2.911e+02 5.072e+02, threshold=4.691e+02, percent-clipped=6.0
+2024-08-25 23:20:04,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.93 vs. limit=10.0
+2024-08-25 23:20:08,990 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.38 vs. limit=15.0
+2024-08-25 23:20:37,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=149712.0, ans=0.125
+2024-08-25 23:20:47,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_na.min_abs, batch_count=149712.0, ans=0.02
+2024-08-25 23:20:48,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=149765.33333333334, ans=0.025
+2024-08-25 23:20:49,404 INFO [train.py:1114] (1/4) Epoch 12, batch 700, loss[loss=0.2117, simple_loss=0.2696, pruned_loss=0.05676, ctc_loss=0.1006, over 19713.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.288, pruned_loss=0.06203, ctc_loss=0.1165, over 3746477.32 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:21:33,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=149925.33333333334, ans=0.125
+2024-08-25 23:21:41,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=149978.66666666666, ans=0.0
+2024-08-25 23:21:51,364 INFO [train.py:1114] (1/4) Epoch 12, batch 750, loss[loss=0.2343, simple_loss=0.2988, pruned_loss=0.06137, ctc_loss=0.1175, over 19512.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2877, pruned_loss=0.06189, ctc_loss=0.1161, over 3773749.03 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:22:11,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=150032.0, ans=0.0
+2024-08-25 23:22:14,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=150085.33333333334, ans=0.125
+2024-08-25 23:22:17,304 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.77 vs. limit=22.5
+2024-08-25 23:22:20,749 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.992e+02 2.563e+02 3.460e+02 5.252e+02, threshold=5.125e+02, percent-clipped=3.0
+2024-08-25 23:22:35,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=150138.66666666666, ans=0.125
+2024-08-25 23:23:10,613 INFO [train.py:1114] (1/4) Epoch 12, batch 800, loss[loss=0.1824, simple_loss=0.2488, pruned_loss=0.04206, ctc_loss=0.07951, over 19417.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.2872, pruned_loss=0.06153, ctc_loss=0.1156, over 3795520.06 frames. ], batch size: 48, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:23:16,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=150298.66666666666, ans=0.125
+2024-08-25 23:23:19,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=150298.66666666666, ans=0.125
+2024-08-25 23:23:25,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=150352.0, ans=0.125
+2024-08-25 23:23:36,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=150405.33333333334, ans=0.035
+2024-08-25 23:24:01,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=150512.0, ans=0.0
+2024-08-25 23:24:07,596 INFO [train.py:1114] (1/4) Epoch 12, batch 850, loss[loss=0.2241, simple_loss=0.2918, pruned_loss=0.05665, ctc_loss=0.1077, over 19644.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.2874, pruned_loss=0.06165, ctc_loss=0.1159, over 3815292.38 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:24:15,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=150565.33333333334, ans=0.0
+2024-08-25 23:24:30,650 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.376e+02 1.732e+02 2.149e+02 2.756e+02 4.869e+02, threshold=4.297e+02, percent-clipped=0.0
+2024-08-25 23:24:39,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=150672.0, ans=0.025
+2024-08-25 23:24:54,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.min_positive, batch_count=150672.0, ans=0.05
+2024-08-25 23:25:14,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=150778.66666666666, ans=0.125
+2024-08-25 23:25:16,427 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.75 vs. limit=15.0
+2024-08-25 23:25:39,230 INFO [train.py:1114] (1/4) Epoch 12, batch 900, loss[loss=0.2267, simple_loss=0.2718, pruned_loss=0.06595, ctc_loss=0.1244, over 19398.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.2878, pruned_loss=0.06194, ctc_loss=0.1164, over 3818318.72 frames. ], batch size: 48, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:25:41,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=150832.0, ans=0.0
+2024-08-25 23:26:01,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=150832.0, ans=0.125
+2024-08-25 23:26:21,510 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.84 vs. limit=15.0
+2024-08-25 23:26:23,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=150938.66666666666, ans=0.2
+2024-08-25 23:26:28,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=150938.66666666666, ans=0.125
+2024-08-25 23:27:00,583 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.62 vs. limit=15.0
+2024-08-25 23:27:21,988 INFO [train.py:1114] (1/4) Epoch 12, batch 950, loss[loss=0.2084, simple_loss=0.2701, pruned_loss=0.05306, ctc_loss=0.1014, over 19481.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2884, pruned_loss=0.06228, ctc_loss=0.1171, over 3818384.29 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:27:47,798 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 1.727e+02 2.047e+02 2.468e+02 3.873e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-08-25 23:28:00,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=151152.0, ans=0.125
+2024-08-25 23:28:26,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=151205.33333333334, ans=0.125
+2024-08-25 23:28:37,953 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.49 vs. limit=6.0
+2024-08-25 23:28:55,953 INFO [train.py:1114] (1/4) Epoch 12, batch 1000, loss[loss=0.2233, simple_loss=0.2808, pruned_loss=0.06077, ctc_loss=0.1107, over 19855.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.289, pruned_loss=0.06265, ctc_loss=0.1178, over 3815332.89 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:29:11,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=151365.33333333334, ans=0.0
+2024-08-25 23:30:33,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.46 vs. limit=6.0
+2024-08-25 23:30:41,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=151525.33333333334, ans=0.125
+2024-08-25 23:30:53,455 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=151578.66666666666, ans=0.0
+2024-08-25 23:30:55,449 INFO [train.py:1114] (1/4) Epoch 12, batch 1050, loss[loss=0.2228, simple_loss=0.2899, pruned_loss=0.05583, ctc_loss=0.1102, over 19847.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.2883, pruned_loss=0.06221, ctc_loss=0.1173, over 3821509.76 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:31:14,265 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.748e+02 2.222e+02 2.883e+02 4.562e+02, threshold=4.445e+02, percent-clipped=3.0
+2024-08-25 23:31:30,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=151738.66666666666, ans=0.0
+2024-08-25 23:31:49,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=151845.33333333334, ans=0.125
+2024-08-25 23:32:14,292 INFO [train.py:1114] (1/4) Epoch 12, batch 1100, loss[loss=0.2354, simple_loss=0.2906, pruned_loss=0.066, ctc_loss=0.1208, over 19578.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.288, pruned_loss=0.06218, ctc_loss=0.1173, over 3828704.68 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:32:16,882 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.07 vs. limit=15.0
+2024-08-25 23:32:18,250 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.43 vs. limit=10.0
+2024-08-25 23:32:26,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=151898.66666666666, ans=0.0
+2024-08-25 23:32:31,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=151952.0, ans=0.1
+2024-08-25 23:32:37,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=151952.0, ans=0.125
+2024-08-25 23:32:49,991 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.94 vs. limit=15.0
+2024-08-25 23:33:13,393 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=152112.0, ans=0.0
+2024-08-25 23:33:20,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=152112.0, ans=0.125
+2024-08-25 23:33:32,434 INFO [train.py:1114] (1/4) Epoch 12, batch 1150, loss[loss=0.202, simple_loss=0.263, pruned_loss=0.05103, ctc_loss=0.09728, over 19595.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2877, pruned_loss=0.06206, ctc_loss=0.117, over 3828532.61 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:33:46,291 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.19 vs. limit=15.0
+2024-08-25 23:33:49,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=152165.33333333334, ans=0.0
+2024-08-25 23:33:52,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=152218.66666666666, ans=0.125
+2024-08-25 23:34:07,234 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.763e+02 2.002e+02 2.335e+02 5.298e+02, threshold=4.005e+02, percent-clipped=1.0
+2024-08-25 23:34:12,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=152218.66666666666, ans=0.1
+2024-08-25 23:34:18,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=152272.0, ans=0.025
+2024-08-25 23:34:26,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=152272.0, ans=0.125
+2024-08-25 23:34:30,145 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.28 vs. limit=15.0
+2024-08-25 23:34:39,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=152325.33333333334, ans=0.2
+2024-08-25 23:34:59,041 INFO [train.py:1114] (1/4) Epoch 12, batch 1200, loss[loss=0.2439, simple_loss=0.2991, pruned_loss=0.06838, ctc_loss=0.1297, over 19828.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.289, pruned_loss=0.06288, ctc_loss=0.1185, over 3824097.13 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:35:01,478 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:35:15,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=152485.33333333334, ans=0.1
+2024-08-25 23:35:52,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=152592.0, ans=0.0
+2024-08-25 23:36:09,952 INFO [train.py:1114] (1/4) Epoch 12, batch 1250, loss[loss=0.2464, simple_loss=0.301, pruned_loss=0.07083, ctc_loss=0.1256, over 19525.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2895, pruned_loss=0.06274, ctc_loss=0.118, over 3842652.98 frames. ], batch size: 61, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:36:29,074 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=152752.0, ans=0.0
+2024-08-25 23:36:34,022 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.907e+02 2.265e+02 2.785e+02 4.753e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 23:36:59,413 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=152858.66666666666, ans=0.1
+2024-08-25 23:37:18,926 INFO [train.py:1114] (1/4) Epoch 12, batch 1300, loss[loss=0.2439, simple_loss=0.3011, pruned_loss=0.06828, ctc_loss=0.1256, over 18795.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2879, pruned_loss=0.06178, ctc_loss=0.116, over 3846441.29 frames. ], batch size: 76, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:37:22,925 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.56 vs. limit=6.0
+2024-08-25 23:37:23,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=152965.33333333334, ans=0.125
+2024-08-25 23:37:42,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=153018.66666666666, ans=0.125
+2024-08-25 23:38:07,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=153125.33333333334, ans=0.0
+2024-08-25 23:38:28,991 INFO [train.py:1114] (1/4) Epoch 12, batch 1350, loss[loss=0.2165, simple_loss=0.2782, pruned_loss=0.05683, ctc_loss=0.103, over 19780.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.2873, pruned_loss=0.0615, ctc_loss=0.1155, over 3856928.48 frames. ], batch size: 54, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:38:41,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=153285.33333333334, ans=0.125
+2024-08-25 23:38:46,283 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.707e+02 2.039e+02 2.408e+02 4.402e+02, threshold=4.078e+02, percent-clipped=0.0
+2024-08-25 23:38:49,149 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.79 vs. limit=15.0
+2024-08-25 23:39:05,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=153392.0, ans=0.025
+2024-08-25 23:39:19,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=153392.0, ans=0.2
+2024-08-25 23:39:21,495 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.04 vs. limit=15.0
+2024-08-25 23:39:38,366 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=20.90 vs. limit=22.5
+2024-08-25 23:39:41,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=153445.33333333334, ans=0.04949747468305833
+2024-08-25 23:39:43,074 INFO [train.py:1114] (1/4) Epoch 12, batch 1400, loss[loss=0.2275, simple_loss=0.272, pruned_loss=0.06678, ctc_loss=0.1237, over 19678.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.2873, pruned_loss=0.06148, ctc_loss=0.1154, over 3864355.01 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:39:50,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=153498.66666666666, ans=0.125
+2024-08-25 23:39:53,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=153498.66666666666, ans=0.125
+2024-08-25 23:40:17,868 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.69 vs. limit=22.5
+2024-08-25 23:40:19,937 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=153605.33333333334, ans=0.125
+2024-08-25 23:40:46,725 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-25 23:41:07,351 INFO [train.py:1114] (1/4) Epoch 12, batch 1450, loss[loss=0.2338, simple_loss=0.2937, pruned_loss=0.06282, ctc_loss=0.1205, over 19673.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2882, pruned_loss=0.06197, ctc_loss=0.1164, over 3862587.11 frames. ], batch size: 63, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:41:27,996 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 1.773e+02 2.135e+02 2.639e+02 4.435e+02, threshold=4.270e+02, percent-clipped=2.0
+2024-08-25 23:41:29,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=153818.66666666666, ans=0.125
+2024-08-25 23:41:54,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=153872.0, ans=0.0
+2024-08-25 23:41:56,654 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.74 vs. limit=15.0
+2024-08-25 23:42:20,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=153978.66666666666, ans=0.125
+2024-08-25 23:42:22,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=153978.66666666666, ans=0.125
+2024-08-25 23:42:43,046 INFO [train.py:1114] (1/4) Epoch 12, batch 1500, loss[loss=0.2262, simple_loss=0.2856, pruned_loss=0.06118, ctc_loss=0.111, over 19598.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2885, pruned_loss=0.06204, ctc_loss=0.1167, over 3861806.77 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:43:07,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=154085.33333333334, ans=0.2
+2024-08-25 23:43:15,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=154138.66666666666, ans=0.0
+2024-08-25 23:44:09,910 INFO [train.py:1114] (1/4) Epoch 12, batch 1550, loss[loss=0.2354, simple_loss=0.3011, pruned_loss=0.0624, ctc_loss=0.1123, over 19594.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2886, pruned_loss=0.06233, ctc_loss=0.1171, over 3845995.35 frames. ], batch size: 60, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:44:13,032 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=7.36 vs. limit=15.0
+2024-08-25 23:44:20,937 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.05 vs. limit=6.0
+2024-08-25 23:44:22,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=154352.0, ans=0.2
+2024-08-25 23:44:43,860 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.860e+02 2.194e+02 2.828e+02 4.590e+02, threshold=4.388e+02, percent-clipped=1.0
+2024-08-25 23:45:06,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=154405.33333333334, ans=0.125
+2024-08-25 23:45:09,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=154405.33333333334, ans=0.1
+2024-08-25 23:46:10,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=154405.33333333334, ans=0.125
+2024-08-25 23:46:12,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=154405.33333333334, ans=0.0
+2024-08-25 23:46:30,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=154512.0, ans=0.125
+2024-08-25 23:46:37,536 INFO [train.py:1114] (1/4) Epoch 12, batch 1600, loss[loss=0.2378, simple_loss=0.2987, pruned_loss=0.06416, ctc_loss=0.1211, over 19849.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.288, pruned_loss=0.06226, ctc_loss=0.1173, over 3835436.76 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:46:55,811 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=154565.33333333334, ans=0.2
+2024-08-25 23:47:19,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=154618.66666666666, ans=0.125
+2024-08-25 23:47:28,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=154618.66666666666, ans=0.0
+2024-08-25 23:47:32,593 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.219e-01
+2024-08-25 23:47:46,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=154672.0, ans=0.125
+2024-08-25 23:48:12,940 INFO [train.py:1114] (1/4) Epoch 12, batch 1650, loss[loss=0.2361, simple_loss=0.2967, pruned_loss=0.06416, ctc_loss=0.118, over 19656.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2876, pruned_loss=0.06203, ctc_loss=0.1169, over 3831667.61 frames. ], batch size: 59, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:48:28,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=154885.33333333334, ans=0.0
+2024-08-25 23:48:32,982 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.751e+02 2.060e+02 2.481e+02 4.497e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 23:48:44,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=154938.66666666666, ans=0.125
+2024-08-25 23:48:44,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=154938.66666666666, ans=0.125
+2024-08-25 23:48:55,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=154992.0, ans=0.09899494936611666
+2024-08-25 23:49:12,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=155045.33333333334, ans=0.125
+2024-08-25 23:49:19,218 INFO [train.py:1114] (1/4) Epoch 12, batch 1700, loss[loss=0.2198, simple_loss=0.2694, pruned_loss=0.06251, ctc_loss=0.1126, over 19671.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2876, pruned_loss=0.06186, ctc_loss=0.1165, over 3846379.08 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:49:26,539 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=155098.66666666666, ans=0.0
+2024-08-25 23:49:41,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=155205.33333333334, ans=0.125
+2024-08-25 23:49:58,643 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.27 vs. limit=15.0
+2024-08-25 23:50:19,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=155258.66666666666, ans=0.125
+2024-08-25 23:50:36,449 INFO [train.py:1114] (1/4) Epoch 12, batch 1750, loss[loss=0.2254, simple_loss=0.2735, pruned_loss=0.06374, ctc_loss=0.1245, over 19637.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2872, pruned_loss=0.06141, ctc_loss=0.1156, over 3851033.26 frames. ], batch size: 45, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:50:57,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=155365.33333333334, ans=0.2
+2024-08-25 23:51:01,000 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:51:07,089 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.08 vs. limit=15.0
+2024-08-25 23:51:12,445 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.691e+02 1.944e+02 2.310e+02 4.068e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-25 23:51:17,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=155418.66666666666, ans=0.0
+2024-08-25 23:51:19,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=155472.0, ans=0.125
+2024-08-25 23:51:55,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=155578.66666666666, ans=0.125
+2024-08-25 23:52:01,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=155578.66666666666, ans=0.0
+2024-08-25 23:52:03,816 INFO [train.py:1114] (1/4) Epoch 12, batch 1800, loss[loss=0.2304, simple_loss=0.2962, pruned_loss=0.05912, ctc_loss=0.1162, over 19599.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.2877, pruned_loss=0.06167, ctc_loss=0.1161, over 3853096.14 frames. ], batch size: 55, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:52:11,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=155632.0, ans=0.1
+2024-08-25 23:52:29,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=155685.33333333334, ans=0.0
+2024-08-25 23:52:30,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=155685.33333333334, ans=0.1
+2024-08-25 23:53:00,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=155738.66666666666, ans=0.125
+2024-08-25 23:53:21,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=155792.0, ans=0.025
+2024-08-25 23:53:25,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=155792.0, ans=0.0
+2024-08-25 23:53:26,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=155792.0, ans=10.0
+2024-08-25 23:53:33,439 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=155845.33333333334, ans=0.0
+2024-08-25 23:54:05,046 INFO [train.py:1114] (1/4) Epoch 12, batch 1850, loss[loss=0.2767, simple_loss=0.3313, pruned_loss=0.0809, ctc_loss=0.1507, over 19597.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.2876, pruned_loss=0.06177, ctc_loss=0.1161, over 3855362.83 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:54:28,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=155898.66666666666, ans=0.125
+2024-08-25 23:54:33,711 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.33 vs. limit=6.0
+2024-08-25 23:54:44,471 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.53 vs. limit=15.0
+2024-08-25 23:54:44,938 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 1.785e+02 2.050e+02 2.712e+02 4.249e+02, threshold=4.100e+02, percent-clipped=1.0
+2024-08-25 23:55:25,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=156058.66666666666, ans=0.2
+2024-08-25 23:55:26,716 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=156058.66666666666, ans=0.025
+2024-08-25 23:55:36,010 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.68 vs. limit=15.0
+2024-08-25 23:55:42,278 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=156165.33333333334, ans=0.125
+2024-08-25 23:56:02,274 INFO [train.py:1114] (1/4) Epoch 12, batch 1900, loss[loss=0.2147, simple_loss=0.2903, pruned_loss=0.04982, ctc_loss=0.09899, over 19655.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2881, pruned_loss=0.0617, ctc_loss=0.1159, over 3861072.89 frames. ], batch size: 59, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:56:40,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=156218.66666666666, ans=0.125
+2024-08-25 23:57:14,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=156325.33333333334, ans=0.2
+2024-08-25 23:57:29,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=156325.33333333334, ans=0.125
+2024-08-25 23:57:34,994 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=156378.66666666666, ans=0.125
+2024-08-25 23:58:01,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=156378.66666666666, ans=0.025
+2024-08-25 23:58:09,291 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=156378.66666666666, ans=0.125
+2024-08-25 23:58:28,732 INFO [train.py:1114] (1/4) Epoch 12, batch 1950, loss[loss=0.2085, simple_loss=0.2685, pruned_loss=0.05487, ctc_loss=0.09672, over 19593.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.289, pruned_loss=0.0619, ctc_loss=0.1161, over 3869858.39 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:58:33,481 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.21 vs. limit=15.0
+2024-08-25 23:58:54,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=156432.0, ans=0.2
+2024-08-25 23:59:03,813 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 1.700e+02 2.031e+02 2.417e+02 3.778e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-08-25 23:59:12,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=156538.66666666666, ans=0.125
+2024-08-25 23:59:23,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=156538.66666666666, ans=0.125
+2024-08-25 23:59:33,258 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.71 vs. limit=12.0
+2024-08-25 23:59:35,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=156592.0, ans=0.0
+2024-08-25 23:59:46,476 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.44 vs. limit=15.0
+2024-08-25 23:59:51,578 INFO [train.py:1114] (1/4) Epoch 12, batch 2000, loss[loss=0.2147, simple_loss=0.2646, pruned_loss=0.05986, ctc_loss=0.1127, over 19656.00 frames. ], tot_loss[loss=0.231, simple_loss=0.2899, pruned_loss=0.06258, ctc_loss=0.1175, over 3853730.37 frames. ], batch size: 45, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:00:06,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=156752.0, ans=0.125
+2024-08-26 00:00:40,461 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 00:00:44,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=156858.66666666666, ans=0.125
+2024-08-26 00:01:27,842 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=156965.33333333334, ans=0.125
+2024-08-26 00:01:28,938 INFO [train.py:1114] (1/4) Epoch 12, batch 2050, loss[loss=0.2269, simple_loss=0.2749, pruned_loss=0.06446, ctc_loss=0.125, over 19725.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2885, pruned_loss=0.06249, ctc_loss=0.1174, over 3850393.15 frames. ], batch size: 47, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:01:31,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=156965.33333333334, ans=0.125
+2024-08-26 00:01:39,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=157018.66666666666, ans=0.2
+2024-08-26 00:01:43,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=157018.66666666666, ans=0.0
+2024-08-26 00:01:46,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=157018.66666666666, ans=0.0
+2024-08-26 00:01:46,859 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.778e+02 1.977e+02 2.412e+02 4.440e+02, threshold=3.953e+02, percent-clipped=1.0
+2024-08-26 00:01:53,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=157072.0, ans=0.0
+2024-08-26 00:02:44,464 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.39 vs. limit=15.0
+2024-08-26 00:02:45,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 00:02:45,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 00:03:00,087 INFO [train.py:1114] (1/4) Epoch 12, batch 2100, loss[loss=0.2091, simple_loss=0.2776, pruned_loss=0.05139, ctc_loss=0.09445, over 19764.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.288, pruned_loss=0.06198, ctc_loss=0.1166, over 3858278.38 frames. ], batch size: 54, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:18:53,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=157285.33333333334, ans=0.0
+2024-08-26 00:19:56,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=157285.33333333334, ans=0.2
+2024-08-26 00:23:40,367 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.60 vs. limit=15.0
+2024-08-26 00:30:17,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=157338.66666666666, ans=0.1
+2024-08-26 00:40:42,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=157392.0, ans=0.125
+2024-08-26 00:56:07,935 INFO [train.py:1114] (1/4) Epoch 12, batch 2150, loss[loss=0.2283, simple_loss=0.2879, pruned_loss=0.06042, ctc_loss=0.1195, over 19563.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2876, pruned_loss=0.06204, ctc_loss=0.1166, over 3869358.89 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 01:01:41,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=157498.66666666666, ans=0.025
+2024-08-26 01:03:05,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=157498.66666666666, ans=0.2
+2024-08-26 01:08:26,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=157552.0, ans=0.0
+2024-08-26 01:09:53,318 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.788e+02 2.174e+02 2.705e+02 6.148e+02, threshold=4.348e+02, percent-clipped=11.0
+2024-08-26 01:13:04,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=157552.0, ans=0.0
+2024-08-26 01:16:37,297 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=157605.33333333334, ans=0.2
+2024-08-26 01:16:38,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=157605.33333333334, ans=0.0
+2024-08-26 01:18:15,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=157605.33333333334, ans=0.2
+2024-08-26 01:21:15,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=157605.33333333334, ans=0.0
+2024-08-26 01:21:15,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=157605.33333333334, ans=0.0
+2024-08-26 01:34:09,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=157712.0, ans=0.0
+2024-08-26 01:34:51,468 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.11 vs. limit=15.0
+2024-08-26 01:37:35,680 INFO [train.py:1114] (1/4) Epoch 12, batch 2200, loss[loss=0.2624, simple_loss=0.3178, pruned_loss=0.0753, ctc_loss=0.141, over 19595.00 frames. ], tot_loss[loss=0.229, simple_loss=0.2875, pruned_loss=0.062, ctc_loss=0.1166, over 3867203.55 frames. ], batch size: 57, lr: 1.23e-02, grad_scale: 16.0
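+
+Note that `grad_scale` drops from 32.0 (batch 2150) to 16.0 in the line above: dynamic loss scaling under mixed precision backs off when non-finite gradients are detected. A minimal update rule consistent with that halving is sketched below; the growth policy is an assumption, and real scalers typically grow the scale back only after a long run of finite steps.
+
+```python
+def update_grad_scale(grad_scale: float, grads_finite: bool,
+                      backoff_factor: float = 0.5,
+                      growth_factor: float = 2.0) -> float:
+    """Hedged sketch: halve the scale on overflow (32.0 -> 16.0 as in the
+    log), otherwise allow it to grow; deliberately simplified."""
+    return grad_scale * (growth_factor if grads_finite else backoff_factor)
+```
+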
+2024-08-26 01:44:47,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=157818.66666666666, ans=0.125
+2024-08-26 01:49:48,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=157872.0, ans=0.125
+2024-08-26 01:49:48,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=157872.0, ans=0.0
+2024-08-26 01:49:49,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=157925.33333333334, ans=0.0
+2024-08-26 01:57:30,326 INFO [train.py:1114] (1/4) Epoch 12, batch 2250, loss[loss=0.2416, simple_loss=0.2989, pruned_loss=0.06814, ctc_loss=0.12, over 19608.00 frames. ], tot_loss[loss=0.229, simple_loss=0.2876, pruned_loss=0.06196, ctc_loss=0.1163, over 3866797.67 frames. ], batch size: 55, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:04:09,786 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.77 vs. limit=22.5
+2024-08-26 02:04:28,486 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.839e+02 2.199e+02 2.577e+02 6.358e+02, threshold=4.399e+02, percent-clipped=1.0
+2024-08-26 02:10:33,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=158192.0, ans=0.125
+2024-08-26 02:13:21,298 INFO [train.py:1114] (1/4) Epoch 12, batch 2300, loss[loss=0.19, simple_loss=0.2611, pruned_loss=0.0428, ctc_loss=0.08317, over 19502.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.2869, pruned_loss=0.06205, ctc_loss=0.1165, over 3860138.16 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:15:17,612 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=158352.0, ans=0.125
+2024-08-26 02:16:57,515 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.92 vs. limit=15.0
+2024-08-26 02:19:29,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=158458.66666666666, ans=0.0
+2024-08-26 02:19:30,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=158458.66666666666, ans=0.125
+2024-08-26 02:20:36,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=158458.66666666666, ans=0.125
+2024-08-26 02:21:54,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=158512.0, ans=0.025
+2024-08-26 02:22:39,618 INFO [train.py:1114] (1/4) Epoch 12, batch 2350, loss[loss=0.2511, simple_loss=0.3073, pruned_loss=0.07012, ctc_loss=0.1367, over 19669.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2872, pruned_loss=0.06258, ctc_loss=0.1177, over 3863839.87 frames. ], batch size: 63, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:22:51,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=158565.33333333334, ans=0.125
+2024-08-26 02:23:10,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158565.33333333334, ans=0.1
+2024-08-26 02:25:18,438 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.991e+02 2.536e+02 3.183e+02 5.552e+02, threshold=5.072e+02, percent-clipped=5.0
+2024-08-26 02:26:00,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=158672.0, ans=10.0
+2024-08-26 02:26:35,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=158672.0, ans=0.0
+2024-08-26 02:27:08,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158725.33333333334, ans=0.1
+2024-08-26 02:30:58,348 INFO [train.py:1114] (1/4) Epoch 12, batch 2400, loss[loss=0.2902, simple_loss=0.3313, pruned_loss=0.09067, ctc_loss=0.1691, over 19313.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2893, pruned_loss=0.06322, ctc_loss=0.1189, over 3858473.22 frames. ], batch size: 71, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:31:28,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158885.33333333334, ans=0.1
+2024-08-26 02:31:51,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=158885.33333333334, ans=0.2
+2024-08-26 02:35:10,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=158938.66666666666, ans=0.1
+2024-08-26 02:36:09,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=158938.66666666666, ans=0.0
+2024-08-26 02:36:18,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158992.0, ans=0.1
+2024-08-26 02:36:52,157 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:37:47,280 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:38:22,388 INFO [train.py:1114] (1/4) Epoch 12, batch 2450, loss[loss=0.3557, simple_loss=0.3573, pruned_loss=0.1296, ctc_loss=0.237, over 13343.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.2931, pruned_loss=0.066, ctc_loss=0.1244, over 3731368.21 frames. ], batch size: 140, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:39:03,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=159098.66666666666, ans=0.0
+2024-08-26 02:39:07,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=159098.66666666666, ans=0.125
+2024-08-26 02:39:10,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=159152.0, ans=0.07
+2024-08-26 02:39:42,309 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.859e+02 2.162e+02 2.447e+02 4.124e+02, threshold=4.324e+02, percent-clipped=0.0
+2024-08-26 02:43:45,617 INFO [train.py:1114] (1/4) Epoch 13, batch 0, loss[loss=0.1992, simple_loss=0.2634, pruned_loss=0.04946, ctc_loss=0.09021, over 19821.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2634, pruned_loss=0.04946, ctc_loss=0.09021, over 19821.00 frames. ], batch size: 49, lr: 1.18e-02, grad_scale: 32.0
+2024-08-26 02:43:45,618 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 02:45:27,904 INFO [train.py:1146] (1/4) Epoch 13, validation: loss=0.1972, simple_loss=0.2835, pruned_loss=0.04113, ctc_loss=0.07151, over 944034.00 frames.
+2024-08-26 02:45:27,905 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-26 02:45:29,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=159306.66666666666, ans=0.025
+2024-08-26 02:45:44,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=159306.66666666666, ans=0.125
+2024-08-26 02:45:57,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=159360.0, ans=0.125
+2024-08-26 02:45:58,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=159360.0, ans=0.1
+2024-08-26 02:46:14,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=159413.33333333334, ans=0.04949747468305833
+2024-08-26 02:46:19,751 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=159413.33333333334, ans=0.125
+2024-08-26 02:46:24,854 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=5.236e-03
+2024-08-26 02:46:30,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=159413.33333333334, ans=0.125
+2024-08-26 02:46:53,557 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=159466.66666666666, ans=0.2
+2024-08-26 02:46:56,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=159466.66666666666, ans=0.0
+2024-08-26 02:48:02,402 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.70 vs. limit=6.0
+2024-08-26 02:48:06,368 INFO [train.py:1114] (1/4) Epoch 13, batch 50, loss[loss=0.181, simple_loss=0.244, pruned_loss=0.04319, ctc_loss=0.07873, over 19716.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.2882, pruned_loss=0.06144, ctc_loss=0.1163, over 844748.83 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:48:07,878 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.42 vs. limit=15.0
+2024-08-26 02:48:20,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=159626.66666666666, ans=0.035
+2024-08-26 02:48:34,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=159680.0, ans=0.1
+2024-08-26 02:48:34,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=159680.0, ans=0.09899494936611666
+2024-08-26 02:48:55,465 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.822e+02 2.122e+02 2.766e+02 5.339e+02, threshold=4.244e+02, percent-clipped=3.0
+2024-08-26 02:49:23,059 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.66 vs. limit=15.0
+2024-08-26 02:49:26,905 INFO [train.py:1114] (1/4) Epoch 13, batch 100, loss[loss=0.2239, simple_loss=0.2807, pruned_loss=0.0604, ctc_loss=0.1155, over 19708.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2894, pruned_loss=0.06134, ctc_loss=0.1157, over 1499477.02 frames. ], batch size: 51, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:49:29,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=159840.0, ans=0.0
+2024-08-26 02:49:32,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=159840.0, ans=0.07
+2024-08-26 02:49:49,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=159893.33333333334, ans=0.0
+2024-08-26 02:49:58,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=159893.33333333334, ans=0.125
+2024-08-26 02:50:41,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=160000.0, ans=0.125
+2024-08-26 02:50:45,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=160000.0, ans=0.125
+2024-08-26 02:51:17,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 02:51:18,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=160053.33333333334, ans=0.0
+2024-08-26 02:51:27,234 INFO [train.py:1114] (1/4) Epoch 13, batch 150, loss[loss=0.1995, simple_loss=0.2576, pruned_loss=0.0522, ctc_loss=0.09226, over 19698.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2864, pruned_loss=0.06004, ctc_loss=0.1129, over 2027175.31 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:51:42,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=160106.66666666666, ans=0.035
+2024-08-26 02:52:08,891 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160160.0, ans=0.1
+2024-08-26 02:52:26,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=160213.33333333334, ans=0.125
+2024-08-26 02:52:40,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=160213.33333333334, ans=0.125
+2024-08-26 02:52:42,811 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.80 vs. limit=6.0
+2024-08-26 02:52:48,527 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.693e+02 1.889e+02 2.276e+02 3.515e+02, threshold=3.778e+02, percent-clipped=0.0
+2024-08-26 02:53:07,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160266.66666666666, ans=0.1
+2024-08-26 02:53:08,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160266.66666666666, ans=0.1
+2024-08-26 02:53:36,258 INFO [train.py:1114] (1/4) Epoch 13, batch 200, loss[loss=0.2553, simple_loss=0.3076, pruned_loss=0.07258, ctc_loss=0.1446, over 18394.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2852, pruned_loss=0.05977, ctc_loss=0.1126, over 2434816.66 frames. ], batch size: 85, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:54:07,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=160426.66666666666, ans=0.025
+2024-08-26 02:54:25,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=160480.0, ans=0.1
+2024-08-26 02:54:25,741 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.44 vs. limit=22.5
+2024-08-26 02:54:28,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=160480.0, ans=0.125
+2024-08-26 02:55:03,367 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.17 vs. limit=15.0
+2024-08-26 02:55:15,701 INFO [train.py:1114] (1/4) Epoch 13, batch 250, loss[loss=0.2365, simple_loss=0.2908, pruned_loss=0.06634, ctc_loss=0.1238, over 19336.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2852, pruned_loss=0.05979, ctc_loss=0.1125, over 2754771.31 frames. ], batch size: 67, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:55:18,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=160640.0, ans=0.0
+2024-08-26 02:55:22,539 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=160640.0, ans=0.5
+2024-08-26 02:55:23,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=160640.0, ans=0.125
+2024-08-26 02:55:25,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=160640.0, ans=0.1
+2024-08-26 02:55:32,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=160693.33333333334, ans=0.0
+2024-08-26 02:55:38,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=160746.66666666666, ans=0.025
+2024-08-26 02:55:47,653 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.754e+02 2.188e+02 2.577e+02 4.403e+02, threshold=4.375e+02, percent-clipped=2.0
+2024-08-26 02:55:47,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=160746.66666666666, ans=0.125
+2024-08-26 02:55:49,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=160800.0, ans=0.2
+2024-08-26 02:55:54,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=160800.0, ans=0.2
+2024-08-26 02:55:55,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=160800.0, ans=0.025
+2024-08-26 02:56:07,127 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=160800.0, ans=0.1
+2024-08-26 02:56:07,249 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.32 vs. limit=22.5
+2024-08-26 02:56:12,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=160853.33333333334, ans=0.0
+2024-08-26 02:56:20,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=160853.33333333334, ans=0.0
+2024-08-26 02:56:24,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.82 vs. limit=15.0
+2024-08-26 02:56:43,557 INFO [train.py:1114] (1/4) Epoch 13, batch 300, loss[loss=0.2438, simple_loss=0.2962, pruned_loss=0.06964, ctc_loss=0.1301, over 19518.00 frames. ], tot_loss[loss=0.225, simple_loss=0.2851, pruned_loss=0.0599, ctc_loss=0.1128, over 3000163.21 frames. ], batch size: 61, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:56:46,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=160906.66666666666, ans=0.125
+2024-08-26 02:57:00,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=160960.0, ans=0.0
+2024-08-26 02:57:05,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=160960.0, ans=0.2
+2024-08-26 02:57:23,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=161013.33333333334, ans=0.025
+2024-08-26 02:57:35,547 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:57:37,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=161120.0, ans=0.125
+2024-08-26 02:57:50,481 INFO [train.py:1114] (1/4) Epoch 13, batch 350, loss[loss=0.1999, simple_loss=0.259, pruned_loss=0.05202, ctc_loss=0.09175, over 19765.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2855, pruned_loss=0.06002, ctc_loss=0.1127, over 3189239.62 frames. ], batch size: 48, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:58:25,613 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.772e+02 2.039e+02 2.354e+02 3.759e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-26 02:58:54,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=161386.66666666666, ans=0.1
+2024-08-26 02:59:24,150 INFO [train.py:1114] (1/4) Epoch 13, batch 400, loss[loss=0.2108, simple_loss=0.2807, pruned_loss=0.05184, ctc_loss=0.09298, over 19481.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2846, pruned_loss=0.05934, ctc_loss=0.1116, over 3341325.68 frames. ], batch size: 54, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 02:59:53,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=161440.0, ans=0.2
+2024-08-26 02:59:58,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=161493.33333333334, ans=0.0
+2024-08-26 03:00:04,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.55 vs. limit=15.0
+2024-08-26 03:00:39,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=161600.0, ans=0.125
+2024-08-26 03:00:39,020 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=161600.0, ans=0.07
+2024-08-26 03:00:43,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=161600.0, ans=0.0
+2024-08-26 03:01:53,834 INFO [train.py:1114] (1/4) Epoch 13, batch 450, loss[loss=0.2097, simple_loss=0.2838, pruned_loss=0.04922, ctc_loss=0.09268, over 19624.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2851, pruned_loss=0.05972, ctc_loss=0.1125, over 3450443.00 frames. ], batch size: 55, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:01:55,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=161706.66666666666, ans=0.0
+2024-08-26 03:02:06,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=161706.66666666666, ans=0.025
+2024-08-26 03:02:43,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=161760.0, ans=0.125
+2024-08-26 03:03:10,109 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.726e+02 2.085e+02 2.754e+02 4.301e+02, threshold=4.170e+02, percent-clipped=3.0
+2024-08-26 03:03:51,753 INFO [train.py:1114] (1/4) Epoch 13, batch 500, loss[loss=0.2528, simple_loss=0.3014, pruned_loss=0.07671, ctc_loss=0.127, over 19660.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2841, pruned_loss=0.05929, ctc_loss=0.1116, over 3546503.37 frames. ], batch size: 63, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:04:15,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=161973.33333333334, ans=0.125
+2024-08-26 03:05:04,618 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.28 vs. limit=15.0
+2024-08-26 03:05:10,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=162080.0, ans=0.125
+2024-08-26 03:06:03,081 INFO [train.py:1114] (1/4) Epoch 13, batch 550, loss[loss=0.2419, simple_loss=0.2998, pruned_loss=0.06667, ctc_loss=0.1264, over 19314.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2838, pruned_loss=0.05928, ctc_loss=0.1116, over 3608218.83 frames. ], batch size: 71, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:06:14,895 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=162293.33333333334, ans=0.125
+2024-08-26 03:06:16,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162293.33333333334, ans=0.1
+2024-08-26 03:06:21,553 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=162293.33333333334, ans=0.2
+2024-08-26 03:06:33,530 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.66 vs. limit=15.0
+2024-08-26 03:06:42,115 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.39 vs. limit=15.0
+2024-08-26 03:06:47,057 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 1.758e+02 1.954e+02 2.485e+02 4.688e+02, threshold=3.908e+02, percent-clipped=2.0
+2024-08-26 03:06:55,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=162400.0, ans=0.0
+2024-08-26 03:07:14,584 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=162453.33333333334, ans=0.125
+2024-08-26 03:07:24,268 INFO [train.py:1114] (1/4) Epoch 13, batch 600, loss[loss=0.2155, simple_loss=0.2833, pruned_loss=0.05416, ctc_loss=0.0984, over 19384.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2841, pruned_loss=0.05916, ctc_loss=0.1113, over 3667030.45 frames. ], batch size: 67, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:07:50,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=162560.0, ans=0.0
+2024-08-26 03:07:51,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=162560.0, ans=10.0
+2024-08-26 03:08:12,259 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.66 vs. limit=15.0
+2024-08-26 03:08:19,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff2.min_abs, batch_count=162613.33333333334, ans=0.1
+2024-08-26 03:08:25,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162666.66666666666, ans=0.1
+2024-08-26 03:08:29,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=162666.66666666666, ans=0.2
+2024-08-26 03:08:46,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=162720.0, ans=0.025
+2024-08-26 03:09:14,959 INFO [train.py:1114] (1/4) Epoch 13, batch 650, loss[loss=0.2037, simple_loss=0.2747, pruned_loss=0.04834, ctc_loss=0.08982, over 19763.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2838, pruned_loss=0.05902, ctc_loss=0.111, over 3717515.22 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:09:25,690 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.46 vs. limit=15.0
+2024-08-26 03:09:43,153 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=162826.66666666666, ans=0.125
+2024-08-26 03:09:56,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=162880.0, ans=0.0
+2024-08-26 03:09:56,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=162880.0, ans=0.0
+2024-08-26 03:10:02,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=162880.0, ans=0.2
+2024-08-26 03:10:09,866 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.755e+02 2.119e+02 2.960e+02 5.119e+02, threshold=4.237e+02, percent-clipped=6.0
+2024-08-26 03:10:28,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=162986.66666666666, ans=0.0
+2024-08-26 03:10:39,735 INFO [train.py:1114] (1/4) Epoch 13, batch 700, loss[loss=0.2101, simple_loss=0.2691, pruned_loss=0.05561, ctc_loss=0.09999, over 19725.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2842, pruned_loss=0.05912, ctc_loss=0.1109, over 3749066.62 frames. ], batch size: 51, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:11:04,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=163093.33333333334, ans=0.125
+2024-08-26 03:11:33,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=163200.0, ans=0.0
+2024-08-26 03:11:52,550 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=163253.33333333334, ans=0.0
+2024-08-26 03:11:55,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=163253.33333333334, ans=0.0
+2024-08-26 03:12:00,804 INFO [train.py:1114] (1/4) Epoch 13, batch 750, loss[loss=0.2101, simple_loss=0.2789, pruned_loss=0.05138, ctc_loss=0.09659, over 19474.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.284, pruned_loss=0.05919, ctc_loss=0.111, over 3774963.78 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:12:05,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=163306.66666666666, ans=0.125
+2024-08-26 03:12:07,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=163306.66666666666, ans=0.07
+2024-08-26 03:12:26,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=163413.33333333334, ans=0.125
+2024-08-26 03:12:28,846 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=163413.33333333334, ans=0.125
+2024-08-26 03:12:43,029 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.800e+02 2.310e+02 2.882e+02 4.749e+02, threshold=4.619e+02, percent-clipped=2.0
+2024-08-26 03:12:45,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=163466.66666666666, ans=0.2
+2024-08-26 03:12:46,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=163466.66666666666, ans=0.125
+2024-08-26 03:13:35,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=163520.0, ans=0.125
+2024-08-26 03:13:56,354 INFO [train.py:1114] (1/4) Epoch 13, batch 800, loss[loss=0.2159, simple_loss=0.2717, pruned_loss=0.05828, ctc_loss=0.109, over 19820.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2838, pruned_loss=0.05886, ctc_loss=0.1103, over 3796423.05 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:14:20,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=163626.66666666666, ans=0.125
+2024-08-26 03:14:29,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=163680.0, ans=0.125
+2024-08-26 03:14:32,920 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.98 vs. limit=15.0
+2024-08-26 03:14:33,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=163680.0, ans=0.125
+2024-08-26 03:14:52,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=163733.33333333334, ans=0.125
+2024-08-26 03:15:01,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=163786.66666666666, ans=0.035
+2024-08-26 03:15:01,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=163786.66666666666, ans=0.2
+2024-08-26 03:15:10,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=163786.66666666666, ans=0.1
+2024-08-26 03:15:12,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=163840.0, ans=0.04949747468305833
+2024-08-26 03:15:13,890 INFO [train.py:1114] (1/4) Epoch 13, batch 850, loss[loss=0.2274, simple_loss=0.2929, pruned_loss=0.05866, ctc_loss=0.1111, over 19658.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2833, pruned_loss=0.05874, ctc_loss=0.1102, over 3814761.56 frames. ], batch size: 59, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:15:16,638 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.32 vs. limit=22.5
+2024-08-26 03:15:33,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=163893.33333333334, ans=0.125
+2024-08-26 03:16:07,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=163946.66666666666, ans=0.125
+2024-08-26 03:16:11,622 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.727e+02 1.948e+02 2.271e+02 3.773e+02, threshold=3.897e+02, percent-clipped=0.0
+2024-08-26 03:16:25,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=164053.33333333334, ans=0.0
+2024-08-26 03:16:39,635 INFO [train.py:1114] (1/4) Epoch 13, batch 900, loss[loss=0.2188, simple_loss=0.2686, pruned_loss=0.06142, ctc_loss=0.1152, over 19831.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.284, pruned_loss=0.0593, ctc_loss=0.1113, over 3819190.32 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:16:44,778 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.00 vs. limit=6.0
+2024-08-26 03:16:57,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=164160.0, ans=0.125
+2024-08-26 03:17:00,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=164160.0, ans=0.2
+2024-08-26 03:17:05,744 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.36 vs. limit=15.0
+2024-08-26 03:17:13,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=164213.33333333334, ans=0.125
+2024-08-26 03:17:15,811 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.87 vs. limit=15.0
+2024-08-26 03:17:30,860 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.25 vs. limit=15.0
+2024-08-26 03:17:35,240 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.04 vs. limit=15.0
+2024-08-26 03:17:39,435 INFO [train.py:1114] (1/4) Epoch 13, batch 950, loss[loss=0.2102, simple_loss=0.2709, pruned_loss=0.05451, ctc_loss=0.1015, over 19500.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2842, pruned_loss=0.0595, ctc_loss=0.1117, over 3820526.74 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:18:33,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=164426.66666666666, ans=0.125
+2024-08-26 03:18:41,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=164480.0, ans=0.125
+2024-08-26 03:18:41,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=164480.0, ans=0.125
+2024-08-26 03:18:52,284 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.763e+02 2.081e+02 2.549e+02 5.575e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-26 03:18:54,105 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.66 vs. limit=10.0
+2024-08-26 03:19:00,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=164533.33333333334, ans=0.125
+2024-08-26 03:19:09,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=164533.33333333334, ans=0.125
+2024-08-26 03:19:29,883 INFO [train.py:1114] (1/4) Epoch 13, batch 1000, loss[loss=0.2352, simple_loss=0.2903, pruned_loss=0.06572, ctc_loss=0.1216, over 19861.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2856, pruned_loss=0.06039, ctc_loss=0.1133, over 3816217.51 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:19:38,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=164640.0, ans=0.2
+2024-08-26 03:19:44,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=164693.33333333334, ans=0.0
+2024-08-26 03:19:45,265 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=164693.33333333334, ans=0.125
+2024-08-26 03:20:06,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=164746.66666666666, ans=0.0
+2024-08-26 03:20:15,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=164800.0, ans=0.125
+2024-08-26 03:20:31,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=164853.33333333334, ans=0.05
+2024-08-26 03:20:35,692 INFO [train.py:1114] (1/4) Epoch 13, batch 1050, loss[loss=0.2331, simple_loss=0.2911, pruned_loss=0.06369, ctc_loss=0.1193, over 19840.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2849, pruned_loss=0.06024, ctc_loss=0.1131, over 3821903.62 frames. ], batch size: 57, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:20:44,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=164906.66666666666, ans=15.0
+2024-08-26 03:20:55,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=164960.0, ans=0.125
+2024-08-26 03:21:08,095 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.698e+02 1.997e+02 2.318e+02 3.616e+02, threshold=3.994e+02, percent-clipped=0.0
+2024-08-26 03:21:31,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=165120.0, ans=0.025
+2024-08-26 03:21:44,513 INFO [train.py:1114] (1/4) Epoch 13, batch 1100, loss[loss=0.2319, simple_loss=0.2833, pruned_loss=0.06551, ctc_loss=0.1237, over 19579.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2852, pruned_loss=0.0601, ctc_loss=0.113, over 3829803.11 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:22:00,908 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.30 vs. limit=12.0
+2024-08-26 03:22:04,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=165226.66666666666, ans=10.0
+2024-08-26 03:22:19,578 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:22:22,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=165333.33333333334, ans=0.5
+2024-08-26 03:22:35,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=165333.33333333334, ans=0.0
+2024-08-26 03:22:57,610 INFO [train.py:1114] (1/4) Epoch 13, batch 1150, loss[loss=0.2307, simple_loss=0.2883, pruned_loss=0.06225, ctc_loss=0.1216, over 19582.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2858, pruned_loss=0.06056, ctc_loss=0.1139, over 3829936.96 frames. ], batch size: 52, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:23:10,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=165493.33333333334, ans=0.1
+2024-08-26 03:23:11,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=165493.33333333334, ans=0.0
+2024-08-26 03:23:19,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=165493.33333333334, ans=0.125
+2024-08-26 03:23:38,674 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.729e+02 2.006e+02 2.456e+02 7.202e+02, threshold=4.012e+02, percent-clipped=3.0
+2024-08-26 03:23:50,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=165600.0, ans=0.09899494936611666
+2024-08-26 03:23:54,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=165653.33333333334, ans=0.0
+2024-08-26 03:24:11,584 INFO [train.py:1114] (1/4) Epoch 13, batch 1200, loss[loss=0.2316, simple_loss=0.2999, pruned_loss=0.05814, ctc_loss=0.1177, over 19832.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.2869, pruned_loss=0.06092, ctc_loss=0.1147, over 3824766.47 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:24:31,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=165760.0, ans=0.2
+2024-08-26 03:25:15,635 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.21 vs. limit=12.0
+2024-08-26 03:25:21,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=165813.33333333334, ans=0.125
+2024-08-26 03:26:10,129 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.72 vs. limit=15.0
+2024-08-26 03:26:10,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=165920.0, ans=0.2
+2024-08-26 03:26:13,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.34 vs. limit=10.0
+2024-08-26 03:26:15,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=165920.0, ans=0.025
+2024-08-26 03:26:20,505 INFO [train.py:1114] (1/4) Epoch 13, batch 1250, loss[loss=0.2435, simple_loss=0.3022, pruned_loss=0.06778, ctc_loss=0.1232, over 19512.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.287, pruned_loss=0.06074, ctc_loss=0.1144, over 3842801.59 frames. ], batch size: 61, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:26:20,696 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=165973.33333333334, ans=0.1
+2024-08-26 03:27:02,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=166080.0, ans=0.125
+2024-08-26 03:27:22,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=166080.0, ans=0.1
+2024-08-26 03:27:23,470 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.311e+02 1.715e+02 1.869e+02 2.285e+02 3.930e+02, threshold=3.738e+02, percent-clipped=0.0
+2024-08-26 03:27:35,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=166133.33333333334, ans=0.0
+2024-08-26 03:28:00,522 INFO [train.py:1114] (1/4) Epoch 13, batch 1300, loss[loss=0.2354, simple_loss=0.2913, pruned_loss=0.06582, ctc_loss=0.1198, over 18906.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2857, pruned_loss=0.06002, ctc_loss=0.113, over 3847225.03 frames. ], batch size: 76, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:28:22,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=166293.33333333334, ans=0.125
+2024-08-26 03:28:39,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=166293.33333333334, ans=0.125
+2024-08-26 03:28:41,383 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.97 vs. limit=15.0
+2024-08-26 03:29:29,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=166400.0, ans=0.125
+2024-08-26 03:29:36,172 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:30:17,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=166453.33333333334, ans=0.125
+2024-08-26 03:30:19,039 INFO [train.py:1114] (1/4) Epoch 13, batch 1350, loss[loss=0.2256, simple_loss=0.288, pruned_loss=0.05968, ctc_loss=0.1094, over 19751.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.285, pruned_loss=0.05951, ctc_loss=0.1117, over 3858651.13 frames. ], batch size: 54, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:30:25,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=166506.66666666666, ans=0.025
+2024-08-26 03:31:08,775 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.736e+02 2.053e+02 2.622e+02 5.263e+02, threshold=4.106e+02, percent-clipped=6.0
+2024-08-26 03:31:25,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=166720.0, ans=0.0
+2024-08-26 03:31:40,384 INFO [train.py:1114] (1/4) Epoch 13, batch 1400, loss[loss=0.1663, simple_loss=0.2377, pruned_loss=0.03389, ctc_loss=0.06789, over 19677.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2848, pruned_loss=0.0594, ctc_loss=0.1117, over 3864928.92 frames. ], batch size: 46, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:32:16,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.49 vs. limit=10.0
+2024-08-26 03:32:20,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=166826.66666666666, ans=0.125
+2024-08-26 03:32:48,387 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.24 vs. limit=15.0
+2024-08-26 03:32:55,388 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.74 vs. limit=15.0
+2024-08-26 03:33:13,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=166986.66666666666, ans=0.0
+2024-08-26 03:33:14,257 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.85 vs. limit=10.0
+2024-08-26 03:33:21,474 INFO [train.py:1114] (1/4) Epoch 13, batch 1450, loss[loss=0.2165, simple_loss=0.2824, pruned_loss=0.05522, ctc_loss=0.1005, over 19651.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2852, pruned_loss=0.05958, ctc_loss=0.1119, over 3863219.48 frames. ], batch size: 63, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:33:31,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=167040.0, ans=0.2
+2024-08-26 03:33:47,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=167093.33333333334, ans=0.025
+2024-08-26 03:33:49,089 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.09 vs. limit=15.0
+2024-08-26 03:33:59,237 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.756e+02 1.937e+02 2.380e+02 3.895e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-26 03:34:06,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=167200.0, ans=0.125
+2024-08-26 03:34:06,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=167200.0, ans=0.0
+2024-08-26 03:34:12,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=167200.0, ans=0.0
+2024-08-26 03:34:17,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=167200.0, ans=0.1
+2024-08-26 03:34:23,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=167253.33333333334, ans=0.125
+2024-08-26 03:34:33,941 INFO [train.py:1114] (1/4) Epoch 13, batch 1500, loss[loss=0.2373, simple_loss=0.2922, pruned_loss=0.06526, ctc_loss=0.1296, over 19591.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2854, pruned_loss=0.0596, ctc_loss=0.1116, over 3862550.12 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:35:01,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=167413.33333333334, ans=0.125
+2024-08-26 03:35:03,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=167413.33333333334, ans=0.04949747468305833
+2024-08-26 03:35:34,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=167520.0, ans=0.125
+2024-08-26 03:35:39,239 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.00 vs. limit=12.0
+2024-08-26 03:35:40,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=167520.0, ans=0.1
+2024-08-26 03:35:43,246 INFO [train.py:1114] (1/4) Epoch 13, batch 1550, loss[loss=0.2521, simple_loss=0.3118, pruned_loss=0.06983, ctc_loss=0.1319, over 19627.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2859, pruned_loss=0.06012, ctc_loss=0.1127, over 3846724.73 frames. ], batch size: 60, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:35:46,335 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.92 vs. limit=6.0
+2024-08-26 03:35:58,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=167626.66666666666, ans=0.125
+2024-08-26 03:36:17,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=167626.66666666666, ans=0.07
+2024-08-26 03:36:35,368 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.778e+02 2.054e+02 2.767e+02 5.252e+02, threshold=4.108e+02, percent-clipped=7.0
+2024-08-26 03:37:04,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=167840.0, ans=0.0
+2024-08-26 03:37:05,302 INFO [train.py:1114] (1/4) Epoch 13, batch 1600, loss[loss=0.1914, simple_loss=0.2677, pruned_loss=0.0418, ctc_loss=0.07855, over 19840.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2852, pruned_loss=0.05982, ctc_loss=0.1125, over 3834490.98 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:37:13,932 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=167840.0, ans=0.125
+2024-08-26 03:37:15,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=167840.0, ans=0.0
+2024-08-26 03:37:16,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=167840.0, ans=0.125
+2024-08-26 03:37:19,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=167840.0, ans=0.025
+2024-08-26 03:37:36,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=167893.33333333334, ans=0.125
+2024-08-26 03:38:35,982 INFO [train.py:1114] (1/4) Epoch 13, batch 1650, loss[loss=0.2036, simple_loss=0.282, pruned_loss=0.04452, ctc_loss=0.09014, over 19655.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2858, pruned_loss=0.06034, ctc_loss=0.1136, over 3832591.27 frames. ], batch size: 59, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:39:07,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=168160.0, ans=0.125
+2024-08-26 03:39:19,473 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.85 vs. limit=22.5
+2024-08-26 03:39:20,047 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.825e+02 2.209e+02 2.614e+02 4.167e+02, threshold=4.418e+02, percent-clipped=2.0
+2024-08-26 03:39:20,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.79 vs. limit=15.0
+2024-08-26 03:39:26,286 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=168266.66666666666, ans=0.1
+2024-08-26 03:39:55,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=168320.0, ans=0.125
+2024-08-26 03:40:00,081 INFO [train.py:1114] (1/4) Epoch 13, batch 1700, loss[loss=0.1967, simple_loss=0.2548, pruned_loss=0.05045, ctc_loss=0.09452, over 19702.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2848, pruned_loss=0.05947, ctc_loss=0.112, over 3847218.47 frames. ], batch size: 46, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:40:17,288 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.16 vs. limit=6.0
+2024-08-26 03:40:31,833 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=4.98 vs. limit=12.0
+2024-08-26 03:40:45,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=168480.0, ans=0.1
+2024-08-26 03:40:45,772 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.01 vs. limit=12.0
+2024-08-26 03:40:48,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=168533.33333333334, ans=0.2
+2024-08-26 03:41:05,341 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:41:17,852 INFO [train.py:1114] (1/4) Epoch 13, batch 1750, loss[loss=0.1969, simple_loss=0.2527, pruned_loss=0.05033, ctc_loss=0.1009, over 19684.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2842, pruned_loss=0.05904, ctc_loss=0.1114, over 3851805.56 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:41:33,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=168693.33333333334, ans=0.125
+2024-08-26 03:41:45,103 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.87 vs. limit=15.0
+2024-08-26 03:41:47,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=168693.33333333334, ans=0.2
+2024-08-26 03:41:57,317 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.37 vs. limit=15.0
+2024-08-26 03:42:01,096 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.702e+02 2.065e+02 2.813e+02 5.109e+02, threshold=4.129e+02, percent-clipped=2.0
+2024-08-26 03:42:45,931 INFO [train.py:1114] (1/4) Epoch 13, batch 1800, loss[loss=0.2092, simple_loss=0.2835, pruned_loss=0.04951, ctc_loss=0.08993, over 19620.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2846, pruned_loss=0.05945, ctc_loss=0.112, over 3853381.72 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:43:01,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=168960.0, ans=0.0
+2024-08-26 03:43:19,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=169013.33333333334, ans=0.0
+2024-08-26 03:43:34,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=169066.66666666666, ans=0.2
+2024-08-26 03:43:44,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=169120.0, ans=0.125
+2024-08-26 03:43:53,522 INFO [train.py:1114] (1/4) Epoch 13, batch 1850, loss[loss=0.23, simple_loss=0.29, pruned_loss=0.06177, ctc_loss=0.1163, over 19571.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2838, pruned_loss=0.05901, ctc_loss=0.1111, over 3856130.43 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:44:01,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=169173.33333333334, ans=0.0
+2024-08-26 03:44:14,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=169226.66666666666, ans=0.1
+2024-08-26 03:44:29,680 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.936e+02 2.666e+02 3.402e+02 5.252e+02, threshold=5.332e+02, percent-clipped=13.0
+2024-08-26 03:44:40,092 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.66 vs. limit=22.5
+2024-08-26 03:44:44,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=169386.66666666666, ans=0.0
+2024-08-26 03:44:56,560 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.93 vs. limit=10.0
+2024-08-26 03:45:07,835 INFO [train.py:1114] (1/4) Epoch 13, batch 1900, loss[loss=0.2239, simple_loss=0.2885, pruned_loss=0.05724, ctc_loss=0.112, over 19628.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2842, pruned_loss=0.05915, ctc_loss=0.1113, over 3861621.93 frames. ], batch size: 59, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:45:15,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=169440.0, ans=0.125
+2024-08-26 03:45:26,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.48 vs. limit=22.5
+2024-08-26 03:45:42,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_ff3.min_abs, batch_count=169546.66666666666, ans=0.2
+2024-08-26 03:45:43,485 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=169546.66666666666, ans=0.0
+2024-08-26 03:46:29,178 INFO [train.py:1114] (1/4) Epoch 13, batch 1950, loss[loss=0.2191, simple_loss=0.2824, pruned_loss=0.05702, ctc_loss=0.1044, over 19591.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2858, pruned_loss=0.05978, ctc_loss=0.1125, over 3870432.45 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:46:29,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=169706.66666666666, ans=0.125
+2024-08-26 03:46:57,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=169813.33333333334, ans=0.1
+2024-08-26 03:49:49,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=169813.33333333334, ans=0.125
+2024-08-26 03:50:26,628 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.795e+02 2.018e+02 2.323e+02 3.502e+02, threshold=4.036e+02, percent-clipped=0.0
+2024-08-26 04:20:39,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=169920.0, ans=0.125
+2024-08-26 04:20:52,497 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.09 vs. limit=15.0
+2024-08-26 04:22:39,285 INFO [train.py:1114] (1/4) Epoch 13, batch 2000, loss[loss=0.1961, simple_loss=0.2496, pruned_loss=0.0519, ctc_loss=0.09707, over 19627.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.2865, pruned_loss=0.06022, ctc_loss=0.1133, over 3856227.76 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 04:40:08,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=170026.66666666666, ans=0.125
+2024-08-26 05:17:15,369 INFO [train.py:1114] (1/4) Epoch 13, batch 2050, loss[loss=0.1872, simple_loss=0.2487, pruned_loss=0.04588, ctc_loss=0.08504, over 19719.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2855, pruned_loss=0.0599, ctc_loss=0.1127, over 3851755.38 frames. ], batch size: 47, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:34:32,867 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.739e+02 2.095e+02 2.592e+02 3.598e+02, threshold=4.189e+02, percent-clipped=0.0
+2024-08-26 05:40:56,494 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.15 vs. limit=15.0
+2024-08-26 05:45:20,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=170506.66666666666, ans=0.125
+2024-08-26 05:45:21,774 INFO [train.py:1114] (1/4) Epoch 13, batch 2100, loss[loss=0.2283, simple_loss=0.2881, pruned_loss=0.06206, ctc_loss=0.1109, over 19769.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2849, pruned_loss=0.05941, ctc_loss=0.1119, over 3858448.85 frames. ], batch size: 54, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:50:47,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=170560.0, ans=0.0
+2024-08-26 05:50:48,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=170560.0, ans=0.025
+2024-08-26 05:51:08,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=170560.0, ans=0.125
+2024-08-26 05:55:49,645 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.33 vs. limit=15.0
+2024-08-26 05:57:03,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=170720.0, ans=0.0
+2024-08-26 05:57:05,752 INFO [train.py:1114] (1/4) Epoch 13, batch 2150, loss[loss=0.1958, simple_loss=0.2681, pruned_loss=0.04454, ctc_loss=0.0859, over 19608.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2843, pruned_loss=0.05918, ctc_loss=0.1115, over 3869651.90 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 06:00:38,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=170826.66666666666, ans=0.125
+2024-08-26 06:02:00,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=170880.0, ans=0.0
+2024-08-26 06:02:10,734 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.801e+02 2.071e+02 2.646e+02 5.963e+02, threshold=4.141e+02, percent-clipped=6.0
+2024-08-26 06:03:39,259 INFO [train.py:1114] (1/4) Epoch 13, batch 2200, loss[loss=0.2428, simple_loss=0.3005, pruned_loss=0.06773, ctc_loss=0.1242, over 19596.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2842, pruned_loss=0.059, ctc_loss=0.111, over 3868024.11 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 06:04:13,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=171093.33333333334, ans=10.0
+2024-08-26 06:04:45,624 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.90 vs. limit=15.0
+2024-08-26 06:04:56,172 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=171146.66666666666, ans=0.125
+2024-08-26 06:05:06,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=171200.0, ans=0.07
+2024-08-26 06:05:14,356 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.24 vs. limit=6.0
+2024-08-26 06:05:56,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=171253.33333333334, ans=0.125
+2024-08-26 06:06:16,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=171253.33333333334, ans=0.0
+2024-08-26 06:06:26,725 INFO [train.py:1114] (1/4) Epoch 13, batch 2250, loss[loss=0.2164, simple_loss=0.2807, pruned_loss=0.0547, ctc_loss=0.1069, over 19613.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.284, pruned_loss=0.05897, ctc_loss=0.1111, over 3867521.25 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 06:06:26,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=171306.66666666666, ans=0.125
+2024-08-26 06:07:52,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=171413.33333333334, ans=0.2
+2024-08-26 06:07:52,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=171413.33333333334, ans=0.125
+2024-08-26 06:08:14,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=171413.33333333334, ans=0.125
+2024-08-26 06:08:30,393 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.765e+02 2.070e+02 2.599e+02 3.761e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-26 06:09:23,310 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.00 vs. limit=15.0
+2024-08-26 06:09:29,808 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.82 vs. limit=15.0
+2024-08-26 06:09:34,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=171466.66666666666, ans=0.0
+2024-08-26 06:10:19,747 INFO [train.py:1114] (1/4) Epoch 13, batch 2300, loss[loss=0.2071, simple_loss=0.2694, pruned_loss=0.05231, ctc_loss=0.1006, over 19513.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.2831, pruned_loss=0.05883, ctc_loss=0.111, over 3860632.66 frames. ], batch size: 49, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:10:25,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=171573.33333333334, ans=0.125
+2024-08-26 06:10:28,110 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.94 vs. limit=22.5
+2024-08-26 06:10:39,986 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.17 vs. limit=12.0
+2024-08-26 06:11:02,060 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=171680.0, ans=0.125
+2024-08-26 06:11:12,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=171680.0, ans=0.125
+2024-08-26 06:11:24,899 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=171733.33333333334, ans=0.0
+2024-08-26 06:11:27,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=171733.33333333334, ans=0.0
+2024-08-26 06:11:36,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=171786.66666666666, ans=0.125
+2024-08-26 06:11:42,413 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=171840.0, ans=0.07
+2024-08-26 06:11:43,311 INFO [train.py:1114] (1/4) Epoch 13, batch 2350, loss[loss=0.2195, simple_loss=0.2937, pruned_loss=0.05211, ctc_loss=0.1027, over 19661.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2831, pruned_loss=0.05874, ctc_loss=0.1106, over 3863505.81 frames. ], batch size: 63, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:11:51,375 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.38 vs. limit=15.0
+2024-08-26 06:12:09,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=171946.66666666666, ans=0.125
+2024-08-26 06:12:12,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=171946.66666666666, ans=0.125
+2024-08-26 06:12:16,619 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.773e+02 2.247e+02 3.255e+02 4.983e+02, threshold=4.494e+02, percent-clipped=2.0
+2024-08-26 06:12:18,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=172000.0, ans=0.125
+2024-08-26 06:12:29,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172053.33333333334, ans=0.1
+2024-08-26 06:12:46,290 INFO [train.py:1114] (1/4) Epoch 13, batch 2400, loss[loss=0.2308, simple_loss=0.2934, pruned_loss=0.05998, ctc_loss=0.1207, over 19235.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2853, pruned_loss=0.05975, ctc_loss=0.1122, over 3858875.86 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:13:28,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=172213.33333333334, ans=0.125
+2024-08-26 06:13:53,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=172266.66666666666, ans=0.025
+2024-08-26 06:14:00,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=172320.0, ans=0.05
+2024-08-26 06:14:00,816 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=172320.0, ans=0.2
+2024-08-26 06:14:03,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=172320.0, ans=0.125
+2024-08-26 06:14:05,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=172320.0, ans=0.125
+2024-08-26 06:14:08,364 INFO [train.py:1114] (1/4) Epoch 13, batch 2450, loss[loss=0.3062, simple_loss=0.3326, pruned_loss=0.1018, ctc_loss=0.1907, over 13687.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2901, pruned_loss=0.06343, ctc_loss=0.1196, over 3730503.06 frames. ], batch size: 142, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:14:17,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=172373.33333333334, ans=0.0
+2024-08-26 06:14:33,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=172480.0, ans=0.125
+2024-08-26 06:14:43,288 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.935e+02 2.072e+02 2.350e+02 4.711e+02, threshold=4.143e+02, percent-clipped=2.0
+2024-08-26 06:14:43,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=172533.33333333334, ans=0.0
+2024-08-26 06:16:27,503 INFO [train.py:1114] (1/4) Epoch 14, batch 0, loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:16:27,504 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 06:17:53,178 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.1437, 3.0885, 3.6307, 2.8115], device='cuda:1')
+2024-08-26 06:17:58,796 INFO [train.py:1146] (1/4) Epoch 14, validation: loss=0.1898, simple_loss=0.2778, pruned_loss=0.03769, ctc_loss=0.06578, over 944034.00 frames.
+2024-08-26 06:18:12,592 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 14083MB
+2024-08-26 06:18:29,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=172634.66666666666, ans=0.0
+2024-08-26 06:18:39,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=172634.66666666666, ans=0.0
+2024-08-26 06:18:56,213 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=172688.0, ans=0.125
+2024-08-26 06:19:03,547 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.62 vs. limit=15.0
+2024-08-26 06:19:25,355 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.36 vs. limit=12.0
+2024-08-26 06:19:53,748 INFO [train.py:1114] (1/4) Epoch 14, batch 50, loss[loss=0.2162, simple_loss=0.2658, pruned_loss=0.06082, ctc_loss=0.1123, over 19699.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2864, pruned_loss=0.06032, ctc_loss=0.1134, over 844835.93 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:20:18,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=172848.0, ans=0.0
+2024-08-26 06:20:43,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=172954.66666666666, ans=0.2
+2024-08-26 06:20:53,696 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=172954.66666666666, ans=0.125
+2024-08-26 06:21:17,216 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.738e+02 2.047e+02 2.487e+02 4.948e+02, threshold=4.095e+02, percent-clipped=4.0
+2024-08-26 06:21:48,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=173061.33333333334, ans=0.0
+2024-08-26 06:21:51,846 INFO [train.py:1114] (1/4) Epoch 14, batch 100, loss[loss=0.2132, simple_loss=0.2728, pruned_loss=0.05592, ctc_loss=0.104, over 19712.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.2875, pruned_loss=0.06052, ctc_loss=0.114, over 1499995.38 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:23:09,707 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=173328.0, ans=0.0
+2024-08-26 06:23:33,006 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=173381.33333333334, ans=0.025
+2024-08-26 06:23:38,133 INFO [train.py:1114] (1/4) Epoch 14, batch 150, loss[loss=0.2055, simple_loss=0.2629, pruned_loss=0.05363, ctc_loss=0.102, over 19717.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2839, pruned_loss=0.05839, ctc_loss=0.1102, over 2027349.01 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:23:51,259 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:24:03,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.90 vs. limit=22.5
+2024-08-26 06:24:07,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=173434.66666666666, ans=0.07
+2024-08-26 06:24:10,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=173434.66666666666, ans=0.125
+2024-08-26 06:24:29,600 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.72 vs. limit=15.0
+2024-08-26 06:24:32,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=173541.33333333334, ans=0.05
+2024-08-26 06:24:43,092 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:24:49,750 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.676e+02 1.898e+02 2.213e+02 4.155e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 06:24:49,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=173594.66666666666, ans=0.125
+2024-08-26 06:24:57,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=173594.66666666666, ans=0.025
+2024-08-26 06:25:00,470 INFO [train.py:1114] (1/4) Epoch 14, batch 200, loss[loss=0.2395, simple_loss=0.2941, pruned_loss=0.06666, ctc_loss=0.129, over 18213.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2818, pruned_loss=0.05741, ctc_loss=0.1077, over 2434363.68 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:25:01,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=173648.0, ans=0.0
+2024-08-26 06:25:14,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=173648.0, ans=0.125
+2024-08-26 06:25:47,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=173754.66666666666, ans=0.5
+2024-08-26 06:26:01,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=173861.33333333334, ans=0.2
+2024-08-26 06:26:12,149 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.59 vs. limit=15.0
+2024-08-26 06:26:16,064 INFO [train.py:1114] (1/4) Epoch 14, batch 250, loss[loss=0.2392, simple_loss=0.2973, pruned_loss=0.06622, ctc_loss=0.1215, over 19420.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2823, pruned_loss=0.05748, ctc_loss=0.1083, over 2753707.57 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:26:16,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=173914.66666666666, ans=0.125
+2024-08-26 06:26:40,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=173968.0, ans=0.125
+2024-08-26 06:26:59,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=174021.33333333334, ans=0.125
+2024-08-26 06:27:18,000 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.683e+02 2.061e+02 2.648e+02 4.927e+02, threshold=4.123e+02, percent-clipped=4.0
+2024-08-26 06:27:18,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=174128.0, ans=0.2
+2024-08-26 06:27:22,857 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=174128.0, ans=0.025
+2024-08-26 06:27:28,129 INFO [train.py:1114] (1/4) Epoch 14, batch 300, loss[loss=0.2515, simple_loss=0.304, pruned_loss=0.07298, ctc_loss=0.1324, over 19548.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2823, pruned_loss=0.05751, ctc_loss=0.1084, over 2999566.01 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:27:36,791 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.06 vs. limit=22.5
+2024-08-26 06:27:48,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=174234.66666666666, ans=0.125
+2024-08-26 06:27:52,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=174288.0, ans=0.125
+2024-08-26 06:28:04,450 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.52 vs. limit=15.0
+2024-08-26 06:28:06,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=174288.0, ans=0.125
+2024-08-26 06:28:11,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174341.33333333334, ans=0.1
+2024-08-26 06:28:11,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.32 vs. limit=15.0
+2024-08-26 06:28:13,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=174341.33333333334, ans=0.125
+2024-08-26 06:28:34,436 INFO [train.py:1114] (1/4) Epoch 14, batch 350, loss[loss=0.171, simple_loss=0.2399, pruned_loss=0.03702, ctc_loss=0.07009, over 19763.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2826, pruned_loss=0.05731, ctc_loss=0.1078, over 3190345.80 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:28:43,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.09 vs. limit=15.0
+2024-08-26 06:28:53,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=174501.33333333334, ans=0.1
+2024-08-26 06:29:03,927 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:29:32,485 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.657e+02 1.894e+02 2.440e+02 4.007e+02, threshold=3.787e+02, percent-clipped=0.0
+2024-08-26 06:29:42,962 INFO [train.py:1114] (1/4) Epoch 14, batch 400, loss[loss=0.2291, simple_loss=0.2913, pruned_loss=0.06002, ctc_loss=0.117, over 19500.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2823, pruned_loss=0.05734, ctc_loss=0.1079, over 3341930.78 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:29:48,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=174714.66666666666, ans=0.1
+2024-08-26 06:29:56,050 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=10.71 vs. limit=15.0
+2024-08-26 06:30:40,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=174874.66666666666, ans=0.95
+2024-08-26 06:30:40,576 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=174874.66666666666, ans=0.0
+2024-08-26 06:30:58,866 INFO [train.py:1114] (1/4) Epoch 14, batch 450, loss[loss=0.2125, simple_loss=0.2825, pruned_loss=0.05198, ctc_loss=0.09665, over 19622.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2827, pruned_loss=0.05793, ctc_loss=0.1087, over 3450368.11 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:31:14,818 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.17 vs. limit=15.0
+2024-08-26 06:31:19,427 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.66 vs. limit=12.0
+2024-08-26 06:31:51,404 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.21 vs. limit=15.0
+2024-08-26 06:31:59,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=175088.0, ans=0.035
+2024-08-26 06:32:32,617 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.702e+02 1.875e+02 2.205e+02 3.904e+02, threshold=3.749e+02, percent-clipped=2.0
+2024-08-26 06:32:59,821 INFO [train.py:1114] (1/4) Epoch 14, batch 500, loss[loss=0.2416, simple_loss=0.2995, pruned_loss=0.0672, ctc_loss=0.1232, over 19650.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2819, pruned_loss=0.05764, ctc_loss=0.1083, over 3545951.97 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:33:33,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=175301.33333333334, ans=0.125
+2024-08-26 06:33:34,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=175301.33333333334, ans=0.125
+2024-08-26 06:34:00,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=175354.66666666666, ans=0.125
+2024-08-26 06:34:32,917 INFO [train.py:1114] (1/4) Epoch 14, batch 550, loss[loss=0.2288, simple_loss=0.2915, pruned_loss=0.05943, ctc_loss=0.118, over 19266.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2819, pruned_loss=0.05734, ctc_loss=0.108, over 3608026.80 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:34:41,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=175514.66666666666, ans=0.0
+2024-08-26 06:34:44,483 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=175514.66666666666, ans=0.2
+2024-08-26 06:35:13,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=175674.66666666666, ans=0.95
+2024-08-26 06:35:20,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=175674.66666666666, ans=0.5
+2024-08-26 06:35:32,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=175674.66666666666, ans=0.025
+2024-08-26 06:35:35,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=175728.0, ans=0.0
+2024-08-26 06:35:35,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=175728.0, ans=0.125
+2024-08-26 06:35:36,408 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.729e+02 1.957e+02 2.291e+02 4.042e+02, threshold=3.913e+02, percent-clipped=2.0
+2024-08-26 06:36:05,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=175728.0, ans=0.0
+2024-08-26 06:36:15,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=175728.0, ans=0.125
+2024-08-26 06:36:18,838 INFO [train.py:1114] (1/4) Epoch 14, batch 600, loss[loss=0.2777, simple_loss=0.3209, pruned_loss=0.08546, ctc_loss=0.1587, over 19321.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2823, pruned_loss=0.05755, ctc_loss=0.1083, over 3664666.00 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:37:46,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=175834.66666666666, ans=0.1
+2024-08-26 06:38:31,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=175834.66666666666, ans=0.0
+2024-08-26 06:39:25,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=176048.0, ans=0.05
+2024-08-26 06:39:25,933 INFO [train.py:1114] (1/4) Epoch 14, batch 650, loss[loss=0.2152, simple_loss=0.2852, pruned_loss=0.05222, ctc_loss=0.1019, over 19752.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2821, pruned_loss=0.0576, ctc_loss=0.1083, over 3715941.04 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:40:12,291 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176048.0, ans=0.1
+2024-08-26 06:40:13,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=176048.0, ans=0.1
+2024-08-26 06:41:31,385 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.772e+02 2.123e+02 2.635e+02 4.354e+02, threshold=4.247e+02, percent-clipped=3.0
+2024-08-26 06:41:31,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176261.33333333334, ans=0.1
+2024-08-26 06:41:33,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=176261.33333333334, ans=0.2
+2024-08-26 06:41:37,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=176261.33333333334, ans=0.0
+2024-08-26 06:41:42,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=176261.33333333334, ans=0.125
+2024-08-26 06:41:45,013 INFO [train.py:1114] (1/4) Epoch 14, batch 700, loss[loss=0.2111, simple_loss=0.2715, pruned_loss=0.05454, ctc_loss=0.104, over 19721.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2827, pruned_loss=0.05776, ctc_loss=0.1086, over 3748643.73 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:41:56,863 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:42:07,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=176421.33333333334, ans=0.125
+2024-08-26 06:42:31,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.16 vs. limit=15.0
+2024-08-26 06:42:33,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=176474.66666666666, ans=0.125
+2024-08-26 06:42:37,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=176474.66666666666, ans=0.2
+2024-08-26 06:42:48,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=176528.0, ans=0.07
+2024-08-26 06:42:51,115 INFO [train.py:1114] (1/4) Epoch 14, batch 750, loss[loss=0.2212, simple_loss=0.285, pruned_loss=0.05666, ctc_loss=0.1103, over 19855.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2821, pruned_loss=0.0574, ctc_loss=0.1081, over 3776206.67 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:43:01,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 06:44:17,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=176741.33333333334, ans=0.2
+2024-08-26 06:44:27,361 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.803e+02 2.358e+02 3.080e+02 4.835e+02, threshold=4.715e+02, percent-clipped=7.0
+2024-08-26 06:44:41,972 INFO [train.py:1114] (1/4) Epoch 14, batch 800, loss[loss=0.1946, simple_loss=0.2561, pruned_loss=0.04803, ctc_loss=0.09247, over 19804.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2822, pruned_loss=0.05739, ctc_loss=0.1082, over 3797126.98 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:45:07,497 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.92 vs. limit=15.0
+2024-08-26 06:45:09,357 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.93 vs. limit=15.0
+2024-08-26 06:45:17,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=177008.0, ans=0.125
+2024-08-26 06:45:52,041 INFO [train.py:1114] (1/4) Epoch 14, batch 850, loss[loss=0.2126, simple_loss=0.2844, pruned_loss=0.05111, ctc_loss=0.09658, over 19658.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2821, pruned_loss=0.05773, ctc_loss=0.1088, over 3815224.61 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:46:34,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=177221.33333333334, ans=0.125
+2024-08-26 06:46:36,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177221.33333333334, ans=0.1
+2024-08-26 06:46:48,474 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.47 vs. limit=15.0
+2024-08-26 06:47:06,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=177274.66666666666, ans=0.125
+2024-08-26 06:47:11,726 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.690e+02 1.974e+02 2.351e+02 3.908e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 06:47:15,740 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.81 vs. limit=15.0
+2024-08-26 06:47:18,356 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.28 vs. limit=22.5
+2024-08-26 06:47:24,573 INFO [train.py:1114] (1/4) Epoch 14, batch 900, loss[loss=0.1926, simple_loss=0.2601, pruned_loss=0.04556, ctc_loss=0.08517, over 19433.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2822, pruned_loss=0.05783, ctc_loss=0.1088, over 3818547.13 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:47:32,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=177381.33333333334, ans=0.125
+2024-08-26 06:48:02,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=177541.33333333334, ans=0.0
+2024-08-26 06:48:10,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=177541.33333333334, ans=0.1
+2024-08-26 06:48:29,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=177594.66666666666, ans=0.125
+2024-08-26 06:48:38,037 INFO [train.py:1114] (1/4) Epoch 14, batch 950, loss[loss=0.2398, simple_loss=0.2821, pruned_loss=0.07171, ctc_loss=0.1353, over 19512.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2826, pruned_loss=0.05815, ctc_loss=0.1095, over 3821519.80 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:48:48,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=177648.0, ans=0.0
+2024-08-26 06:48:52,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-26 06:49:02,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-26 06:49:03,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-26 06:49:03,724 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-26 06:49:06,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177754.66666666666, ans=0.1
+2024-08-26 06:49:36,179 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.810e+02 2.092e+02 2.519e+02 4.035e+02, threshold=4.185e+02, percent-clipped=1.0
+2024-08-26 06:50:06,704 INFO [train.py:1114] (1/4) Epoch 14, batch 1000, loss[loss=0.1924, simple_loss=0.2641, pruned_loss=0.04389, ctc_loss=0.0825, over 19863.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2833, pruned_loss=0.05845, ctc_loss=0.11, over 3818921.51 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:50:11,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=177914.66666666666, ans=0.125
+2024-08-26 06:50:22,812 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.02 vs. limit=15.0
+2024-08-26 06:51:21,170 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=178128.0, ans=0.125
+2024-08-26 06:51:23,205 INFO [train.py:1114] (1/4) Epoch 14, batch 1050, loss[loss=0.2502, simple_loss=0.3045, pruned_loss=0.07032, ctc_loss=0.1382, over 19861.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2823, pruned_loss=0.058, ctc_loss=0.109, over 3824616.59 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:51:47,371 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:51:57,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=178288.0, ans=0.125
+2024-08-26 06:52:11,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=178341.33333333334, ans=0.0
+2024-08-26 06:52:17,047 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 1.767e+02 2.034e+02 2.568e+02 4.426e+02, threshold=4.067e+02, percent-clipped=2.0
+2024-08-26 06:52:39,168 INFO [train.py:1114] (1/4) Epoch 14, batch 1100, loss[loss=0.2238, simple_loss=0.2854, pruned_loss=0.05921, ctc_loss=0.1093, over 19567.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2818, pruned_loss=0.05758, ctc_loss=0.1084, over 3832045.48 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:52:43,174 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.09 vs. limit=12.0
+2024-08-26 06:53:02,203 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.55 vs. limit=6.0
+2024-08-26 06:53:13,317 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-26 06:53:35,391 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.27 vs. limit=15.0
+2024-08-26 06:53:36,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=178661.33333333334, ans=0.0
+2024-08-26 06:53:42,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=178661.33333333334, ans=0.125
+2024-08-26 06:53:49,717 INFO [train.py:1114] (1/4) Epoch 14, batch 1150, loss[loss=0.1772, simple_loss=0.2502, pruned_loss=0.03772, ctc_loss=0.07201, over 19594.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2818, pruned_loss=0.05789, ctc_loss=0.1091, over 3830601.15 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:53:56,102 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:54:12,574 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=178768.0, ans=0.125
+2024-08-26 06:54:24,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=178821.33333333334, ans=0.0
+2024-08-26 06:54:25,781 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.88 vs. limit=22.5
+2024-08-26 06:54:47,790 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.255e+02 1.672e+02 1.916e+02 2.259e+02 4.129e+02, threshold=3.832e+02, percent-clipped=1.0
+2024-08-26 06:54:58,198 INFO [train.py:1114] (1/4) Epoch 14, batch 1200, loss[loss=0.2426, simple_loss=0.3037, pruned_loss=0.0653, ctc_loss=0.1273, over 19831.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2831, pruned_loss=0.05843, ctc_loss=0.1101, over 3826118.36 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:54:58,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=178981.33333333334, ans=0.125
+2024-08-26 06:54:58,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=178981.33333333334, ans=0.0
+2024-08-26 06:55:08,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 06:55:17,284 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.46 vs. limit=10.0
+2024-08-26 06:55:50,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=179194.66666666666, ans=0.125
+2024-08-26 06:55:55,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=179194.66666666666, ans=0.125
+2024-08-26 06:56:27,982 INFO [train.py:1114] (1/4) Epoch 14, batch 1250, loss[loss=0.2572, simple_loss=0.3099, pruned_loss=0.07477, ctc_loss=0.1373, over 19505.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2834, pruned_loss=0.05822, ctc_loss=0.1095, over 3843744.84 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:56:41,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179248.0, ans=0.1
+2024-08-26 06:56:51,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=179301.33333333334, ans=0.125
+2024-08-26 06:57:24,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=179354.66666666666, ans=0.125
+2024-08-26 06:57:58,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=179408.0, ans=0.125
+2024-08-26 06:58:02,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=179408.0, ans=0.2
+2024-08-26 06:58:13,348 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.864e+02 2.134e+02 2.537e+02 3.723e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-08-26 06:58:22,895 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=179461.33333333334, ans=0.0
+2024-08-26 06:58:22,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=179461.33333333334, ans=0.125
+2024-08-26 06:58:31,275 INFO [train.py:1114] (1/4) Epoch 14, batch 1300, loss[loss=0.2493, simple_loss=0.3052, pruned_loss=0.07115, ctc_loss=0.1276, over 18980.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2827, pruned_loss=0.05783, ctc_loss=0.109, over 3845894.64 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:58:37,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-26 06:59:07,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=179568.0, ans=0.125
+2024-08-26 06:59:14,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=179621.33333333334, ans=0.125
+2024-08-26 07:00:05,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179674.66666666666, ans=0.1
+2024-08-26 07:00:06,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=179674.66666666666, ans=15.0
+2024-08-26 07:00:35,359 INFO [train.py:1114] (1/4) Epoch 14, batch 1350, loss[loss=0.2309, simple_loss=0.2904, pruned_loss=0.06253, ctc_loss=0.1158, over 19772.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2821, pruned_loss=0.05729, ctc_loss=0.1077, over 3856351.84 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:00:41,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=179781.33333333334, ans=0.0
+2024-08-26 07:00:57,827 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.37 vs. limit=12.0
+2024-08-26 07:02:26,032 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.690e+02 1.870e+02 2.214e+02 3.706e+02, threshold=3.740e+02, percent-clipped=0.0
+2024-08-26 07:02:47,355 INFO [train.py:1114] (1/4) Epoch 14, batch 1400, loss[loss=0.1892, simple_loss=0.2495, pruned_loss=0.04718, ctc_loss=0.08634, over 19666.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2811, pruned_loss=0.0568, ctc_loss=0.1067, over 3863430.61 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:03:00,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=180048.0, ans=0.025
+2024-08-26 07:03:26,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=180101.33333333334, ans=0.0
+2024-08-26 07:03:28,259 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.17 vs. limit=6.0
+2024-08-26 07:03:43,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=180154.66666666666, ans=0.125
+2024-08-26 07:04:04,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=180208.0, ans=0.125
+2024-08-26 07:04:25,280 INFO [train.py:1114] (1/4) Epoch 14, batch 1450, loss[loss=0.2155, simple_loss=0.2856, pruned_loss=0.05341, ctc_loss=0.09656, over 19663.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2821, pruned_loss=0.05743, ctc_loss=0.1078, over 3861905.87 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:04:47,481 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=180368.0, ans=0.125
+2024-08-26 07:05:05,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=180368.0, ans=0.1
+2024-08-26 07:05:26,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-26 07:05:36,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-26 07:05:38,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-26 07:05:38,578 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=12.08 vs. limit=15.0
+2024-08-26 07:05:41,165 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.716e+02 1.963e+02 2.339e+02 6.137e+02, threshold=3.925e+02, percent-clipped=1.0
+2024-08-26 07:05:57,997 INFO [train.py:1114] (1/4) Epoch 14, batch 1500, loss[loss=0.2451, simple_loss=0.3022, pruned_loss=0.06745, ctc_loss=0.1329, over 19586.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2828, pruned_loss=0.05764, ctc_loss=0.1081, over 3861529.31 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:06:12,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=180634.66666666666, ans=0.0
+2024-08-26 07:06:49,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180688.0, ans=0.125
+2024-08-26 07:07:05,130 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.97 vs. limit=15.0
+2024-08-26 07:07:07,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=180741.33333333334, ans=0.125
+2024-08-26 07:07:17,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180794.66666666666, ans=0.125
+2024-08-26 07:07:17,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=180794.66666666666, ans=0.0
+2024-08-26 07:07:26,451 INFO [train.py:1114] (1/4) Epoch 14, batch 1550, loss[loss=0.2363, simple_loss=0.3052, pruned_loss=0.06148, ctc_loss=0.1111, over 19621.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2832, pruned_loss=0.05811, ctc_loss=0.1093, over 3846645.55 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:07:51,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=180901.33333333334, ans=0.125
+2024-08-26 07:07:54,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=180954.66666666666, ans=0.125
+2024-08-26 07:08:11,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=181008.0, ans=0.05
+2024-08-26 07:08:20,833 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.735e+02 1.996e+02 2.323e+02 4.332e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-26 07:08:45,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=181114.66666666666, ans=0.0
+2024-08-26 07:08:46,920 INFO [train.py:1114] (1/4) Epoch 14, batch 1600, loss[loss=0.1967, simple_loss=0.2748, pruned_loss=0.04284, ctc_loss=0.08241, over 19843.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2827, pruned_loss=0.05797, ctc_loss=0.1092, over 3835823.69 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:08:50,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=181114.66666666666, ans=0.125
+2024-08-26 07:09:24,974 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.40 vs. limit=15.0
+2024-08-26 07:09:33,206 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=15.26 vs. limit=22.5
+2024-08-26 07:10:02,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=181274.66666666666, ans=0.5
+2024-08-26 07:10:22,351 INFO [train.py:1114] (1/4) Epoch 14, batch 1650, loss[loss=0.2107, simple_loss=0.2832, pruned_loss=0.05065, ctc_loss=0.09226, over 19621.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2821, pruned_loss=0.05769, ctc_loss=0.1087, over 3832200.11 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:10:32,284 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.67 vs. limit=15.0
+2024-08-26 07:10:36,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-26 07:10:51,542 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=181488.0, ans=0.0
+2024-08-26 07:10:54,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=181488.0, ans=0.125
+2024-08-26 07:10:56,912 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=181541.33333333334, ans=0.125
+2024-08-26 07:11:02,991 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.42 vs. limit=22.5
+2024-08-26 07:11:10,764 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.857e+02 2.243e+02 2.957e+02 5.258e+02, threshold=4.486e+02, percent-clipped=5.0
+2024-08-26 07:11:14,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=181594.66666666666, ans=0.2
+2024-08-26 07:11:19,632 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.47 vs. limit=22.5
+2024-08-26 07:11:28,232 INFO [train.py:1114] (1/4) Epoch 14, batch 1700, loss[loss=0.1928, simple_loss=0.2532, pruned_loss=0.04628, ctc_loss=0.0996, over 19669.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2819, pruned_loss=0.05742, ctc_loss=0.1083, over 3846575.53 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:11:39,815 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=181701.33333333334, ans=0.2
+2024-08-26 07:11:55,658 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.99 vs. limit=22.5
+2024-08-26 07:12:04,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=181808.0, ans=0.0
+2024-08-26 07:12:15,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=181861.33333333334, ans=0.2
+2024-08-26 07:12:18,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=181861.33333333334, ans=0.025
+2024-08-26 07:12:20,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=181861.33333333334, ans=0.125
+2024-08-26 07:12:22,668 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.58 vs. limit=15.0
+2024-08-26 07:12:24,386 INFO [train.py:1114] (1/4) Epoch 14, batch 1750, loss[loss=0.1948, simple_loss=0.2574, pruned_loss=0.04904, ctc_loss=0.08515, over 19673.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2819, pruned_loss=0.05742, ctc_loss=0.1082, over 3852118.27 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:12:54,616 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.68 vs. limit=10.0
+2024-08-26 07:13:26,543 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=13.90 vs. limit=15.0
+2024-08-26 07:13:28,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=182074.66666666666, ans=0.125
+2024-08-26 07:13:35,899 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.769e+02 2.123e+02 2.747e+02 4.234e+02, threshold=4.245e+02, percent-clipped=0.0
+2024-08-26 07:13:45,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=182128.0, ans=0.0
+2024-08-26 07:13:51,687 INFO [train.py:1114] (1/4) Epoch 14, batch 1800, loss[loss=0.2195, simple_loss=0.2878, pruned_loss=0.05413, ctc_loss=0.1073, over 19615.00 frames. ], tot_loss[loss=0.22, simple_loss=0.282, pruned_loss=0.05731, ctc_loss=0.1082, over 3853355.10 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:14:14,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=182288.0, ans=0.0
+2024-08-26 07:14:15,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=182288.0, ans=0.125
+2024-08-26 07:14:18,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=182288.0, ans=0.0
+2024-08-26 07:14:22,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=182288.0, ans=22.5
+2024-08-26 07:14:30,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=182341.33333333334, ans=0.025
+2024-08-26 07:14:42,327 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.32 vs. limit=10.0
+2024-08-26 07:14:49,558 INFO [train.py:1114] (1/4) Epoch 14, batch 1850, loss[loss=0.2, simple_loss=0.2773, pruned_loss=0.0443, ctc_loss=0.08498, over 19584.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2817, pruned_loss=0.05716, ctc_loss=0.1079, over 3856493.44 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:14:50,788 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:14:52,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=182448.0, ans=0.125
+2024-08-26 07:14:58,868 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:15:23,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=182608.0, ans=0.125
+2024-08-26 07:15:32,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=182608.0, ans=0.125
+2024-08-26 07:15:35,869 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.755e+02 2.000e+02 2.500e+02 5.147e+02, threshold=4.001e+02, percent-clipped=3.0
+2024-08-26 07:15:42,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=182661.33333333334, ans=0.0
+2024-08-26 07:15:52,255 INFO [train.py:1114] (1/4) Epoch 14, batch 1900, loss[loss=0.2068, simple_loss=0.2819, pruned_loss=0.04819, ctc_loss=0.08824, over 19663.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.282, pruned_loss=0.05693, ctc_loss=0.1074, over 3860795.35 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:15:59,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=182714.66666666666, ans=0.2
+2024-08-26 07:16:05,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=182768.0, ans=0.125
+2024-08-26 07:16:16,060 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.82 vs. limit=15.0
+2024-08-26 07:16:22,199 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=182821.33333333334, ans=0.0
+2024-08-26 07:16:40,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=182874.66666666666, ans=0.125
+2024-08-26 07:16:56,691 INFO [train.py:1114] (1/4) Epoch 14, batch 1950, loss[loss=0.2259, simple_loss=0.2785, pruned_loss=0.06264, ctc_loss=0.1201, over 19584.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2828, pruned_loss=0.05699, ctc_loss=0.1075, over 3869924.75 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:17:15,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=182981.33333333334, ans=0.125
+2024-08-26 07:17:15,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=182981.33333333334, ans=0.125
+2024-08-26 07:17:18,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=183034.66666666666, ans=0.125
+2024-08-26 07:17:18,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=183034.66666666666, ans=0.07
+2024-08-26 07:17:31,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=183088.0, ans=0.0
+2024-08-26 07:17:51,472 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.06 vs. limit=22.5
+2024-08-26 07:17:51,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.39 vs. limit=10.0
+2024-08-26 07:17:55,521 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.666e+02 1.941e+02 2.281e+02 4.229e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-26 07:18:04,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=183194.66666666666, ans=0.125
+2024-08-26 07:18:14,087 INFO [train.py:1114] (1/4) Epoch 14, batch 2000, loss[loss=0.1862, simple_loss=0.2495, pruned_loss=0.04544, ctc_loss=0.08006, over 19667.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2828, pruned_loss=0.05695, ctc_loss=0.1072, over 3854683.60 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 64.0
+2024-08-26 07:18:25,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=183301.33333333334, ans=0.0
+2024-08-26 07:18:55,971 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.89 vs. limit=22.5
+2024-08-26 07:19:02,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=183461.33333333334, ans=0.125
+2024-08-26 07:19:09,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff3.min_abs, batch_count=183461.33333333334, ans=0.2
+2024-08-26 07:19:11,482 INFO [train.py:1114] (1/4) Epoch 14, batch 2050, loss[loss=0.2008, simple_loss=0.2599, pruned_loss=0.05121, ctc_loss=0.09832, over 19696.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2819, pruned_loss=0.05685, ctc_loss=0.1071, over 3850423.23 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:19:11,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=183514.66666666666, ans=0.0
+2024-08-26 07:19:18,398 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=24.28 vs. limit=22.5
+2024-08-26 07:19:27,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=183568.0, ans=0.0
+2024-08-26 07:20:51,590 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.705e+02 1.994e+02 2.461e+02 3.917e+02, threshold=3.988e+02, percent-clipped=1.0
+2024-08-26 07:22:37,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=183728.0, ans=0.125
+2024-08-26 07:24:48,257 INFO [train.py:1114] (1/4) Epoch 14, batch 2100, loss[loss=0.2056, simple_loss=0.279, pruned_loss=0.04851, ctc_loss=0.08796, over 19783.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2811, pruned_loss=0.0565, ctc_loss=0.1063, over 3857559.04 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:41:47,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=183834.66666666666, ans=0.125
+2024-08-26 08:06:40,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=183994.66666666666, ans=0.04949747468305833
+2024-08-26 08:13:15,353 INFO [train.py:1114] (1/4) Epoch 14, batch 2150, loss[loss=0.2404, simple_loss=0.2951, pruned_loss=0.06717, ctc_loss=0.1283, over 19581.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2812, pruned_loss=0.05695, ctc_loss=0.107, over 3868055.93 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 08:13:16,008 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.97 vs. limit=6.0
+2024-08-26 08:15:51,762 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=184048.0, ans=0.0
+2024-08-26 08:16:34,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=184048.0, ans=0.2
+2024-08-26 08:22:50,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=184101.33333333334, ans=0.0
+2024-08-26 08:41:44,547 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184154.66666666666, ans=0.1
+2024-08-26 08:42:48,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184154.66666666666, ans=0.1
+2024-08-26 08:45:53,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=184208.0, ans=0.125
+2024-08-26 08:55:58,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=184208.0, ans=0.125
+2024-08-26 08:59:37,603 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.765e+02 2.052e+02 2.784e+02 6.261e+02, threshold=4.104e+02, percent-clipped=7.0
+2024-08-26 09:01:23,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=184261.33333333334, ans=0.125
+2024-08-26 09:03:09,759 INFO [train.py:1114] (1/4) Epoch 14, batch 2200, loss[loss=0.2232, simple_loss=0.2847, pruned_loss=0.05904, ctc_loss=0.1089, over 19577.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.281, pruned_loss=0.05691, ctc_loss=0.107, over 3867342.29 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 09:03:10,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=184314.66666666666, ans=0.1
+2024-08-26 09:04:32,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=184314.66666666666, ans=0.125
+2024-08-26 09:05:56,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=184314.66666666666, ans=0.0
+2024-08-26 09:05:56,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=184314.66666666666, ans=0.0
+2024-08-26 09:18:44,587 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.97 vs. limit=22.5
+2024-08-26 09:20:13,652 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 09:20:23,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=184474.66666666666, ans=0.07
+2024-08-26 09:21:10,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=184528.0, ans=0.1
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-2
new file mode 100644
index 0000000000000000000000000000000000000000..5aa3cacb7efd787e8e4a76be2c82f81109a8f9e5
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-2
@@ -0,0 +1,4958 @@
+2024-08-25 02:23:27,590 INFO [train.py:1182] (2/4) Training started
+2024-08-25 02:23:28,586 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-25 02:23:28,589 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-25 02:23:28,590 INFO [train.py:1212] (2/4) About to create model
+2024-08-25 02:23:29,302 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-25 02:23:29,453 INFO [train.py:1231] (2/4) Using DDP
+2024-08-25 02:23:51,117 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 02:23:51,496 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-25 02:23:53,043 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-25 02:23:53,051 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-25 02:23:53,294 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-25 02:23:53,346 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-25 02:23:53,648 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-25 02:23:53,648 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 02:27:50,712 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12243MB
+2024-08-25 02:27:52,182 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12325MB
+2024-08-25 02:28:01,912 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12325MB
+2024-08-25 02:28:03,371 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12500MB
+2024-08-25 02:28:26,154 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12500MB
+2024-08-25 02:28:27,771 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12500MB
+2024-08-25 02:29:13,747 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.33 vs. limit=7.5
+2024-08-25 02:29:16,117 INFO [train.py:1114] (2/4) Epoch 1, batch 0, loss[loss=8.848, simple_loss=7.222, pruned_loss=6.803, ctc_loss=4.724, over 19418.00 frames. ], tot_loss[loss=8.848, simple_loss=7.222, pruned_loss=6.803, ctc_loss=4.724, over 19418.00 frames. ], batch size: 48, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 02:29:16,117 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 02:29:29,432 INFO [train.py:1146] (2/4) Epoch 1, validation: loss=8.973, simple_loss=7.311, pruned_loss=6.819, ctc_loss=4.895, over 944034.00 frames.
+2024-08-25 02:29:29,433 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12500MB
+2024-08-25 02:29:30,017 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.48 vs. limit=7.5
+2024-08-25 02:29:31,313 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.68 vs. limit=7.5
+2024-08-25 02:30:09,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=53.333333333333336, ans=0.4975
+2024-08-25 02:30:23,440 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.714e+03 3.750e+03 4.817e+03 5.615e+03 6.551e+03, threshold=1.927e+04, percent-clipped=0.0
+2024-08-25 02:30:23,967 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=6.01 vs. limit=4.021333333333334
+2024-08-25 02:32:12,924 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=505.41 vs. limit=7.58
+2024-08-25 02:32:20,085 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=188.30 vs. limit=7.54
+2024-08-25 02:32:26,058 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.867e+02 1.019e+03 3.714e+03 5.063e+03 6.846e+03, threshold=1.486e+04, percent-clipped=0.0
+2024-08-25 02:32:36,312 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=389.92 vs. limit=5.053333333333334
+2024-08-25 02:33:25,259 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=80.14 vs. limit=7.56
+2024-08-25 02:33:29,576 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=25.19 vs. limit=5.053333333333334
+2024-08-25 02:33:30,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=213.33333333333334, ans=0.49
+2024-08-25 02:33:36,822 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.544e+02 7.649e+02 1.076e+03 3.731e+03 6.846e+03, threshold=4.304e+03, percent-clipped=0.0
+2024-08-25 02:33:37,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=213.33333333333334, ans=0.04933333333333333
+2024-08-25 02:33:43,998 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.75 vs. limit=3.032
+2024-08-25 02:33:49,837 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=101.42 vs. limit=5.1066666666666665
+2024-08-25 02:34:03,164 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=27.19 vs. limit=7.6
+2024-08-25 02:34:04,730 INFO [train.py:1114] (2/4) Epoch 1, batch 50, loss[loss=1.432, simple_loss=1.066, pruned_loss=1.243, ctc_loss=1.133, over 19747.00 frames. ], tot_loss[loss=3.544, simple_loss=2.928, pruned_loss=2.545, ctc_loss=1.773, over 844203.64 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 02:34:06,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=38.73 vs. limit=7.7
+2024-08-25 02:34:15,465 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=73.95 vs. limit=5.133333333333334
+2024-08-25 02:34:32,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=320.0, ans=0.2048
+2024-08-25 02:34:51,720 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=57.13 vs. limit=7.62
+2024-08-25 02:34:54,392 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=320.0, ans=0.049
+2024-08-25 02:35:16,632 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=110.62 vs. limit=7.64
+2024-08-25 02:35:16,834 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=106.58 vs. limit=7.64
+2024-08-25 02:35:35,386 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=65.93 vs. limit=7.82
+2024-08-25 02:36:59,059 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.04 vs. limit=7.82
+2024-08-25 02:37:29,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.whiten.whitening_limit, batch_count=480.0, ans=4.192
+2024-08-25 02:37:34,420 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=76.91 vs. limit=7.68
+2024-08-25 02:37:51,136 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=216.82 vs. limit=7.7
+2024-08-25 02:37:51,536 INFO [train.py:1114] (2/4) Epoch 1, batch 100, loss[loss=1.379, simple_loss=0.988, pruned_loss=1.257, ctc_loss=1.165, over 19704.00 frames. ], tot_loss[loss=2.409, simple_loss=1.913, pruned_loss=1.86, ctc_loss=1.469, over 1496570.41 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 02:37:55,729 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.639e+01 1.517e+02 3.832e+02 1.019e+03 9.054e+03, threshold=7.665e+02, percent-clipped=2.0
+2024-08-25 02:38:22,896 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=36.74 vs. limit=7.94
+2024-08-25 02:38:47,819 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=28.22 vs. limit=7.98
+2024-08-25 02:38:55,263 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=15.56 vs. limit=5.173333333333334
+2024-08-25 02:39:02,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=693.3333333333334, ans=7.76
+2024-08-25 02:39:03,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=693.3333333333334, ans=0.17400000000000002
+2024-08-25 02:39:07,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=746.6666666666666, ans=0.21480000000000005
+2024-08-25 02:39:09,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=746.6666666666666, ans=0.46499999999999997
+2024-08-25 02:39:09,535 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=13.37 vs. limit=5.1866666666666665
+2024-08-25 02:39:14,036 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=218.16 vs. limit=5.373333333333333
+2024-08-25 02:39:16,670 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=53.84 vs. limit=7.78
+2024-08-25 02:39:17,828 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=53.53 vs. limit=8.06
+2024-08-25 02:39:18,155 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=26.65 vs. limit=8.06
+2024-08-25 02:39:21,921 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=129.01 vs. limit=5.4
+2024-08-25 02:39:22,873 INFO [train.py:1114] (2/4) Epoch 1, batch 150, loss[loss=1.165, simple_loss=0.8094, pruned_loss=1.018, ctc_loss=1.076, over 19716.00 frames. ], tot_loss[loss=1.941, simple_loss=1.49, pruned_loss=1.562, ctc_loss=1.343, over 2026976.04 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 02:39:30,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=800.0, ans=0.095
+2024-08-25 02:39:30,843 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.60 vs. limit=4.32
+2024-08-25 02:39:35,029 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=93.55 vs. limit=7.8
+2024-08-25 02:39:38,801 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=32.22 vs. limit=7.82
+2024-08-25 02:39:40,306 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=22.44 vs. limit=5.426666666666667
+2024-08-25 02:39:43,163 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=63.03 vs. limit=7.82
+2024-08-25 02:39:46,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=853.3333333333334, ans=0.09466666666666668
+2024-08-25 02:39:47,327 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=265.35 vs. limit=7.82
+2024-08-25 02:39:48,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=853.3333333333334, ans=0.08080000000000001
+2024-08-25 02:39:51,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=115.17 vs. limit=5.0
+2024-08-25 02:39:53,990 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=144.76 vs. limit=7.84
+2024-08-25 02:39:55,606 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.52 vs. limit=8.18
+2024-08-25 02:39:57,057 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=156.11 vs. limit=5.453333333333333
+2024-08-25 02:40:00,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=906.6666666666666, ans=0.5
+2024-08-25 02:40:14,740 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=72.24 vs. limit=7.86
+2024-08-25 02:40:14,989 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=65.59 vs. limit=7.86
+2024-08-25 02:40:21,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=1013.3333333333334, ans=0.4525
+2024-08-25 02:40:23,514 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=36.22 vs. limit=7.88
+2024-08-25 02:40:26,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=35.85 vs. limit=7.88
+2024-08-25 02:40:28,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=1013.3333333333334, ans=0.23986666666666667
+2024-08-25 02:40:28,484 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=1013.3333333333334, ans=0.4525
+2024-08-25 02:40:30,343 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=19.69 vs. limit=7.88
+2024-08-25 02:40:32,731 INFO [train.py:1114] (2/4) Epoch 1, batch 200, loss[loss=1.252, simple_loss=0.8637, pruned_loss=1.005, ctc_loss=1.203, over 18042.00 frames. ], tot_loss[loss=1.686, simple_loss=1.261, pruned_loss=1.373, ctc_loss=1.277, over 2434889.36 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 02:40:35,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=1066.6666666666667, ans=0.45
+2024-08-25 02:40:36,956 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.587e+01 1.185e+02 1.545e+02 1.999e+02 4.229e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-25 02:40:37,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=1066.6666666666667, ans=0.45
+2024-08-25 02:40:57,402 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=1066.6666666666667, ans=0.16
+2024-08-25 02:41:06,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1120.0, ans=0.2888
+2024-08-25 02:41:16,677 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=1120.0, ans=0.158
+2024-08-25 02:41:28,640 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.90 vs. limit=8.38
+2024-08-25 02:41:29,924 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=15.29 vs. limit=4.469333333333333
+2024-08-25 02:41:31,387 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=95.50 vs. limit=7.94
+2024-08-25 02:41:36,437 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=1226.6666666666667, ans=0.07239999999999999
+2024-08-25 02:41:38,397 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.23 vs. limit=4.490666666666667
+2024-08-25 02:41:39,967 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=27.77 vs. limit=5.613333333333333
+2024-08-25 02:41:55,325 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.93 vs. limit=8.42
+2024-08-25 02:42:08,275 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=25.33 vs. limit=5.64
+2024-08-25 02:42:09,605 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=198.28 vs. limit=7.98
+2024-08-25 02:42:11,847 INFO [train.py:1114] (2/4) Epoch 1, batch 250, loss[loss=1.322, simple_loss=0.8959, pruned_loss=1.047, ctc_loss=1.297, over 19449.00 frames. ], tot_loss[loss=1.535, simple_loss=1.123, pruned_loss=1.249, ctc_loss=1.244, over 2755846.84 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 02:42:15,214 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=87.81 vs. limit=8.0
+2024-08-25 02:42:21,067 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=18.57 vs. limit=4.533333333333333
+2024-08-25 02:42:22,699 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=122.35 vs. limit=8.0
+2024-08-25 02:42:24,248 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=15.14 vs. limit=5.333333333333333
+2024-08-25 02:42:30,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=1386.6666666666667, ans=0.435
+2024-08-25 02:42:30,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=1386.6666666666667, ans=0.435
+2024-08-25 02:42:34,068 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.07 vs. limit=8.54
+2024-08-25 02:42:35,228 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=10.20 vs. limit=5.346666666666667
+2024-08-25 02:42:44,869 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=19.78 vs. limit=8.04
+2024-08-25 02:42:51,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1440.0, ans=0.28559999999999997
+2024-08-25 02:42:57,473 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=41.74 vs. limit=4.298666666666667
+2024-08-25 02:43:01,004 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.36 vs. limit=3.224
+2024-08-25 02:43:06,796 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.76 vs. limit=8.620000000000001
+2024-08-25 02:43:08,149 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=17.69 vs. limit=8.06
+2024-08-25 02:43:10,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=1546.6666666666667, ans=5.966666666666667
+2024-08-25 02:43:17,592 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=14.72 vs. limit=5.386666666666667
+2024-08-25 02:43:19,876 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=1546.6666666666667, ans=0.163
+2024-08-25 02:43:21,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1546.6666666666667, ans=0.2845333333333333
+2024-08-25 02:43:23,928 INFO [train.py:1114] (2/4) Epoch 1, batch 300, loss[loss=1.247, simple_loss=0.8344, pruned_loss=0.9714, ctc_loss=1.232, over 19531.00 frames. ], tot_loss[loss=1.435, simple_loss=1.029, pruned_loss=1.159, ctc_loss=1.221, over 2999758.88 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 02:43:24,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1600.0, ans=0.284
+2024-08-25 02:43:27,971 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 8.516e+01 1.281e+02 1.784e+02 2.457e+02 1.092e+03, threshold=3.568e+02, percent-clipped=12.0
+2024-08-25 02:43:29,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=1600.0, ans=0.22969848480983485
+2024-08-25 02:43:30,042 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=113.94 vs. limit=8.1
+2024-08-25 02:43:30,363 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=153.50 vs. limit=8.1
+2024-08-25 02:43:34,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=1600.0, ans=0.23399999999999999
+2024-08-25 02:43:35,223 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=97.53 vs. limit=8.1
+2024-08-25 02:43:36,777 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=84.58 vs. limit=8.12
+2024-08-25 02:43:42,003 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=1653.3333333333333, ans=0.08966666666666667
+2024-08-25 02:43:44,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1653.3333333333333, ans=0.28346666666666664
+2024-08-25 02:43:56,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=1706.6666666666667, ans=0.2829333333333333
+2024-08-25 02:43:57,766 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=57.52 vs. limit=8.14
+2024-08-25 02:44:03,311 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=41.99 vs. limit=8.78
+2024-08-25 02:44:04,818 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=15.06 vs. limit=5.8533333333333335
+2024-08-25 02:44:10,742 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=77.41 vs. limit=8.16
+2024-08-25 02:44:14,062 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.17 vs. limit=8.82
+2024-08-25 02:44:16,500 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=299.68 vs. limit=5.88
+2024-08-25 02:44:24,767 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=50.24 vs. limit=8.18
+2024-08-25 02:44:31,558 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.52 vs. limit=8.86
+2024-08-25 02:44:35,316 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.51 vs. limit=8.86
+2024-08-25 02:44:36,637 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=18.94 vs. limit=5.453333333333333
+2024-08-25 02:44:38,839 INFO [train.py:1114] (2/4) Epoch 1, batch 350, loss[loss=1.11, simple_loss=0.7368, pruned_loss=0.8626, ctc_loss=1.077, over 19758.00 frames. ], tot_loss[loss=1.368, simple_loss=0.9649, pruned_loss=1.096, ctc_loss=1.207, over 3190055.67 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 02:44:39,389 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=109.08 vs. limit=8.2
+2024-08-25 02:44:42,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=1866.6666666666667, ans=0.13
+2024-08-25 02:44:44,117 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=46.55 vs. limit=8.9
+2024-08-25 02:44:44,332 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=52.54 vs. limit=8.2
+2024-08-25 02:44:45,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=1866.6666666666667, ans=0.4125
+2024-08-25 02:44:49,971 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.34 vs. limit=8.9
+2024-08-25 02:45:01,976 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=111.48 vs. limit=8.22
+2024-08-25 02:45:14,317 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=13.66 vs. limit=5.986666666666666
+2024-08-25 02:45:22,949 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.41 vs. limit=8.24
+2024-08-25 02:45:23,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 02:45:31,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 02:46:55,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=2080.0, ans=0.23120000000000002
+2024-08-25 02:47:05,531 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=221.91 vs. limit=8.28
+2024-08-25 02:47:09,660 INFO [train.py:1114] (2/4) Epoch 1, batch 400, loss[loss=1.166, simple_loss=0.7666, pruned_loss=0.8862, ctc_loss=1.137, over 19484.00 frames. ], tot_loss[loss=1.319, simple_loss=0.9161, pruned_loss=1.046, ctc_loss=1.193, over 3342861.84 frames. ], batch size: 54, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 02:47:13,858 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 9.873e+01 1.501e+02 1.913e+02 2.464e+02 6.763e+02, threshold=3.826e+02, percent-clipped=7.0
+2024-08-25 02:47:17,256 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.18 vs. limit=5.533333333333333
+2024-08-25 02:47:17,339 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=7.27 vs. limit=4.8533333333333335
+2024-08-25 02:47:20,070 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.11 vs. limit=5.533333333333333
+2024-08-25 02:47:22,741 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.31 vs. limit=5.546666666666667
+2024-08-25 02:47:29,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=2186.6666666666665, ans=0.8234666666666667
+2024-08-25 02:47:29,780 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=42.14 vs. limit=8.32
+2024-08-25 02:47:42,143 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=26.35 vs. limit=6.12
+2024-08-25 02:47:50,298 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=38.16 vs. limit=8.34
+2024-08-25 02:47:54,471 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.25 vs. limit=9.22
+2024-08-25 02:48:00,795 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=76.96 vs. limit=8.36
+2024-08-25 02:48:02,481 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.67 vs. limit=9.22
+2024-08-25 02:48:08,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=33.17 vs. limit=8.38
+2024-08-25 02:48:10,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=2346.6666666666665, ans=0.39
+2024-08-25 02:48:13,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=2346.6666666666665, ans=0.0472
+2024-08-25 02:48:13,661 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.88 vs. limit=8.38
+2024-08-25 02:48:19,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=2400.0, ans=0.23600000000000002
+2024-08-25 02:48:21,766 INFO [train.py:1114] (2/4) Epoch 1, batch 450, loss[loss=1.192, simple_loss=0.7778, pruned_loss=0.8948, ctc_loss=1.154, over 19621.00 frames. ], tot_loss[loss=1.285, simple_loss=0.8812, pruned_loss=1.008, ctc_loss=1.183, over 3451125.70 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 02:48:30,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=2400.0, ans=0.11
+2024-08-25 02:48:35,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=2453.3333333333335, ans=0.385
+2024-08-25 02:48:37,975 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=2453.3333333333335, ans=0.385
+2024-08-25 02:48:46,800 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=26.02 vs. limit=8.42
+2024-08-25 02:48:50,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=2506.6666666666665, ans=0.2749333333333333
+2024-08-25 02:49:07,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=2560.0, ans=0.0424
+2024-08-25 02:49:10,172 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=2560.0, ans=0.38
+2024-08-25 02:49:26,635 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=11.60 vs. limit=8.48
+2024-08-25 02:49:28,670 INFO [train.py:1114] (2/4) Epoch 1, batch 500, loss[loss=1.163, simple_loss=0.7682, pruned_loss=0.8285, ctc_loss=1.106, over 19683.00 frames. ], tot_loss[loss=1.25, simple_loss=0.8493, pruned_loss=0.9662, ctc_loss=1.161, over 3546755.56 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:49:31,898 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=18.49 vs. limit=8.5
+2024-08-25 02:49:32,574 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.834e+02 2.411e+02 2.968e+02 6.409e+02, threshold=4.822e+02, percent-clipped=7.0
+2024-08-25 02:49:34,864 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.99 vs. limit=5.666666666666667
+2024-08-25 02:49:40,298 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=31.48 vs. limit=8.5
+2024-08-25 02:49:41,490 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=22.87 vs. limit=8.52
+2024-08-25 02:49:43,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=2720.0, ans=0.2728
+2024-08-25 02:49:59,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=2773.3333333333335, ans=9.58
+2024-08-25 02:50:02,105 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=20.11 vs. limit=8.54
+2024-08-25 02:50:04,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=2773.3333333333335, ans=0.5
+2024-08-25 02:50:28,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=2880.0, ans=0.082
+2024-08-25 02:50:33,292 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.39 vs. limit=5.72
+2024-08-25 02:50:39,210 INFO [train.py:1114] (2/4) Epoch 1, batch 550, loss[loss=1.086, simple_loss=0.7335, pruned_loss=0.703, ctc_loss=1.046, over 19210.00 frames. ], tot_loss[loss=1.22, simple_loss=0.826, pruned_loss=0.9176, ctc_loss=1.139, over 3608352.33 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:50:50,552 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=2.621e+01
+2024-08-25 02:50:54,645 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.25 vs. limit=8.6
+2024-08-25 02:51:06,011 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=19.69 vs. limit=8.620000000000001
+2024-08-25 02:51:07,617 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.31 vs. limit=5.746666666666666
+2024-08-25 02:51:10,562 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.29 vs. limit=8.620000000000001
+2024-08-25 02:51:11,951 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=18.89 vs. limit=8.64
+2024-08-25 02:51:12,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=3040.0, ans=0.35750000000000004
+2024-08-25 02:51:14,146 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.00 vs. limit=6.52
+2024-08-25 02:51:14,304 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=19.93 vs. limit=8.64
+2024-08-25 02:51:19,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=3040.0, ans=0.35750000000000004
+2024-08-25 02:51:22,270 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.00 vs. limit=9.78
+2024-08-25 02:51:31,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3093.3333333333335, ans=0.355
+2024-08-25 02:51:31,440 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.15 vs. limit=8.66
+2024-08-25 02:51:36,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=3093.3333333333335, ans=0.355
+2024-08-25 02:51:38,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=3093.3333333333335, ans=0.09899494936611666
+2024-08-25 02:51:55,119 INFO [train.py:1114] (2/4) Epoch 1, batch 600, loss[loss=1.01, simple_loss=0.6983, pruned_loss=0.6066, ctc_loss=0.9588, over 19434.00 frames. ], tot_loss[loss=1.173, simple_loss=0.7964, pruned_loss=0.8504, ctc_loss=1.101, over 3666673.28 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:51:59,175 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.677e+02 3.553e+02 4.456e+02 9.241e+02, threshold=7.106e+02, percent-clipped=18.0
+2024-08-25 02:52:10,775 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.97 vs. limit=6.626666666666667
+2024-08-25 02:52:15,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=3253.3333333333335, ans=0.03983333333333333
+2024-08-25 02:52:16,123 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.11 vs. limit=5.8133333333333335
+2024-08-25 02:52:39,341 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=3360.0, ans=0.7824
+2024-08-25 02:52:59,430 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.61 vs. limit=8.78
+2024-08-25 02:53:00,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=3466.6666666666665, ans=0.7786666666666667
+2024-08-25 02:53:01,038 INFO [train.py:1114] (2/4) Epoch 1, batch 650, loss[loss=0.8771, simple_loss=0.6167, pruned_loss=0.4882, ctc_loss=0.8447, over 19734.00 frames. ], tot_loss[loss=1.112, simple_loss=0.7583, pruned_loss=0.7744, ctc_loss=1.048, over 3716831.64 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 4.0
+2024-08-25 02:53:09,643 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.67 vs. limit=10.1
+2024-08-25 02:53:22,524 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=11.51 vs. limit=10.14
+2024-08-25 02:53:25,037 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.00 vs. limit=8.82
+2024-08-25 02:53:27,680 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.38 vs. limit=10.18
+2024-08-25 02:53:41,650 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=3626.6666666666665, ans=0.06399999999999997
+2024-08-25 02:53:42,089 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.67 vs. limit=8.86
+2024-08-25 02:53:48,694 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.81 vs. limit=8.86
+2024-08-25 02:53:52,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 02:53:52,916 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.19 vs. limit=8.879999999999999
+2024-08-25 02:54:00,807 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.63 vs. limit=5.4719999999999995
+2024-08-25 02:54:05,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten.whitening_limit, batch_count=3680.0, ans=10.26
+2024-08-25 02:54:09,032 INFO [train.py:1114] (2/4) Epoch 1, batch 700, loss[loss=0.7891, simple_loss=0.5561, pruned_loss=0.4348, ctc_loss=0.7494, over 19719.00 frames. ], tot_loss[loss=1.05, simple_loss=0.7214, pruned_loss=0.7014, ctc_loss=0.9923, over 3748642.13 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:54:14,196 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.975e+02 3.878e+02 5.385e+02 1.936e+03, threshold=7.756e+02, percent-clipped=10.0
+2024-08-25 02:54:19,083 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.31 vs. limit=8.9
+2024-08-25 02:54:24,192 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.32 vs. limit=5.946666666666666
+2024-08-25 02:54:24,199 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.54 vs. limit=8.92
+2024-08-25 02:54:24,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=3786.6666666666665, ans=0.037000000000000005
+2024-08-25 02:54:45,282 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=8.206e+00
+2024-08-25 02:54:45,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=3840.0, ans=0.32
+2024-08-25 02:54:58,652 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.59 vs. limit=10.42
+2024-08-25 02:55:16,797 INFO [train.py:1114] (2/4) Epoch 1, batch 750, loss[loss=0.7646, simple_loss=0.556, pruned_loss=0.3858, ctc_loss=0.7202, over 19497.00 frames. ], tot_loss[loss=0.9866, simple_loss=0.6843, pruned_loss=0.6324, ctc_loss=0.932, over 3774549.71 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:55:24,821 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=4000.0, ans=0.04999999999999999
+2024-08-25 02:55:26,354 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=5.49 vs. limit=5.6
+2024-08-25 02:55:26,684 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.32 vs. limit=7.0
+2024-08-25 02:55:29,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn1.whiten.whitening_limit, batch_count=4000.0, ans=10.5
+2024-08-25 02:55:30,044 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.06 vs. limit=9.0
+2024-08-25 02:55:39,352 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.33 vs. limit=6.013333333333334
+2024-08-25 02:55:44,240 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.81 vs. limit=9.040000000000001
+2024-08-25 02:55:54,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=4106.666666666667, ans=0.049555555555555554
+2024-08-25 02:56:01,323 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.05 vs. limit=6.04
+2024-08-25 02:56:10,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=4213.333333333333, ans=0.7525333333333334
+2024-08-25 02:56:10,570 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.67 vs. limit=7.1066666666666665
+2024-08-25 02:56:24,854 INFO [train.py:1114] (2/4) Epoch 1, batch 800, loss[loss=0.6859, simple_loss=0.5084, pruned_loss=0.3403, ctc_loss=0.6129, over 19408.00 frames. ], tot_loss[loss=0.9291, simple_loss=0.6517, pruned_loss=0.5714, ctc_loss=0.8731, over 3794397.83 frames. ], batch size: 48, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:56:26,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=4266.666666666667, ans=0.3
+2024-08-25 02:56:29,877 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.945e+02 3.956e+02 5.210e+02 9.107e+02, threshold=7.913e+02, percent-clipped=4.0
+2024-08-25 02:56:35,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=4266.666666666667, ans=0.3
+2024-08-25 02:56:36,740 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.35 vs. limit=10.7
+2024-08-25 02:57:01,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=4373.333333333333, ans=0.025
+2024-08-25 02:57:04,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=4426.666666666667, ans=0.04822222222222222
+2024-08-25 02:57:09,300 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.80 vs. limit=10.82
+2024-08-25 02:57:15,856 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.32 vs. limit=6.1066666666666665
+2024-08-25 02:57:16,981 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.30 vs. limit=6.12
+2024-08-25 02:57:17,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=4480.0, ans=0.035
+2024-08-25 02:57:23,407 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.37 vs. limit=9.18
+2024-08-25 02:57:30,599 INFO [train.py:1114] (2/4) Epoch 1, batch 850, loss[loss=0.7228, simple_loss=0.5409, pruned_loss=0.3425, ctc_loss=0.6577, over 19636.00 frames. ], tot_loss[loss=0.8749, simple_loss=0.6212, pruned_loss=0.517, ctc_loss=0.8154, over 3813762.31 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:57:31,416 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.96 vs. limit=5.8133333333333335
+2024-08-25 02:57:49,851 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=4586.666666666667, ans=0.28500000000000003
+2024-08-25 02:57:58,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=4640.0, ans=0.009860869565217392
+2024-08-25 02:58:11,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=4693.333333333333, ans=0.28
+2024-08-25 02:58:14,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=4693.333333333333, ans=0.025
+2024-08-25 02:58:18,593 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=6.30 vs. limit=5.8773333333333335
+2024-08-25 02:58:42,851 INFO [train.py:1114] (2/4) Epoch 1, batch 900, loss[loss=0.5936, simple_loss=0.4543, pruned_loss=0.2744, ctc_loss=0.5146, over 19400.00 frames. ], tot_loss[loss=0.8294, simple_loss=0.596, pruned_loss=0.4716, ctc_loss=0.7657, over 3818394.56 frames. ], batch size: 48, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 02:58:48,899 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.783e+02 3.682e+02 4.971e+02 1.764e+03, threshold=7.364e+02, percent-clipped=6.0
+2024-08-25 02:58:53,247 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4800.0, ans=0.252
+2024-08-25 02:59:14,486 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=4906.666666666667, ans=0.04622222222222222
+2024-08-25 02:59:28,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=4960.0, ans=0.0345
+2024-08-25 02:59:38,823 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=5013.333333333333, ans=0.265
+2024-08-25 02:59:41,808 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.33 vs. limit=11.26
+2024-08-25 02:59:44,767 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.78 vs. limit=11.26
+2024-08-25 02:59:50,574 INFO [train.py:1114] (2/4) Epoch 1, batch 950, loss[loss=0.6082, simple_loss=0.4677, pruned_loss=0.2764, ctc_loss=0.5285, over 19500.00 frames. ], tot_loss[loss=0.7897, simple_loss=0.5745, pruned_loss=0.4334, ctc_loss=0.7208, over 3819361.22 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:00:06,525 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.98 vs. limit=11.34
+2024-08-25 03:00:08,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=5120.0, ans=0.009756521739130435
+2024-08-25 03:00:12,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=5120.0, ans=0.035
+2024-08-25 03:00:15,126 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=5173.333333333333, ans=0.045111111111111116
+2024-08-25 03:00:25,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=5173.333333333333, ans=0.009744927536231884
+2024-08-25 03:00:38,493 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.33 vs. limit=6.306666666666667
+2024-08-25 03:00:40,887 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.85 vs. limit=6.112
+2024-08-25 03:00:54,648 INFO [train.py:1114] (2/4) Epoch 1, batch 1000, loss[loss=0.5264, simple_loss=0.4264, pruned_loss=0.2175, ctc_loss=0.4347, over 19850.00 frames. ], tot_loss[loss=0.7523, simple_loss=0.5544, pruned_loss=0.3989, ctc_loss=0.6779, over 3815620.21 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:00:55,540 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.34 vs. limit=6.333333333333333
+2024-08-25 03:00:55,662 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.28 vs. limit=6.133333333333333
+2024-08-25 03:00:56,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=5333.333333333333, ans=0.025
+2024-08-25 03:01:00,962 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.21 vs. limit=3.8
+2024-08-25 03:01:01,317 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.847e+02 3.463e+02 4.611e+02 9.717e+02, threshold=6.926e+02, percent-clipped=4.0
+2024-08-25 03:01:05,959 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.05 vs. limit=11.5
+2024-08-25 03:01:15,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=5386.666666666667, ans=0.0
+2024-08-25 03:01:37,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=5493.333333333333, ans=0.2425
+2024-08-25 03:01:47,197 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.44 vs. limit=11.620000000000001
+2024-08-25 03:02:07,676 INFO [train.py:1114] (2/4) Epoch 1, batch 1050, loss[loss=0.6015, simple_loss=0.4784, pruned_loss=0.258, ctc_loss=0.5053, over 19854.00 frames. ], tot_loss[loss=0.7169, simple_loss=0.5351, pruned_loss=0.3678, ctc_loss=0.638, over 3822603.09 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:02:18,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=5653.333333333333, ans=0.235
+2024-08-25 03:02:21,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=5653.333333333333, ans=0.235
+2024-08-25 03:02:38,446 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.71 vs. limit=9.64
+2024-08-25 03:02:51,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=5760.0, ans=0.22999999999999998
+2024-08-25 03:02:52,086 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.09 vs. limit=9.66
+2024-08-25 03:03:01,103 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.33 vs. limit=7.906666666666666
+2024-08-25 03:03:08,937 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=5813.333333333333, ans=0.04244444444444445
+2024-08-25 03:03:13,779 INFO [train.py:1114] (2/4) Epoch 1, batch 1100, loss[loss=0.5414, simple_loss=0.4379, pruned_loss=0.2296, ctc_loss=0.4363, over 19580.00 frames. ], tot_loss[loss=0.6851, simple_loss=0.5181, pruned_loss=0.3408, ctc_loss=0.6011, over 3829879.19 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:03:15,156 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=5866.666666666667, ans=0.22499999999999998
+2024-08-25 03:03:20,126 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.626e+02 3.754e+02 4.559e+02 6.965e+02, threshold=7.509e+02, percent-clipped=1.0
+2024-08-25 03:03:26,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=5920.0, ans=0.2408
+2024-08-25 03:03:36,113 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.10 vs. limit=9.72
+2024-08-25 03:03:40,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=5973.333333333333, ans=0.24026666666666666
+2024-08-25 03:03:56,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=6026.666666666667, ans=0.6890666666666667
+2024-08-25 03:03:57,558 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=6026.666666666667, ans=0.23973333333333333
+2024-08-25 03:03:57,582 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=1.237e-01
+2024-08-25 03:03:57,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=6026.666666666667, ans=0.21750000000000003
+2024-08-25 03:04:18,513 INFO [train.py:1114] (2/4) Epoch 1, batch 1150, loss[loss=0.5575, simple_loss=0.4488, pruned_loss=0.2402, ctc_loss=0.4469, over 19591.00 frames. ], tot_loss[loss=0.6612, simple_loss=0.5057, pruned_loss=0.3204, ctc_loss=0.5726, over 3828919.76 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:04:42,409 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.58 vs. limit=8.093333333333334
+2024-08-25 03:04:44,651 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=6240.0, ans=0.20750000000000002
+2024-08-25 03:04:55,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=6240.0, ans=0.20750000000000002
+2024-08-25 03:04:55,562 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.88 vs. limit=3.936
+2024-08-25 03:05:01,257 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.57 vs. limit=8.146666666666667
+2024-08-25 03:05:19,575 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=3.128e-02
+2024-08-25 03:05:24,510 INFO [train.py:1114] (2/4) Epoch 1, batch 1200, loss[loss=0.5246, simple_loss=0.4423, pruned_loss=0.205, ctc_loss=0.4192, over 19843.00 frames. ], tot_loss[loss=0.6407, simple_loss=0.4955, pruned_loss=0.3029, ctc_loss=0.5477, over 3823874.27 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 03:05:30,705 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.702e+02 3.344e+02 4.028e+02 1.038e+03, threshold=6.687e+02, percent-clipped=4.0
+2024-08-25 03:05:32,175 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:05:44,807 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.68 vs. limit=12.34
+2024-08-25 03:05:47,186 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=27.57 vs. limit=12.34
+2024-08-25 03:06:08,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=6560.0, ans=0.6704
+2024-08-25 03:06:12,698 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=6560.0, ans=0.03933333333333334
+2024-08-25 03:06:17,825 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.70 vs. limit=9.98
+2024-08-25 03:06:33,173 INFO [train.py:1114] (2/4) Epoch 1, batch 1250, loss[loss=0.5623, simple_loss=0.4612, pruned_loss=0.2331, ctc_loss=0.4539, over 19546.00 frames. ], tot_loss[loss=0.6181, simple_loss=0.4844, pruned_loss=0.2848, ctc_loss=0.5213, over 3841753.42 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:06:37,168 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=6666.666666666667, ans=0.6666666666666667
+2024-08-25 03:06:46,484 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=6720.0, ans=0.185
+2024-08-25 03:06:49,393 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.99 vs. limit=12.54
+2024-08-25 03:07:02,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=6773.333333333333, ans=0.1825
+2024-08-25 03:07:04,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=6773.333333333333, ans=0.07
+2024-08-25 03:07:06,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=6773.333333333333, ans=0.009397101449275363
+2024-08-25 03:07:08,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=6773.333333333333, ans=0.03844444444444445
+2024-08-25 03:07:29,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=6880.0, ans=9.3
+2024-08-25 03:07:53,439 INFO [train.py:1114] (2/4) Epoch 1, batch 1300, loss[loss=0.5511, simple_loss=0.4571, pruned_loss=0.2284, ctc_loss=0.4305, over 18845.00 frames. ], tot_loss[loss=0.5983, simple_loss=0.4742, pruned_loss=0.2699, ctc_loss=0.4984, over 3845178.19 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:07:54,846 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=6933.333333333333, ans=0.025
+2024-08-25 03:07:58,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=6933.333333333333, ans=0.07
+2024-08-25 03:08:00,990 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.595e+02 3.171e+02 4.007e+02 5.829e+02, threshold=6.342e+02, percent-clipped=0.0
+2024-08-25 03:08:26,030 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.52 vs. limit=8.52
+2024-08-25 03:08:27,249 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.00 vs. limit=8.52
+2024-08-25 03:08:44,446 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.05 vs. limit=10.18
+2024-08-25 03:09:00,194 INFO [train.py:1114] (2/4) Epoch 1, batch 1350, loss[loss=0.5197, simple_loss=0.4369, pruned_loss=0.2118, ctc_loss=0.4021, over 19763.00 frames. ], tot_loss[loss=0.582, simple_loss=0.4663, pruned_loss=0.2576, ctc_loss=0.479, over 3856291.44 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:10:09,974 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=7253.333333333333, ans=0.17746666666666666
+2024-08-25 03:10:14,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=7253.333333333333, ans=0.22746666666666665
+2024-08-25 03:10:22,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=7306.666666666667, ans=0.15749999999999997
+2024-08-25 03:12:10,365 INFO [train.py:1114] (2/4) Epoch 1, batch 1400, loss[loss=0.4857, simple_loss=0.4067, pruned_loss=0.2009, ctc_loss=0.3733, over 19658.00 frames. ], tot_loss[loss=0.5666, simple_loss=0.4585, pruned_loss=0.2465, ctc_loss=0.4617, over 3863215.76 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:12:32,368 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.490e+02 2.974e+02 4.034e+02 6.918e+02, threshold=5.948e+02, percent-clipped=1.0
+2024-08-25 03:13:07,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=7626.666666666667, ans=0.14250000000000002
+2024-08-25 03:13:09,549 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.79 vs. limit=13.219999999999999
+2024-08-25 03:13:28,420 INFO [train.py:1114] (2/4) Epoch 1, batch 1450, loss[loss=0.5191, simple_loss=0.4509, pruned_loss=0.2067, ctc_loss=0.3806, over 19655.00 frames. ], tot_loss[loss=0.5548, simple_loss=0.4532, pruned_loss=0.238, ctc_loss=0.4476, over 3861439.54 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:13:33,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=7733.333333333333, ans=0.034444444444444444
+2024-08-25 03:13:47,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=7786.666666666667, ans=0.22213333333333332
+2024-08-25 03:13:56,443 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.66 vs. limit=4.176
+2024-08-25 03:14:03,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=7840.0, ans=0.1325
+2024-08-25 03:14:07,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=7893.333333333333, ans=0.03377777777777778
+2024-08-25 03:14:07,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=7893.333333333333, ans=0.13
+2024-08-25 03:14:15,413 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.09 vs. limit=4.184
+2024-08-25 03:14:17,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=7946.666666666667, ans=0.22053333333333333
+2024-08-25 03:14:20,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=7946.666666666667, ans=0.1275
+2024-08-25 03:14:22,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=7946.666666666667, ans=0.22053333333333333
+2024-08-25 03:14:29,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=8000.0, ans=0.03333333333333334
+2024-08-25 03:14:30,727 INFO [train.py:1114] (2/4) Epoch 1, batch 1500, loss[loss=0.5215, simple_loss=0.4483, pruned_loss=0.2095, ctc_loss=0.3949, over 19592.00 frames. ], tot_loss[loss=0.543, simple_loss=0.4479, pruned_loss=0.2297, ctc_loss=0.434, over 3861829.57 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:14:35,402 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=8000.0, ans=0.125
+2024-08-25 03:14:38,510 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.576e+02 3.382e+02 4.091e+02 7.597e+02, threshold=6.763e+02, percent-clipped=6.0
+2024-08-25 03:14:42,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=8053.333333333333, ans=0.125
+2024-08-25 03:14:48,096 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.02 vs. limit=10.52
+2024-08-25 03:15:16,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=8160.0, ans=0.125
+2024-08-25 03:15:19,138 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.81 vs. limit=10.56
+2024-08-25 03:15:19,923 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=8160.0, ans=0.07
+2024-08-25 03:15:33,296 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=8213.333333333334, ans=0.21786666666666665
+2024-08-25 03:15:40,033 INFO [train.py:1114] (2/4) Epoch 1, batch 1550, loss[loss=0.4951, simple_loss=0.429, pruned_loss=0.1946, ctc_loss=0.3858, over 19606.00 frames. ], tot_loss[loss=0.5316, simple_loss=0.4424, pruned_loss=0.2224, ctc_loss=0.4213, over 3847991.69 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 8.0
+2024-08-25 03:15:41,868 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.77 vs. limit=9.133333333333333
+2024-08-25 03:15:43,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=8266.666666666666, ans=0.6106666666666667
+2024-08-25 03:15:43,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=8266.666666666666, ans=0.07
+2024-08-25 03:15:51,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8320.0, ans=0.2168
+2024-08-25 03:16:02,925 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.37 vs. limit=9.16
+2024-08-25 03:16:06,786 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.78 vs. limit=7.093333333333334
+2024-08-25 03:16:28,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8426.666666666666, ans=0.21573333333333333
+2024-08-25 03:16:29,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=8426.666666666666, ans=0.125
+2024-08-25 03:16:41,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=8480.0, ans=0.025
+2024-08-25 03:16:49,347 INFO [train.py:1114] (2/4) Epoch 1, batch 1600, loss[loss=0.4965, simple_loss=0.4421, pruned_loss=0.1915, ctc_loss=0.3713, over 19855.00 frames. ], tot_loss[loss=0.5225, simple_loss=0.4383, pruned_loss=0.2166, ctc_loss=0.4106, over 3835807.66 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:16:53,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=8533.333333333334, ans=0.125
+2024-08-25 03:16:59,543 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.604e+02 3.125e+02 4.170e+02 2.617e+03, threshold=6.251e+02, percent-clipped=7.0
+2024-08-25 03:17:01,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=8533.333333333334, ans=0.125
+2024-08-25 03:17:01,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=8533.333333333334, ans=0.03111111111111111
+2024-08-25 03:17:01,629 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.66 vs. limit=7.133333333333334
+2024-08-25 03:17:02,860 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.19 vs. limit=7.133333333333334
+2024-08-25 03:17:04,103 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=14.95 vs. limit=13.94
+2024-08-25 03:17:10,132 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=14.49 vs. limit=13.94
+2024-08-25 03:17:24,109 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.87 vs. limit=7.4559999999999995
+2024-08-25 03:17:30,703 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.66 vs. limit=10.76
+2024-08-25 03:17:32,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=8693.333333333334, ans=0.125
+2024-08-25 03:19:08,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=8800.0, ans=0.125
+2024-08-25 03:19:09,317 INFO [train.py:1114] (2/4) Epoch 1, batch 1650, loss[loss=0.5024, simple_loss=0.4362, pruned_loss=0.2012, ctc_loss=0.3833, over 19650.00 frames. ], tot_loss[loss=0.5136, simple_loss=0.4344, pruned_loss=0.211, ctc_loss=0.4012, over 3832121.91 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:19:26,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=8853.333333333334, ans=0.025
+2024-08-25 03:19:47,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=8960.0, ans=0.008921739130434782
+2024-08-25 03:19:56,266 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.50 vs. limit=4.344
+2024-08-25 03:19:59,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=9013.333333333334, ans=0.20986666666666665
+2024-08-25 03:20:12,475 INFO [train.py:1114] (2/4) Epoch 1, batch 1700, loss[loss=0.4087, simple_loss=0.3647, pruned_loss=0.1587, ctc_loss=0.3085, over 19698.00 frames. ], tot_loss[loss=0.5038, simple_loss=0.4301, pruned_loss=0.2051, ctc_loss=0.391, over 3846604.51 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:20:19,823 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.828e+02 2.395e+02 2.888e+02 3.702e+02 8.491e+02, threshold=5.776e+02, percent-clipped=2.0
+2024-08-25 03:22:15,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 03:22:16,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=9226.666666666666, ans=0.20773333333333333
+2024-08-25 03:22:31,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=9280.0, ans=0.125
+2024-08-25 03:22:33,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=9333.333333333334, ans=0.15666666666666665
+2024-08-25 03:22:33,848 INFO [train.py:1114] (2/4) Epoch 1, batch 1750, loss[loss=0.4041, simple_loss=0.3639, pruned_loss=0.157, ctc_loss=0.3003, over 19669.00 frames. ], tot_loss[loss=0.4944, simple_loss=0.426, pruned_loss=0.1996, ctc_loss=0.3811, over 3851010.48 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:22:37,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=9333.333333333334, ans=0.5733333333333334
+2024-08-25 03:22:46,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=9386.666666666666, ans=0.025
+2024-08-25 03:23:10,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=9493.333333333334, ans=0.5677333333333334
+2024-08-25 03:23:12,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=9493.333333333334, ans=0.008805797101449275
+2024-08-25 03:23:31,432 INFO [train.py:1114] (2/4) Epoch 1, batch 1800, loss[loss=0.4915, simple_loss=0.4403, pruned_loss=0.1923, ctc_loss=0.3721, over 19630.00 frames. ], tot_loss[loss=0.4892, simple_loss=0.4244, pruned_loss=0.1965, ctc_loss=0.3753, over 3851386.34 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 8.0
+2024-08-25 03:23:39,410 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.646e+02 3.473e+02 4.220e+02 8.344e+02, threshold=6.945e+02, percent-clipped=3.0
+2024-08-25 03:23:43,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=9653.333333333334, ans=0.125
+2024-08-25 03:23:57,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=9706.666666666666, ans=0.04949747468305833
+2024-08-25 03:24:06,593 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.02 vs. limit=14.780000000000001
+2024-08-25 03:24:08,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=9760.0, ans=0.125
+2024-08-25 03:24:15,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=9760.0, ans=0.025
+2024-08-25 03:24:16,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=9760.0, ans=0.5584
+2024-08-25 03:24:28,023 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.07 vs. limit=4.4719999999999995
+2024-08-25 03:24:28,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=9813.333333333334, ans=0.20186666666666667
+2024-08-25 03:24:35,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=9866.666666666666, ans=0.0
+2024-08-25 03:24:35,828 INFO [train.py:1114] (2/4) Epoch 1, batch 1850, loss[loss=0.4668, simple_loss=0.4188, pruned_loss=0.1839, ctc_loss=0.3508, over 19577.00 frames. ], tot_loss[loss=0.4796, simple_loss=0.42, pruned_loss=0.1913, ctc_loss=0.3663, over 3855035.66 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:24:56,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=9920.0, ans=0.125
+2024-08-25 03:25:25,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10026.666666666666, ans=0.19973333333333332
+2024-08-25 03:25:32,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10080.0, ans=0.1992
+2024-08-25 03:25:40,382 INFO [train.py:1114] (2/4) Epoch 1, batch 1900, loss[loss=0.4433, simple_loss=0.4162, pruned_loss=0.1667, ctc_loss=0.328, over 19635.00 frames. ], tot_loss[loss=0.4743, simple_loss=0.4185, pruned_loss=0.1883, ctc_loss=0.3607, over 3860409.62 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:25:48,460 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.873e+02 2.554e+02 2.990e+02 4.033e+02 8.041e+02, threshold=5.979e+02, percent-clipped=3.0
+2024-08-25 03:25:58,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.whiten.whitening_limit, batch_count=10186.666666666666, ans=8.074666666666666
+2024-08-25 03:26:03,572 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:26:03,751 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.50 vs. limit=10.120000000000001
+2024-08-25 03:26:08,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=10240.0, ans=0.125
+2024-08-25 03:26:16,812 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.27 vs. limit=8.117333333333335
+2024-08-25 03:26:18,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=10293.333333333334, ans=0.125
+2024-08-25 03:26:27,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=10346.666666666666, ans=0.02355555555555556
+2024-08-25 03:26:38,015 INFO [train.py:1114] (2/4) Epoch 1, batch 1950, loss[loss=0.4276, simple_loss=0.3897, pruned_loss=0.167, ctc_loss=0.3235, over 19602.00 frames. ], tot_loss[loss=0.4684, simple_loss=0.4172, pruned_loss=0.1849, ctc_loss=0.355, over 3870016.29 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:26:55,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=10453.333333333334, ans=0.125
+2024-08-25 03:27:05,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=10506.666666666666, ans=0.022888888888888893
+2024-08-25 03:27:09,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=10506.666666666666, ans=0.125
+2024-08-25 03:27:13,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=10560.0, ans=0.125
+2024-08-25 03:27:20,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=10560.0, ans=0.125
+2024-08-25 03:27:30,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=10613.333333333334, ans=0.125
+2024-08-25 03:27:36,541 INFO [train.py:1114] (2/4) Epoch 1, batch 2000, loss[loss=0.3649, simple_loss=0.3508, pruned_loss=0.1366, ctc_loss=0.2646, over 19660.00 frames. ], tot_loss[loss=0.4647, simple_loss=0.416, pruned_loss=0.183, ctc_loss=0.3519, over 3853565.15 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:27:44,898 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.508e+02 3.011e+02 3.695e+02 6.472e+02, threshold=6.022e+02, percent-clipped=1.0
+2024-08-25 03:27:49,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=10720.0, ans=0.5248
+2024-08-25 03:27:55,352 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.64 vs. limit=11.52
+2024-08-25 03:28:28,928 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.47 vs. limit=15.66
+2024-08-25 03:28:42,015 INFO [train.py:1114] (2/4) Epoch 1, batch 2050, loss[loss=0.4267, simple_loss=0.3921, pruned_loss=0.1671, ctc_loss=0.3179, over 19688.00 frames. ], tot_loss[loss=0.459, simple_loss=0.4133, pruned_loss=0.1806, ctc_loss=0.3464, over 3851076.60 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:29:06,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=10986.666666666666, ans=0.125
+2024-08-25 03:29:06,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=10986.666666666666, ans=0.025
+2024-08-25 03:29:14,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=10986.666666666666, ans=0.19013333333333332
+2024-08-25 03:29:20,624 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=10986.666666666666, ans=0.020888888888888894
+2024-08-25 03:30:30,609 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.44 vs. limit=4.664
+2024-08-25 03:30:57,667 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.48 vs. limit=11.68
+2024-08-25 03:31:02,654 INFO [train.py:1114] (2/4) Epoch 1, batch 2100, loss[loss=0.4241, simple_loss=0.4047, pruned_loss=0.1583, ctc_loss=0.3171, over 19755.00 frames. ], tot_loss[loss=0.4516, simple_loss=0.4101, pruned_loss=0.1767, ctc_loss=0.3397, over 3858190.80 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:31:02,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=11200.0, ans=0.020000000000000004
+2024-08-25 03:31:19,367 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 2.443e+02 2.901e+02 4.101e+02 7.108e+02, threshold=5.802e+02, percent-clipped=5.0
+2024-08-25 03:31:25,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=11253.333333333334, ans=0.125
+2024-08-25 03:31:52,604 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:31:55,236 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.77 vs. limit=10.653333333333332
+2024-08-25 03:32:04,113 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.44 vs. limit=4.704
+2024-08-25 03:32:05,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=11360.0, ans=0.019333333333333338
+2024-08-25 03:32:14,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=11413.333333333334, ans=0.019111111111111106
+2024-08-25 03:32:32,790 INFO [train.py:1114] (2/4) Epoch 1, batch 2150, loss[loss=0.3661, simple_loss=0.3669, pruned_loss=0.1306, ctc_loss=0.2602, over 19853.00 frames. ], tot_loss[loss=0.4461, simple_loss=0.4078, pruned_loss=0.1738, ctc_loss=0.3344, over 3868217.85 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:32:44,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=11466.666666666666, ans=0.125
+2024-08-25 03:32:46,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=11466.666666666666, ans=0.008376811594202898
+2024-08-25 03:32:53,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=11520.0, ans=0.125
+2024-08-25 03:32:57,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=11520.0, ans=0.1848
+2024-08-25 03:33:23,984 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.25 vs. limit=16.18
+2024-08-25 03:33:27,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=11626.666666666666, ans=10.0
+2024-08-25 03:33:30,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=11626.666666666666, ans=0.125
+2024-08-25 03:33:49,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=11680.0, ans=0.025
+2024-08-25 03:33:49,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=11680.0, ans=0.125
+2024-08-25 03:33:52,950 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.60 vs. limit=11.879999999999999
+2024-08-25 03:33:57,385 INFO [train.py:1114] (2/4) Epoch 1, batch 2200, loss[loss=0.4278, simple_loss=0.4073, pruned_loss=0.1632, ctc_loss=0.3047, over 19608.00 frames. ], tot_loss[loss=0.4413, simple_loss=0.4058, pruned_loss=0.1713, ctc_loss=0.3297, over 3866449.55 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:33:57,926 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.41 vs. limit=16.3
+2024-08-25 03:34:07,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=11733.333333333334, ans=0.008318840579710145
+2024-08-25 03:34:08,401 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.628e+02 3.380e+02 4.438e+02 7.655e+02, threshold=6.760e+02, percent-clipped=12.0
+2024-08-25 03:34:11,946 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=11786.666666666666, ans=0.125
+2024-08-25 03:34:24,619 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.32 vs. limit=11.940000000000001
+2024-08-25 03:35:03,292 INFO [train.py:1114] (2/4) Epoch 1, batch 2250, loss[loss=0.389, simple_loss=0.3903, pruned_loss=0.1389, ctc_loss=0.2747, over 19622.00 frames. ], tot_loss[loss=0.4376, simple_loss=0.4045, pruned_loss=0.1693, ctc_loss=0.3256, over 3867289.53 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:35:11,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=12000.0, ans=0.0
+2024-08-25 03:35:13,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=12000.0, ans=0.18
+2024-08-25 03:35:23,114 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.44 vs. limit=16.54
+2024-08-25 03:35:46,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=12160.0, ans=0.3824
+2024-08-25 03:35:54,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=12213.333333333334, ans=0.0
+2024-08-25 03:36:00,468 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.68 vs. limit=11.106666666666667
+2024-08-25 03:36:03,074 INFO [train.py:1114] (2/4) Epoch 1, batch 2300, loss[loss=0.3813, simple_loss=0.3759, pruned_loss=0.139, ctc_loss=0.2721, over 19513.00 frames. ], tot_loss[loss=0.4328, simple_loss=0.4018, pruned_loss=0.167, ctc_loss=0.3208, over 3861513.50 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:36:12,284 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.546e+02 3.099e+02 3.956e+02 8.242e+02, threshold=6.199e+02, percent-clipped=6.0
+2024-08-25 03:36:18,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=12320.0, ans=0.125
+2024-08-25 03:36:28,919 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:36:30,492 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.51 vs. limit=12.14
+2024-08-25 03:36:40,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=12426.666666666666, ans=0.07
+2024-08-25 03:36:46,141 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=12426.666666666666, ans=0.17573333333333335
+2024-08-25 03:36:47,247 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=12426.666666666666, ans=0.025
+2024-08-25 03:36:56,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=12480.0, ans=0.125
+2024-08-25 03:37:00,690 INFO [train.py:1114] (2/4) Epoch 1, batch 2350, loss[loss=0.4733, simple_loss=0.4317, pruned_loss=0.1884, ctc_loss=0.345, over 19677.00 frames. ], tot_loss[loss=0.4295, simple_loss=0.4006, pruned_loss=0.1652, ctc_loss=0.3172, over 3863663.06 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:37:06,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=12533.333333333334, ans=0.4613333333333333
+2024-08-25 03:37:21,924 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=12640.0, ans=0.125
+2024-08-25 03:37:22,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=12640.0, ans=0.014000000000000005
+2024-08-25 03:37:38,456 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.21 vs. limit=17.02
+2024-08-25 03:37:59,413 INFO [train.py:1114] (2/4) Epoch 1, batch 2400, loss[loss=0.4323, simple_loss=0.4116, pruned_loss=0.1647, ctc_loss=0.3092, over 19247.00 frames. ], tot_loss[loss=0.4288, simple_loss=0.4014, pruned_loss=0.1645, ctc_loss=0.316, over 3858080.96 frames. ], batch size: 71, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:38:08,241 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.522e+02 3.053e+02 3.990e+02 1.210e+03, threshold=6.106e+02, percent-clipped=3.0
+2024-08-25 03:38:16,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=12853.333333333334, ans=0.013111111111111108
+2024-08-25 03:38:24,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=12906.666666666666, ans=0.125
+2024-08-25 03:38:29,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=12906.666666666666, ans=0.125
+2024-08-25 03:38:34,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=12960.0, ans=0.008052173913043479
+2024-08-25 03:38:39,200 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.16 vs. limit=11.48
+2024-08-25 03:39:03,897 INFO [train.py:1114] (2/4) Epoch 1, batch 2450, loss[loss=0.5347, simple_loss=0.4495, pruned_loss=0.2263, ctc_loss=0.4184, over 13508.00 frames. ], tot_loss[loss=0.4386, simple_loss=0.4067, pruned_loss=0.1699, ctc_loss=0.3247, over 3731381.37 frames. ], batch size: 141, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:39:21,473 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.60 vs. limit=12.42
+2024-08-25 03:39:26,013 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=13120.0, ans=0.012000000000000004
+2024-08-25 03:39:45,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=13226.666666666666, ans=0.125
+2024-08-25 03:40:43,714 INFO [train.py:1114] (2/4) Epoch 2, batch 0, loss[loss=0.4345, simple_loss=0.3942, pruned_loss=0.1723, ctc_loss=0.3258, over 19423.00 frames. ], tot_loss[loss=0.4345, simple_loss=0.3942, pruned_loss=0.1723, ctc_loss=0.3258, over 19423.00 frames. ], batch size: 48, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 03:40:43,715 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 03:40:55,165 INFO [train.py:1146] (2/4) Epoch 2, validation: loss=0.3317, simple_loss=0.3718, pruned_loss=0.1058, ctc_loss=0.2, over 944034.00 frames.
+2024-08-25 03:40:55,166 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 03:41:04,094 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=13280.0, ans=0.125
+2024-08-25 03:41:10,790 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.94 vs. limit=17.5
+2024-08-25 03:41:12,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=13333.333333333334, ans=0.125
+2024-08-25 03:41:17,126 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.388e+02 2.818e+02 3.444e+02 6.577e+02, threshold=5.636e+02, percent-clipped=3.0
+2024-08-25 03:41:21,200 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.85 vs. limit=17.54
+2024-08-25 03:41:57,983 INFO [train.py:1114] (2/4) Epoch 2, batch 50, loss[loss=0.3175, simple_loss=0.3239, pruned_loss=0.1119, ctc_loss=0.2181, over 19707.00 frames. ], tot_loss[loss=0.4185, simple_loss=0.397, pruned_loss=0.1592, ctc_loss=0.304, over 845808.55 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:42:09,650 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=13600.0, ans=0.125
+2024-08-25 03:42:12,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=13600.0, ans=0.025
+2024-08-25 03:42:19,400 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=13600.0, ans=0.16399999999999998
+2024-08-25 03:42:30,097 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=13653.333333333334, ans=0.009777777777777774
+2024-08-25 03:43:11,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=13706.666666666666, ans=0.00955555555555556
+2024-08-25 03:43:29,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=13760.0, ans=0.125
+2024-08-25 03:43:36,892 INFO [train.py:1114] (2/4) Epoch 2, batch 100, loss[loss=0.4026, simple_loss=0.3766, pruned_loss=0.1556, ctc_loss=0.2935, over 19733.00 frames. ], tot_loss[loss=0.4197, simple_loss=0.3992, pruned_loss=0.1592, ctc_loss=0.3047, over 1500191.62 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:43:46,377 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.30 vs. limit=9.525333333333332
+2024-08-25 03:43:49,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=13813.333333333334, ans=0.125
+2024-08-25 03:43:55,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=13866.666666666666, ans=0.025
+2024-08-25 03:44:02,829 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.500e+02 2.916e+02 3.893e+02 6.295e+02, threshold=5.832e+02, percent-clipped=2.0
+2024-08-25 03:44:23,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=13973.333333333334, ans=0.125
+2024-08-25 03:44:26,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=13973.333333333334, ans=0.125
+2024-08-25 03:44:34,507 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=14026.666666666666, ans=0.008222222222222228
+2024-08-25 03:44:42,812 INFO [train.py:1114] (2/4) Epoch 2, batch 150, loss[loss=0.3807, simple_loss=0.3596, pruned_loss=0.1454, ctc_loss=0.2774, over 19700.00 frames. ], tot_loss[loss=0.4097, simple_loss=0.3932, pruned_loss=0.1541, ctc_loss=0.2953, over 2029517.07 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:44:46,838 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=14080.0, ans=0.125
+2024-08-25 03:45:12,503 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=14186.666666666666, ans=0.007785507246376812
+2024-08-25 03:45:38,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=14293.333333333334, ans=0.8929333333333334
+2024-08-25 03:45:42,117 INFO [train.py:1114] (2/4) Epoch 2, batch 200, loss[loss=0.4261, simple_loss=0.3987, pruned_loss=0.1654, ctc_loss=0.3066, over 18357.00 frames. ], tot_loss[loss=0.4039, simple_loss=0.3896, pruned_loss=0.1511, ctc_loss=0.2898, over 2437654.93 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:45:48,670 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.47 vs. limit=12.173333333333332
+2024-08-25 03:45:51,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=14346.666666666666, ans=0.3978666666666667
+2024-08-25 03:46:06,465 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.951e+02 2.445e+02 2.940e+02 3.728e+02 6.995e+02, threshold=5.880e+02, percent-clipped=3.0
+2024-08-25 03:46:15,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=14453.333333333334, ans=0.007727536231884058
+2024-08-25 03:46:17,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=14453.333333333334, ans=0.0
+2024-08-25 03:46:33,733 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.15 vs. limit=18.42
+2024-08-25 03:46:42,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=14560.0, ans=0.125
+2024-08-25 03:46:45,948 INFO [train.py:1114] (2/4) Epoch 2, batch 250, loss[loss=0.3939, simple_loss=0.3942, pruned_loss=0.1401, ctc_loss=0.2834, over 19354.00 frames. ], tot_loss[loss=0.4035, simple_loss=0.3899, pruned_loss=0.1508, ctc_loss=0.2889, over 2756627.41 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:47:00,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=14666.666666666666, ans=0.125
+2024-08-25 03:47:05,066 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.74 vs. limit=12.333333333333332
+2024-08-25 03:47:16,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=14720.0, ans=0.15280000000000002
+2024-08-25 03:47:37,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=14826.666666666666, ans=0.125
+2024-08-25 03:47:50,839 INFO [train.py:1114] (2/4) Epoch 2, batch 300, loss[loss=0.4182, simple_loss=0.4049, pruned_loss=0.1584, ctc_loss=0.2869, over 19528.00 frames. ], tot_loss[loss=0.3996, simple_loss=0.3876, pruned_loss=0.1488, ctc_loss=0.2851, over 3001049.19 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:47:55,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=14880.0, ans=0.007634782608695653
+2024-08-25 03:48:11,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=14933.333333333334, ans=0.05
+2024-08-25 03:48:13,160 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.396e+02 2.818e+02 3.488e+02 8.647e+02, threshold=5.636e+02, percent-clipped=6.0
+2024-08-25 03:48:37,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=15093.333333333334, ans=0.0037777777777777757
+2024-08-25 03:48:42,445 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=15093.333333333334, ans=0.0037777777777777757
+2024-08-25 03:48:50,380 INFO [train.py:1114] (2/4) Epoch 2, batch 350, loss[loss=0.3687, simple_loss=0.3624, pruned_loss=0.1356, ctc_loss=0.2598, over 19768.00 frames. ], tot_loss[loss=0.3984, simple_loss=0.3871, pruned_loss=0.1481, ctc_loss=0.2837, over 3190772.48 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:48:51,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=15146.666666666666, ans=0.125
+2024-08-25 03:49:33,716 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=15200.0, ans=0.42800000000000005
+2024-08-25 03:49:47,123 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=15253.333333333334, ans=0.125
+2024-08-25 03:50:01,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=15306.666666666666, ans=0.125
+2024-08-25 03:50:15,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=15360.0, ans=0.125
+2024-08-25 03:50:17,348 INFO [train.py:1114] (2/4) Epoch 2, batch 400, loss[loss=0.4301, simple_loss=0.406, pruned_loss=0.165, ctc_loss=0.3105, over 19488.00 frames. ], tot_loss[loss=0.3962, simple_loss=0.386, pruned_loss=0.1469, ctc_loss=0.2816, over 3343165.54 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:50:21,422 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.30 vs. limit=13.280000000000001
+2024-08-25 03:50:22,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=15413.333333333334, ans=0.125
+2024-08-25 03:50:22,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=15413.333333333334, ans=0.04949747468305833
+2024-08-25 03:50:23,912 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.88 vs. limit=13.280000000000001
+2024-08-25 03:50:25,823 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.515e+00
+2024-08-25 03:50:39,706 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.407e+02 2.984e+02 3.456e+02 5.488e+02, threshold=5.968e+02, percent-clipped=0.0
+2024-08-25 03:50:41,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_ff2.min_abs, batch_count=15520.0, ans=0.1
+2024-08-25 03:50:44,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15520.0, ans=0.125
+2024-08-25 03:51:19,363 INFO [train.py:1114] (2/4) Epoch 2, batch 450, loss[loss=0.3914, simple_loss=0.3954, pruned_loss=0.1404, ctc_loss=0.2664, over 19608.00 frames. ], tot_loss[loss=0.3954, simple_loss=0.3854, pruned_loss=0.1467, ctc_loss=0.2802, over 3449550.98 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:51:24,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15680.0, ans=0.14320000000000002
+2024-08-25 03:51:25,819 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15680.0, ans=0.125
+2024-08-25 03:51:29,293 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:51:41,432 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=3.244e+00
+2024-08-25 03:51:46,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=15786.666666666666, ans=0.0
+2024-08-25 03:52:05,133 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.25 vs. limit=13.440000000000001
+2024-08-25 03:52:05,330 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.21 vs. limit=13.440000000000001
+2024-08-25 03:52:09,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=15893.333333333334, ans=0.125
+2024-08-25 03:52:21,862 INFO [train.py:1114] (2/4) Epoch 2, batch 500, loss[loss=0.3854, simple_loss=0.3897, pruned_loss=0.1372, ctc_loss=0.2666, over 19707.00 frames. ], tot_loss[loss=0.3938, simple_loss=0.3845, pruned_loss=0.1458, ctc_loss=0.2787, over 3545511.59 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:52:57,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=15946.666666666666, ans=0.00022222222222222088
+2024-08-25 03:52:57,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=15946.666666666666, ans=0.125
+2024-08-25 03:53:07,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=16000.0, ans=0.0
+2024-08-25 03:53:11,994 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.425e+02 3.079e+02 3.995e+02 1.154e+03, threshold=6.159e+02, percent-clipped=13.0
+2024-08-25 03:53:13,828 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.35 vs. limit=10.421333333333333
+2024-08-25 03:53:31,273 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=16106.666666666666, ans=0.125
+2024-08-25 03:53:42,902 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=16160.0, ans=0.125
+2024-08-25 03:53:49,970 INFO [train.py:1114] (2/4) Epoch 2, batch 550, loss[loss=0.3779, simple_loss=0.3823, pruned_loss=0.1347, ctc_loss=0.2604, over 19245.00 frames. ], tot_loss[loss=0.3924, simple_loss=0.3839, pruned_loss=0.145, ctc_loss=0.2774, over 3607977.97 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:54:14,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=16320.0, ans=0.4448
+2024-08-25 03:54:48,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 03:54:51,526 INFO [train.py:1114] (2/4) Epoch 2, batch 600, loss[loss=0.3973, simple_loss=0.3992, pruned_loss=0.1424, ctc_loss=0.2764, over 19326.00 frames. ], tot_loss[loss=0.3911, simple_loss=0.3836, pruned_loss=0.1442, ctc_loss=0.2756, over 3665666.24 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:54:53,376 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.02 vs. limit=13.68
+2024-08-25 03:54:54,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=16480.0, ans=0.13520000000000001
+2024-08-25 03:54:56,515 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=16480.0, ans=0.04949747468305833
+2024-08-25 03:54:57,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=16480.0, ans=0.32320000000000004
+2024-08-25 03:55:14,978 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.336e+02 2.753e+02 3.494e+02 8.105e+02, threshold=5.507e+02, percent-clipped=1.0
+2024-08-25 03:55:26,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=16586.666666666668, ans=0.9158666666666666
+2024-08-25 03:55:50,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=16693.333333333332, ans=0.125
+2024-08-25 03:55:54,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=16693.333333333332, ans=13.759999999999998
+2024-08-25 03:55:56,175 INFO [train.py:1114] (2/4) Epoch 2, batch 650, loss[loss=0.3814, simple_loss=0.3757, pruned_loss=0.139, ctc_loss=0.2727, over 19772.00 frames. ], tot_loss[loss=0.3886, simple_loss=0.3817, pruned_loss=0.143, ctc_loss=0.2734, over 3716440.71 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:56:03,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=16746.666666666668, ans=0.07
+2024-08-25 03:56:16,584 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.93 vs. limit=20.1
+2024-08-25 03:56:45,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=16960.0, ans=0.125
+2024-08-25 03:56:45,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=16960.0, ans=0.3064
+2024-08-25 03:56:50,704 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.52 vs. limit=13.86
+2024-08-25 03:56:54,059 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=16960.0, ans=0.007182608695652175
+2024-08-25 03:56:56,406 INFO [train.py:1114] (2/4) Epoch 2, batch 700, loss[loss=0.3479, simple_loss=0.3539, pruned_loss=0.1236, ctc_loss=0.2363, over 19727.00 frames. ], tot_loss[loss=0.3877, simple_loss=0.3817, pruned_loss=0.1424, ctc_loss=0.2721, over 3748435.88 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:57:08,056 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.87 vs. limit=5.5600000000000005
+2024-08-25 03:57:17,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=17066.666666666668, ans=0.125
+2024-08-25 03:57:23,249 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.519e+02 2.895e+02 3.628e+02 6.087e+02, threshold=5.790e+02, percent-clipped=2.0
+2024-08-25 03:57:43,131 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.21 vs. limit=7.434666666666667
+2024-08-25 03:58:01,102 INFO [train.py:1114] (2/4) Epoch 2, batch 750, loss[loss=0.378, simple_loss=0.3883, pruned_loss=0.1336, ctc_loss=0.2515, over 19498.00 frames. ], tot_loss[loss=0.3842, simple_loss=0.3795, pruned_loss=0.1408, ctc_loss=0.2685, over 3774949.39 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:58:25,124 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.20 vs. limit=14.02
+2024-08-25 04:00:00,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=17493.333333333332, ans=0.007066666666666667
+2024-08-25 04:00:06,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=17493.333333333332, ans=0.0
+2024-08-25 04:00:16,108 INFO [train.py:1114] (2/4) Epoch 2, batch 800, loss[loss=0.362, simple_loss=0.3596, pruned_loss=0.1324, ctc_loss=0.2491, over 19417.00 frames. ], tot_loss[loss=0.3815, simple_loss=0.378, pruned_loss=0.1394, ctc_loss=0.2658, over 3796406.38 frames. ], batch size: 48, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:00:30,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=17600.0, ans=0.28400000000000003
+2024-08-25 04:00:39,332 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.611e+02 3.088e+02 3.881e+02 9.768e+02, threshold=6.176e+02, percent-clipped=6.0
+2024-08-25 04:01:14,050 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=17813.333333333332, ans=0.125
+2024-08-25 04:01:15,037 INFO [train.py:1114] (2/4) Epoch 2, batch 850, loss[loss=0.396, simple_loss=0.3894, pruned_loss=0.1453, ctc_loss=0.2799, over 19649.00 frames. ], tot_loss[loss=0.3787, simple_loss=0.3762, pruned_loss=0.1379, ctc_loss=0.2633, over 3815173.93 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:01:22,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=17813.333333333332, ans=0.125
+2024-08-25 04:01:27,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=17866.666666666668, ans=0.0
+2024-08-25 04:01:45,586 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.93 vs. limit=11.168
+2024-08-25 04:02:18,993 INFO [train.py:1114] (2/4) Epoch 2, batch 900, loss[loss=0.3415, simple_loss=0.3398, pruned_loss=0.1237, ctc_loss=0.239, over 19420.00 frames. ], tot_loss[loss=0.3802, simple_loss=0.3767, pruned_loss=0.139, ctc_loss=0.2645, over 3818259.72 frames. ], batch size: 48, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:02:45,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=18080.0, ans=0.06920000000000001
+2024-08-25 04:02:52,735 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.79 vs. limit=21.1
+2024-08-25 04:03:00,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=18186.666666666668, ans=0.125
+2024-08-25 04:03:03,829 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.814e+02 2.530e+02 3.033e+02 3.602e+02 3.379e+03, threshold=6.066e+02, percent-clipped=6.0
+2024-08-25 04:03:05,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=18186.666666666668, ans=0.125
+2024-08-25 04:03:12,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=18240.0, ans=0.125
+2024-08-25 04:03:36,915 INFO [train.py:1114] (2/4) Epoch 2, batch 950, loss[loss=0.3804, simple_loss=0.3732, pruned_loss=0.1396, ctc_loss=0.2708, over 19501.00 frames. ], tot_loss[loss=0.3806, simple_loss=0.3773, pruned_loss=0.139, ctc_loss=0.2646, over 3818663.54 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:03:45,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=18346.666666666668, ans=0.0
+2024-08-25 04:03:59,635 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.82 vs. limit=5.76
+2024-08-25 04:04:24,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=18506.666666666668, ans=0.0
+2024-08-25 04:04:24,087 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=18506.666666666668, ans=0.125
+2024-08-25 04:04:24,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=18506.666666666668, ans=0.125
+2024-08-25 04:04:30,621 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.47 vs. limit=14.46
+2024-08-25 04:04:31,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=18560.0, ans=0.125
+2024-08-25 04:04:32,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=18560.0, ans=0.125
+2024-08-25 04:04:39,322 INFO [train.py:1114] (2/4) Epoch 2, batch 1000, loss[loss=0.3065, simple_loss=0.336, pruned_loss=0.1002, ctc_loss=0.1913, over 19851.00 frames. ], tot_loss[loss=0.3824, simple_loss=0.3787, pruned_loss=0.1398, ctc_loss=0.2661, over 3815174.09 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:04:40,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=18613.333333333332, ans=0.125
+2024-08-25 04:04:44,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=18613.333333333332, ans=0.006823188405797102
+2024-08-25 04:05:05,789 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.321e+02 2.743e+02 3.485e+02 6.350e+02, threshold=5.486e+02, percent-clipped=2.0
+2024-08-25 04:05:39,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18826.666666666668, ans=0.11173333333333332
+2024-08-25 04:05:41,818 INFO [train.py:1114] (2/4) Epoch 2, batch 1050, loss[loss=0.3961, simple_loss=0.3939, pruned_loss=0.1445, ctc_loss=0.2732, over 19847.00 frames. ], tot_loss[loss=0.3799, simple_loss=0.3771, pruned_loss=0.1385, ctc_loss=0.2641, over 3821771.57 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:05:42,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=18880.0, ans=0.0
+2024-08-25 04:05:46,422 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.20 vs. limit=11.552
+2024-08-25 04:05:50,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=18880.0, ans=0.125
+2024-08-25 04:06:01,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=18933.333333333332, ans=0.125
+2024-08-25 04:06:12,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=18986.666666666668, ans=0.125
+2024-08-25 04:06:40,933 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=19093.333333333332, ans=0.48639999999999994
+2024-08-25 04:06:43,810 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.71 vs. limit=14.573333333333334
+2024-08-25 04:06:44,165 INFO [train.py:1114] (2/4) Epoch 2, batch 1100, loss[loss=0.3128, simple_loss=0.3446, pruned_loss=0.1018, ctc_loss=0.1939, over 19590.00 frames. ], tot_loss[loss=0.3759, simple_loss=0.375, pruned_loss=0.1364, ctc_loss=0.26, over 3831001.47 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:07:11,083 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.465e+02 2.960e+02 4.039e+02 7.406e+02, threshold=5.919e+02, percent-clipped=11.0
+2024-08-25 04:07:42,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=19306.666666666668, ans=0.125
+2024-08-25 04:08:01,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=19360.0, ans=0.006660869565217392
+2024-08-25 04:08:08,078 INFO [train.py:1114] (2/4) Epoch 2, batch 1150, loss[loss=0.3623, simple_loss=0.3656, pruned_loss=0.1309, ctc_loss=0.2428, over 19605.00 frames. ], tot_loss[loss=0.3751, simple_loss=0.3745, pruned_loss=0.136, ctc_loss=0.2589, over 3830964.62 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:08:34,111 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=19520.0, ans=0.09899494936611666
+2024-08-25 04:09:01,104 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=19626.666666666668, ans=0.125
+2024-08-25 04:09:01,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=19626.666666666668, ans=0.125
+2024-08-25 04:09:08,100 INFO [train.py:1114] (2/4) Epoch 2, batch 1200, loss[loss=0.3599, simple_loss=0.3737, pruned_loss=0.1244, ctc_loss=0.2434, over 19853.00 frames. ], tot_loss[loss=0.3767, simple_loss=0.3761, pruned_loss=0.1367, ctc_loss=0.26, over 3825842.79 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 04:09:08,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=19680.0, ans=0.006591304347826087
+2024-08-25 04:09:36,233 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.637e+02 3.065e+02 4.000e+02 6.600e+02, threshold=6.130e+02, percent-clipped=2.0
+2024-08-25 04:09:50,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=19840.0, ans=0.10160000000000002
+2024-08-25 04:10:02,450 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=19893.333333333332, ans=0.0
+2024-08-25 04:10:11,973 INFO [train.py:1114] (2/4) Epoch 2, batch 1250, loss[loss=0.3563, simple_loss=0.3661, pruned_loss=0.1251, ctc_loss=0.2405, over 19508.00 frames. ], tot_loss[loss=0.3761, simple_loss=0.3762, pruned_loss=0.1362, ctc_loss=0.259, over 3843963.19 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:10:12,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=19946.666666666668, ans=0.0
+2024-08-25 04:10:16,264 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.66 vs. limit=22.46
+2024-08-25 04:10:19,478 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=19946.666666666668, ans=0.125
+2024-08-25 04:10:41,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=20053.333333333332, ans=0.0065101449275362325
+2024-08-25 04:10:42,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=20053.333333333332, ans=0.07
+2024-08-25 04:11:01,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=20160.0, ans=0.125
+2024-08-25 04:11:02,966 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=20160.0, ans=0.0
+2024-08-25 04:11:15,239 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.21 vs. limit=15.0
+2024-08-25 04:11:15,942 INFO [train.py:1114] (2/4) Epoch 2, batch 1300, loss[loss=0.4103, simple_loss=0.3966, pruned_loss=0.1539, ctc_loss=0.2903, over 18856.00 frames. ], tot_loss[loss=0.3737, simple_loss=0.3748, pruned_loss=0.135, ctc_loss=0.2566, over 3848656.50 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:11:25,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=20213.333333333332, ans=0.125
+2024-08-25 04:11:41,986 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.187e+02 2.429e+02 2.931e+02 4.736e+02, threshold=4.858e+02, percent-clipped=0.0
+2024-08-25 04:11:46,254 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.62 vs. limit=15.0
+2024-08-25 04:11:50,898 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=20373.333333333332, ans=0.006440579710144928
+2024-08-25 04:12:00,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=20373.333333333332, ans=0.125
+2024-08-25 04:12:08,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=20426.666666666668, ans=0.2
+2024-08-25 04:12:12,615 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.94 vs. limit=12.0
+2024-08-25 04:12:13,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=20426.666666666668, ans=0.07
+2024-08-25 04:12:15,275 INFO [train.py:1114] (2/4) Epoch 2, batch 1350, loss[loss=0.3371, simple_loss=0.3593, pruned_loss=0.1141, ctc_loss=0.2164, over 19769.00 frames. ], tot_loss[loss=0.371, simple_loss=0.3733, pruned_loss=0.1336, ctc_loss=0.2537, over 3859181.43 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:12:36,973 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:12:37,001 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=20533.333333333332, ans=0.0
+2024-08-25 04:12:39,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=20586.666666666668, ans=0.125
+2024-08-25 04:12:51,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=20640.0, ans=0.0
+2024-08-25 04:12:57,482 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.55 vs. limit=22.5
+2024-08-25 04:13:03,342 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=20640.0, ans=0.125
+2024-08-25 04:13:18,546 INFO [train.py:1114] (2/4) Epoch 2, batch 1400, loss[loss=0.2914, simple_loss=0.3099, pruned_loss=0.09955, ctc_loss=0.1843, over 19692.00 frames. ], tot_loss[loss=0.3707, simple_loss=0.3731, pruned_loss=0.1335, ctc_loss=0.2532, over 3865934.75 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:13:59,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=20853.333333333332, ans=0.006336231884057971
+2024-08-25 04:14:03,166 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.385e+02 2.674e+02 3.744e+02 6.684e+02, threshold=5.347e+02, percent-clipped=6.0
+2024-08-25 04:14:08,156 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20853.333333333332, ans=0.1
+2024-08-25 04:14:19,953 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.60 vs. limit=15.0
+2024-08-25 04:14:27,371 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=20960.0, ans=0.2
+2024-08-25 04:14:37,958 INFO [train.py:1114] (2/4) Epoch 2, batch 1450, loss[loss=0.3726, simple_loss=0.3849, pruned_loss=0.1321, ctc_loss=0.2401, over 19650.00 frames. ], tot_loss[loss=0.37, simple_loss=0.3729, pruned_loss=0.1332, ctc_loss=0.2518, over 3864306.48 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:14:38,249 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=21013.333333333332, ans=0.0
+2024-08-25 04:14:45,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=21013.333333333332, ans=0.025
+2024-08-25 04:14:46,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=21013.333333333332, ans=0.125
+2024-08-25 04:14:51,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=21066.666666666668, ans=0.1
+2024-08-25 04:15:51,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=21066.666666666668, ans=0.2
+2024-08-25 04:15:54,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=21120.0, ans=0.125
+2024-08-25 04:15:57,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=21120.0, ans=0.125
+2024-08-25 04:15:58,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=21120.0, ans=0.125
+2024-08-25 04:16:04,100 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=18.75 vs. limit=15.0
+2024-08-25 04:16:12,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=21173.333333333332, ans=0.025
+2024-08-25 04:16:12,745 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.44 vs. limit=15.0
+2024-08-25 04:16:33,079 INFO [train.py:1114] (2/4) Epoch 2, batch 1500, loss[loss=0.3686, simple_loss=0.3708, pruned_loss=0.1332, ctc_loss=0.2496, over 19595.00 frames. ], tot_loss[loss=0.3685, simple_loss=0.3723, pruned_loss=0.1323, ctc_loss=0.2503, over 3863794.10 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:16:38,438 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=21280.0, ans=0.006243478260869566
+2024-08-25 04:17:04,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=21386.666666666668, ans=0.1
+2024-08-25 04:17:08,003 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.509e+02 2.906e+02 4.274e+02 8.598e+02, threshold=5.813e+02, percent-clipped=13.0
+2024-08-25 04:17:18,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=21440.0, ans=0.025
+2024-08-25 04:17:24,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=21440.0, ans=10.0
+2024-08-25 04:17:34,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=21493.333333333332, ans=0.125
+2024-08-25 04:17:42,735 INFO [train.py:1114] (2/4) Epoch 2, batch 1550, loss[loss=0.401, simple_loss=0.3992, pruned_loss=0.1461, ctc_loss=0.2765, over 19601.00 frames. ], tot_loss[loss=0.37, simple_loss=0.3729, pruned_loss=0.1332, ctc_loss=0.2515, over 3847595.28 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 04:17:48,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=21546.666666666668, ans=0.125
+2024-08-25 04:17:54,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=21546.666666666668, ans=0.2
+2024-08-25 04:17:58,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=21600.0, ans=0.0061739130434782605
+2024-08-25 04:18:04,839 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=21600.0, ans=0.125
+2024-08-25 04:18:06,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=21653.333333333332, ans=0.1
+2024-08-25 04:18:32,216 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.81 vs. limit=15.0
+2024-08-25 04:18:41,613 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=21760.0, ans=0.125
+2024-08-25 04:18:44,946 INFO [train.py:1114] (2/4) Epoch 2, batch 1600, loss[loss=0.3621, simple_loss=0.3755, pruned_loss=0.1262, ctc_loss=0.2407, over 19842.00 frames. ], tot_loss[loss=0.369, simple_loss=0.3722, pruned_loss=0.1328, ctc_loss=0.251, over 3836408.51 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:18:59,805 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.77 vs. limit=15.0
+2024-08-25 04:19:02,271 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.59 vs. limit=15.0
+2024-08-25 04:19:13,743 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.370e+02 2.902e+02 3.664e+02 6.938e+02, threshold=5.803e+02, percent-clipped=2.0
+2024-08-25 04:19:14,036 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=21920.0, ans=0.125
+2024-08-25 04:19:23,706 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.57 vs. limit=15.0
+2024-08-25 04:19:27,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=21973.333333333332, ans=0.125
+2024-08-25 04:19:40,259 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.42 vs. limit=12.0
+2024-08-25 04:19:43,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=22026.666666666668, ans=0.125
+2024-08-25 04:19:44,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=22026.666666666668, ans=0.07
+2024-08-25 04:19:49,399 INFO [train.py:1114] (2/4) Epoch 2, batch 1650, loss[loss=0.3871, simple_loss=0.4007, pruned_loss=0.1353, ctc_loss=0.2575, over 19633.00 frames. ], tot_loss[loss=0.3693, simple_loss=0.3722, pruned_loss=0.1329, ctc_loss=0.2514, over 3832415.29 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:19:58,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=22080.0, ans=0.125
+2024-08-25 04:20:03,820 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22133.333333333332, ans=0.1
+2024-08-25 04:20:06,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=22133.333333333332, ans=0.006057971014492754
+2024-08-25 04:20:46,926 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.71 vs. limit=12.0
+2024-08-25 04:20:48,535 INFO [train.py:1114] (2/4) Epoch 2, batch 1700, loss[loss=0.3132, simple_loss=0.3289, pruned_loss=0.1062, ctc_loss=0.2129, over 19693.00 frames. ], tot_loss[loss=0.3664, simple_loss=0.3707, pruned_loss=0.1312, ctc_loss=0.2488, over 3846676.64 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:20:55,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=22346.666666666668, ans=0.0
+2024-08-25 04:20:58,714 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.52 vs. limit=22.5
+2024-08-25 04:20:59,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22400.0, ans=0.1
+2024-08-25 04:21:07,592 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=22400.0, ans=0.0
+2024-08-25 04:21:16,630 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.264e+02 2.715e+02 3.253e+02 5.462e+02, threshold=5.430e+02, percent-clipped=0.0
+2024-08-25 04:21:27,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=22506.666666666668, ans=0.0
+2024-08-25 04:21:33,909 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=22506.666666666668, ans=0.1
+2024-08-25 04:21:45,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=22560.0, ans=0.025
+2024-08-25 04:21:48,269 INFO [train.py:1114] (2/4) Epoch 2, batch 1750, loss[loss=0.3319, simple_loss=0.3387, pruned_loss=0.1205, ctc_loss=0.2102, over 19650.00 frames. ], tot_loss[loss=0.3647, simple_loss=0.3693, pruned_loss=0.1306, ctc_loss=0.2471, over 3850482.56 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:22:27,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=22773.333333333332, ans=0.025
+2024-08-25 04:22:51,785 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.30 vs. limit=10.0
+2024-08-25 04:22:59,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=22826.666666666668, ans=0.025
+2024-08-25 04:23:02,446 INFO [train.py:1114] (2/4) Epoch 2, batch 1800, loss[loss=0.3667, simple_loss=0.3777, pruned_loss=0.1301, ctc_loss=0.2387, over 19615.00 frames. ], tot_loss[loss=0.3648, simple_loss=0.3694, pruned_loss=0.1307, ctc_loss=0.2471, over 3853017.99 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:23:09,259 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:23:11,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22880.0, ans=0.1
+2024-08-25 04:23:21,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=22933.333333333332, ans=0.125
+2024-08-25 04:23:28,009 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.473e+02 2.913e+02 3.585e+02 6.262e+02, threshold=5.825e+02, percent-clipped=5.0
+2024-08-25 04:23:39,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=23040.0, ans=0.125
+2024-08-25 04:23:40,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=23040.0, ans=0.2
+2024-08-25 04:23:47,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=23093.333333333332, ans=0.0
+2024-08-25 04:23:54,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=23093.333333333332, ans=0.125
+2024-08-25 04:23:59,519 INFO [train.py:1114] (2/4) Epoch 2, batch 1850, loss[loss=0.3787, simple_loss=0.384, pruned_loss=0.1332, ctc_loss=0.2673, over 19588.00 frames. ], tot_loss[loss=0.3637, simple_loss=0.3689, pruned_loss=0.1301, ctc_loss=0.2458, over 3854273.72 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:24:11,816 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.86 vs. limit=6.0
+2024-08-25 04:24:13,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23200.0, ans=0.1
+2024-08-25 04:24:26,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23253.333333333332, ans=0.1
+2024-08-25 04:24:38,089 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=23306.666666666668, ans=0.125
+2024-08-25 04:24:53,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=23360.0, ans=0.2
+2024-08-25 04:24:54,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=23360.0, ans=0.025
+2024-08-25 04:24:56,429 INFO [train.py:1114] (2/4) Epoch 2, batch 1900, loss[loss=0.3979, simple_loss=0.3961, pruned_loss=0.1443, ctc_loss=0.2776, over 19637.00 frames. ], tot_loss[loss=0.3646, simple_loss=0.3696, pruned_loss=0.1305, ctc_loss=0.2463, over 3859639.00 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 32.0
+2024-08-25 04:25:21,302 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.247e+02 2.781e+02 3.399e+02 7.136e+02, threshold=5.561e+02, percent-clipped=3.0
+2024-08-25 04:25:55,277 INFO [train.py:1114] (2/4) Epoch 2, batch 1950, loss[loss=0.3643, simple_loss=0.3668, pruned_loss=0.1344, ctc_loss=0.2323, over 19582.00 frames. ], tot_loss[loss=0.3641, simple_loss=0.3701, pruned_loss=0.1299, ctc_loss=0.2455, over 3868670.13 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:26:01,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=23680.0, ans=0.125
+2024-08-25 04:26:04,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=23680.0, ans=0.125
+2024-08-25 04:26:11,368 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.79 vs. limit=15.0
+2024-08-25 04:26:20,486 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.64 vs. limit=15.0
+2024-08-25 04:26:27,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=23786.666666666668, ans=0.125
+2024-08-25 04:26:43,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=23893.333333333332, ans=0.00567536231884058
+2024-08-25 04:26:49,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=23893.333333333332, ans=0.125
+2024-08-25 04:26:54,476 INFO [train.py:1114] (2/4) Epoch 2, batch 2000, loss[loss=0.3354, simple_loss=0.3334, pruned_loss=0.1226, ctc_loss=0.2303, over 19678.00 frames. ], tot_loss[loss=0.364, simple_loss=0.3702, pruned_loss=0.1299, ctc_loss=0.2453, over 3854662.64 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:26:54,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=23946.666666666668, ans=0.025
+2024-08-25 04:27:20,452 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.625e+02 3.128e+02 3.968e+02 6.078e+02, threshold=6.255e+02, percent-clipped=2.0
+2024-08-25 04:27:41,059 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=24160.0, ans=0.2
+2024-08-25 04:27:42,202 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=24160.0, ans=0.0
+2024-08-25 04:27:51,150 INFO [train.py:1114] (2/4) Epoch 2, batch 2050, loss[loss=0.3056, simple_loss=0.3201, pruned_loss=0.1058, ctc_loss=0.199, over 19714.00 frames. ], tot_loss[loss=0.3626, simple_loss=0.3685, pruned_loss=0.1295, ctc_loss=0.2443, over 3850224.89 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:27:58,424 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.34 vs. limit=15.0
+2024-08-25 04:28:07,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=24266.666666666668, ans=0.025
+2024-08-25 04:28:17,725 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.02 vs. limit=6.0
+2024-08-25 04:28:22,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=24320.0, ans=0.125
+2024-08-25 04:28:27,450 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.12 vs. limit=10.0
+2024-08-25 04:28:29,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=24373.333333333332, ans=0.025
+2024-08-25 04:28:30,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=24373.333333333332, ans=0.0055710144927536235
+2024-08-25 04:28:44,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=24426.666666666668, ans=0.125
+2024-08-25 04:28:45,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=24426.666666666668, ans=0.0
+2024-08-25 04:28:47,785 INFO [train.py:1114] (2/4) Epoch 2, batch 2100, loss[loss=0.3591, simple_loss=0.3672, pruned_loss=0.1286, ctc_loss=0.2348, over 19778.00 frames. ], tot_loss[loss=0.3601, simple_loss=0.3669, pruned_loss=0.1283, ctc_loss=0.2422, over 3858887.10 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:28:48,323 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.76 vs. limit=15.0
+2024-08-25 04:28:48,946 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=24480.0, ans=0.125
+2024-08-25 04:29:04,628 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.73 vs. limit=15.0
+2024-08-25 04:29:14,134 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.311e+02 2.619e+02 3.137e+02 5.086e+02, threshold=5.238e+02, percent-clipped=0.0
+2024-08-25 04:29:21,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=24640.0, ans=0.125
+2024-08-25 04:29:27,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=24640.0, ans=0.125
+2024-08-25 04:29:41,459 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.55 vs. limit=15.0
+2024-08-25 04:29:44,330 INFO [train.py:1114] (2/4) Epoch 2, batch 2150, loss[loss=0.3118, simple_loss=0.3386, pruned_loss=0.1028, ctc_loss=0.1986, over 19844.00 frames. ], tot_loss[loss=0.3576, simple_loss=0.3652, pruned_loss=0.127, ctc_loss=0.2397, over 3869343.48 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 04:29:54,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=24800.0, ans=0.125
+2024-08-25 04:29:57,329 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.76 vs. limit=15.0
+2024-08-25 04:30:08,370 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=24853.333333333332, ans=0.1
+2024-08-25 04:30:12,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 04:30:18,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 04:30:22,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 04:30:23,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=24906.666666666668, ans=0.0
+2024-08-25 04:30:39,661 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.14 vs. limit=15.0
+2024-08-25 04:30:40,047 INFO [train.py:1114] (2/4) Epoch 2, batch 2200, loss[loss=0.3499, simple_loss=0.3677, pruned_loss=0.1211, ctc_loss=0.2249, over 19581.00 frames. ], tot_loss[loss=0.3565, simple_loss=0.3648, pruned_loss=0.1264, ctc_loss=0.2385, over 3867352.30 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:30:45,711 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25013.333333333332, ans=0.1
+2024-08-25 04:30:58,005 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.60 vs. limit=15.0
+2024-08-25 04:31:02,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25120.0, ans=0.1
+2024-08-25 04:31:02,792 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.06 vs. limit=12.0
+2024-08-25 04:31:06,342 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.398e+02 2.814e+02 3.505e+02 8.042e+02, threshold=5.628e+02, percent-clipped=3.0
+2024-08-25 04:31:09,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=25120.0, ans=0.0
+2024-08-25 04:31:13,187 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=25173.333333333332, ans=0.125
+2024-08-25 04:31:33,073 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=25226.666666666668, ans=0.5
+2024-08-25 04:31:37,458 INFO [train.py:1114] (2/4) Epoch 2, batch 2250, loss[loss=0.3276, simple_loss=0.3627, pruned_loss=0.1051, ctc_loss=0.206, over 19623.00 frames. ], tot_loss[loss=0.3569, simple_loss=0.3651, pruned_loss=0.1266, ctc_loss=0.2387, over 3866805.21 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:31:37,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=25280.0, ans=0.5
+2024-08-25 04:31:39,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=25280.0, ans=0.0
+2024-08-25 04:31:50,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=25333.333333333332, ans=0.125
+2024-08-25 04:32:09,702 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.72 vs. limit=22.5
+2024-08-25 04:32:33,459 INFO [train.py:1114] (2/4) Epoch 2, batch 2300, loss[loss=0.3403, simple_loss=0.3465, pruned_loss=0.1215, ctc_loss=0.2281, over 19496.00 frames. ], tot_loss[loss=0.3571, simple_loss=0.3647, pruned_loss=0.127, ctc_loss=0.239, over 3860956.39 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 16.0
+2024-08-25 04:32:40,327 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.553e-02
+2024-08-25 04:32:42,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=25546.666666666668, ans=0.1
+2024-08-25 04:32:59,595 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=16.45 vs. limit=22.5
+2024-08-25 04:33:02,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=25653.333333333332, ans=0.005292753623188406
+2024-08-25 04:33:03,049 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.317e+02 2.709e+02 3.466e+02 6.027e+02, threshold=5.417e+02, percent-clipped=4.0
+2024-08-25 04:33:05,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25653.333333333332, ans=0.1
+2024-08-25 04:33:07,224 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.66 vs. limit=15.0
+2024-08-25 04:33:17,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=25706.666666666668, ans=0.125
+2024-08-25 04:33:28,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=25760.0, ans=0.07
+2024-08-25 04:33:32,438 INFO [train.py:1114] (2/4) Epoch 2, batch 2350, loss[loss=0.3592, simple_loss=0.3709, pruned_loss=0.1267, ctc_loss=0.2354, over 19700.00 frames. ], tot_loss[loss=0.3567, simple_loss=0.3645, pruned_loss=0.1268, ctc_loss=0.2386, over 3863339.85 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 16.0
+2024-08-25 04:33:39,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=25813.333333333332, ans=0.04949747468305833
+2024-08-25 04:33:54,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=25866.666666666668, ans=0.035
+2024-08-25 04:34:03,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=25920.0, ans=0.125
+2024-08-25 04:34:21,715 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26026.666666666668, ans=0.1
+2024-08-25 04:34:30,680 INFO [train.py:1114] (2/4) Epoch 2, batch 2400, loss[loss=0.3517, simple_loss=0.369, pruned_loss=0.1195, ctc_loss=0.2387, over 19256.00 frames. ], tot_loss[loss=0.3578, simple_loss=0.3661, pruned_loss=0.127, ctc_loss=0.2388, over 3857805.16 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 04:34:56,688 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.15 vs. limit=15.0
+2024-08-25 04:34:57,152 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.184e+02 2.505e+02 3.102e+02 8.045e+02, threshold=5.010e+02, percent-clipped=5.0
+2024-08-25 04:34:57,613 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=26186.666666666668, ans=0.005176811594202899
+2024-08-25 04:35:27,214 INFO [train.py:1114] (2/4) Epoch 2, batch 2450, loss[loss=0.4411, simple_loss=0.4041, pruned_loss=0.1738, ctc_loss=0.326, over 12860.00 frames. ], tot_loss[loss=0.3675, simple_loss=0.3718, pruned_loss=0.132, ctc_loss=0.248, over 3730769.54 frames. ], batch size: 141, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 04:35:31,379 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=17.59 vs. limit=15.0
+2024-08-25 04:35:33,437 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=26346.666666666668, ans=0.005142028985507246
+2024-08-25 04:35:42,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=26400.0, ans=0.125
+2024-08-25 04:36:48,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=26554.666666666668, ans=0.07
+2024-08-25 04:36:55,755 INFO [train.py:1114] (2/4) Epoch 3, batch 0, loss[loss=0.3559, simple_loss=0.3586, pruned_loss=0.1273, ctc_loss=0.2467, over 19414.00 frames. ], tot_loss[loss=0.3559, simple_loss=0.3586, pruned_loss=0.1273, ctc_loss=0.2467, over 19414.00 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 04:36:55,756 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 04:37:08,025 INFO [train.py:1146] (2/4) Epoch 3, validation: loss=0.2847, simple_loss=0.3461, pruned_loss=0.08168, ctc_loss=0.1499, over 944034.00 frames.
+2024-08-25 04:37:08,025 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 04:37:14,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=26554.666666666668, ans=0.125
+2024-08-25 04:37:24,390 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.03 vs. limit=22.5
+2024-08-25 04:37:27,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=26608.0, ans=0.0
+2024-08-25 04:37:32,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=26661.333333333332, ans=0.125
+2024-08-25 04:37:34,937 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:37:43,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=26714.666666666668, ans=0.125
+2024-08-25 04:37:50,808 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.252e+02 2.580e+02 3.143e+02 6.401e+02, threshold=5.159e+02, percent-clipped=2.0
+2024-08-25 04:38:10,077 INFO [train.py:1114] (2/4) Epoch 3, batch 50, loss[loss=0.2736, simple_loss=0.3059, pruned_loss=0.08786, ctc_loss=0.1638, over 19698.00 frames. ], tot_loss[loss=0.3592, simple_loss=0.3679, pruned_loss=0.1272, ctc_loss=0.2404, over 844960.58 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:38:13,711 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:38:58,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=26928.0, ans=0.125
+2024-08-25 04:39:02,411 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.26 vs. limit=15.0
+2024-08-25 04:39:09,467 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=26981.333333333332, ans=0.2
+2024-08-25 04:39:09,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=26981.333333333332, ans=0.2
+2024-08-25 04:39:27,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=27088.0, ans=0.1
+2024-08-25 04:39:28,419 INFO [train.py:1114] (2/4) Epoch 3, batch 100, loss[loss=0.3182, simple_loss=0.3404, pruned_loss=0.1071, ctc_loss=0.204, over 19726.00 frames. ], tot_loss[loss=0.3591, simple_loss=0.3682, pruned_loss=0.127, ctc_loss=0.2397, over 1500110.56 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:39:48,667 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=27141.333333333332, ans=0.125
+2024-08-25 04:39:49,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=27141.333333333332, ans=0.004969275362318841
+2024-08-25 04:40:11,092 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.221e+02 2.583e+02 3.158e+02 4.904e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-08-25 04:40:27,486 INFO [train.py:1114] (2/4) Epoch 3, batch 150, loss[loss=0.3183, simple_loss=0.3335, pruned_loss=0.1095, ctc_loss=0.2102, over 19704.00 frames. ], tot_loss[loss=0.3545, simple_loss=0.3651, pruned_loss=0.1249, ctc_loss=0.2353, over 2028676.42 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 04:40:29,043 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.59 vs. limit=15.0
+2024-08-25 04:40:29,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 04:40:29,911 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=27354.666666666668, ans=0.035
+2024-08-25 04:40:30,005 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=27354.666666666668, ans=0.1
+2024-08-25 04:40:45,680 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=27408.0, ans=0.2
+2024-08-25 04:40:54,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.86 vs. limit=22.5
+2024-08-25 04:41:16,906 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=27568.0, ans=0.125
+2024-08-25 04:41:26,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=27568.0, ans=0.125
+2024-08-25 04:41:28,580 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.19 vs. limit=22.5
+2024-08-25 04:41:29,363 INFO [train.py:1114] (2/4) Epoch 3, batch 200, loss[loss=0.4304, simple_loss=0.3999, pruned_loss=0.1705, ctc_loss=0.2997, over 18180.00 frames. ], tot_loss[loss=0.351, simple_loss=0.3622, pruned_loss=0.1235, ctc_loss=0.2321, over 2435563.59 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:42:00,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=27728.0, ans=0.0
+2024-08-25 04:42:14,178 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.192e+02 2.550e+02 3.125e+02 5.269e+02, threshold=5.099e+02, percent-clipped=1.0
+2024-08-25 04:42:22,667 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=27834.666666666668, ans=0.125
+2024-08-25 04:42:35,064 INFO [train.py:1114] (2/4) Epoch 3, batch 250, loss[loss=0.3799, simple_loss=0.3775, pruned_loss=0.138, ctc_loss=0.2658, over 19405.00 frames. ], tot_loss[loss=0.3517, simple_loss=0.3628, pruned_loss=0.1237, ctc_loss=0.2328, over 2755297.25 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:42:37,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=27888.0, ans=0.125
+2024-08-25 04:42:49,560 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 04:42:56,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 04:43:09,126 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=28048.0, ans=0.125
+2024-08-25 04:43:17,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=28048.0, ans=0.125
+2024-08-25 04:43:32,592 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 04:43:33,535 INFO [train.py:1114] (2/4) Epoch 3, batch 300, loss[loss=0.3625, simple_loss=0.3699, pruned_loss=0.129, ctc_loss=0.2426, over 19532.00 frames. ], tot_loss[loss=0.3489, simple_loss=0.3612, pruned_loss=0.1222, ctc_loss=0.2303, over 2999990.84 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:43:33,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=28154.666666666668, ans=0.004748985507246377
+2024-08-25 04:43:34,190 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.98 vs. limit=15.0
+2024-08-25 04:43:36,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 04:43:43,651 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=28154.666666666668, ans=10.0
+2024-08-25 04:43:53,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=28208.0, ans=0.0
+2024-08-25 04:43:53,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28208.0, ans=0.1
+2024-08-25 04:44:07,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=28261.333333333332, ans=0.0047257971014492755
+2024-08-25 04:44:18,922 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.242e+02 2.624e+02 3.299e+02 5.169e+02, threshold=5.248e+02, percent-clipped=1.0
+2024-08-25 04:44:25,832 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.57 vs. limit=6.0
+2024-08-25 04:44:35,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=28421.333333333332, ans=0.0
+2024-08-25 04:44:35,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=28421.333333333332, ans=0.0
+2024-08-25 04:44:36,152 INFO [train.py:1114] (2/4) Epoch 3, batch 350, loss[loss=0.3184, simple_loss=0.3279, pruned_loss=0.1136, ctc_loss=0.2046, over 19709.00 frames. ], tot_loss[loss=0.3483, simple_loss=0.3609, pruned_loss=0.1219, ctc_loss=0.2297, over 3190487.02 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:44:40,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=28421.333333333332, ans=0.025
+2024-08-25 04:44:46,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=28421.333333333332, ans=0.125
+2024-08-25 04:44:58,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=28474.666666666668, ans=0.0
+2024-08-25 04:45:08,595 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=28528.0, ans=0.125
+2024-08-25 04:45:43,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=28581.333333333332, ans=0.125
+2024-08-25 04:45:52,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28634.666666666668, ans=0.1
+2024-08-25 04:46:55,910 INFO [train.py:1114] (2/4) Epoch 3, batch 400, loss[loss=0.3496, simple_loss=0.3661, pruned_loss=0.1192, ctc_loss=0.2365, over 19501.00 frames. ], tot_loss[loss=0.3449, simple_loss=0.3588, pruned_loss=0.1202, ctc_loss=0.2266, over 3341906.26 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 04:47:14,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=28688.0, ans=0.125
+2024-08-25 04:47:16,677 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=28741.333333333332, ans=0.125
+2024-08-25 04:47:22,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=28741.333333333332, ans=0.125
+2024-08-25 04:47:34,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=28741.333333333332, ans=0.0
+2024-08-25 04:48:22,791 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.232e+02 2.568e+02 3.025e+02 1.134e+03, threshold=5.136e+02, percent-clipped=4.0
+2024-08-25 04:48:27,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=28848.0, ans=0.125
+2024-08-25 04:48:39,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=28901.333333333332, ans=0.1
+2024-08-25 04:48:42,699 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:48:48,335 INFO [train.py:1114] (2/4) Epoch 3, batch 450, loss[loss=0.3157, simple_loss=0.3458, pruned_loss=0.1041, ctc_loss=0.1935, over 19616.00 frames. ], tot_loss[loss=0.3453, simple_loss=0.359, pruned_loss=0.1204, ctc_loss=0.2269, over 3450576.39 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:48:48,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=28954.666666666668, ans=0.0
+2024-08-25 04:48:51,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=28954.666666666668, ans=0.2
+2024-08-25 04:49:15,828 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.77 vs. limit=22.5
+2024-08-25 04:49:20,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=29061.333333333332, ans=0.125
+2024-08-25 04:49:28,017 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.78 vs. limit=15.0
+2024-08-25 04:49:42,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=29168.0, ans=0.004528695652173913
+2024-08-25 04:50:09,366 INFO [train.py:1114] (2/4) Epoch 3, batch 500, loss[loss=0.3291, simple_loss=0.3593, pruned_loss=0.1086, ctc_loss=0.2044, over 19718.00 frames. ], tot_loss[loss=0.3446, simple_loss=0.3582, pruned_loss=0.1202, ctc_loss=0.2266, over 3546365.53 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:50:25,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29274.666666666668, ans=0.1
+2024-08-25 04:50:27,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=29274.666666666668, ans=0.125
+2024-08-25 04:50:34,943 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.52 vs. limit=15.0
+2024-08-25 04:51:02,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=29381.333333333332, ans=0.125
+2024-08-25 04:51:09,141 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.370e+02 2.734e+02 3.745e+02 5.336e+02, threshold=5.469e+02, percent-clipped=1.0
+2024-08-25 04:51:21,427 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=29434.666666666668, ans=0.09899494936611666
+2024-08-25 04:51:25,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=29434.666666666668, ans=0.125
+2024-08-25 04:51:28,441 INFO [train.py:1114] (2/4) Epoch 3, batch 550, loss[loss=0.3674, simple_loss=0.3779, pruned_loss=0.1294, ctc_loss=0.2455, over 19277.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.3591, pruned_loss=0.1212, ctc_loss=0.2283, over 3609812.95 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:52:02,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29594.666666666668, ans=0.1
+2024-08-25 04:52:17,884 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=29648.0, ans=0.125
+2024-08-25 04:52:52,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29648.0, ans=0.1
+2024-08-25 04:52:53,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=29701.333333333332, ans=0.125
+2024-08-25 04:52:55,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=29701.333333333332, ans=0.0
+2024-08-25 04:53:06,030 INFO [train.py:1114] (2/4) Epoch 3, batch 600, loss[loss=0.4044, simple_loss=0.4007, pruned_loss=0.1499, ctc_loss=0.271, over 19378.00 frames. ], tot_loss[loss=0.346, simple_loss=0.3588, pruned_loss=0.121, ctc_loss=0.2277, over 3667534.10 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:53:49,301 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.141e+02 2.536e+02 3.031e+02 6.622e+02, threshold=5.071e+02, percent-clipped=2.0
+2024-08-25 04:53:50,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=29914.666666666668, ans=0.1
+2024-08-25 04:53:55,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=29968.0, ans=0.2
+2024-08-25 04:54:05,123 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=30021.333333333332, ans=0.0
+2024-08-25 04:54:06,084 INFO [train.py:1114] (2/4) Epoch 3, batch 650, loss[loss=0.3284, simple_loss=0.3505, pruned_loss=0.1121, ctc_loss=0.2052, over 19773.00 frames. ], tot_loss[loss=0.3436, simple_loss=0.3571, pruned_loss=0.1199, ctc_loss=0.2259, over 3717508.31 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 32.0
+2024-08-25 04:54:12,804 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.42 vs. limit=12.0
+2024-08-25 04:54:18,326 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:45,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=30128.0, ans=0.125
+2024-08-25 04:55:00,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=30181.333333333332, ans=0.2
+2024-08-25 04:55:03,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=30181.333333333332, ans=0.125
+2024-08-25 04:55:13,209 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=30234.666666666668, ans=0.125
+2024-08-25 04:55:13,609 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.43 vs. limit=22.5
+2024-08-25 04:55:19,039 INFO [train.py:1114] (2/4) Epoch 3, batch 700, loss[loss=0.2915, simple_loss=0.3251, pruned_loss=0.09382, ctc_loss=0.1755, over 19726.00 frames. ], tot_loss[loss=0.3439, simple_loss=0.3576, pruned_loss=0.1199, ctc_loss=0.2261, over 3749069.74 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:56:31,337 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.88 vs. limit=15.0
+2024-08-25 04:56:33,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=30448.0, ans=0.125
+2024-08-25 04:56:33,306 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=30448.0, ans=0.0
+2024-08-25 04:56:35,019 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.01 vs. limit=15.0
+2024-08-25 04:56:38,925 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.292e+02 2.520e+02 3.192e+02 5.203e+02, threshold=5.040e+02, percent-clipped=1.0
+2024-08-25 04:56:39,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=30448.0, ans=0.0
+2024-08-25 04:56:55,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=30501.333333333332, ans=0.125
+2024-08-25 04:56:57,187 INFO [train.py:1114] (2/4) Epoch 3, batch 750, loss[loss=0.3515, simple_loss=0.3671, pruned_loss=0.1222, ctc_loss=0.2291, over 19501.00 frames. ], tot_loss[loss=0.3418, simple_loss=0.3564, pruned_loss=0.1188, ctc_loss=0.224, over 3774217.50 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:57:12,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=30608.0, ans=0.0
+2024-08-25 04:57:17,442 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=30608.0, ans=0.125
+2024-08-25 04:57:38,261 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:57:59,143 INFO [train.py:1114] (2/4) Epoch 3, batch 800, loss[loss=0.3255, simple_loss=0.3434, pruned_loss=0.1115, ctc_loss=0.2113, over 19404.00 frames. ], tot_loss[loss=0.3418, simple_loss=0.3563, pruned_loss=0.1188, ctc_loss=0.2238, over 3795776.34 frames. ], batch size: 48, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:57:59,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=30821.333333333332, ans=0.125
+2024-08-25 04:58:11,587 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.68 vs. limit=22.5
+2024-08-25 04:58:19,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=30874.666666666668, ans=0.00415768115942029
+2024-08-25 04:58:23,560 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30928.0, ans=0.125
+2024-08-25 04:58:27,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=30928.0, ans=0.125
+2024-08-25 04:58:42,758 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.211e+02 2.622e+02 3.205e+02 5.257e+02, threshold=5.244e+02, percent-clipped=1.0
+2024-08-25 04:58:42,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=30981.333333333332, ans=0.125
+2024-08-25 04:59:01,994 INFO [train.py:1114] (2/4) Epoch 3, batch 850, loss[loss=0.348, simple_loss=0.3721, pruned_loss=0.1169, ctc_loss=0.2256, over 19648.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3562, pruned_loss=0.1189, ctc_loss=0.2239, over 3815617.43 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:59:12,289 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.85 vs. limit=10.0
+2024-08-25 04:59:15,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=31141.333333333332, ans=0.125
+2024-08-25 04:59:32,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=31194.666666666668, ans=0.1
+2024-08-25 04:59:38,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.41 vs. limit=15.0
+2024-08-25 04:59:43,202 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=31248.0, ans=0.2
+2024-08-25 04:59:47,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=31248.0, ans=0.125
+2024-08-25 04:59:55,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=31301.333333333332, ans=0.125
+2024-08-25 05:00:04,399 INFO [train.py:1114] (2/4) Epoch 3, batch 900, loss[loss=0.3138, simple_loss=0.3326, pruned_loss=0.106, ctc_loss=0.2073, over 19410.00 frames. ], tot_loss[loss=0.3435, simple_loss=0.3571, pruned_loss=0.1198, ctc_loss=0.2253, over 3818244.52 frames. ], batch size: 48, lr: 3.72e-02, grad_scale: 8.0
+2024-08-25 05:00:18,052 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=31408.0, ans=0.125
+2024-08-25 05:00:18,161 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:00:29,024 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.12 vs. limit=15.0
+2024-08-25 05:00:35,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=31461.333333333332, ans=0.125
+2024-08-25 05:00:54,419 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.296e+02 2.736e+02 3.525e+02 1.528e+03, threshold=5.472e+02, percent-clipped=4.0
+2024-08-25 05:00:55,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=31568.0, ans=0.025
+2024-08-25 05:01:08,271 INFO [train.py:1114] (2/4) Epoch 3, batch 950, loss[loss=0.3435, simple_loss=0.3543, pruned_loss=0.1197, ctc_loss=0.2334, over 19489.00 frames. ], tot_loss[loss=0.3427, simple_loss=0.3568, pruned_loss=0.1195, ctc_loss=0.2243, over 3820235.48 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:01:15,914 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.24 vs. limit=15.0
+2024-08-25 05:01:19,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=31674.666666666668, ans=0.125
+2024-08-25 05:01:30,625 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.79 vs. limit=22.5
+2024-08-25 05:01:42,707 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.39 vs. limit=22.5
+2024-08-25 05:02:01,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=31834.666666666668, ans=0.0
+2024-08-25 05:02:08,115 INFO [train.py:1114] (2/4) Epoch 3, batch 1000, loss[loss=0.3315, simple_loss=0.3453, pruned_loss=0.1149, ctc_loss=0.2198, over 19853.00 frames. ], tot_loss[loss=0.3435, simple_loss=0.3575, pruned_loss=0.1197, ctc_loss=0.2248, over 3815699.50 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:02:34,651 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=31994.666666666668, ans=0.0
+2024-08-25 05:02:56,479 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.163e+02 2.492e+02 3.027e+02 5.724e+02, threshold=4.983e+02, percent-clipped=1.0
+2024-08-25 05:02:56,858 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=32048.0, ans=0.125
+2024-08-25 05:03:04,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=32101.333333333332, ans=0.2
+2024-08-25 05:03:04,906 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.22 vs. limit=15.0
+2024-08-25 05:03:07,259 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.36 vs. limit=22.5
+2024-08-25 05:03:11,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=32101.333333333332, ans=0.0
+2024-08-25 05:03:13,747 INFO [train.py:1114] (2/4) Epoch 3, batch 1050, loss[loss=0.3558, simple_loss=0.3772, pruned_loss=0.1221, ctc_loss=0.2257, over 19842.00 frames. ], tot_loss[loss=0.3413, simple_loss=0.356, pruned_loss=0.1187, ctc_loss=0.223, over 3821755.97 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:03:15,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 05:03:21,338 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 05:03:29,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=32208.0, ans=0.125
+2024-08-25 05:04:11,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=32261.333333333332, ans=0.125
+2024-08-25 05:04:35,075 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.13 vs. limit=15.0
+2024-08-25 05:05:04,401 INFO [train.py:1114] (2/4) Epoch 3, batch 1100, loss[loss=0.3431, simple_loss=0.3525, pruned_loss=0.1201, ctc_loss=0.2337, over 19593.00 frames. ], tot_loss[loss=0.3419, simple_loss=0.3563, pruned_loss=0.119, ctc_loss=0.2238, over 3828945.65 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:05:05,806 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=32421.333333333332, ans=0.00382144927536232
+2024-08-25 05:05:42,529 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:05:51,121 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=32581.333333333332, ans=0.0037866666666666665
+2024-08-25 05:05:53,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=32581.333333333332, ans=0.0
+2024-08-25 05:05:57,409 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.51 vs. limit=15.0
+2024-08-25 05:06:00,571 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.355e+02 2.517e+02 3.019e+02 4.945e+02, threshold=5.033e+02, percent-clipped=0.0
+2024-08-25 05:06:20,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=32634.666666666668, ans=0.125
+2024-08-25 05:06:23,043 INFO [train.py:1114] (2/4) Epoch 3, batch 1150, loss[loss=0.2916, simple_loss=0.3276, pruned_loss=0.09234, ctc_loss=0.1775, over 19602.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3552, pruned_loss=0.1184, ctc_loss=0.2226, over 3828331.19 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 8.0
+2024-08-25 05:06:38,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=32741.333333333332, ans=0.1
+2024-08-25 05:06:39,937 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=32741.333333333332, ans=0.125
+2024-08-25 05:07:08,523 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=32848.0, ans=0.025
+2024-08-25 05:07:24,572 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=19.01 vs. limit=22.5
+2024-08-25 05:07:32,067 INFO [train.py:1114] (2/4) Epoch 3, batch 1200, loss[loss=0.337, simple_loss=0.3584, pruned_loss=0.115, ctc_loss=0.2136, over 19838.00 frames. ], tot_loss[loss=0.3405, simple_loss=0.3557, pruned_loss=0.1182, ctc_loss=0.2221, over 3824201.59 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:07:32,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=32954.666666666664, ans=0.125
+2024-08-25 05:08:06,150 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=33061.333333333336, ans=0.125
+2024-08-25 05:08:15,274 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=33114.666666666664, ans=0.125
+2024-08-25 05:08:17,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_na.min_abs, batch_count=33114.666666666664, ans=0.02
+2024-08-25 05:08:19,689 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.128e+02 2.359e+02 2.757e+02 6.653e+02, threshold=4.718e+02, percent-clipped=2.0
+2024-08-25 05:08:38,027 INFO [train.py:1114] (2/4) Epoch 3, batch 1250, loss[loss=0.3956, simple_loss=0.3947, pruned_loss=0.144, ctc_loss=0.271, over 19526.00 frames. ], tot_loss[loss=0.3388, simple_loss=0.3552, pruned_loss=0.1172, ctc_loss=0.22, over 3841897.57 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:08:39,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33221.333333333336, ans=0.1
+2024-08-25 05:08:43,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=33221.333333333336, ans=0.003647536231884058
+2024-08-25 05:08:55,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=33274.666666666664, ans=0.125
+2024-08-25 05:08:57,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=33274.666666666664, ans=0.125
+2024-08-25 05:09:04,964 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=33328.0, ans=0.09899494936611666
+2024-08-25 05:09:14,177 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=26.15 vs. limit=22.5
+2024-08-25 05:09:14,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=33328.0, ans=0.125
+2024-08-25 05:09:17,277 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=33381.333333333336, ans=0.125
+2024-08-25 05:09:24,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=33381.333333333336, ans=0.003612753623188405
+2024-08-25 05:09:31,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=33434.666666666664, ans=0.125
+2024-08-25 05:09:41,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=33488.0, ans=0.125
+2024-08-25 05:09:42,101 INFO [train.py:1114] (2/4) Epoch 3, batch 1300, loss[loss=0.3706, simple_loss=0.3743, pruned_loss=0.1341, ctc_loss=0.2468, over 18863.00 frames. ], tot_loss[loss=0.3389, simple_loss=0.3549, pruned_loss=0.1174, ctc_loss=0.2203, over 3846381.76 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:09:43,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=33488.0, ans=0.125
+2024-08-25 05:09:49,454 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.19 vs. limit=15.0
+2024-08-25 05:10:01,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33541.333333333336, ans=0.1
+2024-08-25 05:10:01,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=33541.333333333336, ans=0.125
+2024-08-25 05:10:07,556 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.32 vs. limit=12.0
+2024-08-25 05:10:47,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=33648.0, ans=10.0
+2024-08-25 05:10:48,155 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.161e+02 2.525e+02 2.896e+02 5.464e+02, threshold=5.050e+02, percent-clipped=3.0
+2024-08-25 05:10:59,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=33701.333333333336, ans=0.125
+2024-08-25 05:11:01,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33754.666666666664, ans=0.1
+2024-08-25 05:11:02,312 INFO [train.py:1114] (2/4) Epoch 3, batch 1350, loss[loss=0.3368, simple_loss=0.3584, pruned_loss=0.1147, ctc_loss=0.2142, over 19784.00 frames. ], tot_loss[loss=0.3377, simple_loss=0.3541, pruned_loss=0.1168, ctc_loss=0.2194, over 3857682.71 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:11:07,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=33754.666666666664, ans=0.0
+2024-08-25 05:12:00,193 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.30 vs. limit=15.0
+2024-08-25 05:12:02,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=33914.666666666664, ans=0.125
+2024-08-25 05:12:24,146 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=33968.0, ans=0.125
+2024-08-25 05:12:26,307 INFO [train.py:1114] (2/4) Epoch 3, batch 1400, loss[loss=0.2862, simple_loss=0.311, pruned_loss=0.09492, ctc_loss=0.179, over 19666.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3538, pruned_loss=0.1166, ctc_loss=0.2187, over 3864933.31 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 16.0
+2024-08-25 05:12:27,791 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=34021.333333333336, ans=0.0
+2024-08-25 05:12:31,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=34021.333333333336, ans=0.125
+2024-08-25 05:12:31,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=34021.333333333336, ans=0.025
+2024-08-25 05:12:34,075 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=11.31 vs. limit=12.0
+2024-08-25 05:12:36,951 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.46 vs. limit=22.5
+2024-08-25 05:12:51,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=34074.666666666664, ans=0.125
+2024-08-25 05:12:51,578 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.69 vs. limit=15.0
+2024-08-25 05:13:12,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=34128.0, ans=0.125
+2024-08-25 05:13:16,003 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.67 vs. limit=12.0
+2024-08-25 05:13:21,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=34181.333333333336, ans=0.0
+2024-08-25 05:13:26,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=34181.333333333336, ans=0.125
+2024-08-25 05:13:26,915 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.29 vs. limit=15.0
+2024-08-25 05:13:31,977 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.210e+02 2.531e+02 3.096e+02 9.067e+02, threshold=5.062e+02, percent-clipped=2.0
+2024-08-25 05:14:22,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=34234.666666666664, ans=0.0
+2024-08-25 05:14:24,484 INFO [train.py:1114] (2/4) Epoch 3, batch 1450, loss[loss=0.4005, simple_loss=0.4013, pruned_loss=0.1453, ctc_loss=0.2729, over 19668.00 frames. ], tot_loss[loss=0.337, simple_loss=0.354, pruned_loss=0.1164, ctc_loss=0.2184, over 3863076.85 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:14:27,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34288.0, ans=0.1
+2024-08-25 05:14:45,536 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.64 vs. limit=5.0
+2024-08-25 05:15:02,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=34394.666666666664, ans=0.0
+2024-08-25 05:15:28,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=34501.333333333336, ans=0.2
+2024-08-25 05:15:28,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=34501.333333333336, ans=0.125
+2024-08-25 05:15:32,908 INFO [train.py:1114] (2/4) Epoch 3, batch 1500, loss[loss=0.3468, simple_loss=0.367, pruned_loss=0.1209, ctc_loss=0.2117, over 19571.00 frames. ], tot_loss[loss=0.3371, simple_loss=0.3541, pruned_loss=0.1164, ctc_loss=0.2183, over 3863439.76 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:15:35,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=34554.666666666664, ans=0.125
+2024-08-25 05:15:44,227 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=34608.0, ans=0.125
+2024-08-25 05:15:50,233 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.77 vs. limit=15.0
+2024-08-25 05:16:46,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=34714.666666666664, ans=0.003322898550724638
+2024-08-25 05:16:51,237 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.151e+02 2.498e+02 3.151e+02 6.810e+02, threshold=4.996e+02, percent-clipped=2.0
+2024-08-25 05:20:00,630 INFO [train.py:1114] (2/4) Epoch 3, batch 1550, loss[loss=0.3686, simple_loss=0.3827, pruned_loss=0.1304, ctc_loss=0.2343, over 19608.00 frames. ], tot_loss[loss=0.3376, simple_loss=0.3541, pruned_loss=0.1168, ctc_loss=0.219, over 3847786.25 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 16.0
+2024-08-25 05:21:03,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=34928.0, ans=0.0
+2024-08-25 05:21:08,321 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.11 vs. limit=22.5
+2024-08-25 05:21:17,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=34981.333333333336, ans=0.125
+2024-08-25 05:22:04,973 INFO [train.py:1114] (2/4) Epoch 3, batch 1600, loss[loss=0.3123, simple_loss=0.3411, pruned_loss=0.1021, ctc_loss=0.1983, over 19818.00 frames. ], tot_loss[loss=0.337, simple_loss=0.3534, pruned_loss=0.1166, ctc_loss=0.2187, over 3836873.53 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 05:22:12,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35088.0, ans=0.1
+2024-08-25 05:22:25,104 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.72 vs. limit=6.0
+2024-08-25 05:22:57,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=35194.666666666664, ans=0.0
+2024-08-25 05:22:57,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=35194.666666666664, ans=0.0
+2024-08-25 05:23:25,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=35248.0, ans=0.125
+2024-08-25 05:23:43,076 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.193e+02 2.529e+02 3.233e+02 6.645e+02, threshold=5.059e+02, percent-clipped=2.0
+2024-08-25 05:23:49,315 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=35301.333333333336, ans=0.2
+2024-08-25 05:24:22,993 INFO [train.py:1114] (2/4) Epoch 3, batch 1650, loss[loss=0.358, simple_loss=0.3721, pruned_loss=0.1252, ctc_loss=0.2337, over 19662.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3536, pruned_loss=0.1166, ctc_loss=0.2189, over 3832747.85 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 05:24:29,871 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=35354.666666666664, ans=0.125
+2024-08-25 05:25:24,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=35461.333333333336, ans=0.0
+2024-08-25 05:26:16,031 INFO [train.py:1114] (2/4) Epoch 3, batch 1700, loss[loss=0.2857, simple_loss=0.3033, pruned_loss=0.09726, ctc_loss=0.184, over 19677.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.3527, pruned_loss=0.1157, ctc_loss=0.2176, over 3846372.42 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:26:28,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=35674.666666666664, ans=0.05
+2024-08-25 05:26:30,791 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=35674.666666666664, ans=0.125
+2024-08-25 05:26:51,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35728.0, ans=0.1
+2024-08-25 05:27:04,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=35781.333333333336, ans=0.125
+2024-08-25 05:27:09,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35781.333333333336, ans=0.1
+2024-08-25 05:27:10,195 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.342e+02 2.819e+02 3.429e+02 5.215e+02, threshold=5.637e+02, percent-clipped=1.0
+2024-08-25 05:27:23,551 INFO [train.py:1114] (2/4) Epoch 3, batch 1750, loss[loss=0.3465, simple_loss=0.3529, pruned_loss=0.1259, ctc_loss=0.2211, over 19688.00 frames. ], tot_loss[loss=0.3346, simple_loss=0.352, pruned_loss=0.1153, ctc_loss=0.2165, over 3849674.85 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:27:30,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=35888.0, ans=0.1
+2024-08-25 05:27:40,712 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.27 vs. limit=15.0
+2024-08-25 05:27:45,867 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.01 vs. limit=22.5
+2024-08-25 05:27:56,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=35994.666666666664, ans=0.2
+2024-08-25 05:28:35,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=36101.333333333336, ans=0.05
+2024-08-25 05:28:38,694 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=36101.333333333336, ans=0.003021449275362318
+2024-08-25 05:28:41,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=36101.333333333336, ans=0.125
+2024-08-25 05:29:16,485 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:29:19,797 INFO [train.py:1114] (2/4) Epoch 3, batch 1800, loss[loss=0.3599, simple_loss=0.3742, pruned_loss=0.1253, ctc_loss=0.2374, over 19622.00 frames. ], tot_loss[loss=0.334, simple_loss=0.3518, pruned_loss=0.1149, ctc_loss=0.2158, over 3851657.31 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:31:29,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=36208.0, ans=0.125
+2024-08-25 05:31:29,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=36208.0, ans=0.0
+2024-08-25 05:31:58,635 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.106e+02 2.466e+02 3.299e+02 1.077e+03, threshold=4.933e+02, percent-clipped=1.0
+2024-08-25 05:32:11,728 INFO [train.py:1114] (2/4) Epoch 3, batch 1850, loss[loss=0.3398, simple_loss=0.3645, pruned_loss=0.1127, ctc_loss=0.2241, over 19591.00 frames. ], tot_loss[loss=0.3344, simple_loss=0.352, pruned_loss=0.1151, ctc_loss=0.2163, over 3855442.08 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:32:35,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=36528.0, ans=0.09899494936611666
+2024-08-25 05:33:12,859 INFO [train.py:1114] (2/4) Epoch 3, batch 1900, loss[loss=0.3141, simple_loss=0.3534, pruned_loss=0.09898, ctc_loss=0.1921, over 19664.00 frames. ], tot_loss[loss=0.334, simple_loss=0.3525, pruned_loss=0.1147, ctc_loss=0.2154, over 3860442.41 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 16.0
+2024-08-25 05:33:18,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=36688.0, ans=0.125
+2024-08-25 05:33:35,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=36794.666666666664, ans=0.05
+2024-08-25 05:33:36,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=36794.666666666664, ans=0.0
+2024-08-25 05:33:52,839 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 05:34:01,327 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=15.0
+2024-08-25 05:34:05,261 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.260e+02 2.560e+02 3.105e+02 5.689e+02, threshold=5.120e+02, percent-clipped=2.0
+2024-08-25 05:34:06,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=36901.333333333336, ans=0.0
+2024-08-25 05:34:10,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=36901.333333333336, ans=0.2
+2024-08-25 05:34:49,852 INFO [train.py:1114] (2/4) Epoch 3, batch 1950, loss[loss=0.287, simple_loss=0.3223, pruned_loss=0.0924, ctc_loss=0.1674, over 19572.00 frames. ], tot_loss[loss=0.3351, simple_loss=0.3539, pruned_loss=0.115, ctc_loss=0.2157, over 3869401.96 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 16.0
+2024-08-25 05:34:53,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=36954.666666666664, ans=0.0
+2024-08-25 05:35:58,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37008.0, ans=0.1
+2024-08-25 05:36:03,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=37008.0, ans=0.2
+2024-08-25 05:36:04,964 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.49 vs. limit=15.0
+2024-08-25 05:36:26,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=37008.0, ans=0.2
+2024-08-25 05:36:32,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=37061.333333333336, ans=0.125
+2024-08-25 05:36:57,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=37168.0, ans=0.95
+2024-08-25 05:36:59,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=37168.0, ans=0.125
+2024-08-25 05:37:09,009 INFO [train.py:1114] (2/4) Epoch 3, batch 2000, loss[loss=0.3015, simple_loss=0.3163, pruned_loss=0.103, ctc_loss=0.2019, over 19680.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3552, pruned_loss=0.1162, ctc_loss=0.218, over 3853775.13 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 05:37:09,572 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.22 vs. limit=15.0
+2024-08-25 05:37:12,619 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=37221.333333333336, ans=0.2
+2024-08-25 05:37:22,067 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=37274.666666666664, ans=0.125
+2024-08-25 05:37:24,238 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=37274.666666666664, ans=0.0
+2024-08-25 05:37:31,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=37274.666666666664, ans=0.0
+2024-08-25 05:37:34,624 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.51 vs. limit=10.0
+2024-08-25 05:37:53,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=37381.333333333336, ans=0.125
+2024-08-25 05:37:56,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37381.333333333336, ans=0.1
+2024-08-25 05:38:02,426 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.243e+02 2.650e+02 3.292e+02 1.299e+03, threshold=5.300e+02, percent-clipped=6.0
+2024-08-25 05:38:03,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=37434.666666666664, ans=0.125
+2024-08-25 05:38:04,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=37434.666666666664, ans=0.125
+2024-08-25 05:38:13,956 INFO [train.py:1114] (2/4) Epoch 3, batch 2050, loss[loss=0.2977, simple_loss=0.3238, pruned_loss=0.09856, ctc_loss=0.1862, over 19733.00 frames. ], tot_loss[loss=0.3371, simple_loss=0.3544, pruned_loss=0.1163, ctc_loss=0.2182, over 3850119.84 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:38:14,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=37488.0, ans=0.09899494936611666
+2024-08-25 05:38:24,349 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37541.333333333336, ans=0.125
+2024-08-25 05:38:33,375 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=37541.333333333336, ans=0.2
+2024-08-25 05:38:37,897 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=37594.666666666664, ans=10.0
+2024-08-25 05:38:52,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=37648.0, ans=0.125
+2024-08-25 05:38:53,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=37648.0, ans=0.2
+2024-08-25 05:39:16,043 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=37701.333333333336, ans=0.125
+2024-08-25 05:39:40,789 INFO [train.py:1114] (2/4) Epoch 3, batch 2100, loss[loss=0.3486, simple_loss=0.3672, pruned_loss=0.1187, ctc_loss=0.2314, over 19774.00 frames. ], tot_loss[loss=0.3349, simple_loss=0.3529, pruned_loss=0.1152, ctc_loss=0.2163, over 3857857.51 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:40:14,189 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=37808.0, ans=0.5
+2024-08-25 05:40:43,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=37861.333333333336, ans=0.125
+2024-08-25 05:40:52,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=37914.666666666664, ans=0.1
+2024-08-25 05:40:58,546 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.072e+02 2.352e+02 2.718e+02 4.903e+02, threshold=4.703e+02, percent-clipped=0.0
+2024-08-25 05:41:10,107 INFO [train.py:1114] (2/4) Epoch 3, batch 2150, loss[loss=0.3098, simple_loss=0.3368, pruned_loss=0.1012, ctc_loss=0.2009, over 19867.00 frames. ], tot_loss[loss=0.3337, simple_loss=0.3519, pruned_loss=0.1147, ctc_loss=0.2153, over 3869079.99 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 05:41:18,650 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.85 vs. limit=6.0
+2024-08-25 05:41:19,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=38021.333333333336, ans=0.0
+2024-08-25 05:41:22,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=38074.666666666664, ans=0.2
+2024-08-25 05:41:45,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38128.0, ans=0.1
+2024-08-25 05:41:59,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=38181.333333333336, ans=0.07
+2024-08-25 05:42:42,901 INFO [train.py:1114] (2/4) Epoch 3, batch 2200, loss[loss=0.3267, simple_loss=0.3554, pruned_loss=0.1078, ctc_loss=0.2062, over 19555.00 frames. ], tot_loss[loss=0.3341, simple_loss=0.3522, pruned_loss=0.1148, ctc_loss=0.2159, over 3867006.86 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:42:56,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=38288.0, ans=0.0
+2024-08-25 05:42:56,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38288.0, ans=0.1
+2024-08-25 05:43:00,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38341.333333333336, ans=0.1
+2024-08-25 05:43:18,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=38394.666666666664, ans=0.125
+2024-08-25 05:43:25,033 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.30 vs. limit=22.5
+2024-08-25 05:43:33,620 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38501.333333333336, ans=0.1
+2024-08-25 05:43:34,309 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.197e+02 2.629e+02 2.994e+02 6.107e+02, threshold=5.259e+02, percent-clipped=1.0
+2024-08-25 05:43:35,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=38501.333333333336, ans=0.05
+2024-08-25 05:43:36,909 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=38501.333333333336, ans=0.125
+2024-08-25 05:43:41,486 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.06 vs. limit=22.5
+2024-08-25 05:43:51,075 INFO [train.py:1114] (2/4) Epoch 3, batch 2250, loss[loss=0.2826, simple_loss=0.3211, pruned_loss=0.08741, ctc_loss=0.1733, over 19614.00 frames. ], tot_loss[loss=0.3336, simple_loss=0.3518, pruned_loss=0.1147, ctc_loss=0.2153, over 3866779.90 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:44:39,392 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=38661.333333333336, ans=6.0
+2024-08-25 05:45:15,073 INFO [train.py:1114] (2/4) Epoch 3, batch 2300, loss[loss=0.3082, simple_loss=0.3331, pruned_loss=0.1028, ctc_loss=0.1944, over 19505.00 frames. ], tot_loss[loss=0.333, simple_loss=0.3509, pruned_loss=0.1145, ctc_loss=0.2153, over 3860084.14 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:45:17,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=38821.333333333336, ans=0.125
+2024-08-25 05:45:37,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38874.666666666664, ans=0.1
+2024-08-25 05:47:15,680 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.233e+02 2.542e+02 3.133e+02 7.552e+02, threshold=5.083e+02, percent-clipped=3.0
+2024-08-25 05:47:27,923 INFO [train.py:1114] (2/4) Epoch 3, batch 2350, loss[loss=0.3775, simple_loss=0.3851, pruned_loss=0.1337, ctc_loss=0.2567, over 19681.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3502, pruned_loss=0.1138, ctc_loss=0.2135, over 3862694.85 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:47:52,002 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=39194.666666666664, ans=0.2
+2024-08-25 05:48:01,375 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.09 vs. limit=6.0
+2024-08-25 05:48:10,008 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:48:24,941 INFO [train.py:1114] (2/4) Epoch 3, batch 2400, loss[loss=0.3767, simple_loss=0.3862, pruned_loss=0.133, ctc_loss=0.253, over 19295.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3532, pruned_loss=0.1156, ctc_loss=0.2163, over 3858731.18 frames. ], batch size: 71, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 05:48:36,645 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.40 vs. limit=15.0
+2024-08-25 05:48:38,162 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=39408.0, ans=0.0
+2024-08-25 05:48:38,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=39408.0, ans=0.125
+2024-08-25 05:48:49,777 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.71 vs. limit=6.0
+2024-08-25 05:48:51,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=39461.333333333336, ans=0.025
+2024-08-25 05:49:10,296 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.241e+02 2.672e+02 3.161e+02 5.607e+02, threshold=5.344e+02, percent-clipped=4.0
+2024-08-25 05:49:26,435 INFO [train.py:1114] (2/4) Epoch 3, batch 2450, loss[loss=0.4669, simple_loss=0.4119, pruned_loss=0.1872, ctc_loss=0.3686, over 12663.00 frames. ], tot_loss[loss=0.3456, simple_loss=0.3589, pruned_loss=0.1209, ctc_loss=0.2265, over 3731513.20 frames. ], batch size: 140, lr: 3.53e-02, grad_scale: 32.0
+2024-08-25 05:49:39,618 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=18.45 vs. limit=15.0
+2024-08-25 05:49:47,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=39674.666666666664, ans=0.2
+2024-08-25 05:49:48,633 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.69 vs. limit=15.0
+2024-08-25 05:49:49,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=39728.0, ans=0.1
+2024-08-25 05:50:01,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=39781.333333333336, ans=0.09899494936611666
+2024-08-25 05:50:50,838 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.57 vs. limit=12.0
+2024-08-25 05:51:05,719 INFO [train.py:1114] (2/4) Epoch 4, batch 0, loss[loss=0.3285, simple_loss=0.3448, pruned_loss=0.114, ctc_loss=0.2105, over 19424.00 frames. ], tot_loss[loss=0.3285, simple_loss=0.3448, pruned_loss=0.114, ctc_loss=0.2105, over 19424.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:51:05,719 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 05:51:35,404 INFO [train.py:1146] (2/4) Epoch 4, validation: loss=0.2629, simple_loss=0.3337, pruned_loss=0.07032, ctc_loss=0.1284, over 944034.00 frames.
+2024-08-25 05:51:35,405 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 05:51:59,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=39936.0, ans=0.0
+2024-08-25 05:52:29,652 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=40042.666666666664, ans=0.125
+2024-08-25 05:52:41,516 INFO [train.py:1114] (2/4) Epoch 4, batch 50, loss[loss=0.2997, simple_loss=0.3187, pruned_loss=0.103, ctc_loss=0.1867, over 19707.00 frames. ], tot_loss[loss=0.3439, simple_loss=0.3577, pruned_loss=0.1199, ctc_loss=0.2257, over 844557.52 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:52:47,054 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.147e+02 2.483e+02 2.920e+02 4.932e+02, threshold=4.967e+02, percent-clipped=0.0
+2024-08-25 05:53:08,333 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.98 vs. limit=15.0
+2024-08-25 05:53:24,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=40202.666666666664, ans=0.0
+2024-08-25 05:53:28,257 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.19 vs. limit=15.0
+2024-08-25 05:53:48,393 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=40256.0, ans=0.0
+2024-08-25 05:54:05,597 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.61 vs. limit=15.0
+2024-08-25 05:54:08,155 INFO [train.py:1114] (2/4) Epoch 4, batch 100, loss[loss=0.316, simple_loss=0.3419, pruned_loss=0.1072, ctc_loss=0.1888, over 19712.00 frames. ], tot_loss[loss=0.3366, simple_loss=0.3546, pruned_loss=0.1157, ctc_loss=0.2178, over 1498880.38 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-25 05:54:14,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=40362.666666666664, ans=0.0
+2024-08-25 05:54:32,170 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=40362.666666666664, ans=0.002095072463768116
+2024-08-25 05:54:36,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=40416.0, ans=0.0
+2024-08-25 05:54:37,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=40416.0, ans=0.025
+2024-08-25 05:54:41,946 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.64 vs. limit=15.0
+2024-08-25 05:55:17,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=40469.333333333336, ans=0.1
+2024-08-25 05:55:38,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=40576.0, ans=0.125
+2024-08-25 05:55:39,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=40576.0, ans=0.125
+2024-08-25 05:55:51,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=40576.0, ans=0.125
+2024-08-25 05:55:55,482 INFO [train.py:1114] (2/4) Epoch 4, batch 150, loss[loss=0.2547, simple_loss=0.2945, pruned_loss=0.07853, ctc_loss=0.1447, over 19732.00 frames. ], tot_loss[loss=0.3274, simple_loss=0.349, pruned_loss=0.111, ctc_loss=0.2096, over 2028245.15 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:55:56,490 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.033e+02 2.286e+02 2.661e+02 4.118e+02, threshold=4.571e+02, percent-clipped=0.0
+2024-08-25 05:56:17,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=40682.666666666664, ans=0.2
+2024-08-25 05:56:23,043 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=40736.0, ans=0.04949747468305833
+2024-08-25 05:56:36,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=40736.0, ans=0.125
+2024-08-25 05:56:52,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=40842.666666666664, ans=0.0
+2024-08-25 05:56:57,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=40842.666666666664, ans=0.125
+2024-08-25 05:57:00,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40842.666666666664, ans=0.1
+2024-08-25 05:57:03,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=40896.0, ans=0.125
+2024-08-25 05:57:04,737 INFO [train.py:1114] (2/4) Epoch 4, batch 200, loss[loss=0.3923, simple_loss=0.3891, pruned_loss=0.145, ctc_loss=0.2634, over 18389.00 frames. ], tot_loss[loss=0.325, simple_loss=0.3469, pruned_loss=0.11, ctc_loss=0.2076, over 2436426.00 frames. ], batch size: 86, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:57:25,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=40896.0, ans=0.125
+2024-08-25 05:57:30,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=40949.333333333336, ans=0.2
+2024-08-25 05:57:35,071 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=40949.333333333336, ans=0.125
+2024-08-25 05:57:35,521 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.60 vs. limit=15.0
+2024-08-25 05:57:40,877 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.12 vs. limit=6.0
+2024-08-25 05:57:49,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=41002.666666666664, ans=0.125
+2024-08-25 05:58:31,153 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.91 vs. limit=15.0
+2024-08-25 05:59:03,066 INFO [train.py:1114] (2/4) Epoch 4, batch 250, loss[loss=0.357, simple_loss=0.3653, pruned_loss=0.128, ctc_loss=0.2318, over 19404.00 frames. ], tot_loss[loss=0.3234, simple_loss=0.3461, pruned_loss=0.1092, ctc_loss=0.2058, over 2756072.04 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 05:59:04,091 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.098e+02 2.387e+02 2.939e+02 4.251e+02, threshold=4.774e+02, percent-clipped=0.0
+2024-08-25 05:59:17,679 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.62 vs. limit=15.0
+2024-08-25 05:59:25,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=41216.0, ans=0.0019095652173913048
+2024-08-25 05:59:48,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=41322.666666666664, ans=0.125
+2024-08-25 05:59:49,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=41322.666666666664, ans=0.125
+2024-08-25 05:59:57,056 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:00:12,536 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.50 vs. limit=15.0
+2024-08-25 06:00:14,189 INFO [train.py:1114] (2/4) Epoch 4, batch 300, loss[loss=0.3781, simple_loss=0.3903, pruned_loss=0.1345, ctc_loss=0.2423, over 19523.00 frames. ], tot_loss[loss=0.3235, simple_loss=0.3461, pruned_loss=0.1093, ctc_loss=0.2059, over 3001487.19 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 06:00:14,499 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=41429.333333333336, ans=0.125
+2024-08-25 06:00:32,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=41482.666666666664, ans=0.025
+2024-08-25 06:00:33,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=41482.666666666664, ans=0.0
+2024-08-25 06:00:39,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=41536.0, ans=0.0
+2024-08-25 06:00:56,302 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.22 vs. limit=15.0
+2024-08-25 06:00:59,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=41536.0, ans=0.1
+2024-08-25 06:01:02,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=41536.0, ans=0.125
+2024-08-25 06:01:13,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=41589.333333333336, ans=22.5
+2024-08-25 06:01:36,652 INFO [train.py:1114] (2/4) Epoch 4, batch 350, loss[loss=0.2878, simple_loss=0.3165, pruned_loss=0.09426, ctc_loss=0.1765, over 19751.00 frames. ], tot_loss[loss=0.3245, simple_loss=0.3469, pruned_loss=0.1097, ctc_loss=0.2065, over 3191000.54 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:01:37,791 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.143e+02 2.517e+02 2.887e+02 6.595e+02, threshold=5.034e+02, percent-clipped=1.0
+2024-08-25 06:01:45,677 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.08 vs. limit=15.0
+2024-08-25 06:02:16,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=41856.0, ans=0.125
+2024-08-25 06:02:17,509 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.42 vs. limit=15.0
+2024-08-25 06:02:19,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=41856.0, ans=0.0
+2024-08-25 06:02:38,803 INFO [train.py:1114] (2/4) Epoch 4, batch 400, loss[loss=0.3024, simple_loss=0.3412, pruned_loss=0.0943, ctc_loss=0.1875, over 19489.00 frames. ], tot_loss[loss=0.3231, simple_loss=0.3459, pruned_loss=0.109, ctc_loss=0.2052, over 3342031.38 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:03:02,690 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=42016.0, ans=0.125
+2024-08-25 06:03:06,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=42016.0, ans=0.125
+2024-08-25 06:03:10,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=42069.333333333336, ans=0.125
+2024-08-25 06:03:25,923 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.98 vs. limit=15.0
+2024-08-25 06:04:04,053 INFO [train.py:1114] (2/4) Epoch 4, batch 450, loss[loss=0.2943, simple_loss=0.3351, pruned_loss=0.09189, ctc_loss=0.1742, over 19613.00 frames. ], tot_loss[loss=0.3229, simple_loss=0.3458, pruned_loss=0.109, ctc_loss=0.205, over 3449509.33 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:04:06,521 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.107e+02 2.479e+02 2.897e+02 5.564e+02, threshold=4.958e+02, percent-clipped=2.0
+2024-08-25 06:04:13,896 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=42229.333333333336, ans=0.1
+2024-08-25 06:05:05,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=42389.333333333336, ans=0.125
+2024-08-25 06:05:29,111 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=42442.666666666664, ans=0.0
+2024-08-25 06:05:32,235 INFO [train.py:1114] (2/4) Epoch 4, batch 500, loss[loss=0.3203, simple_loss=0.3553, pruned_loss=0.1041, ctc_loss=0.1925, over 19651.00 frames. ], tot_loss[loss=0.3205, simple_loss=0.3439, pruned_loss=0.1079, ctc_loss=0.2032, over 3545956.63 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:05:39,936 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.42 vs. limit=10.0
+2024-08-25 06:05:45,867 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=12.51 vs. limit=15.0
+2024-08-25 06:05:48,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=42549.333333333336, ans=0.125
+2024-08-25 06:06:31,428 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.41 vs. limit=22.5
+2024-08-25 06:06:41,100 INFO [train.py:1114] (2/4) Epoch 4, batch 550, loss[loss=0.3367, simple_loss=0.3618, pruned_loss=0.1133, ctc_loss=0.2126, over 19266.00 frames. ], tot_loss[loss=0.3211, simple_loss=0.3444, pruned_loss=0.1082, ctc_loss=0.2035, over 3607040.34 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:06:44,774 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.027e+02 2.416e+02 2.881e+02 5.051e+02, threshold=4.833e+02, percent-clipped=1.0
+2024-08-25 06:07:03,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=42816.0, ans=0.125
+2024-08-25 06:07:36,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42922.666666666664, ans=0.1
+2024-08-25 06:07:36,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=42922.666666666664, ans=0.0
+2024-08-25 06:07:41,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=42976.0, ans=0.0
+2024-08-25 06:07:47,534 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.50 vs. limit=15.0
+2024-08-25 06:07:50,587 INFO [train.py:1114] (2/4) Epoch 4, batch 600, loss[loss=0.3651, simple_loss=0.3733, pruned_loss=0.1306, ctc_loss=0.239, over 19372.00 frames. ], tot_loss[loss=0.3211, simple_loss=0.3446, pruned_loss=0.1082, ctc_loss=0.2032, over 3664404.59 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:08:00,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=43029.333333333336, ans=0.2
+2024-08-25 06:08:05,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=43082.666666666664, ans=0.05
+2024-08-25 06:08:07,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=43082.666666666664, ans=0.1
+2024-08-25 06:08:13,114 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.73 vs. limit=15.0
+2024-08-25 06:08:16,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=43136.0, ans=0.2
+2024-08-25 06:08:58,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=43242.666666666664, ans=0.125
+2024-08-25 06:09:00,704 INFO [train.py:1114] (2/4) Epoch 4, batch 650, loss[loss=0.3049, simple_loss=0.3351, pruned_loss=0.0993, ctc_loss=0.1903, over 19765.00 frames. ], tot_loss[loss=0.3204, simple_loss=0.3439, pruned_loss=0.108, ctc_loss=0.2024, over 3714588.22 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 16.0
+2024-08-25 06:09:12,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=43296.0, ans=0.0014573913043478253
+2024-08-25 06:09:15,857 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.140e+02 2.544e+02 3.023e+02 7.017e+02, threshold=5.088e+02, percent-clipped=9.0
+2024-08-25 06:09:23,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=43349.333333333336, ans=0.1
+2024-08-25 06:09:59,498 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=2.461e-02
+2024-08-25 06:10:03,417 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.97 vs. limit=15.0
+2024-08-25 06:10:07,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=43509.333333333336, ans=0.125
+2024-08-25 06:10:15,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=43509.333333333336, ans=0.125
+2024-08-25 06:10:18,931 INFO [train.py:1114] (2/4) Epoch 4, batch 700, loss[loss=0.2728, simple_loss=0.3148, pruned_loss=0.08298, ctc_loss=0.1619, over 19721.00 frames. ], tot_loss[loss=0.3216, simple_loss=0.3449, pruned_loss=0.1085, ctc_loss=0.2036, over 3746358.54 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:10:22,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=43562.666666666664, ans=0.125
+2024-08-25 06:10:30,144 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.97 vs. limit=15.0
+2024-08-25 06:10:36,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=43616.0, ans=0.2
+2024-08-25 06:11:10,052 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:11:12,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=43776.0, ans=0.125
+2024-08-25 06:11:20,059 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.94 vs. limit=6.0
+2024-08-25 06:11:23,840 INFO [train.py:1114] (2/4) Epoch 4, batch 750, loss[loss=0.284, simple_loss=0.3264, pruned_loss=0.08768, ctc_loss=0.1659, over 19518.00 frames. ], tot_loss[loss=0.3199, simple_loss=0.3438, pruned_loss=0.1076, ctc_loss=0.2019, over 3773031.37 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:11:28,468 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.94 vs. limit=15.0
+2024-08-25 06:11:28,686 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.141e+02 2.481e+02 2.931e+02 4.472e+02, threshold=4.962e+02, percent-clipped=0.0
+2024-08-25 06:11:29,222 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.89 vs. limit=6.0
+2024-08-25 06:11:38,427 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=43829.333333333336, ans=0.0013414492753623195
+2024-08-25 06:11:42,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=43882.666666666664, ans=0.0
+2024-08-25 06:11:43,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=43882.666666666664, ans=0.001329855072463769
+2024-08-25 06:11:44,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=43882.666666666664, ans=0.05
+2024-08-25 06:12:01,865 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:12:19,614 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.01 vs. limit=15.0
+2024-08-25 06:12:23,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=44042.666666666664, ans=0.2
+2024-08-25 06:12:28,688 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.25 vs. limit=6.0
+2024-08-25 06:12:29,317 INFO [train.py:1114] (2/4) Epoch 4, batch 800, loss[loss=0.2731, simple_loss=0.3024, pruned_loss=0.08879, ctc_loss=0.1654, over 19794.00 frames. ], tot_loss[loss=0.3183, simple_loss=0.3428, pruned_loss=0.1069, ctc_loss=0.2001, over 3794584.01 frames. ], batch size: 49, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:12:38,361 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.96 vs. limit=6.0
+2024-08-25 06:12:49,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=44149.333333333336, ans=0.1
+2024-08-25 06:13:16,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=44309.333333333336, ans=0.0
+2024-08-25 06:13:28,057 INFO [train.py:1114] (2/4) Epoch 4, batch 850, loss[loss=0.3174, simple_loss=0.3493, pruned_loss=0.1037, ctc_loss=0.1954, over 19663.00 frames. ], tot_loss[loss=0.3168, simple_loss=0.3419, pruned_loss=0.1061, ctc_loss=0.1987, over 3814262.28 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:13:29,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=44362.666666666664, ans=0.0
+2024-08-25 06:13:31,265 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.074e+02 2.402e+02 2.888e+02 5.555e+02, threshold=4.804e+02, percent-clipped=1.0
+2024-08-25 06:13:33,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=44362.666666666664, ans=0.0
+2024-08-25 06:13:41,185 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.69 vs. limit=15.0
+2024-08-25 06:13:43,493 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.97 vs. limit=15.0
+2024-08-25 06:13:48,249 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.38 vs. limit=22.5
+2024-08-25 06:13:48,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=44416.0, ans=0.1
+2024-08-25 06:13:50,606 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=9.32 vs. limit=15.0
+2024-08-25 06:14:14,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn2.whiten.whitening_limit, batch_count=44522.666666666664, ans=22.5
+2024-08-25 06:14:17,276 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=44576.0, ans=0.0
+2024-08-25 06:14:21,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=44576.0, ans=0.025
+2024-08-25 06:14:32,299 INFO [train.py:1114] (2/4) Epoch 4, batch 900, loss[loss=0.3589, simple_loss=0.3491, pruned_loss=0.1363, ctc_loss=0.2406, over 19806.00 frames. ], tot_loss[loss=0.3176, simple_loss=0.3423, pruned_loss=0.1065, ctc_loss=0.1995, over 3817793.98 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:14:33,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=44629.333333333336, ans=0.1
+2024-08-25 06:15:14,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=44789.333333333336, ans=0.2
+2024-08-25 06:15:14,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=44789.333333333336, ans=0.125
+2024-08-25 06:15:26,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=44842.666666666664, ans=0.125
+2024-08-25 06:15:33,491 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.15 vs. limit=22.5
+2024-08-25 06:15:38,547 INFO [train.py:1114] (2/4) Epoch 4, batch 950, loss[loss=0.2742, simple_loss=0.3109, pruned_loss=0.08751, ctc_loss=0.1563, over 19494.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3428, pruned_loss=0.1067, ctc_loss=0.2002, over 3819918.60 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:15:42,141 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.101e+02 2.364e+02 2.735e+02 6.196e+02, threshold=4.728e+02, percent-clipped=2.0
+2024-08-25 06:15:48,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=44896.0, ans=0.0011095652173913045
+2024-08-25 06:16:02,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=44949.333333333336, ans=0.0010979710144927539
+2024-08-25 06:16:10,007 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:16:42,342 INFO [train.py:1114] (2/4) Epoch 4, batch 1000, loss[loss=0.2864, simple_loss=0.322, pruned_loss=0.09009, ctc_loss=0.1766, over 19849.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3435, pruned_loss=0.1071, ctc_loss=0.2008, over 3814838.08 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 32.0
+2024-08-25 06:16:46,360 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.45 vs. limit=15.0
+2024-08-25 06:17:13,004 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.98 vs. limit=22.5
+2024-08-25 06:17:35,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=45322.666666666664, ans=0.09899494936611666
+2024-08-25 06:17:47,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.99 vs. limit=22.5
+2024-08-25 06:18:10,624 INFO [train.py:1114] (2/4) Epoch 4, batch 1050, loss[loss=0.3423, simple_loss=0.3633, pruned_loss=0.1168, ctc_loss=0.2191, over 19836.00 frames. ], tot_loss[loss=0.318, simple_loss=0.3426, pruned_loss=0.1067, ctc_loss=0.2002, over 3822005.55 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-25 06:18:24,441 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.09 vs. limit=22.5
+2024-08-25 06:18:26,186 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.982e+02 2.200e+02 2.634e+02 5.388e+02, threshold=4.401e+02, percent-clipped=1.0
+2024-08-25 06:18:35,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=45482.666666666664, ans=0.09899494936611666
+2024-08-25 06:18:55,478 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=45536.0, ans=0.125
+2024-08-25 06:18:59,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=45536.0, ans=0.07
+2024-08-25 06:19:16,167 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.55 vs. limit=6.0
+2024-08-25 06:19:20,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=45642.666666666664, ans=0.2
+2024-08-25 06:19:21,693 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=45642.666666666664, ans=0.05
+2024-08-25 06:19:29,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=45642.666666666664, ans=0.0
+2024-08-25 06:19:36,304 INFO [train.py:1114] (2/4) Epoch 4, batch 1100, loss[loss=0.3009, simple_loss=0.3219, pruned_loss=0.1016, ctc_loss=0.1915, over 19606.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3421, pruned_loss=0.1063, ctc_loss=0.1995, over 3829079.95 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:19:57,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45749.333333333336, ans=0.1
+2024-08-25 06:20:04,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=45749.333333333336, ans=0.5
+2024-08-25 06:20:21,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=45856.0, ans=0.125
+2024-08-25 06:20:27,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=45856.0, ans=0.125
+2024-08-25 06:20:32,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=45856.0, ans=0.0
+2024-08-25 06:20:51,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=45962.666666666664, ans=0.125
+2024-08-25 06:20:52,258 INFO [train.py:1114] (2/4) Epoch 4, batch 1150, loss[loss=0.2674, simple_loss=0.3127, pruned_loss=0.08047, ctc_loss=0.1528, over 19592.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3423, pruned_loss=0.1067, ctc_loss=0.2004, over 3830223.75 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:20:57,032 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.122e+02 2.390e+02 2.706e+02 4.199e+02, threshold=4.779e+02, percent-clipped=0.0
+2024-08-25 06:21:12,932 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=2.542e-03
+2024-08-25 06:21:20,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=46016.0, ans=0.0008660869565217388
+2024-08-25 06:21:25,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=46069.333333333336, ans=0.125
+2024-08-25 06:21:26,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=46069.333333333336, ans=0.125
+2024-08-25 06:21:35,984 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.21 vs. limit=15.0
+2024-08-25 06:21:53,431 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.08 vs. limit=22.5
+2024-08-25 06:21:59,986 INFO [train.py:1114] (2/4) Epoch 4, batch 1200, loss[loss=0.291, simple_loss=0.3327, pruned_loss=0.08995, ctc_loss=0.1735, over 19845.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3429, pruned_loss=0.1066, ctc_loss=0.2007, over 3825950.41 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:22:14,274 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=46229.333333333336, ans=0.0
+2024-08-25 06:22:30,530 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=46336.0, ans=0.09899494936611666
+2024-08-25 06:22:40,725 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=46336.0, ans=0.0
+2024-08-25 06:22:53,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=46389.333333333336, ans=0.0
+2024-08-25 06:23:00,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=46389.333333333336, ans=0.1
+2024-08-25 06:23:14,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=46442.666666666664, ans=0.125
+2024-08-25 06:23:17,968 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:23:21,321 INFO [train.py:1114] (2/4) Epoch 4, batch 1250, loss[loss=0.3405, simple_loss=0.3626, pruned_loss=0.1153, ctc_loss=0.2197, over 19543.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3432, pruned_loss=0.1063, ctc_loss=0.2, over 3843771.03 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:23:26,220 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.962e+02 2.225e+02 2.468e+02 3.508e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 06:24:26,555 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=46656.0, ans=0.0007269565217391302
+2024-08-25 06:24:48,929 INFO [train.py:1114] (2/4) Epoch 4, batch 1300, loss[loss=0.313, simple_loss=0.3415, pruned_loss=0.1037, ctc_loss=0.1926, over 18907.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3421, pruned_loss=0.106, ctc_loss=0.1994, over 3847068.18 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:24:55,029 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.32 vs. limit=15.0
+2024-08-25 06:25:05,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=46816.0, ans=0.125
+2024-08-25 06:25:52,894 INFO [train.py:1114] (2/4) Epoch 4, batch 1350, loss[loss=0.3131, simple_loss=0.3433, pruned_loss=0.1023, ctc_loss=0.1958, over 19758.00 frames. ], tot_loss[loss=0.3143, simple_loss=0.3405, pruned_loss=0.1047, ctc_loss=0.1966, over 3858266.94 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:25:54,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=47029.333333333336, ans=0.2
+2024-08-25 06:26:07,744 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.269e+02 2.560e+02 3.229e+02 4.886e+02, threshold=5.120e+02, percent-clipped=5.0
+2024-08-25 06:26:37,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=47082.666666666664, ans=0.125
+2024-08-25 06:26:59,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=47189.333333333336, ans=0.125
+2024-08-25 06:27:15,316 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.45 vs. limit=15.0
+2024-08-25 06:27:20,739 INFO [train.py:1114] (2/4) Epoch 4, batch 1400, loss[loss=0.2384, simple_loss=0.2817, pruned_loss=0.06995, ctc_loss=0.1379, over 19705.00 frames. ], tot_loss[loss=0.3138, simple_loss=0.3399, pruned_loss=0.1045, ctc_loss=0.1965, over 3865064.39 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:27:27,137 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.65 vs. limit=15.0
+2024-08-25 06:27:27,278 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.25 vs. limit=15.0
+2024-08-25 06:27:52,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=47349.333333333336, ans=0.125
+2024-08-25 06:27:57,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=47349.333333333336, ans=0.1
+2024-08-25 06:28:02,386 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=2.522e-03
+2024-08-25 06:28:27,539 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=47456.0, ans=0.125
+2024-08-25 06:28:43,719 INFO [train.py:1114] (2/4) Epoch 4, batch 1450, loss[loss=0.3301, simple_loss=0.3592, pruned_loss=0.109, ctc_loss=0.2076, over 19676.00 frames. ], tot_loss[loss=0.3154, simple_loss=0.341, pruned_loss=0.1054, ctc_loss=0.1979, over 3863127.45 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:28:45,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=47562.666666666664, ans=0.0005298550724637686
+2024-08-25 06:28:48,581 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.026e+02 2.327e+02 2.659e+02 4.329e+02, threshold=4.654e+02, percent-clipped=0.0
+2024-08-25 06:29:02,221 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.05 vs. limit=10.0
+2024-08-25 06:29:13,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=47669.333333333336, ans=0.0
+2024-08-25 06:29:21,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=47722.666666666664, ans=0.1
+2024-08-25 06:29:44,349 INFO [train.py:1114] (2/4) Epoch 4, batch 1500, loss[loss=0.3336, simple_loss=0.3616, pruned_loss=0.1124, ctc_loss=0.2019, over 19574.00 frames. ], tot_loss[loss=0.3156, simple_loss=0.3415, pruned_loss=0.1053, ctc_loss=0.1978, over 3862632.16 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:29:50,421 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=47829.333333333336, ans=0.125
+2024-08-25 06:30:34,798 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=48042.666666666664, ans=0.0
+2024-08-25 06:31:38,010 INFO [train.py:1114] (2/4) Epoch 4, batch 1550, loss[loss=0.3627, simple_loss=0.3799, pruned_loss=0.126, ctc_loss=0.2338, over 19574.00 frames. ], tot_loss[loss=0.3157, simple_loss=0.3414, pruned_loss=0.1054, ctc_loss=0.198, over 3847553.78 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:31:49,980 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.013e+02 2.262e+02 2.770e+02 1.090e+03, threshold=4.525e+02, percent-clipped=1.0
+2024-08-25 06:32:14,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=48149.333333333336, ans=0.0
+2024-08-25 06:32:43,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-25 06:33:15,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=48309.333333333336, ans=0.025
+2024-08-25 06:33:26,254 INFO [train.py:1114] (2/4) Epoch 4, batch 1600, loss[loss=0.3508, simple_loss=0.3732, pruned_loss=0.1197, ctc_loss=0.2223, over 19829.00 frames. ], tot_loss[loss=0.3164, simple_loss=0.3417, pruned_loss=0.1058, ctc_loss=0.1985, over 3837485.55 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:33:28,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=48362.666666666664, ans=0.025
+2024-08-25 06:33:33,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=48362.666666666664, ans=0.125
+2024-08-25 06:33:36,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=48362.666666666664, ans=0.0
+2024-08-25 06:33:37,333 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48416.0, ans=0.1
+2024-08-25 06:33:48,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=48416.0, ans=0.125
+2024-08-25 06:33:50,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=48416.0, ans=0.125
+2024-08-25 06:33:53,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48416.0, ans=0.1
+2024-08-25 06:33:58,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=48416.0, ans=0.1
+2024-08-25 06:34:40,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=48522.666666666664, ans=0.2
+2024-08-25 06:35:09,764 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48576.0, ans=0.1
+2024-08-25 06:35:15,037 INFO [train.py:1114] (2/4) Epoch 4, batch 1650, loss[loss=0.3435, simple_loss=0.3656, pruned_loss=0.117, ctc_loss=0.2187, over 19647.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3423, pruned_loss=0.1065, ctc_loss=0.1999, over 3833438.53 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:35:21,183 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.079e+02 2.506e+02 2.996e+02 5.422e+02, threshold=5.011e+02, percent-clipped=2.0
+2024-08-25 06:35:23,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=48629.333333333336, ans=0.0002979710144927535
+2024-08-25 06:35:27,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=48629.333333333336, ans=0.0
+2024-08-25 06:35:50,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=48682.666666666664, ans=0.125
+2024-08-25 06:35:58,766 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.26 vs. limit=12.0
+2024-08-25 06:36:32,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=48842.666666666664, ans=0.025
+2024-08-25 06:36:37,448 INFO [train.py:1114] (2/4) Epoch 4, batch 1700, loss[loss=0.2844, simple_loss=0.3075, pruned_loss=0.09548, ctc_loss=0.1757, over 19645.00 frames. ], tot_loss[loss=0.3162, simple_loss=0.3416, pruned_loss=0.1057, ctc_loss=0.1983, over 3847276.12 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-25 06:36:41,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=48896.0, ans=0.2
+2024-08-25 06:37:07,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=48949.333333333336, ans=0.125
+2024-08-25 06:37:22,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=49002.666666666664, ans=0.00021681159420289947
+2024-08-25 06:37:25,139 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=49002.666666666664, ans=0.0
+2024-08-25 06:37:28,881 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.17 vs. limit=15.0
+2024-08-25 06:38:15,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=49109.333333333336, ans=0.125
+2024-08-25 06:38:25,878 INFO [train.py:1114] (2/4) Epoch 4, batch 1750, loss[loss=0.3025, simple_loss=0.3156, pruned_loss=0.1059, ctc_loss=0.1941, over 19645.00 frames. ], tot_loss[loss=0.3145, simple_loss=0.3402, pruned_loss=0.105, ctc_loss=0.1971, over 3852181.27 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:38:29,063 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.03 vs. limit=22.5
+2024-08-25 06:38:31,264 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.88 vs. limit=15.0
+2024-08-25 06:38:33,085 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 1.987e+02 2.278e+02 2.713e+02 5.908e+02, threshold=4.555e+02, percent-clipped=1.0
+2024-08-25 06:38:41,365 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=49216.0, ans=0.125
+2024-08-25 06:39:31,715 INFO [train.py:1114] (2/4) Epoch 4, batch 1800, loss[loss=0.309, simple_loss=0.3442, pruned_loss=0.1003, ctc_loss=0.1828, over 19611.00 frames. ], tot_loss[loss=0.3139, simple_loss=0.3399, pruned_loss=0.1047, ctc_loss=0.1963, over 3852946.72 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:40:10,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=49482.666666666664, ans=0.2
+2024-08-25 06:40:19,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=49536.0, ans=0.2
+2024-08-25 06:40:19,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=49536.0, ans=0.025
+2024-08-25 06:40:26,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=49536.0, ans=0.125
+2024-08-25 06:40:43,724 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=49642.666666666664, ans=0.125
+2024-08-25 06:40:54,735 INFO [train.py:1114] (2/4) Epoch 4, batch 1850, loss[loss=0.3356, simple_loss=0.3547, pruned_loss=0.1141, ctc_loss=0.2208, over 19561.00 frames. ], tot_loss[loss=0.3133, simple_loss=0.3395, pruned_loss=0.1044, ctc_loss=0.1958, over 3856583.60 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:41:01,667 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.149e+02 2.307e+02 2.574e+02 4.619e+02, threshold=4.614e+02, percent-clipped=1.0
+2024-08-25 06:41:01,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=49696.0, ans=0.125
+2024-08-25 06:41:05,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=49696.0, ans=0.025
+2024-08-25 06:41:10,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=49749.333333333336, ans=0.1
+2024-08-25 06:41:15,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=49749.333333333336, ans=0.0
+2024-08-25 06:41:29,232 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=49802.666666666664, ans=0.125
+2024-08-25 06:41:50,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=49909.333333333336, ans=0.1
+2024-08-25 06:41:59,170 INFO [train.py:1114] (2/4) Epoch 4, batch 1900, loss[loss=0.3183, simple_loss=0.3549, pruned_loss=0.1026, ctc_loss=0.1916, over 19638.00 frames. ], tot_loss[loss=0.3135, simple_loss=0.34, pruned_loss=0.1045, ctc_loss=0.1955, over 3861482.76 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:42:40,360 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=50016.0, ans=10.0
+2024-08-25 06:42:41,573 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=50016.0, ans=0.125
+2024-08-25 06:43:08,900 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.63 vs. limit=15.0
+2024-08-25 06:43:11,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=50122.666666666664, ans=0.125
+2024-08-25 06:43:39,882 INFO [train.py:1114] (2/4) Epoch 4, batch 1950, loss[loss=0.3125, simple_loss=0.3423, pruned_loss=0.1041, ctc_loss=0.1864, over 19573.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3416, pruned_loss=0.1049, ctc_loss=0.1963, over 3870492.34 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:43:45,602 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.065e+02 2.259e+02 2.635e+02 4.732e+02, threshold=4.517e+02, percent-clipped=1.0
+2024-08-25 06:43:47,949 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=50229.333333333336, ans=0.125
+2024-08-25 06:43:58,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=50282.666666666664, ans=0.1
+2024-08-25 06:44:10,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=50336.0, ans=0.2
+2024-08-25 06:44:14,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=50389.333333333336, ans=0.1
+2024-08-25 06:44:23,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=50389.333333333336, ans=0.07
+2024-08-25 06:44:48,806 INFO [train.py:1114] (2/4) Epoch 4, batch 2000, loss[loss=0.2673, simple_loss=0.2954, pruned_loss=0.08693, ctc_loss=0.1633, over 19668.00 frames. ], tot_loss[loss=0.3151, simple_loss=0.3416, pruned_loss=0.1051, ctc_loss=0.1964, over 3853499.94 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:44:50,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.92 vs. limit=6.0
+2024-08-25 06:45:49,610 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.07 vs. limit=15.0
+2024-08-25 06:46:19,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=50709.333333333336, ans=0.0
+2024-08-25 06:46:22,676 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.66 vs. limit=10.0
+2024-08-25 06:46:34,389 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.37 vs. limit=15.0
+2024-08-25 06:46:35,053 INFO [train.py:1114] (2/4) Epoch 4, batch 2050, loss[loss=0.2525, simple_loss=0.2934, pruned_loss=0.07676, ctc_loss=0.1453, over 19717.00 frames. ], tot_loss[loss=0.3158, simple_loss=0.3415, pruned_loss=0.1056, ctc_loss=0.1976, over 3850611.27 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:46:45,615 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.046e+02 2.338e+02 2.720e+02 4.537e+02, threshold=4.675e+02, percent-clipped=1.0
+2024-08-25 06:47:00,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=50816.0, ans=0.125
+2024-08-25 06:47:09,427 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.47 vs. limit=15.0
+2024-08-25 06:47:12,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=50869.333333333336, ans=0.2
+2024-08-25 06:47:15,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=50922.666666666664, ans=0.07
+2024-08-25 06:47:16,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=50922.666666666664, ans=0.0
+2024-08-25 06:47:28,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=50922.666666666664, ans=0.125
+2024-08-25 06:47:31,043 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.62 vs. limit=15.0
+2024-08-25 06:47:31,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=50976.0, ans=0.0
+2024-08-25 06:47:47,558 INFO [train.py:1114] (2/4) Epoch 4, batch 2100, loss[loss=0.3288, simple_loss=0.3478, pruned_loss=0.1126, ctc_loss=0.2116, over 19763.00 frames. ], tot_loss[loss=0.3147, simple_loss=0.3408, pruned_loss=0.105, ctc_loss=0.1967, over 3858330.99 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:47:52,189 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=51029.333333333336, ans=0.2
+2024-08-25 06:47:53,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51029.333333333336, ans=0.1
+2024-08-25 06:48:07,002 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=51029.333333333336, ans=0.0
+2024-08-25 06:48:21,757 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.37 vs. limit=22.5
+2024-08-25 06:48:26,715 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=51082.666666666664, ans=0.2
+2024-08-25 06:48:42,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=51136.0, ans=0.025
+2024-08-25 06:48:42,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=51136.0, ans=0.2
+2024-08-25 06:48:44,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=51136.0, ans=0.05
+2024-08-25 06:48:57,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51136.0, ans=0.1
+2024-08-25 06:48:57,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=51136.0, ans=0.125
+2024-08-25 06:48:59,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=51136.0, ans=0.0
+2024-08-25 06:49:02,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=51189.333333333336, ans=0.125
+2024-08-25 06:49:38,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=51242.666666666664, ans=0.2
+2024-08-25 06:49:45,556 INFO [train.py:1114] (2/4) Epoch 4, batch 2150, loss[loss=0.2835, simple_loss=0.3242, pruned_loss=0.08776, ctc_loss=0.1683, over 19858.00 frames. ], tot_loss[loss=0.3134, simple_loss=0.3397, pruned_loss=0.1044, ctc_loss=0.1955, over 3869328.31 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:49:48,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=51296.0, ans=0.0
+2024-08-25 06:49:54,452 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.035e+02 2.305e+02 2.639e+02 4.596e+02, threshold=4.610e+02, percent-clipped=0.0
+2024-08-25 06:50:13,051 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.66 vs. limit=22.5
+2024-08-25 06:50:13,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=51349.333333333336, ans=0.125
+2024-08-25 06:50:24,209 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=51402.666666666664, ans=0.125
+2024-08-25 06:51:12,522 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.93 vs. limit=15.0
+2024-08-25 06:51:15,400 INFO [train.py:1114] (2/4) Epoch 4, batch 2200, loss[loss=0.2867, simple_loss=0.3244, pruned_loss=0.08946, ctc_loss=0.1752, over 19574.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3392, pruned_loss=0.1039, ctc_loss=0.1947, over 3868226.85 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:51:21,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=51562.666666666664, ans=0.125
+2024-08-25 06:51:44,751 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=51616.0, ans=0.0
+2024-08-25 06:52:00,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=51722.666666666664, ans=0.2
+2024-08-25 06:52:07,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=51722.666666666664, ans=0.125
+2024-08-25 06:52:14,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=51776.0, ans=0.125
+2024-08-25 06:52:24,911 INFO [train.py:1114] (2/4) Epoch 4, batch 2250, loss[loss=0.3165, simple_loss=0.3478, pruned_loss=0.1039, ctc_loss=0.1936, over 19617.00 frames. ], tot_loss[loss=0.3134, simple_loss=0.3399, pruned_loss=0.1044, ctc_loss=0.1956, over 3867770.48 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:52:32,001 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.164e+02 2.622e+02 3.263e+02 6.940e+02, threshold=5.245e+02, percent-clipped=2.0
+2024-08-25 06:52:40,504 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.34 vs. limit=12.0
+2024-08-25 06:52:57,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=51936.0, ans=10.0
+2024-08-25 06:53:07,696 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=51989.333333333336, ans=0.025
+2024-08-25 06:53:16,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=52042.666666666664, ans=0.0
+2024-08-25 06:53:30,937 INFO [train.py:1114] (2/4) Epoch 4, batch 2300, loss[loss=0.2919, simple_loss=0.3267, pruned_loss=0.09497, ctc_loss=0.168, over 19514.00 frames. ], tot_loss[loss=0.3127, simple_loss=0.3389, pruned_loss=0.1041, ctc_loss=0.1954, over 3861902.31 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:53:43,294 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=9.47 vs. limit=12.0
+2024-08-25 06:54:07,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=52202.666666666664, ans=0.1
+2024-08-25 06:54:09,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=52256.0, ans=0.1
+2024-08-25 06:54:26,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=52256.0, ans=0.125
+2024-08-25 06:54:29,643 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=52309.333333333336, ans=0.0
+2024-08-25 06:54:43,481 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.96 vs. limit=6.0
+2024-08-25 06:54:53,365 INFO [train.py:1114] (2/4) Epoch 4, batch 2350, loss[loss=0.3478, simple_loss=0.3687, pruned_loss=0.1189, ctc_loss=0.2226, over 19663.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.3381, pruned_loss=0.1036, ctc_loss=0.1947, over 3863950.36 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 06:54:58,715 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.121e+02 2.497e+02 3.048e+02 4.745e+02, threshold=4.995e+02, percent-clipped=0.0
+2024-08-25 06:55:03,399 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.94 vs. limit=15.0
+2024-08-25 06:55:04,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=52362.666666666664, ans=0.125
+2024-08-25 06:55:13,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=52416.0, ans=0.125
+2024-08-25 06:58:44,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=52469.333333333336, ans=0.125
+2024-08-25 07:06:41,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52576.0, ans=0.1
+2024-08-25 07:06:42,088 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=52576.0, ans=0.125
+2024-08-25 07:07:01,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=52576.0, ans=0.0
+2024-08-25 07:07:21,839 INFO [train.py:1114] (2/4) Epoch 4, batch 2400, loss[loss=0.336, simple_loss=0.3519, pruned_loss=0.1159, ctc_loss=0.2206, over 19289.00 frames. ], tot_loss[loss=0.3141, simple_loss=0.3405, pruned_loss=0.1046, ctc_loss=0.1963, over 3858454.32 frames. ], batch size: 71, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 07:10:21,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=52629.333333333336, ans=0.2
+2024-08-25 07:12:26,874 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=52682.666666666664, ans=0.0
+2024-08-25 07:18:51,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=52736.0, ans=0.125
+2024-08-25 07:19:50,355 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=52736.0, ans=0.09899494936611666
+2024-08-25 07:25:12,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=52789.333333333336, ans=0.09899494936611666
+2024-08-25 07:34:58,787 INFO [train.py:1114] (2/4) Epoch 4, batch 2450, loss[loss=0.3494, simple_loss=0.3485, pruned_loss=0.125, ctc_loss=0.2506, over 13795.00 frames. ], tot_loss[loss=0.3239, simple_loss=0.346, pruned_loss=0.1097, ctc_loss=0.206, over 3732216.21 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-25 07:36:27,110 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.096e+02 2.355e+02 2.735e+02 5.246e+02, threshold=4.710e+02, percent-clipped=1.0
+2024-08-25 07:42:13,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_ff3.min_abs, batch_count=53002.666666666664, ans=0.2
+2024-08-25 07:43:06,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=53002.666666666664, ans=0.125
+2024-08-25 07:43:24,978 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.80 vs. limit=15.0
+2024-08-25 07:46:30,896 INFO [train.py:1114] (2/4) Epoch 5, batch 0, loss[loss=0.304, simple_loss=0.3263, pruned_loss=0.1033, ctc_loss=0.1875, over 19811.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3263, pruned_loss=0.1033, ctc_loss=0.1875, over 19811.00 frames. ], batch size: 49, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 07:46:30,897 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 07:49:02,113 INFO [train.py:1146] (2/4) Epoch 5, validation: loss=0.2543, simple_loss=0.3259, pruned_loss=0.06691, ctc_loss=0.1221, over 944034.00 frames.
+2024-08-25 07:49:02,114 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 07:55:35,559 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.37 vs. limit=15.0
+2024-08-25 07:57:00,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=53210.666666666664, ans=0.125
+2024-08-25 07:59:08,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=53264.0, ans=0.0
+2024-08-25 07:59:09,053 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.58 vs. limit=22.5
+2024-08-25 07:59:09,793 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=53264.0, ans=0.125
+2024-08-25 07:59:25,679 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:01:56,944 INFO [train.py:1114] (2/4) Epoch 5, batch 50, loss[loss=0.2555, simple_loss=0.2989, pruned_loss=0.07684, ctc_loss=0.1463, over 19692.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3392, pruned_loss=0.104, ctc_loss=0.1967, over 843425.80 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 08:03:51,551 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 1.984e+02 2.202e+02 2.522e+02 4.045e+02, threshold=4.404e+02, percent-clipped=0.0
+2024-08-25 08:04:17,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=53424.0, ans=0.0
+2024-08-25 08:06:10,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=53530.666666666664, ans=10.0
+2024-08-25 08:06:33,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53530.666666666664, ans=0.0
+2024-08-25 08:06:33,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=53530.666666666664, ans=0.125
+2024-08-25 08:06:36,240 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:06:39,319 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=53530.666666666664, ans=0.0
+2024-08-25 08:07:17,134 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=53584.0, ans=0.0
+2024-08-25 08:07:22,869 INFO [train.py:1114] (2/4) Epoch 5, batch 100, loss[loss=0.3155, simple_loss=0.333, pruned_loss=0.1071, ctc_loss=0.2094, over 19718.00 frames. ], tot_loss[loss=0.3153, simple_loss=0.3426, pruned_loss=0.1046, ctc_loss=0.1974, over 1498153.10 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:07:44,867 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.03 vs. limit=22.5
+2024-08-25 08:08:16,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=53690.666666666664, ans=0.0
+2024-08-25 08:08:47,227 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.96 vs. limit=15.0
+2024-08-25 08:09:45,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=53797.333333333336, ans=0.125
+2024-08-25 08:10:03,778 INFO [train.py:1114] (2/4) Epoch 5, batch 150, loss[loss=0.3058, simple_loss=0.3216, pruned_loss=0.1063, ctc_loss=0.1936, over 19685.00 frames. ], tot_loss[loss=0.3097, simple_loss=0.3384, pruned_loss=0.102, ctc_loss=0.1924, over 2028120.81 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:10:08,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=53904.0, ans=0.125
+2024-08-25 08:10:28,595 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=53904.0, ans=0.125
+2024-08-25 08:10:40,322 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.115e+02 2.389e+02 2.764e+02 4.531e+02, threshold=4.777e+02, percent-clipped=1.0
+2024-08-25 08:11:16,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=54064.0, ans=0.125
+2024-08-25 08:11:53,424 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.18 vs. limit=15.0
+2024-08-25 08:12:01,152 INFO [train.py:1114] (2/4) Epoch 5, batch 200, loss[loss=0.3726, simple_loss=0.3729, pruned_loss=0.1365, ctc_loss=0.2486, over 18092.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3352, pruned_loss=0.1002, ctc_loss=0.1889, over 2435948.10 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:13:57,960 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.10 vs. limit=15.0
+2024-08-25 08:13:58,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=54170.666666666664, ans=0.125
+2024-08-25 08:13:58,549 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=54170.666666666664, ans=0.0
+2024-08-25 08:14:34,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=54277.333333333336, ans=0.125
+2024-08-25 08:15:07,819 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=54330.666666666664, ans=0.0
+2024-08-25 08:15:58,937 INFO [train.py:1114] (2/4) Epoch 5, batch 250, loss[loss=0.3259, simple_loss=0.3524, pruned_loss=0.11, ctc_loss=0.1984, over 19349.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3349, pruned_loss=0.09989, ctc_loss=0.1884, over 2756553.01 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:16:47,933 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 1.969e+02 2.164e+02 2.373e+02 3.326e+02, threshold=4.328e+02, percent-clipped=0.0
+2024-08-25 08:17:04,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=54597.333333333336, ans=0.2
+2024-08-25 08:17:04,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=54597.333333333336, ans=0.125
+2024-08-25 08:17:06,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=54597.333333333336, ans=0.2
+2024-08-25 08:17:26,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=54650.666666666664, ans=0.125
+2024-08-25 08:17:32,566 INFO [train.py:1114] (2/4) Epoch 5, batch 300, loss[loss=0.318, simple_loss=0.3411, pruned_loss=0.1076, ctc_loss=0.1991, over 19510.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3337, pruned_loss=0.09925, ctc_loss=0.1872, over 3000980.14 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:17:53,424 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.93 vs. limit=10.0
+2024-08-25 08:18:12,104 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=54810.666666666664, ans=0.0
+2024-08-25 08:18:27,930 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=54917.333333333336, ans=0.09899494936611666
+2024-08-25 08:18:35,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=54917.333333333336, ans=0.09899494936611666
+2024-08-25 08:18:38,526 INFO [train.py:1114] (2/4) Epoch 5, batch 350, loss[loss=0.3103, simple_loss=0.3302, pruned_loss=0.1051, ctc_loss=0.2008, over 19759.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3339, pruned_loss=0.09899, ctc_loss=0.186, over 3190823.52 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 16.0
+2024-08-25 08:18:47,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54970.666666666664, ans=0.1
+2024-08-25 08:18:51,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=54970.666666666664, ans=0.1
+2024-08-25 08:19:01,375 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=55024.0, ans=0.0
+2024-08-25 08:19:10,792 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.967e+02 2.265e+02 2.794e+02 4.039e+02, threshold=4.529e+02, percent-clipped=0.0
+2024-08-25 08:19:18,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=55077.333333333336, ans=0.0
+2024-08-25 08:19:30,477 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.97 vs. limit=22.5
+2024-08-25 08:19:47,794 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.69 vs. limit=15.0
+2024-08-25 08:19:47,961 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.02 vs. limit=22.5
+2024-08-25 08:19:52,016 INFO [train.py:1114] (2/4) Epoch 5, batch 400, loss[loss=0.3009, simple_loss=0.34, pruned_loss=0.09407, ctc_loss=0.1843, over 19486.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3336, pruned_loss=0.09854, ctc_loss=0.1852, over 3342865.79 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-25 08:19:55,418 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.89 vs. limit=6.0
+2024-08-25 08:19:59,027 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.16 vs. limit=15.0
+2024-08-25 08:20:21,832 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=55344.0, ans=0.2
+2024-08-25 08:20:27,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=55344.0, ans=0.2
+2024-08-25 08:21:07,970 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=55450.666666666664, ans=0.0
+2024-08-25 08:21:10,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=55450.666666666664, ans=0.125
+2024-08-25 08:21:24,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55450.666666666664, ans=0.1
+2024-08-25 08:21:27,047 INFO [train.py:1114] (2/4) Epoch 5, batch 450, loss[loss=0.3207, simple_loss=0.352, pruned_loss=0.1055, ctc_loss=0.1961, over 19619.00 frames. ], tot_loss[loss=0.3025, simple_loss=0.3337, pruned_loss=0.09862, ctc_loss=0.1852, over 3452048.21 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:21:27,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=55504.0, ans=0.0
+2024-08-25 08:21:27,287 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=55504.0, ans=0.0
+2024-08-25 08:21:29,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=55504.0, ans=0.125
+2024-08-25 08:21:47,862 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.008e+02 2.249e+02 2.774e+02 4.428e+02, threshold=4.498e+02, percent-clipped=0.0
+2024-08-25 08:21:55,838 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=55610.666666666664, ans=0.125
+2024-08-25 08:22:06,190 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:22:58,231 INFO [train.py:1114] (2/4) Epoch 5, batch 500, loss[loss=0.3169, simple_loss=0.3493, pruned_loss=0.1048, ctc_loss=0.1869, over 19677.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.3323, pruned_loss=0.09786, ctc_loss=0.1837, over 3548439.66 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:23:29,058 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55824.0, ans=0.1
+2024-08-25 08:23:59,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=55930.666666666664, ans=0.2
+2024-08-25 08:24:21,970 INFO [train.py:1114] (2/4) Epoch 5, batch 550, loss[loss=0.3263, simple_loss=0.352, pruned_loss=0.1077, ctc_loss=0.2132, over 19298.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3324, pruned_loss=0.09796, ctc_loss=0.1841, over 3609573.60 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:24:47,080 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 1.991e+02 2.247e+02 2.867e+02 6.260e+02, threshold=4.494e+02, percent-clipped=1.0
+2024-08-25 08:24:58,547 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.90 vs. limit=10.0
+2024-08-25 08:25:37,726 INFO [train.py:1114] (2/4) Epoch 5, batch 600, loss[loss=0.3608, simple_loss=0.379, pruned_loss=0.1248, ctc_loss=0.2327, over 19362.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3333, pruned_loss=0.09812, ctc_loss=0.1842, over 3667641.63 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:26:13,199 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.07 vs. limit=15.0
+2024-08-25 08:26:25,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=56464.0, ans=0.2
+2024-08-25 08:26:27,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=56464.0, ans=0.125
+2024-08-25 08:26:47,455 INFO [train.py:1114] (2/4) Epoch 5, batch 650, loss[loss=0.3036, simple_loss=0.3337, pruned_loss=0.09983, ctc_loss=0.1846, over 19763.00 frames. ], tot_loss[loss=0.3015, simple_loss=0.3328, pruned_loss=0.09826, ctc_loss=0.1843, over 3717753.44 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:27:04,233 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.17 vs. limit=15.0
+2024-08-25 08:27:09,521 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=56624.0, ans=0.125
+2024-08-25 08:27:13,352 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.957e+02 2.352e+02 2.685e+02 4.359e+02, threshold=4.704e+02, percent-clipped=0.0
+2024-08-25 08:27:29,690 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:27:36,573 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.25 vs. limit=22.5
+2024-08-25 08:28:10,098 INFO [train.py:1114] (2/4) Epoch 5, batch 700, loss[loss=0.2856, simple_loss=0.3184, pruned_loss=0.0925, ctc_loss=0.1695, over 19738.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.3331, pruned_loss=0.09842, ctc_loss=0.1845, over 3749019.41 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:28:27,500 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.54 vs. limit=15.0
+2024-08-25 08:28:27,525 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.07 vs. limit=15.0
+2024-08-25 08:28:28,547 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.53 vs. limit=15.0
+2024-08-25 08:28:34,063 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.97 vs. limit=22.5
+2024-08-25 08:29:41,378 INFO [train.py:1114] (2/4) Epoch 5, batch 750, loss[loss=0.2903, simple_loss=0.3322, pruned_loss=0.09044, ctc_loss=0.1688, over 19513.00 frames. ], tot_loss[loss=0.3001, simple_loss=0.332, pruned_loss=0.09754, ctc_loss=0.1826, over 3774483.61 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:30:11,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=57104.0, ans=0.125
+2024-08-25 08:30:20,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=57157.333333333336, ans=0.125
+2024-08-25 08:30:38,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=57157.333333333336, ans=0.2
+2024-08-25 08:30:40,361 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.099e+02 2.472e+02 3.181e+02 5.803e+02, threshold=4.945e+02, percent-clipped=2.0
+2024-08-25 08:30:50,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=57210.666666666664, ans=0.125
+2024-08-25 08:32:05,745 INFO [train.py:1114] (2/4) Epoch 5, batch 800, loss[loss=0.275, simple_loss=0.305, pruned_loss=0.08878, ctc_loss=0.1688, over 19784.00 frames. ], tot_loss[loss=0.2989, simple_loss=0.3312, pruned_loss=0.09691, ctc_loss=0.1818, over 3797225.22 frames. ], batch size: 49, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:32:10,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=57370.666666666664, ans=0.5
+2024-08-25 08:32:50,791 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=8.59 vs. limit=15.0
+2024-08-25 08:33:20,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=57530.666666666664, ans=0.125
+2024-08-25 08:33:37,744 INFO [train.py:1114] (2/4) Epoch 5, batch 850, loss[loss=0.2701, simple_loss=0.3234, pruned_loss=0.07727, ctc_loss=0.1559, over 19643.00 frames. ], tot_loss[loss=0.2972, simple_loss=0.33, pruned_loss=0.09609, ctc_loss=0.1805, over 3816385.78 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:33:42,799 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.691e-03
+2024-08-25 08:34:02,529 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.83 vs. limit=15.0
+2024-08-25 08:34:26,560 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.963e+02 2.197e+02 2.544e+02 4.330e+02, threshold=4.395e+02, percent-clipped=0.0
+2024-08-25 08:34:51,732 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=57797.333333333336, ans=0.125
+2024-08-25 08:34:57,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=57850.666666666664, ans=0.125
+2024-08-25 08:35:08,650 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.54 vs. limit=15.0
+2024-08-25 08:35:17,364 INFO [train.py:1114] (2/4) Epoch 5, batch 900, loss[loss=0.2671, simple_loss=0.297, pruned_loss=0.08647, ctc_loss=0.1608, over 19431.00 frames. ], tot_loss[loss=0.2981, simple_loss=0.3305, pruned_loss=0.09662, ctc_loss=0.1813, over 3820078.46 frames. ], batch size: 48, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:35:17,649 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=57904.0, ans=0.05
+2024-08-25 08:35:24,689 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.17 vs. limit=6.0
+2024-08-25 08:35:32,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=57904.0, ans=0.125
+2024-08-25 08:35:40,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=57957.333333333336, ans=0.05
+2024-08-25 08:35:48,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=57957.333333333336, ans=0.125
+2024-08-25 08:36:22,513 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.68 vs. limit=15.0
+2024-08-25 08:36:23,300 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:36:28,057 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.89 vs. limit=15.0
+2024-08-25 08:36:37,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58117.333333333336, ans=0.1
+2024-08-25 08:36:41,314 INFO [train.py:1114] (2/4) Epoch 5, batch 950, loss[loss=0.2964, simple_loss=0.3175, pruned_loss=0.1004, ctc_loss=0.1866, over 19505.00 frames. ], tot_loss[loss=0.2977, simple_loss=0.3302, pruned_loss=0.09639, ctc_loss=0.181, over 3818664.89 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:36:48,817 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.99 vs. limit=15.0
+2024-08-25 08:36:49,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=58170.666666666664, ans=0.125
+2024-08-25 08:36:51,812 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=58224.0, ans=0.125
+2024-08-25 08:36:56,259 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.30 vs. limit=22.5
+2024-08-25 08:37:02,458 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.021e+02 2.236e+02 2.607e+02 6.234e+02, threshold=4.471e+02, percent-clipped=1.0
+2024-08-25 08:37:15,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=58277.333333333336, ans=0.025
+2024-08-25 08:37:23,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=58330.666666666664, ans=0.2
+2024-08-25 08:37:44,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=58384.0, ans=0.125
+2024-08-25 08:37:49,069 INFO [train.py:1114] (2/4) Epoch 5, batch 1000, loss[loss=0.2682, simple_loss=0.3161, pruned_loss=0.07939, ctc_loss=0.1538, over 19850.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.332, pruned_loss=0.09767, ctc_loss=0.1832, over 3815651.64 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:38:16,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=58490.666666666664, ans=0.125
+2024-08-25 08:38:42,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=58544.0, ans=0.1
+2024-08-25 08:39:00,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58597.333333333336, ans=0.1
+2024-08-25 08:39:20,351 INFO [train.py:1114] (2/4) Epoch 5, batch 1050, loss[loss=0.3409, simple_loss=0.3614, pruned_loss=0.1169, ctc_loss=0.2162, over 19839.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3316, pruned_loss=0.09782, ctc_loss=0.1833, over 3822385.69 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:39:27,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=58704.0, ans=0.1
+2024-08-25 08:39:28,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=58704.0, ans=0.125
+2024-08-25 08:39:30,809 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=58757.333333333336, ans=0.0
+2024-08-25 08:39:34,666 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.12 vs. limit=22.5
+2024-08-25 08:39:41,248 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.929e+02 2.228e+02 2.594e+02 4.447e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 08:39:46,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=58757.333333333336, ans=0.125
+2024-08-25 08:40:22,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=58864.0, ans=0.0
+2024-08-25 08:40:38,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=58917.333333333336, ans=0.125
+2024-08-25 08:40:42,204 INFO [train.py:1114] (2/4) Epoch 5, batch 1100, loss[loss=0.2735, simple_loss=0.314, pruned_loss=0.08521, ctc_loss=0.1563, over 19577.00 frames. ], tot_loss[loss=0.3011, simple_loss=0.3319, pruned_loss=0.0983, ctc_loss=0.1844, over 3830046.76 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:40:57,657 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.99 vs. limit=22.5
+2024-08-25 08:41:02,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=59024.0, ans=0.1
+2024-08-25 08:41:34,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59130.666666666664, ans=0.1
+2024-08-25 08:41:35,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59130.666666666664, ans=0.1
+2024-08-25 08:41:51,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=59184.0, ans=0.125
+2024-08-25 08:41:54,272 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.80 vs. limit=15.0
+2024-08-25 08:42:06,690 INFO [train.py:1114] (2/4) Epoch 5, batch 1150, loss[loss=0.2968, simple_loss=0.3328, pruned_loss=0.09475, ctc_loss=0.1786, over 19601.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.3317, pruned_loss=0.0981, ctc_loss=0.1842, over 3829557.59 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:42:12,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=59237.333333333336, ans=0.125
+2024-08-25 08:42:19,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=59237.333333333336, ans=0.125
+2024-08-25 08:42:27,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=59290.666666666664, ans=0.09899494936611666
+2024-08-25 08:42:30,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=59290.666666666664, ans=0.125
+2024-08-25 08:42:38,170 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.022e+02 2.244e+02 2.636e+02 4.087e+02, threshold=4.489e+02, percent-clipped=0.0
+2024-08-25 08:42:57,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=59344.0, ans=0.0
+2024-08-25 08:42:59,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=59344.0, ans=0.125
+2024-08-25 08:43:33,326 INFO [train.py:1114] (2/4) Epoch 5, batch 1200, loss[loss=0.2901, simple_loss=0.3365, pruned_loss=0.0889, ctc_loss=0.1645, over 19839.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.3327, pruned_loss=0.09848, ctc_loss=0.1851, over 3825952.48 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:43:36,093 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=59504.0, ans=0.125
+2024-08-25 08:43:52,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=59504.0, ans=0.125
+2024-08-25 08:43:54,879 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.51 vs. limit=6.0
+2024-08-25 08:43:59,641 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.47 vs. limit=15.0
+2024-08-25 08:44:14,610 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=59610.666666666664, ans=0.125
+2024-08-25 08:44:16,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=59610.666666666664, ans=0.0
+2024-08-25 08:44:24,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=59664.0, ans=0.125
+2024-08-25 08:44:27,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=59664.0, ans=0.0
+2024-08-25 08:44:55,327 INFO [train.py:1114] (2/4) Epoch 5, batch 1250, loss[loss=0.2701, simple_loss=0.3208, pruned_loss=0.07936, ctc_loss=0.152, over 19528.00 frames. ], tot_loss[loss=0.3007, simple_loss=0.3325, pruned_loss=0.0977, ctc_loss=0.1837, over 3843642.12 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:45:03,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=59770.666666666664, ans=0.125
+2024-08-25 08:45:21,219 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 1.906e+02 2.098e+02 2.362e+02 4.005e+02, threshold=4.196e+02, percent-clipped=0.0
+2024-08-25 08:45:27,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=59877.333333333336, ans=0.0
+2024-08-25 08:45:53,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=59984.0, ans=0.2
+2024-08-25 08:45:56,535 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.60 vs. limit=22.5
+2024-08-25 08:46:03,575 INFO [train.py:1114] (2/4) Epoch 5, batch 1300, loss[loss=0.3041, simple_loss=0.3356, pruned_loss=0.09962, ctc_loss=0.1834, over 18890.00 frames. ], tot_loss[loss=0.2995, simple_loss=0.3315, pruned_loss=0.09712, ctc_loss=0.1829, over 3846186.35 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:46:27,426 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=60090.666666666664, ans=0.1
+2024-08-25 08:46:30,335 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=60090.666666666664, ans=0.125
+2024-08-25 08:46:36,192 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.80 vs. limit=15.0
+2024-08-25 08:47:00,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=60197.333333333336, ans=0.0
+2024-08-25 08:47:06,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=60197.333333333336, ans=0.125
+2024-08-25 08:47:20,024 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.49 vs. limit=15.0
+2024-08-25 08:47:27,091 INFO [train.py:1114] (2/4) Epoch 5, batch 1350, loss[loss=0.2882, simple_loss=0.3332, pruned_loss=0.0879, ctc_loss=0.1687, over 19786.00 frames. ], tot_loss[loss=0.2977, simple_loss=0.3304, pruned_loss=0.09626, ctc_loss=0.1811, over 3857362.45 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:47:31,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=60304.0, ans=10.0
+2024-08-25 08:48:06,335 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.950e+02 2.204e+02 2.621e+02 4.331e+02, threshold=4.409e+02, percent-clipped=1.0
+2024-08-25 08:48:13,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=60410.666666666664, ans=0.125
+2024-08-25 08:49:09,966 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=60517.333333333336, ans=0.125
+2024-08-25 08:49:14,323 INFO [train.py:1114] (2/4) Epoch 5, batch 1400, loss[loss=0.2557, simple_loss=0.2938, pruned_loss=0.07982, ctc_loss=0.1449, over 19671.00 frames. ], tot_loss[loss=0.296, simple_loss=0.3294, pruned_loss=0.09539, ctc_loss=0.1794, over 3864150.13 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:49:18,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60570.666666666664, ans=0.1
+2024-08-25 08:49:31,620 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.32 vs. limit=10.0
+2024-08-25 08:49:38,322 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=60624.0, ans=0.0
+2024-08-25 08:49:38,590 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.30 vs. limit=22.5
+2024-08-25 09:01:57,443 INFO [train.py:1114] (2/4) Epoch 5, batch 1450, loss[loss=0.3223, simple_loss=0.351, pruned_loss=0.1076, ctc_loss=0.1956, over 19676.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3303, pruned_loss=0.09571, ctc_loss=0.1799, over 3862100.12 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 09:07:40,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=60837.333333333336, ans=0.1
+2024-08-25 09:12:44,217 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.62 vs. limit=10.0
+2024-08-25 09:14:29,264 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 1.942e+02 2.164e+02 2.480e+02 4.633e+02, threshold=4.329e+02, percent-clipped=1.0
+2024-08-25 09:15:31,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=60944.0, ans=0.125
+2024-08-25 09:24:21,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=60997.333333333336, ans=0.125
+2024-08-25 09:24:21,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=60997.333333333336, ans=0.0
+2024-08-25 09:25:28,209 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.70 vs. limit=15.0
+2024-08-25 09:25:28,350 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.52 vs. limit=6.0
+2024-08-25 09:26:47,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=60997.333333333336, ans=0.0
+2024-08-25 09:34:05,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=61050.666666666664, ans=0.2
+2024-08-25 09:36:13,473 INFO [train.py:1114] (2/4) Epoch 5, batch 1500, loss[loss=0.308, simple_loss=0.3456, pruned_loss=0.09762, ctc_loss=0.1881, over 19586.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3304, pruned_loss=0.09576, ctc_loss=0.1801, over 3861725.35 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-25 09:44:18,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=61157.333333333336, ans=0.2
+2024-08-25 09:49:01,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=61157.333333333336, ans=0.125
+2024-08-25 09:57:47,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=61264.0, ans=0.125
+2024-08-25 09:58:47,691 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.08 vs. limit=15.0
+2024-08-25 10:00:40,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.66 vs. limit=12.0
+2024-08-25 10:00:40,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=61264.0, ans=0.0
+2024-08-25 10:06:52,302 INFO [train.py:1114] (2/4) Epoch 5, batch 1550, loss[loss=0.324, simple_loss=0.3515, pruned_loss=0.1075, ctc_loss=0.2037, over 19584.00 frames. ], tot_loss[loss=0.2977, simple_loss=0.3307, pruned_loss=0.09615, ctc_loss=0.181, over 3847400.37 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
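+Note `grad_scale: 16.0` on this line, against 32.0 at batch 1500 and again at batch 1600: this is the signature of dynamic loss scaling under mixed precision, where an inf/nan gradient halves the scale and a long run of clean steps doubles it back. The real mechanism is torch's AMP GradScaler; the update rule is sketched here only for illustration.
+
+```python
+def update_grad_scale(scale: float, found_inf: bool,
+                      clean_steps: int, growth_interval: int = 2000):
+    """Simplified GradScaler-style update: backoff 0.5, growth 2.0."""
+    if found_inf:
+        return scale * 0.5, 0          # back off, restart the clean count
+    clean_steps += 1
+    if clean_steps >= growth_interval:
+        return scale * 2.0, 0          # grow after a long clean run
+    return scale, clean_steps
+
+print(update_grad_scale(32.0, found_inf=True, clean_steps=120))  # (16.0, 0)
+```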
+2024-08-25 10:12:14,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=61424.0, ans=0.0
+2024-08-25 10:13:22,588 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.97 vs. limit=22.5
+2024-08-25 10:14:16,524 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=61424.0, ans=0.0
+2024-08-25 10:14:47,414 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.971e+02 2.260e+02 2.611e+02 5.554e+02, threshold=4.519e+02, percent-clipped=3.0
+2024-08-25 10:25:58,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=61584.0, ans=0.125
+2024-08-25 10:28:13,711 INFO [train.py:1114] (2/4) Epoch 5, batch 1600, loss[loss=0.2897, simple_loss=0.3281, pruned_loss=0.09212, ctc_loss=0.1674, over 19851.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.3295, pruned_loss=0.09523, ctc_loss=0.1792, over 3835621.73 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:28:35,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=61637.333333333336, ans=0.0
+2024-08-25 10:30:45,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=61690.666666666664, ans=0.125
+2024-08-25 10:31:37,249 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=61690.666666666664, ans=0.125
+2024-08-25 10:36:46,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=61797.333333333336, ans=0.125
+2024-08-25 10:39:04,865 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.62 vs. limit=15.0
+2024-08-25 10:40:18,071 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=61850.666666666664, ans=0.125
+2024-08-25 10:40:18,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=61850.666666666664, ans=0.5
+2024-08-25 10:40:45,722 INFO [train.py:1114] (2/4) Epoch 5, batch 1650, loss[loss=0.3146, simple_loss=0.3487, pruned_loss=0.1007, ctc_loss=0.1978, over 19673.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.3295, pruned_loss=0.09519, ctc_loss=0.1794, over 3832831.11 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:40:49,507 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.91 vs. limit=22.5
+2024-08-25 10:43:04,120 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.985e+02 2.336e+02 2.616e+02 4.728e+02, threshold=4.672e+02, percent-clipped=1.0
+2024-08-25 10:43:35,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=62010.666666666664, ans=0.125
+2024-08-25 10:43:52,913 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.42 vs. limit=15.0
+2024-08-25 10:46:21,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=62117.333333333336, ans=0.0
+2024-08-25 10:46:43,742 INFO [train.py:1114] (2/4) Epoch 5, batch 1700, loss[loss=0.2799, simple_loss=0.3072, pruned_loss=0.09206, ctc_loss=0.1714, over 19662.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.329, pruned_loss=0.09469, ctc_loss=0.1783, over 3847310.42 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:48:01,659 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.05 vs. limit=10.0
+2024-08-25 10:48:25,776 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.65 vs. limit=15.0
+2024-08-25 10:48:56,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=62277.333333333336, ans=0.0
+2024-08-25 10:48:57,996 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=62277.333333333336, ans=0.125
+2024-08-25 10:48:59,783 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.02 vs. limit=15.0
+2024-08-25 10:49:17,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=62277.333333333336, ans=0.1
+2024-08-25 10:50:17,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=62384.0, ans=0.125
+2024-08-25 10:50:54,972 INFO [train.py:1114] (2/4) Epoch 5, batch 1750, loss[loss=0.2526, simple_loss=0.2903, pruned_loss=0.07711, ctc_loss=0.1518, over 19603.00 frames. ], tot_loss[loss=0.2953, simple_loss=0.329, pruned_loss=0.09506, ctc_loss=0.179, over 3851911.58 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:51:05,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=62437.333333333336, ans=0.025
+2024-08-25 10:51:09,209 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=62437.333333333336, ans=0.025
+2024-08-25 10:51:13,039 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=62437.333333333336, ans=0.2
+2024-08-25 10:51:35,962 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.59 vs. limit=6.0
+2024-08-25 10:51:43,377 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.31 vs. limit=15.0
+2024-08-25 10:53:52,982 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.010e+02 2.326e+02 2.972e+02 6.446e+02, threshold=4.653e+02, percent-clipped=3.0
+2024-08-25 10:56:53,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=62650.666666666664, ans=0.125
+2024-08-25 10:57:11,558 INFO [train.py:1114] (2/4) Epoch 5, batch 1800, loss[loss=0.3243, simple_loss=0.3541, pruned_loss=0.1074, ctc_loss=0.1994, over 19623.00 frames. ], tot_loss[loss=0.2954, simple_loss=0.3292, pruned_loss=0.09501, ctc_loss=0.1787, over 3853742.09 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:57:28,293 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=62704.0, ans=0.125
+2024-08-25 10:57:55,950 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.65 vs. limit=15.0
+2024-08-25 10:58:25,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=62810.666666666664, ans=0.0
+2024-08-25 10:58:39,366 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.90 vs. limit=6.0
+2024-08-25 10:58:42,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=62864.0, ans=0.125
+2024-08-25 10:58:57,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=62917.333333333336, ans=0.0
+2024-08-25 10:58:59,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=62917.333333333336, ans=0.0
+2024-08-25 10:59:01,187 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=62917.333333333336, ans=0.2
+2024-08-25 10:59:06,170 INFO [train.py:1114] (2/4) Epoch 5, batch 1850, loss[loss=0.3015, simple_loss=0.3459, pruned_loss=0.09423, ctc_loss=0.1717, over 19584.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3284, pruned_loss=0.0943, ctc_loss=0.1774, over 3856585.11 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 10:59:12,847 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.04 vs. limit=15.0
+2024-08-25 10:59:14,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=62970.666666666664, ans=0.125
+2024-08-25 10:59:25,375 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63024.0, ans=0.1
+2024-08-25 10:59:32,447 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.044e+02 2.314e+02 2.820e+02 4.474e+02, threshold=4.628e+02, percent-clipped=0.0
+2024-08-25 10:59:42,776 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=63077.333333333336, ans=0.125
+2024-08-25 10:59:53,773 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.34 vs. limit=15.0
+2024-08-25 11:00:20,610 INFO [train.py:1114] (2/4) Epoch 5, batch 1900, loss[loss=0.2921, simple_loss=0.3301, pruned_loss=0.09192, ctc_loss=0.1756, over 19653.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.3293, pruned_loss=0.09454, ctc_loss=0.1779, over 3860612.57 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:00:20,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=63237.333333333336, ans=0.125
+2024-08-25 11:00:26,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=63237.333333333336, ans=0.125
+2024-08-25 11:00:28,987 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.89 vs. limit=15.0
+2024-08-25 11:00:34,222 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:00:40,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=63290.666666666664, ans=0.2
+2024-08-25 11:01:09,526 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.29 vs. limit=8.0
+2024-08-25 11:01:10,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=63344.0, ans=0.125
+2024-08-25 11:01:55,417 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=63397.333333333336, ans=0.0
+2024-08-25 11:01:58,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=63450.666666666664, ans=0.2
+2024-08-25 11:02:34,501 INFO [train.py:1114] (2/4) Epoch 5, batch 1950, loss[loss=0.3133, simple_loss=0.3371, pruned_loss=0.1055, ctc_loss=0.196, over 19587.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.3298, pruned_loss=0.09443, ctc_loss=0.1775, over 3869917.03 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:02:42,933 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=63504.0, ans=0.0
+2024-08-25 11:02:58,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63557.333333333336, ans=0.1
+2024-08-25 11:03:16,680 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.932e+02 2.130e+02 2.461e+02 4.838e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 11:03:45,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=63610.666666666664, ans=0.125
+2024-08-25 11:04:15,898 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=63717.333333333336, ans=0.125
+2024-08-25 11:04:25,921 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.34 vs. limit=15.0
+2024-08-25 11:04:37,798 INFO [train.py:1114] (2/4) Epoch 5, batch 2000, loss[loss=0.2706, simple_loss=0.2952, pruned_loss=0.0901, ctc_loss=0.1645, over 19684.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3308, pruned_loss=0.09531, ctc_loss=0.1789, over 3853024.26 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:04:58,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=63824.0, ans=0.0
+2024-08-25 11:04:59,388 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.46 vs. limit=6.0
+2024-08-25 11:05:01,323 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.535e-03
+2024-08-25 11:05:02,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=63824.0, ans=0.125
+2024-08-25 11:05:34,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=63930.666666666664, ans=0.0
+2024-08-25 11:06:07,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=64037.333333333336, ans=0.125
+2024-08-25 11:06:08,374 INFO [train.py:1114] (2/4) Epoch 5, batch 2050, loss[loss=0.2516, simple_loss=0.2979, pruned_loss=0.07407, ctc_loss=0.1432, over 19738.00 frames. ], tot_loss[loss=0.2951, simple_loss=0.3293, pruned_loss=0.09484, ctc_loss=0.1781, over 3850417.08 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:06:24,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=64090.666666666664, ans=0.125
+2024-08-25 11:06:29,147 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.037e+02 2.272e+02 2.892e+02 6.343e+02, threshold=4.544e+02, percent-clipped=1.0
+2024-08-25 11:06:49,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=64144.0, ans=0.2
+2024-08-25 11:06:52,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=64197.333333333336, ans=0.0
+2024-08-25 11:07:05,100 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.34 vs. limit=6.0
+2024-08-25 11:07:30,928 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=64250.666666666664, ans=0.0
+2024-08-25 11:07:48,389 INFO [train.py:1114] (2/4) Epoch 5, batch 2100, loss[loss=0.2622, simple_loss=0.3108, pruned_loss=0.07832, ctc_loss=0.1423, over 19770.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.328, pruned_loss=0.09384, ctc_loss=0.1762, over 3858122.29 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:07:49,466 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64304.0, ans=0.1
+2024-08-25 11:07:49,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=64304.0, ans=0.2
+2024-08-25 11:07:54,221 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.63 vs. limit=15.0
+2024-08-25 11:08:13,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=64357.333333333336, ans=0.125
+2024-08-25 11:08:33,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=64464.0, ans=0.1
+2024-08-25 11:08:52,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=64464.0, ans=0.0
+2024-08-25 11:09:21,109 INFO [train.py:1114] (2/4) Epoch 5, batch 2150, loss[loss=0.2681, simple_loss=0.3152, pruned_loss=0.08152, ctc_loss=0.1447, over 19854.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3274, pruned_loss=0.09379, ctc_loss=0.1761, over 3869402.62 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:09:23,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=64570.666666666664, ans=0.125
+2024-08-25 11:09:34,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64624.0, ans=0.1
+2024-08-25 11:09:44,528 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.041e+02 2.279e+02 2.689e+02 3.624e+02, threshold=4.557e+02, percent-clipped=0.0
+2024-08-25 11:10:26,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=64784.0, ans=0.5
+2024-08-25 11:10:34,041 INFO [train.py:1114] (2/4) Epoch 5, batch 2200, loss[loss=0.3016, simple_loss=0.3423, pruned_loss=0.095, ctc_loss=0.1774, over 19582.00 frames. ], tot_loss[loss=0.2933, simple_loss=0.3277, pruned_loss=0.09406, ctc_loss=0.1768, over 3867300.66 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:11:19,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=65050.666666666664, ans=0.04949747468305833
+2024-08-25 11:11:20,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=65050.666666666664, ans=0.0
+2024-08-25 11:11:23,800 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=65050.666666666664, ans=0.04949747468305833
+2024-08-25 11:11:29,236 INFO [train.py:1114] (2/4) Epoch 5, batch 2250, loss[loss=0.2917, simple_loss=0.3377, pruned_loss=0.08941, ctc_loss=0.1674, over 19599.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3281, pruned_loss=0.09427, ctc_loss=0.1773, over 3867046.89 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:11:35,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=65104.0, ans=0.0
+2024-08-25 11:11:45,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=65157.333333333336, ans=0.025
+2024-08-25 11:11:51,987 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.180e+02 2.514e+02 3.003e+02 5.559e+02, threshold=5.029e+02, percent-clipped=2.0
+2024-08-25 11:12:17,669 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=65264.0, ans=0.0
+2024-08-25 11:12:38,250 INFO [train.py:1114] (2/4) Epoch 5, batch 2300, loss[loss=0.2553, simple_loss=0.2986, pruned_loss=0.07762, ctc_loss=0.142, over 19505.00 frames. ], tot_loss[loss=0.293, simple_loss=0.3268, pruned_loss=0.09416, ctc_loss=0.1771, over 3860953.91 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:13:16,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=65530.666666666664, ans=0.125
+2024-08-25 11:13:21,594 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.76 vs. limit=15.0
+2024-08-25 11:13:30,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=65584.0, ans=0.125
+2024-08-25 11:13:30,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=65584.0, ans=0.2
+2024-08-25 11:13:33,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=65584.0, ans=0.0
+2024-08-25 11:13:37,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=65584.0, ans=6.0
+2024-08-25 11:13:38,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=65584.0, ans=0.125
+2024-08-25 11:13:40,595 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:13:50,095 INFO [train.py:1114] (2/4) Epoch 5, batch 2350, loss[loss=0.3255, simple_loss=0.3597, pruned_loss=0.1064, ctc_loss=0.196, over 19668.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3264, pruned_loss=0.09368, ctc_loss=0.1759, over 3863446.28 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:14:31,467 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.936e+02 2.303e+02 2.820e+02 4.151e+02, threshold=4.606e+02, percent-clipped=0.0
+2024-08-25 11:14:56,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=65797.33333333333, ans=0.1
+2024-08-25 11:14:56,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-25 11:15:01,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=65797.33333333333, ans=0.04949747468305833
+2024-08-25 11:15:23,151 INFO [train.py:1114] (2/4) Epoch 5, batch 2400, loss[loss=0.3012, simple_loss=0.3465, pruned_loss=0.09296, ctc_loss=0.1753, over 19291.00 frames. ], tot_loss[loss=0.295, simple_loss=0.3292, pruned_loss=0.09489, ctc_loss=0.1776, over 3857528.14 frames. ], batch size: 71, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:15:34,826 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.81 vs. limit=15.0
+2024-08-25 11:15:41,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=65957.33333333333, ans=0.125
+2024-08-25 11:16:07,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=66010.66666666667, ans=0.0
+2024-08-25 11:16:12,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=66064.0, ans=0.2
+2024-08-25 11:16:33,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=66064.0, ans=0.125
+2024-08-25 11:16:40,591 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.63 vs. limit=15.0
+2024-08-25 11:16:56,808 INFO [train.py:1114] (2/4) Epoch 5, batch 2450, loss[loss=0.3843, simple_loss=0.3775, pruned_loss=0.1422, ctc_loss=0.2666, over 14018.00 frames. ], tot_loss[loss=0.3044, simple_loss=0.3347, pruned_loss=0.09968, ctc_loss=0.1869, over 3729975.43 frames. ], batch size: 141, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:17:16,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=66224.0, ans=0.125
+2024-08-25 11:17:17,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=66224.0, ans=0.125
+2024-08-25 11:17:39,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=66224.0, ans=0.0
+2024-08-25 11:17:41,183 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=66224.0, ans=0.2
+2024-08-25 11:17:43,161 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.021e+02 2.221e+02 2.524e+02 3.558e+02, threshold=4.443e+02, percent-clipped=0.0
+2024-08-25 11:17:45,555 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=66277.33333333333, ans=0.125
+2024-08-25 11:17:54,071 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=66277.33333333333, ans=0.125
+2024-08-25 11:17:56,123 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=66277.33333333333, ans=0.125
+2024-08-25 11:17:59,730 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.91 vs. limit=15.0
+2024-08-25 11:18:01,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=66330.66666666667, ans=0.1
+2024-08-25 11:18:03,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=66330.66666666667, ans=0.125
+2024-08-25 11:19:28,342 INFO [train.py:1114] (2/4) Epoch 6, batch 0, loss[loss=0.3045, simple_loss=0.3251, pruned_loss=0.1046, ctc_loss=0.187, over 19792.00 frames. ], tot_loss[loss=0.3045, simple_loss=0.3251, pruned_loss=0.1046, ctc_loss=0.187, over 19792.00 frames. ], batch size: 49, lr: 2.45e-02, grad_scale: 32.0
+2024-08-25 11:19:28,342 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 11:20:29,250 INFO [train.py:1146] (2/4) Epoch 6, validation: loss=0.2388, simple_loss=0.3147, pruned_loss=0.05993, ctc_loss=0.1076, over 944034.00 frames.
+2024-08-25 11:20:29,251 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
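+Here, at the start of epoch 6, the trainer pauses to compute a validation loss over the held-out set (944034 frames) and reports peak GPU memory. A hedged sketch of that step is below; the batch layout and model call are assumptions, while `torch.cuda.max_memory_allocated` is the real torch API behind the memory line (its rounding to MB is also assumed).
+
+```python
+import torch
+
+@torch.no_grad()
+def compute_validation_loss(model, valid_loader, device: torch.device) -> float:
+    model.eval()
+    loss_sum, frames = 0.0, 0.0
+    for batch in valid_loader:
+        feats = batch["features"].to(device)        # assumed batch layout
+        n = float(feats.shape[0] * feats.shape[1])  # frames in this batch
+        loss_sum += float(model(feats)) * n         # assumed scalar-loss API
+        frames += n
+    model.train()
+    mem_mb = torch.cuda.max_memory_allocated() // 2**20
+    print(f"Maximum memory allocated so far is {mem_mb}MB")
+    return loss_sum / max(frames, 1.0)
+```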
+2024-08-25 11:20:29,672 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.82 vs. limit=15.0
+2024-08-25 11:20:48,592 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.52 vs. limit=15.0
+2024-08-25 11:21:07,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=66538.66666666667, ans=0.1
+2024-08-25 11:21:25,007 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-25 11:21:56,937 INFO [train.py:1114] (2/4) Epoch 6, batch 50, loss[loss=0.2449, simple_loss=0.2931, pruned_loss=0.0717, ctc_loss=0.1333, over 19723.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3303, pruned_loss=0.09584, ctc_loss=0.1803, over 845158.26 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
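+The learning rate steps from 2.63e-02 at the end of epoch 5 to 2.45e-02 at the start of epoch 6 even though the batch count grows smoothly, so the schedule decays with both batch count and epoch. An Eden-style rule (as used by icefall Zipformer recipes) has exactly this shape and is sketched below; `lr_batches`, `lr_epochs`, and the base rate are assumed values, not read from this log.
+
+```python
+def eden_lr(base_lr: float, batch: int, epoch: int,
+            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
+    # Smooth decay in batches within an epoch, plus a step-like extra
+    # decay whenever the epoch counter increments.
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+
+print(eden_lr(0.045, batch=66500, epoch=6))  # decays with both batch and epoch
+```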
+2024-08-25 11:21:58,760 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.52 vs. limit=12.0
+2024-08-25 11:22:02,109 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.66 vs. limit=6.0
+2024-08-25 11:22:12,254 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.44 vs. limit=22.5
+2024-08-25 11:22:24,880 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=66752.0, ans=0.125
+2024-08-25 11:22:35,661 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=66752.0, ans=0.125
+2024-08-25 11:22:38,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=66752.0, ans=0.125
+2024-08-25 11:22:45,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=66752.0, ans=0.025
+2024-08-25 11:22:50,716 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 1.959e+02 2.174e+02 2.569e+02 5.460e+02, threshold=4.347e+02, percent-clipped=1.0
+2024-08-25 11:23:07,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=66858.66666666667, ans=0.125
+2024-08-25 11:23:11,739 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.83 vs. limit=22.5
+2024-08-25 11:23:18,888 INFO [train.py:1114] (2/4) Epoch 6, batch 100, loss[loss=0.2726, simple_loss=0.3096, pruned_loss=0.08551, ctc_loss=0.1613, over 19707.00 frames. ], tot_loss[loss=0.2964, simple_loss=0.3308, pruned_loss=0.09516, ctc_loss=0.1793, over 1498722.63 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:23:38,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=66965.33333333333, ans=0.025
+2024-08-25 11:24:15,047 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=67125.33333333333, ans=0.125
+2024-08-25 11:24:21,823 INFO [train.py:1114] (2/4) Epoch 6, batch 150, loss[loss=0.2842, simple_loss=0.3069, pruned_loss=0.09597, ctc_loss=0.1738, over 19740.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.3279, pruned_loss=0.0935, ctc_loss=0.1758, over 2027884.99 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:24:38,621 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.55 vs. limit=15.0
+2024-08-25 11:25:04,952 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.947e+02 2.172e+02 2.650e+02 4.091e+02, threshold=4.343e+02, percent-clipped=0.0
+2024-08-25 11:25:20,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=67392.0, ans=0.2
+2024-08-25 11:25:22,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67392.0, ans=0.1
+2024-08-25 11:25:35,918 INFO [train.py:1114] (2/4) Epoch 6, batch 200, loss[loss=0.3581, simple_loss=0.3678, pruned_loss=0.1272, ctc_loss=0.2352, over 18250.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3269, pruned_loss=0.09333, ctc_loss=0.1754, over 2435615.73 frames. ], batch size: 85, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:25:59,475 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.27 vs. limit=15.0
+2024-08-25 11:26:02,704 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.84 vs. limit=12.0
+2024-08-25 11:26:04,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=67498.66666666667, ans=0.125
+2024-08-25 11:26:12,507 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.whiten.whitening_limit, batch_count=67552.0, ans=15.0
+2024-08-25 11:26:37,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=67552.0, ans=0.035
+2024-08-25 11:27:19,526 INFO [train.py:1114] (2/4) Epoch 6, batch 250, loss[loss=0.2868, simple_loss=0.3348, pruned_loss=0.0865, ctc_loss=0.1644, over 19388.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3261, pruned_loss=0.09274, ctc_loss=0.1745, over 2755871.45 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:27:30,146 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=67712.0, ans=0.125
+2024-08-25 11:27:44,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=67765.33333333333, ans=0.07
+2024-08-25 11:28:14,795 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.43 vs. limit=15.0
+2024-08-25 11:28:36,847 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 1.900e+02 2.111e+02 2.483e+02 4.707e+02, threshold=4.222e+02, percent-clipped=1.0
+2024-08-25 11:29:32,390 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67925.33333333333, ans=0.1
+2024-08-25 11:29:38,980 INFO [train.py:1114] (2/4) Epoch 6, batch 300, loss[loss=0.3361, simple_loss=0.3501, pruned_loss=0.1177, ctc_loss=0.2169, over 19499.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3252, pruned_loss=0.09218, ctc_loss=0.1735, over 3000609.96 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:30:18,410 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.10 vs. limit=15.0
+2024-08-25 11:30:40,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=68085.33333333333, ans=0.125
+2024-08-25 11:30:43,420 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=68085.33333333333, ans=0.125
+2024-08-25 11:30:59,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=68138.66666666667, ans=0.0
+2024-08-25 11:31:00,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=68138.66666666667, ans=0.1
+2024-08-25 11:31:07,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=68138.66666666667, ans=0.025
+2024-08-25 11:31:09,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=68138.66666666667, ans=0.0
+2024-08-25 11:31:10,715 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.93 vs. limit=15.0
+2024-08-25 11:31:26,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=68192.0, ans=0.125
+2024-08-25 11:31:39,944 INFO [train.py:1114] (2/4) Epoch 6, batch 350, loss[loss=0.2304, simple_loss=0.2774, pruned_loss=0.06631, ctc_loss=0.127, over 19758.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3248, pruned_loss=0.09175, ctc_loss=0.1726, over 3191275.62 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:31:47,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=68245.33333333333, ans=0.0
+2024-08-25 11:32:15,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=68298.66666666667, ans=0.2
+2024-08-25 11:32:35,309 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.039e+02 2.360e+02 2.872e+02 5.301e+02, threshold=4.720e+02, percent-clipped=2.0
+2024-08-25 11:32:40,511 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=68405.33333333333, ans=0.0
+2024-08-25 11:33:02,107 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.30 vs. limit=15.0
+2024-08-25 11:33:02,560 INFO [train.py:1114] (2/4) Epoch 6, batch 400, loss[loss=0.313, simple_loss=0.3419, pruned_loss=0.1046, ctc_loss=0.1873, over 19497.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3243, pruned_loss=0.09137, ctc_loss=0.1716, over 3342789.81 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:33:12,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=68512.0, ans=0.0
+2024-08-25 11:33:12,401 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.15 vs. limit=22.5
+2024-08-25 11:33:17,095 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.80 vs. limit=6.0
+2024-08-25 11:33:18,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=68565.33333333333, ans=0.1
+2024-08-25 11:33:20,616 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.76 vs. limit=10.0
+2024-08-25 11:33:24,798 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:33:58,783 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.67 vs. limit=15.0
+2024-08-25 11:34:06,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=68725.33333333333, ans=0.125
+2024-08-25 11:34:13,430 INFO [train.py:1114] (2/4) Epoch 6, batch 450, loss[loss=0.267, simple_loss=0.32, pruned_loss=0.07664, ctc_loss=0.1519, over 19615.00 frames. ], tot_loss[loss=0.2863, simple_loss=0.3236, pruned_loss=0.09044, ctc_loss=0.1702, over 3451041.50 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:34:23,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=68778.66666666667, ans=0.2
+2024-08-25 11:34:30,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=68832.0, ans=0.1
+2024-08-25 11:34:46,990 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.23 vs. limit=15.0
+2024-08-25 11:34:48,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=68938.66666666667, ans=0.2
+2024-08-25 11:34:49,670 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.969e+02 2.191e+02 2.793e+02 4.218e+02, threshold=4.382e+02, percent-clipped=0.0
+2024-08-25 11:34:50,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=68938.66666666667, ans=0.125
+2024-08-25 11:34:59,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=68992.0, ans=0.0
+2024-08-25 11:35:09,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=69045.33333333333, ans=0.2
+2024-08-25 11:35:10,603 INFO [train.py:1114] (2/4) Epoch 6, batch 500, loss[loss=0.3129, simple_loss=0.337, pruned_loss=0.1065, ctc_loss=0.1895, over 19687.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3222, pruned_loss=0.08971, ctc_loss=0.1688, over 3545450.38 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:35:14,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=69045.33333333333, ans=0.125
+2024-08-25 11:35:21,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=69098.66666666667, ans=0.1
+2024-08-25 11:35:40,624 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=69152.0, ans=0.125
+2024-08-25 11:35:41,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69152.0, ans=0.1
+2024-08-25 11:36:10,444 INFO [train.py:1114] (2/4) Epoch 6, batch 550, loss[loss=0.324, simple_loss=0.3464, pruned_loss=0.1078, ctc_loss=0.215, over 19302.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3228, pruned_loss=0.09039, ctc_loss=0.17, over 3606897.15 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:36:16,789 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.249e-02
+2024-08-25 11:36:25,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=69365.33333333333, ans=0.125
+2024-08-25 11:36:46,536 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.100e+02 2.439e+02 2.966e+02 5.259e+02, threshold=4.878e+02, percent-clipped=1.0
+2024-08-25 11:36:54,902 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=69472.0, ans=0.2
+2024-08-25 11:37:07,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=69525.33333333333, ans=0.2
+2024-08-25 11:37:07,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=69525.33333333333, ans=0.2
+2024-08-25 11:37:12,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=69525.33333333333, ans=0.2
+2024-08-25 11:37:27,253 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.08 vs. limit=10.0
+2024-08-25 11:37:28,774 INFO [train.py:1114] (2/4) Epoch 6, batch 600, loss[loss=0.3176, simple_loss=0.3481, pruned_loss=0.105, ctc_loss=0.1928, over 19395.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.3229, pruned_loss=0.09023, ctc_loss=0.1699, over 3664060.99 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:37:50,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=69632.0, ans=0.125
+2024-08-25 11:38:04,560 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=69685.33333333333, ans=0.125
+2024-08-25 11:38:11,836 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.46 vs. limit=6.0
+2024-08-25 11:38:58,901 INFO [train.py:1114] (2/4) Epoch 6, batch 650, loss[loss=0.2643, simple_loss=0.3162, pruned_loss=0.07817, ctc_loss=0.1401, over 19766.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3226, pruned_loss=0.09012, ctc_loss=0.1695, over 3714679.70 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:39:05,854 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=69845.33333333333, ans=0.125
+2024-08-25 11:39:06,018 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=69845.33333333333, ans=0.0
+2024-08-25 11:39:50,474 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.931e+02 2.137e+02 2.425e+02 3.711e+02, threshold=4.274e+02, percent-clipped=0.0
+2024-08-25 11:40:16,220 INFO [train.py:1114] (2/4) Epoch 6, batch 700, loss[loss=0.2961, simple_loss=0.3159, pruned_loss=0.09884, ctc_loss=0.1966, over 19709.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3227, pruned_loss=0.08985, ctc_loss=0.1692, over 3746895.95 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:40:41,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=70165.33333333333, ans=0.125
+2024-08-25 11:41:40,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=70272.0, ans=0.125
+2024-08-25 11:42:12,745 INFO [train.py:1114] (2/4) Epoch 6, batch 750, loss[loss=0.2856, simple_loss=0.3339, pruned_loss=0.08503, ctc_loss=0.1681, over 19511.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3223, pruned_loss=0.08969, ctc_loss=0.1689, over 3773056.43 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:42:37,211 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=70485.33333333333, ans=0.2
+2024-08-25 11:43:04,764 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.72 vs. limit=10.0
+2024-08-25 11:43:06,588 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=70538.66666666667, ans=0.2
+2024-08-25 11:43:09,521 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.022e+02 2.297e+02 2.693e+02 4.652e+02, threshold=4.594e+02, percent-clipped=2.0
+2024-08-25 11:43:10,149 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=9.28 vs. limit=15.0
+2024-08-25 11:43:14,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=70538.66666666667, ans=0.0
+2024-08-25 11:43:21,357 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=70592.0, ans=0.5
+2024-08-25 11:43:27,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=70592.0, ans=0.1
+2024-08-25 11:43:31,985 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.44 vs. limit=22.5
+2024-08-25 11:43:34,909 INFO [train.py:1114] (2/4) Epoch 6, batch 800, loss[loss=0.2428, simple_loss=0.2848, pruned_loss=0.07288, ctc_loss=0.1377, over 19389.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3222, pruned_loss=0.08957, ctc_loss=0.1685, over 3794047.98 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:43:36,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=70645.33333333333, ans=0.125
+2024-08-25 11:44:24,816 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.93 vs. limit=15.0
+2024-08-25 11:44:46,476 INFO [train.py:1114] (2/4) Epoch 6, batch 850, loss[loss=0.2826, simple_loss=0.3208, pruned_loss=0.08789, ctc_loss=0.1717, over 19665.00 frames. ], tot_loss[loss=0.2833, simple_loss=0.3215, pruned_loss=0.08909, ctc_loss=0.1675, over 3813304.36 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:45:13,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=70965.33333333333, ans=0.0
+2024-08-25 11:45:15,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=70965.33333333333, ans=0.1
+2024-08-25 11:45:15,883 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.47 vs. limit=15.0
+2024-08-25 11:45:20,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=70965.33333333333, ans=0.0
+2024-08-25 11:45:46,252 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.893e+02 2.077e+02 2.374e+02 4.075e+02, threshold=4.154e+02, percent-clipped=0.0
+2024-08-25 11:45:55,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71125.33333333333, ans=0.1
+2024-08-25 11:46:02,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=71125.33333333333, ans=0.125
+2024-08-25 11:46:07,487 INFO [train.py:1114] (2/4) Epoch 6, batch 900, loss[loss=0.2727, simple_loss=0.3026, pruned_loss=0.08787, ctc_loss=0.1678, over 19785.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3215, pruned_loss=0.08919, ctc_loss=0.1676, over 3818368.18 frames. ], batch size: 49, lr: 2.39e-02, grad_scale: 16.0
+2024-08-25 11:46:19,514 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.04 vs. limit=15.0
+2024-08-25 11:46:31,548 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.98 vs. limit=6.0
+2024-08-25 11:46:32,110 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=71232.0, ans=0.2
+2024-08-25 11:46:43,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=71285.33333333333, ans=0.025
+2024-08-25 11:46:54,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=71338.66666666667, ans=0.0
+2024-08-25 11:47:01,115 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.27 vs. limit=22.5
+2024-08-25 11:47:12,595 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=71392.0, ans=0.2
+2024-08-25 11:47:21,596 INFO [train.py:1114] (2/4) Epoch 6, batch 950, loss[loss=0.2941, simple_loss=0.3186, pruned_loss=0.09886, ctc_loss=0.1795, over 19507.00 frames. ], tot_loss[loss=0.2832, simple_loss=0.3216, pruned_loss=0.08894, ctc_loss=0.1673, over 3820269.97 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:47:22,610 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.88 vs. limit=22.5
+2024-08-25 11:47:26,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71445.33333333333, ans=0.1
+2024-08-25 11:47:28,167 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=71445.33333333333, ans=0.0
+2024-08-25 11:47:42,052 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.85 vs. limit=15.0
+2024-08-25 11:48:12,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=71552.0, ans=0.0
+2024-08-25 11:48:13,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=71552.0, ans=0.1
+2024-08-25 11:48:21,823 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.67 vs. limit=15.0
+2024-08-25 11:48:23,508 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.900e+02 2.167e+02 2.553e+02 4.088e+02, threshold=4.334e+02, percent-clipped=0.0
+2024-08-25 11:48:25,856 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=71605.33333333333, ans=0.0
+2024-08-25 11:49:03,420 INFO [train.py:1114] (2/4) Epoch 6, batch 1000, loss[loss=0.2573, simple_loss=0.2977, pruned_loss=0.07851, ctc_loss=0.1498, over 19872.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3224, pruned_loss=0.08968, ctc_loss=0.1686, over 3816465.51 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:49:10,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=71712.0, ans=0.125
+2024-08-25 11:49:13,069 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=71712.0, ans=0.125
+2024-08-25 11:49:14,925 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.07 vs. limit=15.0
+2024-08-25 11:49:25,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=71765.33333333333, ans=0.07
+2024-08-25 11:50:05,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=71872.0, ans=0.0
+2024-08-25 11:50:33,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-25 11:50:46,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-25 11:50:57,806 INFO [train.py:1114] (2/4) Epoch 6, batch 1050, loss[loss=0.2971, simple_loss=0.3388, pruned_loss=0.09373, ctc_loss=0.1699, over 19849.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3215, pruned_loss=0.08939, ctc_loss=0.1681, over 3823461.72 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:51:03,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71978.66666666667, ans=0.1
+2024-08-25 11:51:42,584 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.48 vs. limit=12.0
+2024-08-25 11:51:43,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=72085.33333333333, ans=0.025
+2024-08-25 11:51:53,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=72085.33333333333, ans=0.07
+2024-08-25 11:52:00,134 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.944e+02 2.201e+02 2.550e+02 3.957e+02, threshold=4.403e+02, percent-clipped=0.0
+2024-08-25 11:52:48,882 INFO [train.py:1114] (2/4) Epoch 6, batch 1100, loss[loss=0.2398, simple_loss=0.2933, pruned_loss=0.06758, ctc_loss=0.1278, over 19575.00 frames. ], tot_loss[loss=0.283, simple_loss=0.321, pruned_loss=0.08902, ctc_loss=0.1674, over 3830010.05 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:53:07,834 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.85 vs. limit=22.5
+2024-08-25 11:53:58,660 INFO [train.py:1114] (2/4) Epoch 6, batch 1150, loss[loss=0.3008, simple_loss=0.3298, pruned_loss=0.09939, ctc_loss=0.1825, over 19575.00 frames. ], tot_loss[loss=0.2845, simple_loss=0.3218, pruned_loss=0.08988, ctc_loss=0.1687, over 3829261.88 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:54:04,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=72512.0, ans=0.2
+2024-08-25 11:54:27,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=72565.33333333333, ans=0.0
+2024-08-25 11:54:43,452 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.952e+02 2.194e+02 2.505e+02 4.680e+02, threshold=4.387e+02, percent-clipped=1.0
+2024-08-25 11:54:54,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=72725.33333333333, ans=0.025
+2024-08-25 11:55:06,394 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:55:08,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=72725.33333333333, ans=0.125
+2024-08-25 11:55:08,695 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=72725.33333333333, ans=0.0
+2024-08-25 11:55:09,122 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.35 vs. limit=15.0
+2024-08-25 11:55:11,898 INFO [train.py:1114] (2/4) Epoch 6, batch 1200, loss[loss=0.2657, simple_loss=0.3201, pruned_loss=0.07814, ctc_loss=0.1377, over 19839.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3229, pruned_loss=0.09048, ctc_loss=0.17, over 3825281.55 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:55:22,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72778.66666666667, ans=0.1
+2024-08-25 11:55:50,345 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.03 vs. limit=15.0
+2024-08-25 11:56:20,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=72938.66666666667, ans=0.125
+2024-08-25 11:56:25,187 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=72938.66666666667, ans=0.0
+2024-08-25 11:56:41,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72992.0, ans=0.1
+2024-08-25 11:56:44,369 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.53 vs. limit=15.0
+2024-08-25 11:56:53,105 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.32 vs. limit=12.0
+2024-08-25 11:56:55,077 INFO [train.py:1114] (2/4) Epoch 6, batch 1250, loss[loss=0.3028, simple_loss=0.331, pruned_loss=0.1005, ctc_loss=0.1839, over 19541.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3235, pruned_loss=0.09033, ctc_loss=0.1698, over 3842971.79 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:57:34,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=73152.0, ans=0.09899494936611666
+2024-08-25 11:57:55,341 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=73152.0, ans=0.125
+2024-08-25 11:58:03,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=73205.33333333333, ans=0.5
+2024-08-25 11:58:13,325 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.073e+02 2.305e+02 2.660e+02 4.224e+02, threshold=4.609e+02, percent-clipped=0.0
+2024-08-25 11:58:26,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=73258.66666666667, ans=0.125
+2024-08-25 11:58:35,040 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.08 vs. limit=22.5
+2024-08-25 11:58:46,686 INFO [train.py:1114] (2/4) Epoch 6, batch 1300, loss[loss=0.2985, simple_loss=0.3413, pruned_loss=0.09138, ctc_loss=0.1821, over 18813.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3225, pruned_loss=0.08925, ctc_loss=0.168, over 3845774.18 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:58:46,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=73312.0, ans=0.125
+2024-08-25 11:59:03,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=73312.0, ans=0.125
+2024-08-25 11:59:34,435 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=73365.33333333333, ans=0.05
+2024-08-25 11:59:53,856 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=73472.0, ans=0.2
+2024-08-25 12:00:13,503 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=73525.33333333333, ans=0.0
+2024-08-25 12:00:19,994 INFO [train.py:1114] (2/4) Epoch 6, batch 1350, loss[loss=0.2976, simple_loss=0.334, pruned_loss=0.09379, ctc_loss=0.1841, over 19767.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3222, pruned_loss=0.08914, ctc_loss=0.1675, over 3857695.64 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 12:00:56,390 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.74 vs. limit=15.0
+2024-08-25 12:01:01,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=73738.66666666667, ans=0.125
+2024-08-25 12:01:04,995 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.025e+02 2.295e+02 2.579e+02 4.133e+02, threshold=4.590e+02, percent-clipped=0.0
+2024-08-25 12:01:30,906 INFO [train.py:1114] (2/4) Epoch 6, batch 1400, loss[loss=0.2691, simple_loss=0.3019, pruned_loss=0.08707, ctc_loss=0.1554, over 19662.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3217, pruned_loss=0.08881, ctc_loss=0.1667, over 3865002.34 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:01:34,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=73845.33333333333, ans=0.125
+2024-08-25 12:01:50,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=73898.66666666667, ans=0.1
+2024-08-25 12:02:22,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=73952.0, ans=0.2
+2024-08-25 12:02:39,610 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=74058.66666666667, ans=0.2
+2024-08-25 12:02:40,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=74058.66666666667, ans=0.05
+2024-08-25 12:02:52,962 INFO [train.py:1114] (2/4) Epoch 6, batch 1450, loss[loss=0.2876, simple_loss=0.3269, pruned_loss=0.09001, ctc_loss=0.1706, over 19682.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3224, pruned_loss=0.08937, ctc_loss=0.1679, over 3862462.85 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:03:07,855 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.46 vs. limit=12.0
+2024-08-25 12:03:44,239 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74218.66666666667, ans=0.1
+2024-08-25 12:03:53,268 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 1.998e+02 2.330e+02 2.811e+02 4.670e+02, threshold=4.661e+02, percent-clipped=1.0
+2024-08-25 12:04:24,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=74378.66666666667, ans=0.125
+2024-08-25 12:04:25,538 INFO [train.py:1114] (2/4) Epoch 6, batch 1500, loss[loss=0.2853, simple_loss=0.3285, pruned_loss=0.08809, ctc_loss=0.1645, over 19537.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3231, pruned_loss=0.08994, ctc_loss=0.1689, over 3861892.67 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:04:28,582 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.64 vs. limit=10.0
+2024-08-25 12:04:50,189 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=74485.33333333333, ans=0.0
+2024-08-25 12:05:38,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=74538.66666666667, ans=0.125
+2024-08-25 12:05:38,793 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=74538.66666666667, ans=0.125
+2024-08-25 12:05:42,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=74538.66666666667, ans=0.025
+2024-08-25 12:05:43,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74538.66666666667, ans=0.1
+2024-08-25 12:06:01,376 INFO [train.py:1114] (2/4) Epoch 6, batch 1550, loss[loss=0.3382, simple_loss=0.3583, pruned_loss=0.1152, ctc_loss=0.2191, over 19637.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3233, pruned_loss=0.09032, ctc_loss=0.1698, over 3847052.20 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:06:06,686 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.47 vs. limit=12.0
+2024-08-25 12:06:11,964 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=74698.66666666667, ans=0.125
+2024-08-25 12:06:12,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=74698.66666666667, ans=0.125
+2024-08-25 12:06:14,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=74698.66666666667, ans=0.2
+2024-08-25 12:06:19,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=74698.66666666667, ans=0.125
+2024-08-25 12:06:37,903 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.061e+02 2.512e+02 3.027e+02 4.789e+02, threshold=5.024e+02, percent-clipped=1.0
+2024-08-25 12:07:01,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=74912.0, ans=0.0
+2024-08-25 12:07:01,753 INFO [train.py:1114] (2/4) Epoch 6, batch 1600, loss[loss=0.2728, simple_loss=0.3298, pruned_loss=0.07823, ctc_loss=0.1483, over 19842.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3224, pruned_loss=0.08945, ctc_loss=0.1682, over 3836413.87 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:07:58,986 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=75125.33333333333, ans=0.125
+2024-08-25 12:08:00,999 INFO [train.py:1114] (2/4) Epoch 6, batch 1650, loss[loss=0.3018, simple_loss=0.3386, pruned_loss=0.0964, ctc_loss=0.1802, over 19652.00 frames. ], tot_loss[loss=0.2833, simple_loss=0.3215, pruned_loss=0.08905, ctc_loss=0.1673, over 3832415.40 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:08:07,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=75178.66666666667, ans=0.125
+2024-08-25 12:08:15,269 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.78 vs. limit=22.5
+2024-08-25 12:08:16,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=75232.0, ans=0.125
+2024-08-25 12:08:17,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=75232.0, ans=0.1
+2024-08-25 12:08:34,877 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.97 vs. limit=12.0
+2024-08-25 12:08:37,766 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.893e+02 2.381e+02 2.784e+02 7.281e+02, threshold=4.762e+02, percent-clipped=1.0
+2024-08-25 12:08:44,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=75338.66666666667, ans=0.0
+2024-08-25 12:08:49,234 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=75392.0, ans=0.125
+2024-08-25 12:08:57,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys.whitening_limit, batch_count=75392.0, ans=6.0
+2024-08-25 12:09:00,131 INFO [train.py:1114] (2/4) Epoch 6, batch 1700, loss[loss=0.2442, simple_loss=0.2858, pruned_loss=0.07321, ctc_loss=0.1403, over 19646.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3207, pruned_loss=0.08802, ctc_loss=0.1658, over 3847042.32 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-25 12:09:02,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=75445.33333333333, ans=0.0
+2024-08-25 12:09:20,774 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=75498.66666666667, ans=0.0
+2024-08-25 12:09:20,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=75498.66666666667, ans=10.0
+2024-08-25 12:09:37,173 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=75605.33333333333, ans=0.0
+2024-08-25 12:09:37,330 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=75605.33333333333, ans=0.125
+2024-08-25 12:09:55,935 INFO [train.py:1114] (2/4) Epoch 6, batch 1750, loss[loss=0.2421, simple_loss=0.2762, pruned_loss=0.07497, ctc_loss=0.1455, over 19618.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3198, pruned_loss=0.08748, ctc_loss=0.1648, over 3852057.66 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 16.0
+2024-08-25 12:10:08,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=75765.33333333333, ans=0.0
+2024-08-25 12:10:10,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=75765.33333333333, ans=0.125
+2024-08-25 12:10:13,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=75765.33333333333, ans=0.125
+2024-08-25 12:10:32,754 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.890e+02 2.130e+02 2.587e+02 4.262e+02, threshold=4.260e+02, percent-clipped=0.0
+2024-08-25 12:10:35,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=75872.0, ans=0.0
+2024-08-25 12:10:38,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=75925.33333333333, ans=0.0
+2024-08-25 12:10:41,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=75925.33333333333, ans=0.1
+2024-08-25 12:10:50,378 INFO [train.py:1114] (2/4) Epoch 6, batch 1800, loss[loss=0.3008, simple_loss=0.3378, pruned_loss=0.09652, ctc_loss=0.1772, over 19612.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3198, pruned_loss=0.08725, ctc_loss=0.1644, over 3854282.85 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 8.0
+2024-08-25 12:11:03,444 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=76032.0, ans=0.1
+2024-08-25 12:11:13,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=76085.33333333333, ans=0.0
+2024-08-25 12:11:21,363 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=76085.33333333333, ans=0.2
+2024-08-25 12:11:35,673 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.73 vs. limit=10.0
+2024-08-25 12:11:36,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76192.0, ans=0.1
+2024-08-25 12:11:44,880 INFO [train.py:1114] (2/4) Epoch 6, batch 1850, loss[loss=0.2926, simple_loss=0.3356, pruned_loss=0.09107, ctc_loss=0.1688, over 19589.00 frames. ], tot_loss[loss=0.2798, simple_loss=0.3193, pruned_loss=0.08725, ctc_loss=0.1644, over 3857785.01 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:11:45,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=76245.33333333333, ans=0.0
+2024-08-25 12:12:01,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=76298.66666666667, ans=0.125
+2024-08-25 12:12:06,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=76352.0, ans=0.0
+2024-08-25 12:12:22,229 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.994e+02 2.285e+02 2.712e+02 4.413e+02, threshold=4.569e+02, percent-clipped=2.0
+2024-08-25 12:12:43,373 INFO [train.py:1114] (2/4) Epoch 6, batch 1900, loss[loss=0.303, simple_loss=0.342, pruned_loss=0.09536, ctc_loss=0.1831, over 19662.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3195, pruned_loss=0.08687, ctc_loss=0.1636, over 3862819.37 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:12:53,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=76565.33333333333, ans=0.0
+2024-08-25 12:13:02,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=76565.33333333333, ans=0.2
+2024-08-25 12:13:04,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=76618.66666666667, ans=0.0
+2024-08-25 12:13:05,574 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.38 vs. limit=15.0
+2024-08-25 12:13:07,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=76618.66666666667, ans=0.125
+2024-08-25 12:13:11,182 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.38 vs. limit=15.0
+2024-08-25 12:13:40,540 INFO [train.py:1114] (2/4) Epoch 6, batch 1950, loss[loss=0.2702, simple_loss=0.3098, pruned_loss=0.08335, ctc_loss=0.1595, over 19603.00 frames. ], tot_loss[loss=0.2796, simple_loss=0.3203, pruned_loss=0.08675, ctc_loss=0.1635, over 3871571.98 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:13:49,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=76778.66666666667, ans=0.2
+2024-08-25 12:14:04,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=76885.33333333333, ans=0.125
+2024-08-25 12:14:11,131 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=76885.33333333333, ans=0.1
+2024-08-25 12:14:13,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=76938.66666666667, ans=0.125
+2024-08-25 12:14:15,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=76938.66666666667, ans=0.1
+2024-08-25 12:14:18,626 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 1.890e+02 2.137e+02 2.349e+02 3.743e+02, threshold=4.275e+02, percent-clipped=0.0
+2024-08-25 12:14:35,984 INFO [train.py:1114] (2/4) Epoch 6, batch 2000, loss[loss=0.2617, simple_loss=0.2892, pruned_loss=0.08493, ctc_loss=0.1608, over 19653.00 frames. ], tot_loss[loss=0.2807, simple_loss=0.3208, pruned_loss=0.08736, ctc_loss=0.1646, over 3855460.36 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:14:38,373 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:14:39,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=77045.33333333333, ans=0.0
+2024-08-25 12:14:49,952 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.16 vs. limit=15.0
+2024-08-25 12:15:28,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=77258.66666666667, ans=0.1
+2024-08-25 12:15:30,091 INFO [train.py:1114] (2/4) Epoch 6, batch 2050, loss[loss=0.2523, simple_loss=0.2929, pruned_loss=0.0767, ctc_loss=0.1457, over 19722.00 frames. ], tot_loss[loss=0.2806, simple_loss=0.32, pruned_loss=0.08765, ctc_loss=0.1649, over 3851218.18 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:15:44,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=77312.0, ans=0.125
+2024-08-25 12:15:56,215 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=77365.33333333333, ans=0.125
+2024-08-25 12:15:59,899 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.34 vs. limit=15.0
+2024-08-25 12:16:00,623 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=77418.66666666667, ans=0.07
+2024-08-25 12:16:14,687 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.955e+02 2.380e+02 2.986e+02 1.021e+03, threshold=4.760e+02, percent-clipped=7.0
+2024-08-25 12:16:17,373 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.23 vs. limit=15.0
+2024-08-25 12:16:32,240 INFO [train.py:1114] (2/4) Epoch 6, batch 2100, loss[loss=0.2725, simple_loss=0.3147, pruned_loss=0.08303, ctc_loss=0.1608, over 19787.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3192, pruned_loss=0.08697, ctc_loss=0.1636, over 3858067.76 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:16:37,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=77578.66666666667, ans=0.0
+2024-08-25 12:16:53,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=77685.33333333333, ans=0.0
+2024-08-25 12:17:07,005 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=77738.66666666667, ans=0.0
+2024-08-25 12:17:28,069 INFO [train.py:1114] (2/4) Epoch 6, batch 2150, loss[loss=0.2863, simple_loss=0.3202, pruned_loss=0.09269, ctc_loss=0.1674, over 19851.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3188, pruned_loss=0.08674, ctc_loss=0.1629, over 3868909.89 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:17:38,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=77898.66666666667, ans=0.125
+2024-08-25 12:17:45,311 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=77898.66666666667, ans=0.0
+2024-08-25 12:18:07,069 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.08 vs. limit=15.0
+2024-08-25 12:18:17,795 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.01 vs. limit=15.0
+2024-08-25 12:18:19,486 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.877e+02 2.258e+02 2.799e+02 6.726e+02, threshold=4.515e+02, percent-clipped=2.0
+2024-08-25 12:18:25,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.whiten.whitening_limit, batch_count=78058.66666666667, ans=12.0
+2024-08-25 12:19:01,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=78058.66666666667, ans=0.07
+2024-08-25 12:19:06,971 INFO [train.py:1114] (2/4) Epoch 6, batch 2200, loss[loss=0.2865, simple_loss=0.332, pruned_loss=0.08719, ctc_loss=0.1664, over 19581.00 frames. ], tot_loss[loss=0.2785, simple_loss=0.3186, pruned_loss=0.08672, ctc_loss=0.1627, over 3867615.28 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:19:20,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=78165.33333333333, ans=0.0
+2024-08-25 12:19:26,273 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.11 vs. limit=10.0
+2024-08-25 12:19:28,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=78218.66666666667, ans=0.025
+2024-08-25 12:20:02,383 INFO [train.py:1114] (2/4) Epoch 6, batch 2250, loss[loss=0.2441, simple_loss=0.3026, pruned_loss=0.06713, ctc_loss=0.1284, over 19622.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3188, pruned_loss=0.08686, ctc_loss=0.1631, over 3866433.00 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:20:14,730 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.00 vs. limit=15.0
+2024-08-25 12:20:15,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=78432.0, ans=0.025
+2024-08-25 12:20:36,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=78538.66666666667, ans=0.125
+2024-08-25 12:20:38,633 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.005e+02 2.234e+02 2.581e+02 4.325e+02, threshold=4.468e+02, percent-clipped=0.0
+2024-08-25 12:20:42,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=78538.66666666667, ans=0.125
+2024-08-25 12:20:44,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=78592.0, ans=0.1
+2024-08-25 12:20:56,304 INFO [train.py:1114] (2/4) Epoch 6, batch 2300, loss[loss=0.2481, simple_loss=0.2906, pruned_loss=0.07506, ctc_loss=0.1389, over 19498.00 frames. ], tot_loss[loss=0.2772, simple_loss=0.3174, pruned_loss=0.08617, ctc_loss=0.1617, over 3860656.19 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:20:59,800 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=78645.33333333333, ans=0.125
+2024-08-25 12:21:04,993 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=78645.33333333333, ans=0.1
+2024-08-25 12:21:18,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=78752.0, ans=0.125
+2024-08-25 12:21:22,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=78752.0, ans=0.025
+2024-08-25 12:21:45,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=78858.66666666667, ans=0.2
+2024-08-25 12:21:52,649 INFO [train.py:1114] (2/4) Epoch 6, batch 2350, loss[loss=0.3017, simple_loss=0.343, pruned_loss=0.09596, ctc_loss=0.1714, over 19700.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.318, pruned_loss=0.08672, ctc_loss=0.1625, over 3863792.78 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:21:59,811 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=78912.0, ans=0.1
+2024-08-25 12:22:26,446 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.00 vs. limit=15.0
+2024-08-25 12:22:28,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=79072.0, ans=0.2
+2024-08-25 12:22:30,287 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 2.097e+02 2.553e+02 3.084e+02 6.792e+02, threshold=5.106e+02, percent-clipped=2.0
+2024-08-25 12:22:44,418 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.89 vs. limit=22.5
+2024-08-25 12:22:46,249 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.82 vs. limit=15.0
+2024-08-25 12:22:47,979 INFO [train.py:1114] (2/4) Epoch 6, batch 2400, loss[loss=0.284, simple_loss=0.3308, pruned_loss=0.08641, ctc_loss=0.1608, over 19254.00 frames. ], tot_loss[loss=0.2805, simple_loss=0.3204, pruned_loss=0.08752, ctc_loss=0.1638, over 3857246.83 frames. ], batch size: 71, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:23:15,364 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:23:35,492 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.54 vs. limit=15.0
+2024-08-25 12:23:45,690 INFO [train.py:1114] (2/4) Epoch 6, batch 2450, loss[loss=0.3147, simple_loss=0.3289, pruned_loss=0.1088, ctc_loss=0.207, over 13151.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.3256, pruned_loss=0.09189, ctc_loss=0.1724, over 3729667.10 frames. ], batch size: 140, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:23:48,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=79445.33333333333, ans=0.125
+2024-08-25 12:23:49,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=79445.33333333333, ans=0.125
+2024-08-25 12:24:14,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=79498.66666666667, ans=0.0
+2024-08-25 12:26:11,531 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.78 vs. limit=22.5
+2024-08-25 12:26:37,494 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=8.09 vs. limit=12.0
+2024-08-25 12:27:38,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=79605.33333333333, ans=0.0
+2024-08-25 12:27:38,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=79605.33333333333, ans=0.2
+2024-08-25 12:28:01,634 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.056e+02 2.291e+02 2.526e+02 5.572e+02, threshold=4.582e+02, percent-clipped=1.0
+2024-08-25 12:28:10,368 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.82 vs. limit=15.0
+2024-08-25 12:29:27,615 INFO [train.py:1114] (2/4) Epoch 7, batch 0, loss[loss=0.2845, simple_loss=0.3123, pruned_loss=0.09362, ctc_loss=0.1739, over 19811.00 frames. ], tot_loss[loss=0.2845, simple_loss=0.3123, pruned_loss=0.09362, ctc_loss=0.1739, over 19811.00 frames. ], batch size: 49, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:29:27,615 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 12:29:44,290 INFO [train.py:1146] (2/4) Epoch 7, validation: loss=0.2269, simple_loss=0.307, pruned_loss=0.05393, ctc_loss=0.0975, over 944034.00 frames.
+2024-08-25 12:29:44,290 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 12:29:45,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=79658.66666666667, ans=10.0
+2024-08-25 12:29:45,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=79658.66666666667, ans=0.04949747468305833
+2024-08-25 12:29:45,949 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.09 vs. limit=22.5
+2024-08-25 12:29:47,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=79658.66666666667, ans=0.1
+2024-08-25 12:30:05,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=79658.66666666667, ans=0.0
+2024-08-25 12:30:24,549 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=79712.0, ans=0.125
+2024-08-25 12:31:10,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=79765.33333333333, ans=0.125
+2024-08-25 12:33:04,672 INFO [train.py:1114] (2/4) Epoch 7, batch 50, loss[loss=0.2518, simple_loss=0.295, pruned_loss=0.07432, ctc_loss=0.1498, over 19711.00 frames. ], tot_loss[loss=0.2833, simple_loss=0.3232, pruned_loss=0.08843, ctc_loss=0.1663, over 843578.70 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:33:07,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=79925.33333333333, ans=0.125
+2024-08-25 12:33:19,865 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=13.98 vs. limit=12.0
+2024-08-25 12:33:43,956 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=20.20 vs. limit=22.5
+2024-08-25 12:33:57,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=80085.33333333333, ans=0.125
+2024-08-25 12:34:17,266 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.999e+02 2.246e+02 2.808e+02 5.514e+02, threshold=4.492e+02, percent-clipped=3.0
+2024-08-25 12:34:24,281 INFO [train.py:1114] (2/4) Epoch 7, batch 100, loss[loss=0.2408, simple_loss=0.2945, pruned_loss=0.06666, ctc_loss=0.1342, over 19722.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.3228, pruned_loss=0.08866, ctc_loss=0.1669, over 1498623.17 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:34:30,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=80192.0, ans=0.04949747468305833
+2024-08-25 12:34:54,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=80298.66666666667, ans=0.0
+2024-08-25 12:35:06,506 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.08 vs. limit=22.5
+2024-08-25 12:35:13,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=80405.33333333333, ans=0.025
+2024-08-25 12:35:23,290 INFO [train.py:1114] (2/4) Epoch 7, batch 150, loss[loss=0.2655, simple_loss=0.3002, pruned_loss=0.08394, ctc_loss=0.1574, over 19710.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3197, pruned_loss=0.08668, ctc_loss=0.1635, over 2027456.19 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:35:23,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=80458.66666666667, ans=0.125
+2024-08-25 12:35:24,064 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.37 vs. limit=8.0
+2024-08-25 12:35:34,937 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=80458.66666666667, ans=0.025
+2024-08-25 12:35:36,499 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.66 vs. limit=15.0
+2024-08-25 12:35:38,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80512.0, ans=0.1
+2024-08-25 12:35:38,645 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.38 vs. limit=12.0
+2024-08-25 12:35:40,974 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.95 vs. limit=22.5
+2024-08-25 12:36:15,485 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=80672.0, ans=0.125
+2024-08-25 12:36:18,829 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.959e+02 2.217e+02 2.953e+02 5.735e+02, threshold=4.434e+02, percent-clipped=2.0
+2024-08-25 12:36:22,661 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=80672.0, ans=0.07
+2024-08-25 12:36:26,003 INFO [train.py:1114] (2/4) Epoch 7, batch 200, loss[loss=0.3542, simple_loss=0.3656, pruned_loss=0.1247, ctc_loss=0.2333, over 18253.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3176, pruned_loss=0.08547, ctc_loss=0.1609, over 2436140.00 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:36:29,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=80725.33333333333, ans=0.125
+2024-08-25 12:36:37,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=80778.66666666667, ans=0.125
+2024-08-25 12:36:50,840 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=80832.0, ans=0.0
+2024-08-25 12:36:53,094 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=80832.0, ans=0.125
+2024-08-25 12:37:03,269 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=80885.33333333333, ans=0.1
+2024-08-25 12:37:04,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=80885.33333333333, ans=0.125
+2024-08-25 12:37:04,686 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.56 vs. limit=6.0
+2024-08-25 12:37:19,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=80938.66666666667, ans=0.125
+2024-08-25 12:37:22,879 INFO [train.py:1114] (2/4) Epoch 7, batch 250, loss[loss=0.3292, simple_loss=0.3559, pruned_loss=0.1108, ctc_loss=0.2024, over 19380.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3169, pruned_loss=0.08478, ctc_loss=0.1597, over 2756169.51 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:38:16,698 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.901e+02 2.294e+02 2.833e+02 4.254e+02, threshold=4.587e+02, percent-clipped=0.0
+2024-08-25 12:38:21,940 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.48 vs. limit=15.0
+2024-08-25 12:38:23,364 INFO [train.py:1114] (2/4) Epoch 7, batch 300, loss[loss=0.3232, simple_loss=0.353, pruned_loss=0.1068, ctc_loss=0.1993, over 19535.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3159, pruned_loss=0.08438, ctc_loss=0.1593, over 3001114.53 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:38:34,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=81312.0, ans=0.09899494936611666
+2024-08-25 12:38:35,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=81312.0, ans=0.125
+2024-08-25 12:38:41,801 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.24 vs. limit=10.0
+2024-08-25 12:38:42,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=81312.0, ans=0.125
+2024-08-25 12:38:47,328 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.81 vs. limit=15.0
+2024-08-25 12:39:06,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=81418.66666666667, ans=0.05
+2024-08-25 12:39:07,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=81418.66666666667, ans=0.125
+2024-08-25 12:39:44,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=81472.0, ans=0.125
+2024-08-25 12:39:52,684 INFO [train.py:1114] (2/4) Epoch 7, batch 350, loss[loss=0.2622, simple_loss=0.2975, pruned_loss=0.08301, ctc_loss=0.152, over 19740.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3155, pruned_loss=0.08381, ctc_loss=0.1582, over 3191443.24 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:40:01,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=81525.33333333333, ans=0.1
+2024-08-25 12:40:05,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=81578.66666666667, ans=0.125
+2024-08-25 12:40:25,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=81632.0, ans=0.125
+2024-08-25 12:40:43,966 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.980e+02 2.268e+02 2.810e+02 5.782e+02, threshold=4.535e+02, percent-clipped=1.0
+2024-08-25 12:40:45,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=81738.66666666667, ans=0.125
+2024-08-25 12:40:50,666 INFO [train.py:1114] (2/4) Epoch 7, batch 400, loss[loss=0.256, simple_loss=0.3114, pruned_loss=0.07327, ctc_loss=0.1352, over 19495.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.315, pruned_loss=0.0835, ctc_loss=0.1575, over 3343247.64 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:40:52,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=81792.0, ans=0.125
+2024-08-25 12:41:00,073 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=81792.0, ans=0.125
+2024-08-25 12:41:03,828 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-25 12:41:09,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=81845.33333333333, ans=0.0
+2024-08-25 12:41:18,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81898.66666666667, ans=0.1
+2024-08-25 12:41:22,165 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.73 vs. limit=15.0
+2024-08-25 12:41:52,351 INFO [train.py:1114] (2/4) Epoch 7, batch 450, loss[loss=0.2554, simple_loss=0.3104, pruned_loss=0.0725, ctc_loss=0.1383, over 19607.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3154, pruned_loss=0.08387, ctc_loss=0.1581, over 3450420.32 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:42:01,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82058.66666666667, ans=0.1
+2024-08-25 12:42:08,712 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=82112.0, ans=0.125
+2024-08-25 12:42:23,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=82165.33333333333, ans=0.125
+2024-08-25 12:42:26,483 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.89 vs. limit=22.5
+2024-08-25 12:42:43,144 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 1.947e+02 2.448e+02 2.960e+02 4.262e+02, threshold=4.896e+02, percent-clipped=0.0
+2024-08-25 12:42:51,475 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.57 vs. limit=22.5
+2024-08-25 12:42:52,049 INFO [train.py:1114] (2/4) Epoch 7, batch 500, loss[loss=0.3172, simple_loss=0.3429, pruned_loss=0.1059, ctc_loss=0.1989, over 19666.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.3151, pruned_loss=0.08364, ctc_loss=0.1575, over 3546182.01 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:43:02,094 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=82325.33333333333, ans=0.2
+2024-08-25 12:43:02,566 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.37 vs. limit=22.5
+2024-08-25 12:43:07,270 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=8.51 vs. limit=12.0
+2024-08-25 12:43:12,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=82378.66666666667, ans=0.125
+2024-08-25 12:43:16,252 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=82432.0, ans=0.0
+2024-08-25 12:43:17,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=82432.0, ans=0.125
+2024-08-25 12:43:45,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=82538.66666666667, ans=0.0
+2024-08-25 12:43:45,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=82538.66666666667, ans=0.125
+2024-08-25 12:43:51,834 INFO [train.py:1114] (2/4) Epoch 7, batch 550, loss[loss=0.3026, simple_loss=0.3413, pruned_loss=0.09491, ctc_loss=0.1853, over 19277.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3154, pruned_loss=0.08411, ctc_loss=0.1584, over 3608003.59 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:44:04,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=82645.33333333333, ans=0.125
+2024-08-25 12:44:35,838 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=82752.0, ans=0.0
+2024-08-25 12:44:44,962 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.000e+02 2.364e+02 2.910e+02 5.356e+02, threshold=4.728e+02, percent-clipped=1.0
+2024-08-25 12:44:52,599 INFO [train.py:1114] (2/4) Epoch 7, batch 600, loss[loss=0.3198, simple_loss=0.3495, pruned_loss=0.1068, ctc_loss=0.1916, over 19349.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.3162, pruned_loss=0.08418, ctc_loss=0.1584, over 3666110.17 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-25 12:44:59,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=82858.66666666667, ans=0.0
+2024-08-25 12:45:02,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=82858.66666666667, ans=0.0
+2024-08-25 12:45:12,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=82912.0, ans=0.2
+2024-08-25 12:45:28,423 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn1.whiten.whitening_limit, batch_count=83018.66666666667, ans=22.5
+2024-08-25 12:45:38,391 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=83072.0, ans=0.125
+2024-08-25 12:45:46,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=83072.0, ans=0.0
+2024-08-25 12:45:51,130 INFO [train.py:1114] (2/4) Epoch 7, batch 650, loss[loss=0.291, simple_loss=0.334, pruned_loss=0.08936, ctc_loss=0.1729, over 19768.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3153, pruned_loss=0.08383, ctc_loss=0.1578, over 3716263.92 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:46:02,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=83178.66666666667, ans=0.125
+2024-08-25 12:46:09,143 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.01 vs. limit=22.5
+2024-08-25 12:46:32,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=83285.33333333333, ans=0.025
+2024-08-25 12:46:47,177 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.844e+02 2.004e+02 2.285e+02 4.065e+02, threshold=4.009e+02, percent-clipped=0.0
+2024-08-25 12:46:52,901 INFO [train.py:1114] (2/4) Epoch 7, batch 700, loss[loss=0.2352, simple_loss=0.2894, pruned_loss=0.0652, ctc_loss=0.1266, over 19700.00 frames. ], tot_loss[loss=0.2731, simple_loss=0.3152, pruned_loss=0.08387, ctc_loss=0.1579, over 3748949.27 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:47:07,584 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=83445.33333333333, ans=0.125
+2024-08-25 12:47:13,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=83445.33333333333, ans=0.125
+2024-08-25 12:47:13,269 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=83445.33333333333, ans=0.125
+2024-08-25 12:47:21,059 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=83498.66666666667, ans=0.1
+2024-08-25 12:47:49,583 INFO [train.py:1114] (2/4) Epoch 7, batch 750, loss[loss=0.2957, simple_loss=0.335, pruned_loss=0.09344, ctc_loss=0.1737, over 19515.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3143, pruned_loss=0.08326, ctc_loss=0.1567, over 3774137.13 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:48:12,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=83712.0, ans=0.5
+2024-08-25 12:48:26,780 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=83818.66666666667, ans=0.125
+2024-08-25 12:48:45,006 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 1.885e+02 2.166e+02 2.690e+02 4.534e+02, threshold=4.331e+02, percent-clipped=3.0
+2024-08-25 12:48:50,697 INFO [train.py:1114] (2/4) Epoch 7, batch 800, loss[loss=0.2385, simple_loss=0.2879, pruned_loss=0.06835, ctc_loss=0.1311, over 19792.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3139, pruned_loss=0.08327, ctc_loss=0.1566, over 3796585.84 frames. ], batch size: 49, lr: 2.10e-02, grad_scale: 32.0
+2024-08-25 12:48:58,126 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.74 vs. limit=22.5
+2024-08-25 12:48:59,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=83925.33333333333, ans=0.025
+2024-08-25 12:49:01,280 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.21 vs. limit=15.0
+2024-08-25 12:49:17,128 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:49:25,712 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.37 vs. limit=22.5
+2024-08-25 12:49:39,439 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.32 vs. limit=12.0
+2024-08-25 12:49:40,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=84138.66666666667, ans=0.025
+2024-08-25 12:49:42,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=84138.66666666667, ans=0.125
+2024-08-25 12:49:45,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=84138.66666666667, ans=0.1
+2024-08-25 12:49:45,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=84138.66666666667, ans=0.125
+2024-08-25 12:49:48,118 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=84138.66666666667, ans=0.05
+2024-08-25 12:49:49,475 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.65 vs. limit=22.5
+2024-08-25 12:49:51,358 INFO [train.py:1114] (2/4) Epoch 7, batch 850, loss[loss=0.272, simple_loss=0.3224, pruned_loss=0.08153, ctc_loss=0.1462, over 19644.00 frames. ], tot_loss[loss=0.2697, simple_loss=0.3129, pruned_loss=0.08228, ctc_loss=0.1547, over 3815446.47 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:49:52,932 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.75 vs. limit=10.0
+2024-08-25 12:50:11,551 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.76 vs. limit=15.0
+2024-08-25 12:50:15,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=84298.66666666667, ans=0.0
+2024-08-25 12:50:20,549 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=84298.66666666667, ans=0.0
+2024-08-25 12:50:25,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=84352.0, ans=0.0
+2024-08-25 12:50:34,830 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.44 vs. limit=15.0
+2024-08-25 12:50:43,490 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 1.946e+02 2.270e+02 2.825e+02 4.143e+02, threshold=4.540e+02, percent-clipped=0.0
+2024-08-25 12:50:47,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=84405.33333333333, ans=0.125
+2024-08-25 12:50:49,134 INFO [train.py:1114] (2/4) Epoch 7, batch 900, loss[loss=0.2712, simple_loss=0.3067, pruned_loss=0.08663, ctc_loss=0.1562, over 19388.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3128, pruned_loss=0.08211, ctc_loss=0.1543, over 3818886.99 frames. ], batch size: 48, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:51:02,848 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=84458.66666666667, ans=0.0
+2024-08-25 12:51:20,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=84512.0, ans=0.1
+2024-08-25 12:51:21,950 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.63 vs. limit=22.5
+2024-08-25 12:51:55,586 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.76 vs. limit=22.5
+2024-08-25 12:52:01,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=84672.0, ans=0.0
+2024-08-25 12:52:04,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=84725.33333333333, ans=0.05
+2024-08-25 12:52:05,349 INFO [train.py:1114] (2/4) Epoch 7, batch 950, loss[loss=0.2373, simple_loss=0.285, pruned_loss=0.06898, ctc_loss=0.1293, over 19514.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3125, pruned_loss=0.08172, ctc_loss=0.1537, over 3819380.73 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 16.0
+2024-08-25 12:52:06,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=84725.33333333333, ans=0.1
+2024-08-25 12:52:10,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=84725.33333333333, ans=0.2
+2024-08-25 12:52:30,446 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:52:30,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=84832.0, ans=0.125
+2024-08-25 12:52:35,365 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=84832.0, ans=0.125
+2024-08-25 12:52:53,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=84938.66666666667, ans=10.0
+2024-08-25 12:52:59,189 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.065e+02 2.373e+02 2.949e+02 1.128e+03, threshold=4.746e+02, percent-clipped=6.0
+2024-08-25 12:53:05,293 INFO [train.py:1114] (2/4) Epoch 7, batch 1000, loss[loss=0.2456, simple_loss=0.3001, pruned_loss=0.06883, ctc_loss=0.1334, over 19845.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.314, pruned_loss=0.08272, ctc_loss=0.1553, over 3815233.65 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:53:15,871 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.92 vs. limit=15.0
+2024-08-25 12:53:46,035 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.80 vs. limit=15.0
+2024-08-25 12:53:47,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=85152.0, ans=0.0
+2024-08-25 12:53:49,649 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.83 vs. limit=15.0
+2024-08-25 12:54:05,111 INFO [train.py:1114] (2/4) Epoch 7, batch 1050, loss[loss=0.2614, simple_loss=0.3088, pruned_loss=0.07822, ctc_loss=0.144, over 19839.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3131, pruned_loss=0.0822, ctc_loss=0.1542, over 3822145.97 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:54:05,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=85258.66666666667, ans=0.0
+2024-08-25 12:54:13,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=85258.66666666667, ans=0.125
+2024-08-25 12:54:17,035 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.98 vs. limit=22.5
+2024-08-25 12:54:30,058 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=85365.33333333333, ans=0.04949747468305833
+2024-08-25 12:54:30,248 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.46 vs. limit=15.0
+2024-08-25 12:54:44,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=85418.66666666667, ans=0.025
+2024-08-25 12:54:46,330 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.43 vs. limit=22.5
+2024-08-25 12:54:52,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=85472.0, ans=0.125
+2024-08-25 12:54:54,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=85472.0, ans=0.2
+2024-08-25 12:55:01,667 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 1.918e+02 2.325e+02 2.776e+02 4.591e+02, threshold=4.650e+02, percent-clipped=1.0
+2024-08-25 12:55:06,561 INFO [train.py:1114] (2/4) Epoch 7, batch 1100, loss[loss=0.2653, simple_loss=0.3026, pruned_loss=0.08327, ctc_loss=0.1537, over 19583.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3126, pruned_loss=0.08211, ctc_loss=0.1541, over 3830826.14 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:55:14,141 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85525.33333333333, ans=0.1
+2024-08-25 12:55:22,005 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=85578.66666666667, ans=0.015
+2024-08-25 12:55:27,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=85578.66666666667, ans=0.125
+2024-08-25 12:55:51,331 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=85685.33333333333, ans=0.125
+2024-08-25 12:56:05,756 INFO [train.py:1114] (2/4) Epoch 7, batch 1150, loss[loss=0.2723, simple_loss=0.3014, pruned_loss=0.08952, ctc_loss=0.1603, over 19580.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3129, pruned_loss=0.08224, ctc_loss=0.1543, over 3830840.94 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:56:14,187 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=85792.0, ans=0.1
+2024-08-25 12:56:15,732 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.33 vs. limit=15.0
+2024-08-25 12:56:25,097 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=85845.33333333333, ans=0.1
+2024-08-25 12:56:33,186 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.36 vs. limit=15.0
+2024-08-25 12:56:34,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=85898.66666666667, ans=0.05
+2024-08-25 12:56:38,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=85898.66666666667, ans=0.025
+2024-08-25 12:56:43,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=85952.0, ans=0.125
+2024-08-25 12:56:58,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=86005.33333333333, ans=0.1
+2024-08-25 12:57:02,979 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.959e+02 2.167e+02 2.666e+02 4.946e+02, threshold=4.335e+02, percent-clipped=2.0
+2024-08-25 12:57:03,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=86005.33333333333, ans=0.0
+2024-08-25 12:57:07,708 INFO [train.py:1114] (2/4) Epoch 7, batch 1200, loss[loss=0.2819, simple_loss=0.3277, pruned_loss=0.08491, ctc_loss=0.1659, over 19829.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3142, pruned_loss=0.08293, ctc_loss=0.1558, over 3825576.42 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:57:48,758 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.78 vs. limit=15.0
+2024-08-25 12:57:50,230 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.32 vs. limit=8.0
+2024-08-25 12:57:50,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=86218.66666666667, ans=0.025
+2024-08-25 12:58:05,922 INFO [train.py:1114] (2/4) Epoch 7, batch 1250, loss[loss=0.281, simple_loss=0.3224, pruned_loss=0.08779, ctc_loss=0.16, over 19526.00 frames. ], tot_loss[loss=0.2714, simple_loss=0.3148, pruned_loss=0.08289, ctc_loss=0.1556, over 3843045.10 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:58:07,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=86325.33333333333, ans=0.125
+2024-08-25 12:58:24,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=86378.66666666667, ans=0.025
+2024-08-25 12:58:48,487 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.96 vs. limit=15.0
+2024-08-25 12:58:55,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=86538.66666666667, ans=0.04949747468305833
+2024-08-25 12:58:59,737 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:59:02,859 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.964e+02 2.304e+02 2.729e+02 5.465e+02, threshold=4.608e+02, percent-clipped=2.0
+2024-08-25 12:59:05,885 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.22 vs. limit=22.5
+2024-08-25 12:59:07,507 INFO [train.py:1114] (2/4) Epoch 7, batch 1300, loss[loss=0.3072, simple_loss=0.3416, pruned_loss=0.09971, ctc_loss=0.1834, over 18934.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3136, pruned_loss=0.08213, ctc_loss=0.1542, over 3846312.24 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:59:13,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=86592.0, ans=0.125
+2024-08-25 12:59:18,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-25 12:59:19,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-25 12:59:31,681 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:59:44,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=86752.0, ans=0.2
+2024-08-25 12:59:46,906 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=86752.0, ans=0.1
+2024-08-25 13:00:07,968 INFO [train.py:1114] (2/4) Epoch 7, batch 1350, loss[loss=0.2717, simple_loss=0.3078, pruned_loss=0.08587, ctc_loss=0.1598, over 19768.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3128, pruned_loss=0.08167, ctc_loss=0.1532, over 3856738.72 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 13:00:09,477 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=86858.66666666667, ans=0.0
+2024-08-25 13:00:23,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=86912.0, ans=0.05
+2024-08-25 13:00:45,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=87018.66666666667, ans=0.125
+2024-08-25 13:00:49,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=87018.66666666667, ans=0.125
+2024-08-25 13:01:52,649 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=87072.0, ans=0.0
+2024-08-25 13:01:57,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=87072.0, ans=0.125
+2024-08-25 13:01:59,607 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.935e+02 2.309e+02 3.009e+02 4.449e+02, threshold=4.618e+02, percent-clipped=0.0
+2024-08-25 13:02:03,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=87125.33333333333, ans=0.1
+2024-08-25 13:02:03,720 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.36 vs. limit=22.5
+2024-08-25 13:02:04,217 INFO [train.py:1114] (2/4) Epoch 7, batch 1400, loss[loss=0.2375, simple_loss=0.2827, pruned_loss=0.07037, ctc_loss=0.129, over 19666.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3127, pruned_loss=0.08155, ctc_loss=0.153, over 3863622.39 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-25 13:02:05,446 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-25 13:02:12,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=87125.33333333333, ans=0.125
+2024-08-25 13:02:13,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-25 13:02:14,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=87178.66666666667, ans=0.0
+2024-08-25 13:02:14,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=87178.66666666667, ans=0.5
+2024-08-25 13:02:19,183 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=87178.66666666667, ans=0.125
+2024-08-25 13:02:21,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=87178.66666666667, ans=0.125
+2024-08-25 13:02:32,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=87232.0, ans=0.0
+2024-08-25 13:02:37,168 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=87232.0, ans=0.125
+2024-08-25 13:02:57,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=87338.66666666667, ans=0.125
+2024-08-25 13:03:05,391 INFO [train.py:1114] (2/4) Epoch 7, batch 1450, loss[loss=0.2671, simple_loss=0.3136, pruned_loss=0.07905, ctc_loss=0.1562, over 19690.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.314, pruned_loss=0.08238, ctc_loss=0.1546, over 3861288.08 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:03:31,381 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=87498.66666666667, ans=0.0
+2024-08-25 13:03:33,988 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.89 vs. limit=15.0
+2024-08-25 13:03:34,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=87498.66666666667, ans=0.95
+2024-08-25 13:04:46,558 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.015e+02 2.285e+02 2.716e+02 4.465e+02, threshold=4.569e+02, percent-clipped=0.0
+2024-08-25 13:04:50,184 INFO [train.py:1114] (2/4) Epoch 7, batch 1500, loss[loss=0.2986, simple_loss=0.3332, pruned_loss=0.09486, ctc_loss=0.1859, over 19598.00 frames. ], tot_loss[loss=0.2699, simple_loss=0.3139, pruned_loss=0.08212, ctc_loss=0.1543, over 3861141.13 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:04:57,054 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.64 vs. limit=15.0
+2024-08-25 13:05:12,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=87712.0, ans=0.125
+2024-08-25 13:05:17,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=87712.0, ans=0.025
+2024-08-25 13:05:26,019 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.85 vs. limit=15.0
+2024-08-25 13:05:29,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=87765.33333333333, ans=0.025
+2024-08-25 13:05:57,430 INFO [train.py:1114] (2/4) Epoch 7, batch 1550, loss[loss=0.2392, simple_loss=0.2978, pruned_loss=0.06563, ctc_loss=0.1233, over 19605.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3139, pruned_loss=0.08217, ctc_loss=0.1546, over 3846994.56 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:06:00,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=87925.33333333333, ans=0.125
+2024-08-25 13:06:12,561 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.49 vs. limit=15.0
+2024-08-25 13:06:17,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=87978.66666666667, ans=0.125
+2024-08-25 13:06:22,716 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.20 vs. limit=22.5
+2024-08-25 13:06:45,524 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.59 vs. limit=15.0
+2024-08-25 13:06:46,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=88138.66666666667, ans=0.0
+2024-08-25 13:06:55,903 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.880e+02 2.225e+02 2.757e+02 4.141e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 13:07:00,957 INFO [train.py:1114] (2/4) Epoch 7, batch 1600, loss[loss=0.2382, simple_loss=0.3022, pruned_loss=0.06386, ctc_loss=0.1163, over 19855.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3131, pruned_loss=0.0817, ctc_loss=0.1537, over 3836156.02 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:07:01,401 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.97 vs. limit=15.0
+2024-08-25 13:07:10,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=88192.0, ans=0.125
+2024-08-25 13:07:10,973 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.16 vs. limit=15.0
+2024-08-25 13:07:35,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=88352.0, ans=0.0
+2024-08-25 13:07:44,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=88352.0, ans=0.2
+2024-08-25 13:07:47,682 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=88405.33333333333, ans=10.0
+2024-08-25 13:07:58,853 INFO [train.py:1114] (2/4) Epoch 7, batch 1650, loss[loss=0.2673, simple_loss=0.3142, pruned_loss=0.07986, ctc_loss=0.1518, over 19662.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3129, pruned_loss=0.08175, ctc_loss=0.1538, over 3832636.10 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:08:06,919 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.81 vs. limit=15.0
+2024-08-25 13:08:34,333 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.42 vs. limit=15.0
+2024-08-25 13:08:54,962 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.917e+02 2.131e+02 2.729e+02 4.248e+02, threshold=4.261e+02, percent-clipped=0.0
+2024-08-25 13:08:58,413 INFO [train.py:1114] (2/4) Epoch 7, batch 1700, loss[loss=0.247, simple_loss=0.2878, pruned_loss=0.07432, ctc_loss=0.1441, over 19706.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3124, pruned_loss=0.08139, ctc_loss=0.153, over 3847235.14 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:09:08,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=88725.33333333333, ans=0.0
+2024-08-25 13:09:14,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=88778.66666666667, ans=0.125
+2024-08-25 13:09:23,188 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.62 vs. limit=6.0
+2024-08-25 13:09:27,219 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=88832.0, ans=0.125
+2024-08-25 13:09:47,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=88938.66666666667, ans=0.125
+2024-08-25 13:09:55,059 INFO [train.py:1114] (2/4) Epoch 7, batch 1750, loss[loss=0.2247, simple_loss=0.2764, pruned_loss=0.06302, ctc_loss=0.1176, over 19661.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3116, pruned_loss=0.08084, ctc_loss=0.1522, over 3851361.92 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:17:19,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=89098.66666666667, ans=0.125
+2024-08-25 13:17:19,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=89098.66666666667, ans=0.125
+2024-08-25 13:17:24,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=89098.66666666667, ans=0.1
+2024-08-25 13:17:36,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=89152.0, ans=0.0
+2024-08-25 13:25:10,963 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=89205.33333333333, ans=0.1
+2024-08-25 13:29:44,220 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.972e+02 2.344e+02 2.828e+02 4.449e+02, threshold=4.688e+02, percent-clipped=1.0
+2024-08-25 13:29:47,702 INFO [train.py:1114] (2/4) Epoch 7, batch 1800, loss[loss=0.2591, simple_loss=0.317, pruned_loss=0.07361, ctc_loss=0.1348, over 19615.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3119, pruned_loss=0.08077, ctc_loss=0.152, over 3853077.02 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:36:55,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=89365.33333333333, ans=0.2
+2024-08-25 13:37:26,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.67 vs. limit=15.0
+2024-08-25 13:38:00,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=89365.33333333333, ans=0.125
+2024-08-25 13:39:06,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=89418.66666666667, ans=0.125
+2024-08-25 13:39:29,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=89472.0, ans=0.125
+2024-08-25 13:39:35,207 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.92 vs. limit=15.0
+2024-08-25 13:39:43,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=89472.0, ans=15.0
+2024-08-25 13:40:34,835 INFO [train.py:1114] (2/4) Epoch 7, batch 1850, loss[loss=0.2615, simple_loss=0.3223, pruned_loss=0.07346, ctc_loss=0.1342, over 19598.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3116, pruned_loss=0.08076, ctc_loss=0.1521, over 3856867.39 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:42:29,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=89578.66666666667, ans=0.125
+2024-08-25 13:42:30,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=89632.0, ans=0.125
+2024-08-25 13:43:18,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=89685.33333333333, ans=0.125
+2024-08-25 13:44:00,417 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=89738.66666666667, ans=0.0
+2024-08-25 13:44:01,300 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.852e+02 2.070e+02 2.397e+02 4.608e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-25 13:44:01,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=89738.66666666667, ans=0.0
+2024-08-25 13:44:05,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=89792.0, ans=0.2
+2024-08-25 13:44:07,979 INFO [train.py:1114] (2/4) Epoch 7, batch 1900, loss[loss=0.2524, simple_loss=0.315, pruned_loss=0.06741, ctc_loss=0.1373, over 19692.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3121, pruned_loss=0.08086, ctc_loss=0.1521, over 3861026.33 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:44:09,238 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=89792.0, ans=0.0
+2024-08-25 13:44:09,637 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.09 vs. limit=15.0
+2024-08-25 13:45:01,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=89898.66666666667, ans=0.2
+2024-08-25 13:45:21,764 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.14 vs. limit=15.0
+2024-08-25 13:45:33,583 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=90005.33333333333, ans=0.125
+2024-08-25 13:45:39,729 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=90058.66666666667, ans=0.0
+2024-08-25 13:45:41,191 INFO [train.py:1114] (2/4) Epoch 7, batch 1950, loss[loss=0.2692, simple_loss=0.3106, pruned_loss=0.08288, ctc_loss=0.1554, over 19590.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3134, pruned_loss=0.08138, ctc_loss=0.1532, over 3870001.50 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 16.0
+2024-08-25 13:45:54,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=90058.66666666667, ans=0.07
+2024-08-25 13:45:56,294 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=90112.0, ans=0.0
+2024-08-25 13:46:10,729 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=90165.33333333333, ans=0.0
+2024-08-25 13:46:29,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=90218.66666666667, ans=0.125
+2024-08-25 13:46:38,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=90272.0, ans=0.125
+2024-08-25 13:46:39,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=90272.0, ans=0.09899494936611666
+2024-08-25 13:46:41,970 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.66 vs. limit=15.0
+2024-08-25 13:46:42,775 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 1.896e+02 2.177e+02 2.703e+02 3.964e+02, threshold=4.354e+02, percent-clipped=0.0
+2024-08-25 13:46:45,048 INFO [train.py:1114] (2/4) Epoch 7, batch 2000, loss[loss=0.2505, simple_loss=0.2845, pruned_loss=0.07945, ctc_loss=0.1442, over 19660.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3137, pruned_loss=0.08171, ctc_loss=0.1534, over 3854536.84 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:46:52,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=90325.33333333333, ans=0.1
+2024-08-25 13:47:07,978 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.75 vs. limit=15.0
+2024-08-25 13:47:24,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=90485.33333333333, ans=0.025
+2024-08-25 13:47:26,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=90485.33333333333, ans=0.125
+2024-08-25 13:47:36,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=90538.66666666667, ans=0.125
+2024-08-25 13:47:41,009 INFO [train.py:1114] (2/4) Epoch 7, batch 2050, loss[loss=0.2319, simple_loss=0.273, pruned_loss=0.06969, ctc_loss=0.1284, over 19706.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3122, pruned_loss=0.08112, ctc_loss=0.1522, over 3850158.93 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:47:44,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=90592.0, ans=0.0
+2024-08-25 13:47:46,643 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:47:57,793 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=8.92 vs. limit=12.0
+2024-08-25 13:48:09,275 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=90698.66666666667, ans=0.0
+2024-08-25 13:48:16,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=90752.0, ans=0.125
+2024-08-25 13:48:27,007 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90805.33333333333, ans=0.1
+2024-08-25 13:48:35,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=90805.33333333333, ans=0.125
+2024-08-25 13:48:36,335 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.053e+02 2.413e+02 3.017e+02 5.203e+02, threshold=4.827e+02, percent-clipped=2.0
+2024-08-25 13:48:36,499 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=90805.33333333333, ans=0.0
+2024-08-25 13:48:38,600 INFO [train.py:1114] (2/4) Epoch 7, batch 2100, loss[loss=0.2708, simple_loss=0.3194, pruned_loss=0.08063, ctc_loss=0.1523, over 19772.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3117, pruned_loss=0.08091, ctc_loss=0.1517, over 3857497.22 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:49:00,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=90912.0, ans=0.125
+2024-08-25 13:49:01,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90912.0, ans=0.1
+2024-08-25 13:49:08,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90965.33333333333, ans=0.1
+2024-08-25 13:49:22,657 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-25 13:49:25,031 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=16.87 vs. limit=15.0
+2024-08-25 13:49:33,276 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=91072.0, ans=0.1
+2024-08-25 13:49:35,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=91072.0, ans=0.125
+2024-08-25 13:49:43,235 INFO [train.py:1114] (2/4) Epoch 7, batch 2150, loss[loss=0.2486, simple_loss=0.2953, pruned_loss=0.07305, ctc_loss=0.1393, over 19856.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.3107, pruned_loss=0.08043, ctc_loss=0.1508, over 3868550.33 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:49:43,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=91125.33333333333, ans=0.125
+2024-08-25 13:49:51,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=91125.33333333333, ans=0.0
+2024-08-25 13:50:04,604 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=91232.0, ans=0.0
+2024-08-25 13:50:05,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=91232.0, ans=0.125
+2024-08-25 13:50:07,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=91232.0, ans=0.0
+2024-08-25 13:50:21,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=91285.33333333333, ans=0.1
+2024-08-25 13:50:36,448 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 1.920e+02 2.200e+02 2.924e+02 5.090e+02, threshold=4.400e+02, percent-clipped=1.0
+2024-08-25 13:50:39,128 INFO [train.py:1114] (2/4) Epoch 7, batch 2200, loss[loss=0.2646, simple_loss=0.3177, pruned_loss=0.07534, ctc_loss=0.1522, over 19579.00 frames. ], tot_loss[loss=0.2658, simple_loss=0.3107, pruned_loss=0.08037, ctc_loss=0.1506, over 3867474.02 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-25 13:50:39,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=91392.0, ans=0.125
+2024-08-25 13:51:22,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=91552.0, ans=0.0
+2024-08-25 13:51:22,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=91552.0, ans=0.0
+2024-08-25 13:51:22,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=91552.0, ans=0.0
+2024-08-25 13:51:31,170 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.51 vs. limit=22.5
+2024-08-25 13:51:34,988 INFO [train.py:1114] (2/4) Epoch 7, batch 2250, loss[loss=0.2613, simple_loss=0.3115, pruned_loss=0.07694, ctc_loss=0.1434, over 19606.00 frames. ], tot_loss[loss=0.2661, simple_loss=0.3109, pruned_loss=0.08052, ctc_loss=0.1509, over 3867046.13 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:51:42,752 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.99 vs. limit=15.0
+2024-08-25 13:51:43,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=91658.66666666667, ans=0.0
+2024-08-25 13:51:43,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=91658.66666666667, ans=0.025
+2024-08-25 13:51:54,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=91712.0, ans=0.0
+2024-08-25 13:51:56,321 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.85 vs. limit=6.0
+2024-08-25 13:52:22,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=91872.0, ans=0.0
+2024-08-25 13:52:25,350 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=91872.0, ans=0.07
+2024-08-25 13:52:28,398 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.146e+02 2.677e+02 3.204e+02 4.930e+02, threshold=5.354e+02, percent-clipped=3.0
+2024-08-25 13:52:29,555 INFO [train.py:1114] (2/4) Epoch 7, batch 2300, loss[loss=0.2475, simple_loss=0.2935, pruned_loss=0.07346, ctc_loss=0.1364, over 19512.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3106, pruned_loss=0.08092, ctc_loss=0.1516, over 3860740.89 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:52:36,934 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.75 vs. limit=6.0
+2024-08-25 13:52:43,451 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.88 vs. limit=22.5
+2024-08-25 13:52:59,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=92032.0, ans=0.1
+2024-08-25 13:53:14,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=92138.66666666667, ans=0.1
+2024-08-25 13:53:24,736 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.55 vs. limit=22.5
+2024-08-25 13:53:25,163 INFO [train.py:1114] (2/4) Epoch 7, batch 2350, loss[loss=0.2983, simple_loss=0.3382, pruned_loss=0.09478, ctc_loss=0.1721, over 19682.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3111, pruned_loss=0.08151, ctc_loss=0.1528, over 3863305.38 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:53:25,335 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=92192.0, ans=0.0
+2024-08-25 13:53:28,789 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.05 vs. limit=15.0
+2024-08-25 13:53:30,141 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.45 vs. limit=15.0
+2024-08-25 13:53:45,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-25 13:53:48,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=92298.66666666667, ans=0.125
+2024-08-25 13:54:00,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=92352.0, ans=0.0
+2024-08-25 13:54:08,437 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=12.0
+2024-08-25 13:54:18,221 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.985e+02 2.336e+02 2.802e+02 4.974e+02, threshold=4.671e+02, percent-clipped=0.0
+2024-08-25 13:54:19,268 INFO [train.py:1114] (2/4) Epoch 7, batch 2400, loss[loss=0.2687, simple_loss=0.3214, pruned_loss=0.07856, ctc_loss=0.1472, over 19256.00 frames. ], tot_loss[loss=0.2697, simple_loss=0.3133, pruned_loss=0.08224, ctc_loss=0.1538, over 3857578.70 frames. ], batch size: 71, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:54:33,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=92512.0, ans=22.5
+2024-08-25 13:54:45,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=92565.33333333333, ans=0.125
+2024-08-25 13:55:47,138 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.30 vs. limit=10.0
+2024-08-25 13:55:50,097 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:56:09,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=92672.0, ans=0.0
+2024-08-25 13:56:11,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=92672.0, ans=0.125
+2024-08-25 13:56:13,555 INFO [train.py:1114] (2/4) Epoch 7, batch 2450, loss[loss=0.3821, simple_loss=0.3762, pruned_loss=0.1428, ctc_loss=0.2561, over 13041.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3181, pruned_loss=0.08607, ctc_loss=0.1616, over 3730281.85 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:56:53,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=92832.0, ans=0.0
+2024-08-25 13:57:54,288 INFO [train.py:1114] (2/4) Epoch 8, batch 0, loss[loss=0.2416, simple_loss=0.2875, pruned_loss=0.07149, ctc_loss=0.1321, over 19399.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2875, pruned_loss=0.07149, ctc_loss=0.1321, over 19399.00 frames. ], batch size: 48, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 13:57:54,289 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 13:59:56,298 INFO [train.py:1146] (2/4) Epoch 8, validation: loss=0.2171, simple_loss=0.2997, pruned_loss=0.04948, ctc_loss=0.08904, over 944034.00 frames.
+2024-08-25 13:59:56,299 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 13:59:56,850 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.81 vs. limit=15.0
+2024-08-25 14:01:03,639 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.158e+02 2.483e+02 2.902e+02 5.180e+02, threshold=4.965e+02, percent-clipped=2.0
+2024-08-25 14:01:11,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=92986.66666666667, ans=0.2
+2024-08-25 14:01:43,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=93040.0, ans=0.125
+2024-08-25 14:02:12,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=93146.66666666667, ans=0.09899494936611666
+2024-08-25 14:02:16,261 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.33 vs. limit=22.5
+2024-08-25 14:02:17,063 INFO [train.py:1114] (2/4) Epoch 8, batch 50, loss[loss=0.212, simple_loss=0.2734, pruned_loss=0.05481, ctc_loss=0.1024, over 19716.00 frames. ], tot_loss[loss=0.2697, simple_loss=0.3135, pruned_loss=0.08193, ctc_loss=0.1552, over 843278.58 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:02:20,963 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=93200.0, ans=0.125
+2024-08-25 14:02:24,583 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.67 vs. limit=12.0
+2024-08-25 14:02:30,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93253.33333333333, ans=0.1
+2024-08-25 14:02:54,616 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=93360.0, ans=0.125
+2024-08-25 14:02:59,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=93360.0, ans=0.125
+2024-08-25 14:05:00,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=93413.33333333333, ans=0.015
+2024-08-25 14:05:03,235 INFO [train.py:1114] (2/4) Epoch 8, batch 100, loss[loss=0.2577, simple_loss=0.3111, pruned_loss=0.07395, ctc_loss=0.1409, over 19700.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3157, pruned_loss=0.08214, ctc_loss=0.1556, over 1498192.96 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:05:14,933 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.910e+02 2.219e+02 2.660e+02 5.043e+02, threshold=4.439e+02, percent-clipped=1.0
+2024-08-25 14:05:29,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=93573.33333333333, ans=0.0
+2024-08-25 14:05:48,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=93626.66666666667, ans=0.025
+2024-08-25 14:07:11,334 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.07 vs. limit=15.0
+2024-08-25 14:07:16,377 INFO [train.py:1114] (2/4) Epoch 8, batch 150, loss[loss=0.248, simple_loss=0.2911, pruned_loss=0.07479, ctc_loss=0.1385, over 19729.00 frames. ], tot_loss[loss=0.2658, simple_loss=0.3118, pruned_loss=0.07986, ctc_loss=0.1503, over 2026296.86 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:08:13,096 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.54 vs. limit=8.0
+2024-08-25 14:08:14,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=93786.66666666667, ans=0.125
+2024-08-25 14:08:14,670 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=93786.66666666667, ans=0.1
+2024-08-25 14:09:23,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=93893.33333333333, ans=0.125
+2024-08-25 14:09:25,851 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=93893.33333333333, ans=0.0
+2024-08-25 14:10:16,285 INFO [train.py:1114] (2/4) Epoch 8, batch 200, loss[loss=0.3218, simple_loss=0.3483, pruned_loss=0.1085, ctc_loss=0.1962, over 18115.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3096, pruned_loss=0.07878, ctc_loss=0.1485, over 2433444.82 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:10:29,225 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.854e+02 2.093e+02 2.544e+02 5.078e+02, threshold=4.187e+02, percent-clipped=1.0
+2024-08-25 14:10:34,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=94053.33333333333, ans=0.0
+2024-08-25 14:10:46,351 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=94106.66666666667, ans=0.125
+2024-08-25 14:10:57,137 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.21 vs. limit=15.0
+2024-08-25 14:11:07,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=94213.33333333333, ans=0.0
+2024-08-25 14:11:17,834 INFO [train.py:1114] (2/4) Epoch 8, batch 250, loss[loss=0.2921, simple_loss=0.3294, pruned_loss=0.09414, ctc_loss=0.1664, over 19420.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3097, pruned_loss=0.07875, ctc_loss=0.1483, over 2753964.23 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:11:18,043 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=94266.66666666667, ans=0.125
+2024-08-25 14:12:17,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=94266.66666666667, ans=0.125
+2024-08-25 14:13:12,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=94480.0, ans=0.125
+2024-08-25 14:13:21,901 INFO [train.py:1114] (2/4) Epoch 8, batch 300, loss[loss=0.313, simple_loss=0.3414, pruned_loss=0.1031, ctc_loss=0.1958, over 19526.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.3101, pruned_loss=0.07928, ctc_loss=0.1492, over 2999223.72 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:13:33,345 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 1.987e+02 2.340e+02 3.022e+02 6.047e+02, threshold=4.681e+02, percent-clipped=9.0
+2024-08-25 14:14:19,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=94640.0, ans=0.125
+2024-08-25 14:14:21,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=94640.0, ans=0.0
+2024-08-25 14:14:27,895 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.90 vs. limit=15.0
+2024-08-25 14:14:28,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=94693.33333333333, ans=0.125
+2024-08-25 14:14:32,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=94693.33333333333, ans=0.2
+2024-08-25 14:14:41,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=94746.66666666667, ans=0.0
+2024-08-25 14:14:52,105 INFO [train.py:1114] (2/4) Epoch 8, batch 350, loss[loss=0.244, simple_loss=0.2835, pruned_loss=0.07255, ctc_loss=0.1484, over 19759.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3093, pruned_loss=0.07825, ctc_loss=0.1473, over 3189915.13 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:15:03,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-25 14:16:28,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=94960.0, ans=0.125
+2024-08-25 14:16:39,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=95013.33333333333, ans=0.125
+2024-08-25 14:16:50,822 INFO [train.py:1114] (2/4) Epoch 8, batch 400, loss[loss=0.2681, simple_loss=0.3147, pruned_loss=0.08002, ctc_loss=0.1537, over 19503.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3081, pruned_loss=0.07733, ctc_loss=0.1456, over 3341611.21 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:16:53,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=95066.66666666667, ans=0.0
+2024-08-25 14:16:58,919 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=95066.66666666667, ans=0.0
+2024-08-25 14:17:03,860 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.019e+02 2.528e+02 3.132e+02 5.852e+02, threshold=5.056e+02, percent-clipped=7.0
+2024-08-25 14:17:07,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=95120.0, ans=0.1
+2024-08-25 14:17:32,930 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=95226.66666666667, ans=0.2
+2024-08-25 14:18:38,406 INFO [train.py:1114] (2/4) Epoch 8, batch 450, loss[loss=0.2655, simple_loss=0.3104, pruned_loss=0.08006, ctc_loss=0.151, over 19616.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3084, pruned_loss=0.07789, ctc_loss=0.1466, over 3450604.61 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:18:43,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=95333.33333333333, ans=0.125
+2024-08-25 14:18:43,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95333.33333333333, ans=0.1
+2024-08-25 14:18:45,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=95333.33333333333, ans=0.125
+2024-08-25 14:18:52,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=95386.66666666667, ans=0.125
+2024-08-25 14:19:10,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=95440.0, ans=0.125
+2024-08-25 14:19:15,774 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.30 vs. limit=12.0
+2024-08-25 14:19:16,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=95493.33333333333, ans=0.125
+2024-08-25 14:19:19,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=95493.33333333333, ans=0.2
+2024-08-25 14:19:19,423 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=95493.33333333333, ans=0.125
+2024-08-25 14:19:25,927 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=95493.33333333333, ans=0.125
+2024-08-25 14:19:39,033 INFO [train.py:1114] (2/4) Epoch 8, batch 500, loss[loss=0.2805, simple_loss=0.3252, pruned_loss=0.08674, ctc_loss=0.1558, over 19651.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3079, pruned_loss=0.07772, ctc_loss=0.1463, over 3546424.92 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:19:52,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=95600.0, ans=0.125
+2024-08-25 14:21:42,068 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.925e+02 2.242e+02 2.655e+02 4.786e+02, threshold=4.483e+02, percent-clipped=0.0
+2024-08-25 14:21:44,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=95653.33333333333, ans=0.125
+2024-08-25 14:21:58,534 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=95706.66666666667, ans=0.125
+2024-08-25 14:22:36,071 INFO [train.py:1114] (2/4) Epoch 8, batch 550, loss[loss=0.279, simple_loss=0.3232, pruned_loss=0.08496, ctc_loss=0.1624, over 19348.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3084, pruned_loss=0.07841, ctc_loss=0.1477, over 3609011.82 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:22:41,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=95866.66666666667, ans=0.025
+2024-08-25 14:23:55,613 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.77 vs. limit=15.0
+2024-08-25 14:24:04,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=95920.0, ans=0.09899494936611666
+2024-08-25 14:25:24,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=96026.66666666667, ans=0.0
+2024-08-25 14:25:43,178 INFO [train.py:1114] (2/4) Epoch 8, batch 600, loss[loss=0.2854, simple_loss=0.3257, pruned_loss=0.089, ctc_loss=0.168, over 19422.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.3078, pruned_loss=0.07782, ctc_loss=0.1467, over 3667148.39 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:25:54,322 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 1.975e+02 2.461e+02 2.998e+02 6.685e+02, threshold=4.922e+02, percent-clipped=2.0
+2024-08-25 14:26:05,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=96186.66666666667, ans=0.0
+2024-08-25 14:26:31,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=96293.33333333333, ans=0.2
+2024-08-25 14:29:19,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=96346.66666666667, ans=0.2
+2024-08-25 14:29:20,690 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=96346.66666666667, ans=0.1
+2024-08-25 14:29:23,583 INFO [train.py:1114] (2/4) Epoch 8, batch 650, loss[loss=0.2806, simple_loss=0.3218, pruned_loss=0.08705, ctc_loss=0.1631, over 19771.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3071, pruned_loss=0.07736, ctc_loss=0.1461, over 3717737.71 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:29:35,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=96453.33333333333, ans=0.125
+2024-08-25 14:31:06,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=96613.33333333333, ans=0.2
+2024-08-25 14:31:24,402 INFO [train.py:1114] (2/4) Epoch 8, batch 700, loss[loss=0.2604, simple_loss=0.3039, pruned_loss=0.07856, ctc_loss=0.1496, over 19715.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3081, pruned_loss=0.07759, ctc_loss=0.1468, over 3749215.44 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:31:31,008 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.76 vs. limit=15.0
+2024-08-25 14:31:36,078 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 1.952e+02 2.228e+02 2.907e+02 4.140e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 14:31:36,322 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=96720.0, ans=0.125
+2024-08-25 14:31:52,710 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.26 vs. limit=22.5
+2024-08-25 14:31:57,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=96773.33333333333, ans=0.2
+2024-08-25 14:32:21,413 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.08 vs. limit=10.0
+2024-08-25 14:32:26,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=96880.0, ans=0.125
+2024-08-25 14:32:35,099 INFO [train.py:1114] (2/4) Epoch 8, batch 750, loss[loss=0.2894, simple_loss=0.3323, pruned_loss=0.08849, ctc_loss=0.1736, over 19518.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3078, pruned_loss=0.0773, ctc_loss=0.1462, over 3775297.67 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:32:38,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=96933.33333333333, ans=0.125
+2024-08-25 14:32:57,755 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.78 vs. limit=15.0
+2024-08-25 14:33:05,751 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=97040.0, ans=0.0
+2024-08-25 14:33:08,889 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=97040.0, ans=0.2
+2024-08-25 14:33:08,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=97040.0, ans=0.0
+2024-08-25 14:33:32,469 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=97093.33333333333, ans=0.0
+2024-08-25 14:33:41,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=97146.66666666667, ans=0.035
+2024-08-25 14:33:41,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=97146.66666666667, ans=0.125
+2024-08-25 14:33:44,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=97200.0, ans=0.125
+2024-08-25 14:33:45,429 INFO [train.py:1114] (2/4) Epoch 8, batch 800, loss[loss=0.211, simple_loss=0.2703, pruned_loss=0.05474, ctc_loss=0.1056, over 19412.00 frames. ], tot_loss[loss=0.2597, simple_loss=0.3072, pruned_loss=0.07699, ctc_loss=0.1457, over 3795967.75 frames. ], batch size: 48, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:34:35,078 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 1.855e+02 2.176e+02 2.933e+02 4.905e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-25 14:34:40,295 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:34:49,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=97306.66666666667, ans=0.1
+2024-08-25 14:35:01,201 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.58 vs. limit=15.0
+2024-08-25 14:35:09,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=97413.33333333333, ans=0.125
+2024-08-25 14:35:22,241 INFO [train.py:1114] (2/4) Epoch 8, batch 850, loss[loss=0.2815, simple_loss=0.3273, pruned_loss=0.08392, ctc_loss=0.1697, over 19658.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.307, pruned_loss=0.07704, ctc_loss=0.1453, over 3815049.30 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:35:38,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=97520.0, ans=0.0
+2024-08-25 14:35:54,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=97573.33333333333, ans=0.125
+2024-08-25 14:35:55,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=97626.66666666667, ans=0.125
+2024-08-25 14:35:58,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=97626.66666666667, ans=0.125
+2024-08-25 14:36:05,972 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=97626.66666666667, ans=0.0
+2024-08-25 14:36:10,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=97680.0, ans=0.125
+2024-08-25 14:36:12,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=97680.0, ans=0.1
+2024-08-25 14:36:15,454 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=97680.0, ans=0.0
+2024-08-25 14:36:19,810 INFO [train.py:1114] (2/4) Epoch 8, batch 900, loss[loss=0.2898, simple_loss=0.3149, pruned_loss=0.09547, ctc_loss=0.1843, over 19796.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3071, pruned_loss=0.07736, ctc_loss=0.1455, over 3819189.68 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:38:24,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=97733.33333333333, ans=0.015
+2024-08-25 14:38:30,482 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 1.935e+02 2.327e+02 2.780e+02 5.034e+02, threshold=4.654e+02, percent-clipped=2.0
+2024-08-25 14:39:05,999 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=97893.33333333333, ans=0.0
+2024-08-25 14:39:10,093 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=97946.66666666667, ans=0.1
+2024-08-25 14:39:56,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=97946.66666666667, ans=0.0
+2024-08-25 14:40:01,392 INFO [train.py:1114] (2/4) Epoch 8, batch 950, loss[loss=0.2269, simple_loss=0.2795, pruned_loss=0.06344, ctc_loss=0.1188, over 19477.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3082, pruned_loss=0.0783, ctc_loss=0.1473, over 3820856.06 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:40:08,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys.whitening_limit, batch_count=98000.0, ans=6.0
+2024-08-25 14:40:16,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=98053.33333333333, ans=0.125
+2024-08-25 14:41:22,650 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=98106.66666666667, ans=0.0
+2024-08-25 14:43:16,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=98213.33333333333, ans=0.125
+2024-08-25 14:43:18,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=98213.33333333333, ans=0.125
+2024-08-25 14:43:19,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=98213.33333333333, ans=0.125
+2024-08-25 14:43:23,181 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.13 vs. limit=6.0
+2024-08-25 14:43:26,534 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=98213.33333333333, ans=0.025
+2024-08-25 14:43:29,288 INFO [train.py:1114] (2/4) Epoch 8, batch 1000, loss[loss=0.2173, simple_loss=0.2772, pruned_loss=0.05722, ctc_loss=0.1073, over 19843.00 frames. ], tot_loss[loss=0.2629, simple_loss=0.3089, pruned_loss=0.07886, ctc_loss=0.1481, over 3817543.07 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:43:29,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=98266.66666666667, ans=0.0
+2024-08-25 14:43:31,160 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.53 vs. limit=10.0
+2024-08-25 14:43:37,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=98266.66666666667, ans=0.125
+2024-08-25 14:43:37,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=98266.66666666667, ans=0.07
+2024-08-25 14:43:47,365 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.014e+02 2.465e+02 3.304e+02 4.205e+02, threshold=4.930e+02, percent-clipped=0.0
+2024-08-25 14:43:47,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=98320.0, ans=0.025
+2024-08-25 14:46:09,305 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.84 vs. limit=6.0
+2024-08-25 14:46:19,454 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=98426.66666666667, ans=0.0
+2024-08-25 14:46:30,399 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=98480.0, ans=0.2
+2024-08-25 14:46:35,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=98480.0, ans=0.1
+2024-08-25 14:46:44,426 INFO [train.py:1114] (2/4) Epoch 8, batch 1050, loss[loss=0.2676, simple_loss=0.3102, pruned_loss=0.07958, ctc_loss=0.1645, over 19822.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3078, pruned_loss=0.07801, ctc_loss=0.1467, over 3824746.11 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:46:45,754 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=98533.33333333333, ans=0.1
+2024-08-25 14:46:59,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=98586.66666666667, ans=0.015
+2024-08-25 14:47:21,402 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=98693.33333333333, ans=0.125
+2024-08-25 14:47:32,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=98746.66666666667, ans=0.025
+2024-08-25 14:47:42,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=98746.66666666667, ans=0.125
+2024-08-25 14:47:44,604 INFO [train.py:1114] (2/4) Epoch 8, batch 1100, loss[loss=0.2536, simple_loss=0.3052, pruned_loss=0.07268, ctc_loss=0.1415, over 19583.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3075, pruned_loss=0.07771, ctc_loss=0.1465, over 3831219.00 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:47:53,438 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=98800.0, ans=0.125
+2024-08-25 14:47:58,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=98853.33333333333, ans=0.125
+2024-08-25 14:48:13,768 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.814e+02 2.071e+02 2.620e+02 3.682e+02, threshold=4.142e+02, percent-clipped=0.0
+2024-08-25 14:48:36,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=98853.33333333333, ans=0.125
+2024-08-25 14:48:37,023 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=98853.33333333333, ans=0.1
+2024-08-25 14:49:11,021 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.82 vs. limit=22.5
+2024-08-25 14:49:53,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=99013.33333333333, ans=0.125
+2024-08-25 14:50:00,857 INFO [train.py:1114] (2/4) Epoch 8, batch 1150, loss[loss=0.2466, simple_loss=0.2974, pruned_loss=0.07087, ctc_loss=0.1352, over 19609.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3074, pruned_loss=0.07764, ctc_loss=0.1462, over 3830468.15 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:51:03,375 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.82 vs. limit=15.0
+2024-08-25 14:51:03,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=99066.66666666667, ans=0.125
+2024-08-25 14:51:05,760 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.16 vs. limit=10.0
+2024-08-25 14:51:11,181 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.51 vs. limit=10.0
+2024-08-25 14:51:26,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=99173.33333333333, ans=0.0
+2024-08-25 14:52:33,137 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.11 vs. limit=10.0
+2024-08-25 14:52:35,450 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.65 vs. limit=8.0
+2024-08-25 14:52:48,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=99280.0, ans=0.0
+2024-08-25 14:52:50,328 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.67 vs. limit=10.0
+2024-08-25 14:52:50,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=99333.33333333333, ans=0.125
+2024-08-25 14:52:51,786 INFO [train.py:1114] (2/4) Epoch 8, batch 1200, loss[loss=0.2825, simple_loss=0.3282, pruned_loss=0.08476, ctc_loss=0.1683, over 19841.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3088, pruned_loss=0.07843, ctc_loss=0.1477, over 3825339.57 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:53:02,016 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.59 vs. limit=15.0
+2024-08-25 14:53:06,255 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.879e+02 2.149e+02 2.634e+02 4.011e+02, threshold=4.298e+02, percent-clipped=0.0
+2024-08-25 14:53:09,979 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=99386.66666666667, ans=0.0
+2024-08-25 14:53:25,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=99440.0, ans=0.125
+2024-08-25 14:53:29,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=99493.33333333333, ans=0.1
+2024-08-25 14:53:52,368 INFO [train.py:1114] (2/4) Epoch 8, batch 1250, loss[loss=0.299, simple_loss=0.334, pruned_loss=0.09791, ctc_loss=0.1703, over 19507.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3084, pruned_loss=0.07775, ctc_loss=0.1461, over 3843318.97 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:55:33,673 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.40 vs. limit=15.0
+2024-08-25 14:56:01,071 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=99813.33333333333, ans=0.1
+2024-08-25 14:56:05,545 INFO [train.py:1114] (2/4) Epoch 8, batch 1300, loss[loss=0.2956, simple_loss=0.3326, pruned_loss=0.094, ctc_loss=0.1768, over 18875.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.3077, pruned_loss=0.07735, ctc_loss=0.1454, over 3845959.95 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:56:17,010 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.809e+02 2.147e+02 2.747e+02 4.726e+02, threshold=4.293e+02, percent-clipped=4.0
+2024-08-25 14:56:20,945 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.98 vs. limit=22.5
+2024-08-25 14:56:29,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99973.33333333333, ans=0.1
+2024-08-25 14:57:48,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=99973.33333333333, ans=0.2
+2024-08-25 14:57:50,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=99973.33333333333, ans=0.0
+2024-08-25 14:57:51,338 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=99973.33333333333, ans=0.2
+2024-08-25 14:58:01,623 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100026.66666666667, ans=0.125
+2024-08-25 14:58:03,911 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=100026.66666666667, ans=0.09899494936611666
+2024-08-25 14:58:50,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=100080.0, ans=0.0
+2024-08-25 14:58:53,781 INFO [train.py:1114] (2/4) Epoch 8, batch 1350, loss[loss=0.2659, simple_loss=0.3065, pruned_loss=0.0825, ctc_loss=0.1508, over 19755.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3068, pruned_loss=0.07657, ctc_loss=0.1439, over 3857252.32 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:58:54,488 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.92 vs. limit=15.0
+2024-08-25 14:58:59,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=100133.33333333333, ans=0.0
+2024-08-25 14:59:22,841 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.55 vs. limit=12.0
+2024-08-25 14:59:22,899 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.69 vs. limit=6.0
+2024-08-25 14:59:27,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=100293.33333333333, ans=0.125
+2024-08-25 14:59:40,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=100346.66666666667, ans=10.0
+2024-08-25 14:59:47,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=100346.66666666667, ans=0.125
+2024-08-25 14:59:51,900 INFO [train.py:1114] (2/4) Epoch 8, batch 1400, loss[loss=0.2263, simple_loss=0.2771, pruned_loss=0.06346, ctc_loss=0.1213, over 19645.00 frames. ], tot_loss[loss=0.258, simple_loss=0.306, pruned_loss=0.07632, ctc_loss=0.1431, over 3864103.57 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:59:58,136 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.61 vs. limit=22.5
+2024-08-25 15:00:02,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=100453.33333333333, ans=0.2
+2024-08-25 15:00:03,305 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.018e+02 2.600e+02 3.300e+02 7.375e+02, threshold=5.199e+02, percent-clipped=11.0
+2024-08-25 15:00:04,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=100453.33333333333, ans=0.2
+2024-08-25 15:00:07,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=100453.33333333333, ans=0.0
+2024-08-25 15:00:13,035 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:00:22,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100506.66666666667, ans=0.1
+2024-08-25 15:00:56,740 INFO [train.py:1114] (2/4) Epoch 8, batch 1450, loss[loss=0.2476, simple_loss=0.3046, pruned_loss=0.06818, ctc_loss=0.1359, over 19681.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3071, pruned_loss=0.07683, ctc_loss=0.1439, over 3862316.08 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:01:02,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=100666.66666666667, ans=0.125
+2024-08-25 15:01:04,915 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:01:15,750 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.35 vs. limit=15.0
+2024-08-25 15:01:16,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=100720.0, ans=0.025
+2024-08-25 15:01:21,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=100773.33333333333, ans=0.04949747468305833
+2024-08-25 15:01:37,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=100826.66666666667, ans=0.125
+2024-08-25 15:03:17,729 INFO [train.py:1114] (2/4) Epoch 8, batch 1500, loss[loss=0.2435, simple_loss=0.3, pruned_loss=0.06795, ctc_loss=0.1274, over 19585.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3078, pruned_loss=0.0771, ctc_loss=0.1447, over 3861611.60 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:05:16,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=100933.33333333333, ans=0.125
+2024-08-25 15:05:24,434 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.972e+02 2.271e+02 2.845e+02 5.404e+02, threshold=4.542e+02, percent-clipped=1.0
+2024-08-25 15:07:40,584 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=100986.66666666667, ans=0.2
+2024-08-25 15:08:28,002 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.59 vs. limit=12.0
+2024-08-25 15:09:24,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=101093.33333333333, ans=0.025
+2024-08-25 15:10:05,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=101146.66666666667, ans=0.0
+2024-08-25 15:10:18,911 INFO [train.py:1114] (2/4) Epoch 8, batch 1550, loss[loss=0.2624, simple_loss=0.3141, pruned_loss=0.07691, ctc_loss=0.142, over 19589.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3076, pruned_loss=0.07716, ctc_loss=0.1451, over 3846035.29 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:10:51,271 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.55 vs. limit=10.0
+2024-08-25 15:12:29,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=101253.33333333333, ans=0.125
+2024-08-25 15:12:42,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=101306.66666666667, ans=0.0
+2024-08-25 15:13:04,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=101360.0, ans=0.125
+2024-08-25 15:13:08,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101360.0, ans=0.125
+2024-08-25 15:13:17,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=101360.0, ans=0.125
+2024-08-25 15:13:48,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=101413.33333333333, ans=0.0
+2024-08-25 15:13:57,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=101413.33333333333, ans=0.0
+2024-08-25 15:14:07,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101413.33333333333, ans=0.1
+2024-08-25 15:14:11,853 INFO [train.py:1114] (2/4) Epoch 8, batch 1600, loss[loss=0.2635, simple_loss=0.3213, pruned_loss=0.07349, ctc_loss=0.1468, over 19851.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3069, pruned_loss=0.07677, ctc_loss=0.1443, over 3835025.06 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:14:31,968 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.915e+02 2.222e+02 2.696e+02 4.640e+02, threshold=4.444e+02, percent-clipped=1.0
+2024-08-25 15:14:43,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=101520.0, ans=0.0
+2024-08-25 15:14:56,880 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=101573.33333333333, ans=0.0
+2024-08-25 15:15:14,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=101626.66666666667, ans=0.0
+2024-08-25 15:15:30,467 INFO [train.py:1114] (2/4) Epoch 8, batch 1650, loss[loss=0.2446, simple_loss=0.3035, pruned_loss=0.06712, ctc_loss=0.1285, over 19654.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3067, pruned_loss=0.07689, ctc_loss=0.1446, over 3830497.38 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:15:33,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=101733.33333333333, ans=0.125
+2024-08-25 15:15:35,835 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=101733.33333333333, ans=0.0
+2024-08-25 15:15:55,567 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.67 vs. limit=15.0
+2024-08-25 15:16:25,252 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:16:28,210 INFO [train.py:1114] (2/4) Epoch 8, batch 1700, loss[loss=0.2218, simple_loss=0.2671, pruned_loss=0.0649, ctc_loss=0.1166, over 19672.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.306, pruned_loss=0.07632, ctc_loss=0.1437, over 3845037.64 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:16:33,846 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.37 vs. limit=5.0
+2024-08-25 15:16:37,485 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102000.0, ans=0.125
+2024-08-25 15:16:40,733 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.920e+02 2.237e+02 2.711e+02 4.644e+02, threshold=4.474e+02, percent-clipped=2.0
+2024-08-25 15:16:46,189 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.16 vs. limit=6.0
+2024-08-25 15:16:48,088 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=102053.33333333333, ans=0.125
+2024-08-25 15:17:14,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=102160.0, ans=0.125
+2024-08-25 15:17:15,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=102160.0, ans=0.125
+2024-08-25 15:17:39,506 INFO [train.py:1114] (2/4) Epoch 8, batch 1750, loss[loss=0.2376, simple_loss=0.2814, pruned_loss=0.0692, ctc_loss=0.1385, over 19663.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3051, pruned_loss=0.07568, ctc_loss=0.1425, over 3849345.66 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:17:49,486 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.50 vs. limit=15.0
+2024-08-25 15:19:51,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=102373.33333333333, ans=0.0
+2024-08-25 15:20:03,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-08-25 15:20:04,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102426.66666666667, ans=0.125
+2024-08-25 15:20:19,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=102480.0, ans=0.0
+2024-08-25 15:20:25,840 INFO [train.py:1114] (2/4) Epoch 8, batch 1800, loss[loss=0.2742, simple_loss=0.3211, pruned_loss=0.08251, ctc_loss=0.1556, over 19624.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3051, pruned_loss=0.07557, ctc_loss=0.1422, over 3851056.13 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:20:33,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=102533.33333333333, ans=0.2
+2024-08-25 15:20:37,816 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 1.874e+02 2.230e+02 2.859e+02 4.439e+02, threshold=4.460e+02, percent-clipped=0.0
+2024-08-25 15:23:39,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=102640.0, ans=0.0
+2024-08-25 15:24:47,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102640.0, ans=0.125
+2024-08-25 15:24:54,209 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=102693.33333333333, ans=0.125
+2024-08-25 15:26:49,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102693.33333333333, ans=0.1
+2024-08-25 15:26:52,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=102746.66666666667, ans=0.0
+2024-08-25 15:26:54,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_na.min_abs, batch_count=102746.66666666667, ans=0.02
+2024-08-25 15:28:59,121 INFO [train.py:1114] (2/4) Epoch 8, batch 1850, loss[loss=0.251, simple_loss=0.3114, pruned_loss=0.07005, ctc_loss=0.1264, over 19594.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3049, pruned_loss=0.07561, ctc_loss=0.1424, over 3854441.68 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:29:06,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=102800.0, ans=0.0
+2024-08-25 15:29:17,936 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.90 vs. limit=15.0
+2024-08-25 15:30:28,776 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.84 vs. limit=15.0
+2024-08-25 15:32:38,690 INFO [train.py:1114] (2/4) Epoch 8, batch 1900, loss[loss=0.2593, simple_loss=0.3177, pruned_loss=0.07187, ctc_loss=0.143, over 19626.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3065, pruned_loss=0.07672, ctc_loss=0.1441, over 3859222.94 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:32:52,960 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.872e+02 2.139e+02 2.618e+02 5.849e+02, threshold=4.279e+02, percent-clipped=4.0
+2024-08-25 15:32:59,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=103120.0, ans=0.0
+2024-08-25 15:33:07,708 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=103173.33333333333, ans=0.2
+2024-08-25 15:33:07,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=103173.33333333333, ans=0.0
+2024-08-25 15:33:08,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=103173.33333333333, ans=0.0
+2024-08-25 15:33:15,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=103226.66666666667, ans=0.125
+2024-08-25 15:33:21,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=103226.66666666667, ans=0.0
+2024-08-25 15:33:37,637 INFO [train.py:1114] (2/4) Epoch 8, batch 1950, loss[loss=0.2459, simple_loss=0.3, pruned_loss=0.07002, ctc_loss=0.1293, over 19608.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3073, pruned_loss=0.07641, ctc_loss=0.1433, over 3868747.99 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:33:55,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103333.33333333333, ans=0.125
+2024-08-25 15:33:58,574 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.72 vs. limit=15.0
+2024-08-25 15:34:06,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=103386.66666666667, ans=0.0
+2024-08-25 15:34:17,814 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.32 vs. limit=10.0
+2024-08-25 15:34:42,899 INFO [train.py:1114] (2/4) Epoch 8, batch 2000, loss[loss=0.236, simple_loss=0.2726, pruned_loss=0.0731, ctc_loss=0.133, over 19671.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3073, pruned_loss=0.07645, ctc_loss=0.1432, over 3852877.11 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 32.0
+2024-08-25 15:34:47,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=103600.0, ans=0.0
+2024-08-25 15:34:55,652 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 2.022e+02 2.450e+02 4.734e+02, threshold=4.043e+02, percent-clipped=1.0
+2024-08-25 15:35:05,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=103706.66666666667, ans=0.0
+2024-08-25 15:35:11,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=103706.66666666667, ans=0.125
+2024-08-25 15:35:37,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103866.66666666667, ans=0.1
+2024-08-25 15:35:38,622 INFO [train.py:1114] (2/4) Epoch 8, batch 2050, loss[loss=0.2506, simple_loss=0.295, pruned_loss=0.07426, ctc_loss=0.1442, over 19713.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3065, pruned_loss=0.07635, ctc_loss=0.1432, over 3850629.72 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:35:42,858 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=103866.66666666667, ans=0.07
+2024-08-25 15:35:47,134 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103866.66666666667, ans=0.125
+2024-08-25 15:36:14,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=104026.66666666667, ans=0.0
+2024-08-25 15:36:23,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=104080.0, ans=0.125
+2024-08-25 15:36:32,764 INFO [train.py:1114] (2/4) Epoch 8, batch 2100, loss[loss=0.2686, simple_loss=0.315, pruned_loss=0.08059, ctc_loss=0.1525, over 19765.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.305, pruned_loss=0.07509, ctc_loss=0.141, over 3858009.65 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:36:44,887 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.055e+02 2.348e+02 2.987e+02 4.948e+02, threshold=4.695e+02, percent-clipped=5.0
+2024-08-25 15:37:27,205 INFO [train.py:1114] (2/4) Epoch 8, batch 2150, loss[loss=0.2297, simple_loss=0.2911, pruned_loss=0.0601, ctc_loss=0.1202, over 19862.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3034, pruned_loss=0.07414, ctc_loss=0.1393, over 3868816.83 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:37:50,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=104506.66666666667, ans=0.125
+2024-08-25 15:37:58,326 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.19 vs. limit=6.0
+2024-08-25 15:38:02,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=104560.0, ans=0.125
+2024-08-25 15:38:06,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=104560.0, ans=0.2
+2024-08-25 15:38:10,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=104613.33333333333, ans=0.125
+2024-08-25 15:38:14,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=104613.33333333333, ans=0.0
+2024-08-25 15:38:15,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=104613.33333333333, ans=0.125
+2024-08-25 15:38:23,247 INFO [train.py:1114] (2/4) Epoch 8, batch 2200, loss[loss=0.2725, simple_loss=0.3115, pruned_loss=0.08387, ctc_loss=0.1645, over 19589.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3047, pruned_loss=0.07505, ctc_loss=0.1409, over 3866506.60 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:38:25,995 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.76 vs. limit=6.0
+2024-08-25 15:38:31,426 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.13 vs. limit=15.0
+2024-08-25 15:38:32,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=104666.66666666667, ans=0.0
+2024-08-25 15:38:34,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=104720.0, ans=0.125
+2024-08-25 15:38:35,670 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.961e+02 2.280e+02 3.038e+02 5.675e+02, threshold=4.560e+02, percent-clipped=2.0
+2024-08-25 15:38:41,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=104720.0, ans=0.125
+2024-08-25 15:38:54,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=104773.33333333333, ans=6.0
+2024-08-25 15:38:55,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104826.66666666667, ans=0.0
+2024-08-25 15:39:01,227 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=104826.66666666667, ans=0.07
+2024-08-25 15:39:09,504 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.88 vs. limit=22.5
+2024-08-25 15:39:12,586 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=104880.0, ans=0.0
+2024-08-25 15:39:12,926 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.53 vs. limit=15.0
+2024-08-25 15:39:19,052 INFO [train.py:1114] (2/4) Epoch 8, batch 2250, loss[loss=0.2501, simple_loss=0.3052, pruned_loss=0.07015, ctc_loss=0.1369, over 19607.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3046, pruned_loss=0.07473, ctc_loss=0.1403, over 3866394.67 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:39:26,059 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=104933.33333333333, ans=0.0
+2024-08-25 15:39:39,442 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.27 vs. limit=12.0
+2024-08-25 15:39:46,990 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.41 vs. limit=12.0
+2024-08-25 15:39:54,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=105093.33333333333, ans=0.2
+2024-08-25 15:40:14,510 INFO [train.py:1114] (2/4) Epoch 8, batch 2300, loss[loss=0.2248, simple_loss=0.2805, pruned_loss=0.06179, ctc_loss=0.114, over 19512.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3039, pruned_loss=0.07497, ctc_loss=0.1408, over 3859835.77 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:40:28,018 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.907e+02 2.167e+02 2.593e+02 4.976e+02, threshold=4.335e+02, percent-clipped=1.0
+2024-08-25 15:40:33,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.93 vs. limit=15.0
+2024-08-25 15:40:50,973 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.38 vs. limit=15.0
+2024-08-25 15:41:02,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=105413.33333333333, ans=0.04949747468305833
+2024-08-25 15:41:11,095 INFO [train.py:1114] (2/4) Epoch 8, batch 2350, loss[loss=0.2856, simple_loss=0.3331, pruned_loss=0.08655, ctc_loss=0.1626, over 19671.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3039, pruned_loss=0.07496, ctc_loss=0.1406, over 3862756.37 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:41:11,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=105466.66666666667, ans=0.125
+2024-08-25 15:41:31,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-25 15:41:34,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-25 15:41:37,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=105573.33333333333, ans=0.0
+2024-08-25 15:41:41,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=105573.33333333333, ans=15.0
+2024-08-25 15:41:41,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=105573.33333333333, ans=0.2
+2024-08-25 15:41:58,587 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.76 vs. limit=15.0
+2024-08-25 15:42:06,022 INFO [train.py:1114] (2/4) Epoch 8, batch 2400, loss[loss=0.2595, simple_loss=0.312, pruned_loss=0.07531, ctc_loss=0.141, over 19234.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3059, pruned_loss=0.07586, ctc_loss=0.1422, over 3857949.06 frames. ], batch size: 71, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:42:18,065 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.983e+02 2.255e+02 2.870e+02 5.067e+02, threshold=4.510e+02, percent-clipped=2.0
+2024-08-25 15:42:22,935 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.23 vs. limit=22.5
+2024-08-25 15:42:27,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105840.0, ans=0.1
+2024-08-25 15:42:33,651 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=105840.0, ans=0.125
+2024-08-25 15:42:35,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105840.0, ans=0.125
+2024-08-25 15:42:44,196 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.64 vs. limit=15.0
+2024-08-25 15:42:51,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=105946.66666666667, ans=0.125
+2024-08-25 15:42:52,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=105946.66666666667, ans=0.125
+2024-08-25 15:43:01,715 INFO [train.py:1114] (2/4) Epoch 8, batch 2450, loss[loss=0.3284, simple_loss=0.3451, pruned_loss=0.1121, ctc_loss=0.2186, over 13439.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3109, pruned_loss=0.07977, ctc_loss=0.1499, over 3729396.28 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:43:05,672 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.46 vs. limit=22.5
+2024-08-25 15:43:19,567 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.13 vs. limit=22.5
+2024-08-25 15:44:31,271 INFO [train.py:1114] (2/4) Epoch 9, batch 0, loss[loss=0.2534, simple_loss=0.2945, pruned_loss=0.07741, ctc_loss=0.1439, over 19818.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.2945, pruned_loss=0.07741, ctc_loss=0.1439, over 19818.00 frames. ], batch size: 49, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:44:31,272 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 15:44:49,827 INFO [train.py:1146] (2/4) Epoch 9, validation: loss=0.21, simple_loss=0.2947, pruned_loss=0.04621, ctc_loss=0.08206, over 944034.00 frames.
+2024-08-25 15:44:49,828 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 15:44:53,365 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=106208.0, ans=0.0
+2024-08-25 15:44:58,022 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.28 vs. limit=22.5
+2024-08-25 15:45:13,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=106314.66666666667, ans=0.125
+2024-08-25 15:45:15,530 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.154e+02 2.510e+02 2.953e+02 5.707e+02, threshold=5.019e+02, percent-clipped=2.0
+2024-08-25 15:46:12,173 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=106368.0, ans=0.1
+2024-08-25 15:46:13,563 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.91 vs. limit=15.0
+2024-08-25 15:46:22,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106368.0, ans=0.125
+2024-08-25 15:46:23,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=106421.33333333333, ans=0.125
+2024-08-25 15:46:28,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=106421.33333333333, ans=0.04949747468305833
+2024-08-25 15:46:30,472 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.01 vs. limit=6.0
+2024-08-25 15:46:34,987 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.79 vs. limit=15.0
+2024-08-25 15:46:36,880 INFO [train.py:1114] (2/4) Epoch 9, batch 50, loss[loss=0.223, simple_loss=0.2753, pruned_loss=0.06137, ctc_loss=0.1198, over 19720.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3098, pruned_loss=0.07727, ctc_loss=0.1458, over 844815.75 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:47:04,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=106581.33333333333, ans=0.0
+2024-08-25 15:47:08,274 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=106581.33333333333, ans=0.2
+2024-08-25 15:47:28,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106634.66666666667, ans=0.1
+2024-08-25 15:47:44,291 INFO [train.py:1114] (2/4) Epoch 9, batch 100, loss[loss=0.2921, simple_loss=0.3272, pruned_loss=0.09505, ctc_loss=0.1672, over 19704.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3103, pruned_loss=0.07721, ctc_loss=0.1452, over 1499610.03 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:48:09,488 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.842e+02 2.163e+02 2.785e+02 4.838e+02, threshold=4.326e+02, percent-clipped=0.0
+2024-08-25 15:48:12,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=106848.0, ans=0.125
+2024-08-25 15:48:29,880 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.28 vs. limit=10.0
+2024-08-25 15:48:42,140 INFO [train.py:1114] (2/4) Epoch 9, batch 150, loss[loss=0.2221, simple_loss=0.2782, pruned_loss=0.06094, ctc_loss=0.1104, over 19714.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3066, pruned_loss=0.0753, ctc_loss=0.1416, over 2027682.09 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:48:42,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=107008.0, ans=0.125
+2024-08-25 15:49:05,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107114.66666666667, ans=0.1
+2024-08-25 15:49:29,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=107221.33333333333, ans=0.125
+2024-08-25 15:49:41,043 INFO [train.py:1114] (2/4) Epoch 9, batch 200, loss[loss=0.2629, simple_loss=0.3166, pruned_loss=0.07516, ctc_loss=0.1472, over 18323.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3036, pruned_loss=0.07379, ctc_loss=0.139, over 2435014.65 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:49:57,068 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=107328.0, ans=0.125
+2024-08-25 15:50:06,167 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.799e+02 2.039e+02 2.617e+02 5.282e+02, threshold=4.078e+02, percent-clipped=1.0
+2024-08-25 15:50:45,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=107381.33333333333, ans=0.0
+2024-08-25 15:50:51,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=107434.66666666667, ans=0.125
+2024-08-25 15:51:17,129 INFO [train.py:1114] (2/4) Epoch 9, batch 250, loss[loss=0.2604, simple_loss=0.3123, pruned_loss=0.07452, ctc_loss=0.1486, over 19373.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3032, pruned_loss=0.07358, ctc_loss=0.1387, over 2755550.65 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:51:17,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=107541.33333333333, ans=0.015
+2024-08-25 15:51:18,437 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-08-25 15:51:54,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=107701.33333333333, ans=0.0
+2024-08-25 15:51:56,710 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=107701.33333333333, ans=0.0
+2024-08-25 15:52:18,792 INFO [train.py:1114] (2/4) Epoch 9, batch 300, loss[loss=0.3005, simple_loss=0.3343, pruned_loss=0.09742, ctc_loss=0.1796, over 19520.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3022, pruned_loss=0.07296, ctc_loss=0.1377, over 2999755.96 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:52:27,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=107808.0, ans=0.0
+2024-08-25 15:52:30,550 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=107861.33333333333, ans=0.025
+2024-08-25 15:52:35,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=107861.33333333333, ans=0.2
+2024-08-25 15:52:37,633 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=107861.33333333333, ans=0.2
+2024-08-25 15:52:47,059 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 1.831e+02 2.248e+02 2.885e+02 5.251e+02, threshold=4.495e+02, percent-clipped=2.0
+2024-08-25 15:52:55,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-25 15:53:18,372 INFO [train.py:1114] (2/4) Epoch 9, batch 350, loss[loss=0.2259, simple_loss=0.2769, pruned_loss=0.06326, ctc_loss=0.1208, over 19763.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3031, pruned_loss=0.07356, ctc_loss=0.1384, over 3188993.13 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:53:37,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=108128.0, ans=0.1
+2024-08-25 15:53:40,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108181.33333333333, ans=0.125
+2024-08-25 15:53:48,922 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.83 vs. limit=22.5
+2024-08-25 15:53:53,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108234.66666666667, ans=0.125
+2024-08-25 15:54:11,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=108288.0, ans=0.09899494936611666
+2024-08-25 15:54:13,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=108288.0, ans=0.0
+2024-08-25 15:54:14,928 INFO [train.py:1114] (2/4) Epoch 9, batch 400, loss[loss=0.2292, simple_loss=0.294, pruned_loss=0.05889, ctc_loss=0.1165, over 19495.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.302, pruned_loss=0.07294, ctc_loss=0.1373, over 3340930.96 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:54:18,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108341.33333333333, ans=0.0
+2024-08-25 15:54:25,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=108394.66666666667, ans=0.025
+2024-08-25 15:54:43,459 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.039e+02 2.514e+02 3.062e+02 4.428e+02, threshold=5.028e+02, percent-clipped=0.0
+2024-08-25 15:55:08,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108554.66666666667, ans=0.0
+2024-08-25 15:55:11,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=108554.66666666667, ans=0.125
+2024-08-25 15:55:17,444 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=108608.0, ans=0.125
+2024-08-25 15:55:18,543 INFO [train.py:1114] (2/4) Epoch 9, batch 450, loss[loss=0.2674, simple_loss=0.3222, pruned_loss=0.07734, ctc_loss=0.1449, over 19606.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3028, pruned_loss=0.07345, ctc_loss=0.138, over 3448148.97 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:55:38,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=108661.33333333333, ans=0.125
+2024-08-25 15:55:43,677 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=108714.66666666667, ans=0.07
+2024-08-25 15:55:50,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=108714.66666666667, ans=0.125
+2024-08-25 15:57:29,909 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.94 vs. limit=15.0
+2024-08-25 15:58:59,104 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=108821.33333333333, ans=0.125
+2024-08-25 15:59:00,643 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.88 vs. limit=12.0
+2024-08-25 15:59:07,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=108821.33333333333, ans=0.1
+2024-08-25 15:59:11,169 INFO [train.py:1114] (2/4) Epoch 9, batch 500, loss[loss=0.2814, simple_loss=0.3226, pruned_loss=0.08672, ctc_loss=0.1671, over 19674.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3017, pruned_loss=0.07307, ctc_loss=0.1373, over 3544506.49 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:59:21,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=108928.0, ans=0.125
+2024-08-25 15:59:37,508 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.839e+02 2.298e+02 3.023e+02 4.931e+02, threshold=4.596e+02, percent-clipped=0.0
+2024-08-25 15:59:42,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=108981.33333333333, ans=0.2
+2024-08-25 15:59:57,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=109088.0, ans=0.0
+2024-08-25 16:00:00,886 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=109088.0, ans=0.125
+2024-08-25 16:00:08,617 INFO [train.py:1114] (2/4) Epoch 9, batch 550, loss[loss=0.2741, simple_loss=0.3262, pruned_loss=0.08143, ctc_loss=0.1477, over 19227.00 frames. ], tot_loss[loss=0.252, simple_loss=0.302, pruned_loss=0.07337, ctc_loss=0.1379, over 3606222.95 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:00:19,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=109194.66666666667, ans=0.0
+2024-08-25 16:00:25,743 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.21 vs. limit=15.0
+2024-08-25 16:00:44,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109301.33333333333, ans=0.1
+2024-08-25 16:01:12,456 INFO [train.py:1114] (2/4) Epoch 9, batch 600, loss[loss=0.2608, simple_loss=0.3108, pruned_loss=0.07711, ctc_loss=0.1414, over 19389.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3022, pruned_loss=0.07321, ctc_loss=0.1377, over 3663539.65 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:01:31,239 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=109408.0, ans=0.035
+2024-08-25 16:01:43,321 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.63 vs. limit=15.0
+2024-08-25 16:01:51,491 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.960e+02 2.208e+02 2.721e+02 5.490e+02, threshold=4.416e+02, percent-clipped=2.0
+2024-08-25 16:02:21,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=109514.66666666667, ans=0.125
+2024-08-25 16:02:29,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=109568.0, ans=0.125
+2024-08-25 16:02:37,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109621.33333333333, ans=0.1
+2024-08-25 16:02:47,615 INFO [train.py:1114] (2/4) Epoch 9, batch 650, loss[loss=0.2573, simple_loss=0.3043, pruned_loss=0.07646, ctc_loss=0.1435, over 19766.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3013, pruned_loss=0.07282, ctc_loss=0.137, over 3713816.89 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:02:49,020 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=109674.66666666667, ans=0.125
+2024-08-25 16:03:07,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=109728.0, ans=0.125
+2024-08-25 16:03:15,538 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.91 vs. limit=15.0
+2024-08-25 16:03:36,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109888.0, ans=0.1
+2024-08-25 16:03:44,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=109888.0, ans=0.125
+2024-08-25 16:03:47,855 INFO [train.py:1114] (2/4) Epoch 9, batch 700, loss[loss=0.2273, simple_loss=0.281, pruned_loss=0.06275, ctc_loss=0.1202, over 19724.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3013, pruned_loss=0.07264, ctc_loss=0.1366, over 3746667.85 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:03:53,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=109941.33333333333, ans=0.125
+2024-08-25 16:04:00,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=109994.66666666667, ans=0.2
+2024-08-25 16:04:09,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=109994.66666666667, ans=0.2
+2024-08-25 16:04:14,384 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 1.949e+02 2.382e+02 2.859e+02 4.618e+02, threshold=4.764e+02, percent-clipped=1.0
+2024-08-25 16:04:44,759 INFO [train.py:1114] (2/4) Epoch 9, batch 750, loss[loss=0.2402, simple_loss=0.3029, pruned_loss=0.06417, ctc_loss=0.1227, over 19493.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3017, pruned_loss=0.07333, ctc_loss=0.1378, over 3772977.45 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:05:00,715 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=110261.33333333333, ans=0.0
+2024-08-25 16:05:21,252 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=110314.66666666667, ans=0.015
+2024-08-25 16:05:29,005 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.21 vs. limit=22.5
+2024-08-25 16:05:31,175 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.71 vs. limit=6.0
+2024-08-25 16:05:31,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=110368.0, ans=0.125
+2024-08-25 16:05:32,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=110368.0, ans=0.0
+2024-08-25 16:05:37,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110421.33333333333, ans=0.1
+2024-08-25 16:05:43,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=110421.33333333333, ans=0.0
+2024-08-25 16:05:48,074 INFO [train.py:1114] (2/4) Epoch 9, batch 800, loss[loss=0.2233, simple_loss=0.2749, pruned_loss=0.06195, ctc_loss=0.1193, over 19792.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3017, pruned_loss=0.07338, ctc_loss=0.1382, over 3794489.35 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:06:03,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=110528.0, ans=0.125
+2024-08-25 16:06:14,068 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=110581.33333333333, ans=0.05
+2024-08-25 16:06:14,969 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.861e+02 2.104e+02 2.558e+02 4.618e+02, threshold=4.207e+02, percent-clipped=0.0
+2024-08-25 16:06:30,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=110634.66666666667, ans=0.0
+2024-08-25 16:06:47,178 INFO [train.py:1114] (2/4) Epoch 9, batch 850, loss[loss=0.2422, simple_loss=0.3048, pruned_loss=0.06552, ctc_loss=0.121, over 19666.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.301, pruned_loss=0.07278, ctc_loss=0.137, over 3814034.66 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:07:03,860 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.59 vs. limit=22.5
+2024-08-25 16:07:10,450 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110848.0, ans=0.0
+2024-08-25 16:07:23,322 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=110901.33333333333, ans=0.125
+2024-08-25 16:07:29,873 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.14 vs. limit=15.0
+2024-08-25 16:08:42,723 INFO [train.py:1114] (2/4) Epoch 9, batch 900, loss[loss=0.2277, simple_loss=0.2789, pruned_loss=0.06485, ctc_loss=0.1169, over 19414.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3015, pruned_loss=0.07305, ctc_loss=0.1374, over 3818465.82 frames. ], batch size: 48, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:09:03,915 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=15.0
+2024-08-25 16:09:12,342 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.982e+02 2.328e+02 2.784e+02 5.806e+02, threshold=4.657e+02, percent-clipped=1.0
+2024-08-25 16:09:18,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=111114.66666666667, ans=0.2
+2024-08-25 16:09:41,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=111221.33333333333, ans=0.125
+2024-08-25 16:09:43,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=111221.33333333333, ans=0.2
+2024-08-25 16:09:47,298 INFO [train.py:1114] (2/4) Epoch 9, batch 950, loss[loss=0.2195, simple_loss=0.2727, pruned_loss=0.06085, ctc_loss=0.1113, over 19493.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3012, pruned_loss=0.07287, ctc_loss=0.1369, over 3821199.31 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:09:49,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=111274.66666666667, ans=0.125
+2024-08-25 16:09:55,066 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
+2024-08-25 16:09:59,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=111328.0, ans=0.0
+2024-08-25 16:10:01,455 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=111328.0, ans=0.1
+2024-08-25 16:10:05,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=111328.0, ans=0.025
+2024-08-25 16:10:15,558 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=111381.33333333333, ans=0.125
+2024-08-25 16:10:22,350 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=111434.66666666667, ans=0.0
+2024-08-25 16:10:23,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=111434.66666666667, ans=0.125
+2024-08-25 16:10:37,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=111488.0, ans=0.0
+2024-08-25 16:10:45,141 INFO [train.py:1114] (2/4) Epoch 9, batch 1000, loss[loss=0.2633, simple_loss=0.3115, pruned_loss=0.07805, ctc_loss=0.1476, over 19842.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3027, pruned_loss=0.07386, ctc_loss=0.1386, over 3816943.94 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:10:53,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=111541.33333333333, ans=0.0
+2024-08-25 16:11:05,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=111594.66666666667, ans=0.125
+2024-08-25 16:11:11,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=111648.0, ans=0.125
+2024-08-25 16:11:11,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=111648.0, ans=0.0
+2024-08-25 16:11:13,886 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.864e+02 2.156e+02 2.793e+02 4.751e+02, threshold=4.311e+02, percent-clipped=1.0
+2024-08-25 16:11:20,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=111648.0, ans=0.125
+2024-08-25 16:11:23,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=111701.33333333333, ans=0.0
+2024-08-25 16:11:45,641 INFO [train.py:1114] (2/4) Epoch 9, batch 1050, loss[loss=0.2406, simple_loss=0.2984, pruned_loss=0.06517, ctc_loss=0.1314, over 19836.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3019, pruned_loss=0.07375, ctc_loss=0.1386, over 3823969.67 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:12:04,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=111861.33333333333, ans=0.0
+2024-08-25 16:12:14,811 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=111914.66666666667, ans=0.125
+2024-08-25 16:12:17,016 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:12:22,929 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=111914.66666666667, ans=0.125
+2024-08-25 16:12:36,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=112021.33333333333, ans=0.0
+2024-08-25 16:12:37,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=112021.33333333333, ans=0.125
+2024-08-25 16:12:51,813 INFO [train.py:1114] (2/4) Epoch 9, batch 1100, loss[loss=0.2358, simple_loss=0.2978, pruned_loss=0.06334, ctc_loss=0.1181, over 19581.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3011, pruned_loss=0.07284, ctc_loss=0.137, over 3831415.43 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:13:03,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=112128.0, ans=0.125
+2024-08-25 16:13:10,471 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.76 vs. limit=6.0
+2024-08-25 16:13:19,826 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 1.820e+02 2.090e+02 2.645e+02 4.523e+02, threshold=4.179e+02, percent-clipped=2.0
+2024-08-25 16:13:35,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=112234.66666666667, ans=0.125
+2024-08-25 16:13:37,951 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.63 vs. limit=15.0
+2024-08-25 16:13:45,647 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.09 vs. limit=15.0
+2024-08-25 16:13:47,584 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=112288.0, ans=0.125
+2024-08-25 16:13:50,952 INFO [train.py:1114] (2/4) Epoch 9, batch 1150, loss[loss=0.2767, simple_loss=0.3152, pruned_loss=0.08728, ctc_loss=0.159, over 19579.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3012, pruned_loss=0.07299, ctc_loss=0.1373, over 3830568.01 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:14:10,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112394.66666666667, ans=0.125
+2024-08-25 16:14:40,361 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112554.66666666667, ans=0.1
+2024-08-25 16:14:44,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=112554.66666666667, ans=0.0
+2024-08-25 16:14:51,130 INFO [train.py:1114] (2/4) Epoch 9, batch 1200, loss[loss=0.2535, simple_loss=0.3102, pruned_loss=0.07138, ctc_loss=0.1353, over 19858.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3021, pruned_loss=0.07313, ctc_loss=0.1377, over 3825514.24 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:14:52,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=112608.0, ans=0.0
+2024-08-25 16:14:55,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112608.0, ans=0.1
+2024-08-25 16:15:51,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=112661.33333333333, ans=0.0
+2024-08-25 16:16:05,757 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.875e+02 2.166e+02 2.598e+02 4.323e+02, threshold=4.331e+02, percent-clipped=2.0
+2024-08-25 16:16:11,690 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=112714.66666666667, ans=15.0
+2024-08-25 16:16:12,371 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112714.66666666667, ans=0.1
+2024-08-25 16:16:14,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=112768.0, ans=0.2
+2024-08-25 16:16:17,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=112768.0, ans=0.0
+2024-08-25 16:16:19,390 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=112768.0, ans=0.125
+2024-08-25 16:16:30,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112821.33333333333, ans=0.1
+2024-08-25 16:16:38,900 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.87 vs. limit=15.0
+2024-08-25 16:16:39,514 INFO [train.py:1114] (2/4) Epoch 9, batch 1250, loss[loss=0.2781, simple_loss=0.3287, pruned_loss=0.08243, ctc_loss=0.1568, over 19524.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3026, pruned_loss=0.0732, ctc_loss=0.138, over 3842813.32 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:17:28,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=113088.0, ans=0.125
+2024-08-25 16:17:29,649 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:17:33,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=113088.0, ans=0.2
+2024-08-25 16:17:40,927 INFO [train.py:1114] (2/4) Epoch 9, batch 1300, loss[loss=0.2541, simple_loss=0.3081, pruned_loss=0.0738, ctc_loss=0.131, over 18940.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.302, pruned_loss=0.07293, ctc_loss=0.1374, over 3845203.26 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:17:41,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=113141.33333333333, ans=0.5
+2024-08-25 16:18:08,524 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 1.959e+02 2.315e+02 2.984e+02 4.812e+02, threshold=4.630e+02, percent-clipped=1.0
+2024-08-25 16:18:08,811 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=113248.0, ans=0.0
+2024-08-25 16:18:17,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=113301.33333333333, ans=0.2
+2024-08-25 16:18:21,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=113301.33333333333, ans=0.125
+2024-08-25 16:18:25,742 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=113301.33333333333, ans=0.125
+2024-08-25 16:18:29,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=113354.66666666667, ans=0.125
+2024-08-25 16:18:37,511 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=113354.66666666667, ans=0.125
+2024-08-25 16:18:42,152 INFO [train.py:1114] (2/4) Epoch 9, batch 1350, loss[loss=0.2634, simple_loss=0.3035, pruned_loss=0.08165, ctc_loss=0.1502, over 19767.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3013, pruned_loss=0.07237, ctc_loss=0.1362, over 3856385.13 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:18:42,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=113408.0, ans=0.125
+2024-08-25 16:19:20,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=113568.0, ans=0.125
+2024-08-25 16:19:34,560 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.77 vs. limit=15.0
+2024-08-25 16:19:38,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=113674.66666666667, ans=0.2
+2024-08-25 16:19:40,007 INFO [train.py:1114] (2/4) Epoch 9, batch 1400, loss[loss=0.2087, simple_loss=0.2616, pruned_loss=0.05751, ctc_loss=0.1019, over 19642.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3008, pruned_loss=0.07213, ctc_loss=0.1357, over 3863319.77 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:19:43,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113674.66666666667, ans=0.125
+2024-08-25 16:19:48,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=113674.66666666667, ans=0.0
+2024-08-25 16:20:07,561 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.860e+02 2.127e+02 2.545e+02 4.134e+02, threshold=4.253e+02, percent-clipped=0.0
+2024-08-25 16:20:21,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=113834.66666666667, ans=0.2
+2024-08-25 16:20:43,000 INFO [train.py:1114] (2/4) Epoch 9, batch 1450, loss[loss=0.2956, simple_loss=0.3349, pruned_loss=0.09308, ctc_loss=0.175, over 19659.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3018, pruned_loss=0.07277, ctc_loss=0.137, over 3861359.41 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:20:48,294 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.48 vs. limit=15.0
+2024-08-25 16:21:01,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=113994.66666666667, ans=0.125
+2024-08-25 16:21:09,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=114048.0, ans=0.125
+2024-08-25 16:21:17,993 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.06 vs. limit=15.0
+2024-08-25 16:21:18,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=114101.33333333333, ans=0.07
+2024-08-25 16:21:33,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=114154.66666666667, ans=0.0
+2024-08-25 16:21:45,886 INFO [train.py:1114] (2/4) Epoch 9, batch 1500, loss[loss=0.2324, simple_loss=0.3, pruned_loss=0.05977, ctc_loss=0.1133, over 19598.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3026, pruned_loss=0.07328, ctc_loss=0.1379, over 3861063.45 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:21:56,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=114261.33333333333, ans=0.0
+2024-08-25 16:22:07,815 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.77 vs. limit=10.0
+2024-08-25 16:22:13,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=114314.66666666667, ans=0.0
+2024-08-25 16:22:15,434 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.928e+02 2.180e+02 2.740e+02 4.350e+02, threshold=4.360e+02, percent-clipped=2.0
+2024-08-25 16:22:19,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=114314.66666666667, ans=0.0
+2024-08-25 16:22:32,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=114368.0, ans=0.125
+2024-08-25 16:22:39,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=114421.33333333333, ans=0.125
+2024-08-25 16:22:45,655 INFO [train.py:1114] (2/4) Epoch 9, batch 1550, loss[loss=0.2599, simple_loss=0.3098, pruned_loss=0.0757, ctc_loss=0.1463, over 19612.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3023, pruned_loss=0.07313, ctc_loss=0.1377, over 3845360.99 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-25 16:22:55,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=114474.66666666667, ans=0.025
+2024-08-25 16:23:21,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=114634.66666666667, ans=0.0
+2024-08-25 16:23:27,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-25 16:23:47,229 INFO [train.py:1114] (2/4) Epoch 9, batch 1600, loss[loss=0.27, simple_loss=0.3221, pruned_loss=0.07922, ctc_loss=0.1489, over 19833.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3018, pruned_loss=0.07284, ctc_loss=0.1373, over 3835886.41 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-25 16:23:48,669 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=114741.33333333333, ans=0.05
+2024-08-25 16:23:57,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=114741.33333333333, ans=0.125
+2024-08-25 16:24:13,979 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=12.84 vs. limit=15.0
+2024-08-25 16:24:16,807 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.930e+02 2.504e+02 3.084e+02 5.673e+02, threshold=5.009e+02, percent-clipped=4.0
+2024-08-25 16:24:45,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115008.0, ans=0.1
+2024-08-25 16:24:46,367 INFO [train.py:1114] (2/4) Epoch 9, batch 1650, loss[loss=0.2617, simple_loss=0.3127, pruned_loss=0.07632, ctc_loss=0.1454, over 19659.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3019, pruned_loss=0.07293, ctc_loss=0.1374, over 3832639.29 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:24:51,006 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=115008.0, ans=0.0
+2024-08-25 16:24:53,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115008.0, ans=0.1
+2024-08-25 16:25:09,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=115114.66666666667, ans=0.0
+2024-08-25 16:25:12,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=115114.66666666667, ans=0.125
+2024-08-25 16:25:23,727 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.56 vs. limit=22.5
+2024-08-25 16:25:31,735 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=115168.0, ans=0.125
+2024-08-25 16:25:38,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=115221.33333333333, ans=0.125
+2024-08-25 16:25:44,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=115274.66666666667, ans=0.125
+2024-08-25 16:25:45,178 INFO [train.py:1114] (2/4) Epoch 9, batch 1700, loss[loss=0.2098, simple_loss=0.2622, pruned_loss=0.05643, ctc_loss=0.1114, over 19674.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3009, pruned_loss=0.07197, ctc_loss=0.1357, over 3847087.37 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:25:45,786 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.03 vs. limit=15.0
+2024-08-25 16:25:49,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=115274.66666666667, ans=0.0
+2024-08-25 16:25:59,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=115328.0, ans=0.125
+2024-08-25 16:26:08,975 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=115381.33333333333, ans=0.0
+2024-08-25 16:26:10,413 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.37 vs. limit=15.0
+2024-08-25 16:26:13,047 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.773e+02 1.969e+02 2.283e+02 4.673e+02, threshold=3.938e+02, percent-clipped=0.0
+2024-08-25 16:26:30,041 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.93 vs. limit=22.5
+2024-08-25 16:26:31,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=115488.0, ans=0.05
+2024-08-25 16:26:41,717 INFO [train.py:1114] (2/4) Epoch 9, batch 1750, loss[loss=0.2035, simple_loss=0.2616, pruned_loss=0.05237, ctc_loss=0.1015, over 19680.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.2997, pruned_loss=0.07112, ctc_loss=0.1343, over 3851166.61 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:26:42,314 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.52 vs. limit=15.0
+2024-08-25 16:26:47,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=115541.33333333333, ans=0.025
+2024-08-25 16:27:27,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=115594.66666666667, ans=0.0
+2024-08-25 16:27:30,782 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.12 vs. limit=22.5
+2024-08-25 16:27:36,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=115594.66666666667, ans=0.0
+2024-08-25 16:27:49,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=115701.33333333333, ans=0.125
+2024-08-25 16:28:01,794 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.41 vs. limit=22.5
+2024-08-25 16:28:04,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=115754.66666666667, ans=0.125
+2024-08-25 16:28:08,206 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=115754.66666666667, ans=0.2
+2024-08-25 16:28:12,438 INFO [train.py:1114] (2/4) Epoch 9, batch 1800, loss[loss=0.2867, simple_loss=0.3257, pruned_loss=0.09149, ctc_loss=0.1617, over 19621.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.2998, pruned_loss=0.07115, ctc_loss=0.1343, over 3852683.02 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:28:27,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=115808.0, ans=0.04949747468305833
+2024-08-25 16:28:44,535 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=115914.66666666667, ans=0.125
+2024-08-25 16:28:49,005 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 1.840e+02 2.097e+02 2.711e+02 4.220e+02, threshold=4.193e+02, percent-clipped=2.0
+2024-08-25 16:28:56,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=115914.66666666667, ans=0.0
+2024-08-25 16:29:16,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=116021.33333333333, ans=0.0
+2024-08-25 16:29:20,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=116021.33333333333, ans=0.125
+2024-08-25 16:29:25,109 INFO [train.py:1114] (2/4) Epoch 9, batch 1850, loss[loss=0.2424, simple_loss=0.3005, pruned_loss=0.06602, ctc_loss=0.1306, over 19592.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.2988, pruned_loss=0.07037, ctc_loss=0.1327, over 3857004.18 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:29:37,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=116128.0, ans=0.0
+2024-08-25 16:29:41,422 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.50 vs. limit=22.5
+2024-08-25 16:29:43,774 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=15.36 vs. limit=22.5
+2024-08-25 16:30:20,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=116181.33333333333, ans=0.025
+2024-08-25 16:30:44,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=116288.0, ans=0.2
+2024-08-25 16:30:46,797 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:30:56,364 INFO [train.py:1114] (2/4) Epoch 9, batch 1900, loss[loss=0.2496, simple_loss=0.312, pruned_loss=0.06844, ctc_loss=0.1259, over 19680.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3002, pruned_loss=0.07124, ctc_loss=0.1341, over 3861462.69 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:31:01,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=116341.33333333333, ans=0.125
+2024-08-25 16:32:04,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=116394.66666666667, ans=0.2
+2024-08-25 16:32:16,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=116448.0, ans=0.125
+2024-08-25 16:32:21,938 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.810e+02 2.075e+02 2.674e+02 4.757e+02, threshold=4.150e+02, percent-clipped=3.0
+2024-08-25 16:32:24,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=116448.0, ans=0.0
+2024-08-25 16:32:31,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten.whitening_limit, batch_count=116501.33333333333, ans=15.0
+2024-08-25 16:33:05,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=116608.0, ans=0.125
+2024-08-25 16:33:06,107 INFO [train.py:1114] (2/4) Epoch 9, batch 1950, loss[loss=0.2626, simple_loss=0.3082, pruned_loss=0.0788, ctc_loss=0.1486, over 19584.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.3011, pruned_loss=0.07128, ctc_loss=0.1339, over 3869958.39 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:33:10,860 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=116608.0, ans=0.125
+2024-08-25 16:33:32,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=116714.66666666667, ans=0.125
+2024-08-25 16:33:49,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=116768.0, ans=0.125
+2024-08-25 16:34:02,733 INFO [train.py:1114] (2/4) Epoch 9, batch 2000, loss[loss=0.2139, simple_loss=0.2656, pruned_loss=0.06021, ctc_loss=0.1047, over 19662.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.302, pruned_loss=0.0721, ctc_loss=0.1354, over 3854092.86 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:34:30,980 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 1.787e+02 2.122e+02 2.673e+02 5.196e+02, threshold=4.245e+02, percent-clipped=10.0
+2024-08-25 16:34:51,313 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=117088.0, ans=0.0
+2024-08-25 16:34:51,326 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=117088.0, ans=0.125
+2024-08-25 16:34:59,599 INFO [train.py:1114] (2/4) Epoch 9, batch 2050, loss[loss=0.2139, simple_loss=0.2678, pruned_loss=0.05845, ctc_loss=0.1076, over 19713.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3011, pruned_loss=0.07208, ctc_loss=0.1353, over 3849627.61 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:35:14,745 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=117194.66666666667, ans=0.0
+2024-08-25 16:35:21,620 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=117248.0, ans=0.125
+2024-08-25 16:35:26,047 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=117248.0, ans=0.0
+2024-08-25 16:35:35,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=117301.33333333333, ans=0.0
+2024-08-25 16:36:04,957 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.08 vs. limit=15.0
+2024-08-25 16:36:11,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=117354.66666666667, ans=0.05
+2024-08-25 16:36:55,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=117354.66666666667, ans=0.2
+2024-08-25 16:36:57,853 INFO [train.py:1114] (2/4) Epoch 9, batch 2100, loss[loss=0.2273, simple_loss=0.2794, pruned_loss=0.06379, ctc_loss=0.1191, over 19747.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.2999, pruned_loss=0.07151, ctc_loss=0.1343, over 3858525.45 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:37:24,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=117461.33333333333, ans=0.125
+2024-08-25 16:37:24,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=117461.33333333333, ans=0.125
+2024-08-25 16:37:26,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=117461.33333333333, ans=0.0
+2024-08-25 16:37:36,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=117514.66666666667, ans=0.125
+2024-08-25 16:37:38,958 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.824e+02 2.012e+02 2.446e+02 4.504e+02, threshold=4.025e+02, percent-clipped=2.0
+2024-08-25 16:37:41,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=117514.66666666667, ans=0.0
+2024-08-25 16:37:43,598 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=117568.0, ans=0.125
+2024-08-25 16:37:51,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=117568.0, ans=0.125
+2024-08-25 16:38:01,333 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=117621.33333333333, ans=0.035
+2024-08-25 16:38:06,801 INFO [train.py:1114] (2/4) Epoch 9, batch 2150, loss[loss=0.2373, simple_loss=0.2869, pruned_loss=0.06665, ctc_loss=0.1362, over 19882.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.2989, pruned_loss=0.07095, ctc_loss=0.1332, over 3869334.64 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:38:09,587 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.77 vs. limit=15.0
+2024-08-25 16:38:27,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=117728.0, ans=0.07
+2024-08-25 16:38:30,820 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=117781.33333333333, ans=10.0
+2024-08-25 16:38:31,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=117781.33333333333, ans=0.0
+2024-08-25 16:38:43,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=117834.66666666667, ans=0.0
+2024-08-25 16:38:46,498 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=117834.66666666667, ans=0.125
+2024-08-25 16:38:47,641 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=117834.66666666667, ans=0.2
+2024-08-25 16:39:02,681 INFO [train.py:1114] (2/4) Epoch 9, batch 2200, loss[loss=0.2689, simple_loss=0.3206, pruned_loss=0.07815, ctc_loss=0.1521, over 19586.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.2991, pruned_loss=0.07099, ctc_loss=0.1332, over 3867498.81 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:39:03,199 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.18 vs. limit=10.0
+2024-08-25 16:39:06,845 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.29 vs. limit=12.0
+2024-08-25 16:39:30,924 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.840e+02 2.263e+02 2.882e+02 6.553e+02, threshold=4.526e+02, percent-clipped=9.0
+2024-08-25 16:39:37,613 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.33 vs. limit=15.0
+2024-08-25 16:39:48,884 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=118154.66666666667, ans=0.1
+2024-08-25 16:39:59,969 INFO [train.py:1114] (2/4) Epoch 9, batch 2250, loss[loss=0.2263, simple_loss=0.2903, pruned_loss=0.05929, ctc_loss=0.1094, over 19599.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.2989, pruned_loss=0.07069, ctc_loss=0.1328, over 3866850.68 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:40:04,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=118208.0, ans=0.125
+2024-08-25 16:40:04,592 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=118208.0, ans=0.125
+2024-08-25 16:40:17,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=118261.33333333333, ans=0.125
+2024-08-25 16:40:26,371 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=118314.66666666667, ans=0.125
+2024-08-25 16:40:37,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=118368.0, ans=0.1
+2024-08-25 16:40:47,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=118421.33333333333, ans=0.2
+2024-08-25 16:40:54,817 INFO [train.py:1114] (2/4) Epoch 9, batch 2300, loss[loss=0.2097, simple_loss=0.2686, pruned_loss=0.05458, ctc_loss=0.1039, over 19500.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.2989, pruned_loss=0.07125, ctc_loss=0.1338, over 3860346.86 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:40:56,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=118474.66666666667, ans=0.07
+2024-08-25 16:41:03,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=118474.66666666667, ans=0.125
+2024-08-25 16:41:17,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=118581.33333333333, ans=0.2
+2024-08-25 16:41:23,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=118581.33333333333, ans=0.0
+2024-08-25 16:41:24,915 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.864e+02 2.265e+02 3.023e+02 5.230e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 16:41:33,598 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=118634.66666666667, ans=0.0
+2024-08-25 16:41:33,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=118634.66666666667, ans=0.0
+2024-08-25 16:41:51,064 INFO [train.py:1114] (2/4) Epoch 9, batch 2350, loss[loss=0.2722, simple_loss=0.3206, pruned_loss=0.08243, ctc_loss=0.1471, over 19656.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.2989, pruned_loss=0.07127, ctc_loss=0.1336, over 3863287.06 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:41:55,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=118741.33333333333, ans=0.1
+2024-08-25 16:41:55,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=118741.33333333333, ans=0.0
+2024-08-25 16:42:03,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=118794.66666666667, ans=0.0
+2024-08-25 16:42:07,942 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.45 vs. limit=10.0
+2024-08-25 16:42:30,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=118848.0, ans=0.125
+2024-08-25 16:42:50,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=118954.66666666667, ans=0.125
+2024-08-25 16:43:02,778 INFO [train.py:1114] (2/4) Epoch 9, batch 2400, loss[loss=0.2613, simple_loss=0.3102, pruned_loss=0.0778, ctc_loss=0.1422, over 19295.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3016, pruned_loss=0.07241, ctc_loss=0.1355, over 3857793.22 frames. ], batch size: 71, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:43:06,918 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=119008.0, ans=0.125
+2024-08-25 16:43:11,865 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:43:13,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=119008.0, ans=0.125
+2024-08-25 16:43:17,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=119061.33333333333, ans=0.125
+2024-08-25 16:43:24,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=119061.33333333333, ans=0.0
+2024-08-25 16:43:28,350 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=119114.66666666667, ans=0.2
+2024-08-25 16:43:31,715 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=119114.66666666667, ans=0.125
+2024-08-25 16:43:32,521 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.930e+02 2.301e+02 2.799e+02 4.768e+02, threshold=4.601e+02, percent-clipped=1.0
+2024-08-25 16:43:38,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=119168.0, ans=0.025
+2024-08-25 16:43:39,234 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=119168.0, ans=0.0
+2024-08-25 16:43:44,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=119168.0, ans=0.2
+2024-08-25 16:43:59,368 INFO [train.py:1114] (2/4) Epoch 9, batch 2450, loss[loss=0.3162, simple_loss=0.3343, pruned_loss=0.1067, ctc_loss=0.2116, over 13407.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.306, pruned_loss=0.07596, ctc_loss=0.1426, over 3728202.43 frames. ], batch size: 141, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:43:59,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=119274.66666666667, ans=0.125
+2024-08-25 16:44:20,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=119381.33333333333, ans=0.125
+2024-08-25 16:44:28,407 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=119381.33333333333, ans=0.1
+2024-08-25 16:44:29,612 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=8.571e-02
+2024-08-25 16:45:25,698 INFO [train.py:1114] (2/4) Epoch 10, batch 0, loss[loss=0.2264, simple_loss=0.2835, pruned_loss=0.06158, ctc_loss=0.1153, over 19806.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.2835, pruned_loss=0.06158, ctc_loss=0.1153, over 19806.00 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 32.0
+2024-08-25 16:45:25,699 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 16:46:37,102 INFO [train.py:1146] (2/4) Epoch 10, validation: loss=0.2041, simple_loss=0.2903, pruned_loss=0.04356, ctc_loss=0.07708, over 944034.00 frames.
+2024-08-25 16:46:37,103 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 16:46:44,349 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.74 vs. limit=12.0
+2024-08-25 16:46:49,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=119536.0, ans=0.125
+2024-08-25 16:46:55,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=119536.0, ans=0.025
+2024-08-25 16:46:56,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=119536.0, ans=0.0
+2024-08-25 16:46:58,459 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.45 vs. limit=6.0
+2024-08-25 16:47:46,596 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 1.955e+02 2.116e+02 2.362e+02 4.652e+02, threshold=4.231e+02, percent-clipped=1.0
+2024-08-25 16:47:50,619 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.93 vs. limit=15.0
+2024-08-25 16:48:18,139 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=119696.0, ans=0.125
+2024-08-25 16:48:20,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=119696.0, ans=0.125
+2024-08-25 16:48:28,323 INFO [train.py:1114] (2/4) Epoch 10, batch 50, loss[loss=0.2137, simple_loss=0.2664, pruned_loss=0.05856, ctc_loss=0.1099, over 19753.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3026, pruned_loss=0.07235, ctc_loss=0.1366, over 844913.23 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:49:10,489 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.48 vs. limit=22.5
+2024-08-25 16:49:17,420 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=119749.33333333333, ans=0.07
+2024-08-25 16:49:50,354 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.98 vs. limit=15.0
+2024-08-25 16:50:01,573 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=119802.66666666667, ans=0.0
+2024-08-25 16:50:41,621 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.54 vs. limit=22.5
+2024-08-25 16:50:44,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=119909.33333333333, ans=0.025
+2024-08-25 16:50:51,421 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=119909.33333333333, ans=10.0
+2024-08-25 16:51:10,538 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.64 vs. limit=15.0
+2024-08-25 16:51:25,690 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=119962.66666666667, ans=0.025
+2024-08-25 16:51:37,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=119962.66666666667, ans=0.1
+2024-08-25 16:52:34,138 INFO [train.py:1114] (2/4) Epoch 10, batch 100, loss[loss=0.2242, simple_loss=0.2773, pruned_loss=0.06246, ctc_loss=0.1156, over 19728.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3041, pruned_loss=0.07265, ctc_loss=0.1377, over 1498754.48 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:52:57,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=120069.33333333333, ans=0.0
+2024-08-25 16:53:10,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=120069.33333333333, ans=0.07
+2024-08-25 16:53:24,677 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.13 vs. limit=22.5
+2024-08-25 16:53:28,993 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=120122.66666666667, ans=0.1
+2024-08-25 16:53:35,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=120176.0, ans=0.04949747468305833
+2024-08-25 16:53:46,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=120176.0, ans=0.0
+2024-08-25 16:53:47,838 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 1.798e+02 2.253e+02 2.860e+02 4.134e+02, threshold=4.507e+02, percent-clipped=0.0
+2024-08-25 16:54:47,468 INFO [train.py:1114] (2/4) Epoch 10, batch 150, loss[loss=0.2231, simple_loss=0.2709, pruned_loss=0.06283, ctc_loss=0.1243, over 19717.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.302, pruned_loss=0.07197, ctc_loss=0.1361, over 2027457.93 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:54:58,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-25 16:55:01,387 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.63 vs. limit=15.0
+2024-08-25 16:55:59,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=120496.0, ans=0.0
+2024-08-25 16:56:01,844 INFO [train.py:1114] (2/4) Epoch 10, batch 200, loss[loss=0.297, simple_loss=0.3344, pruned_loss=0.09328, ctc_loss=0.1829, over 18194.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.2998, pruned_loss=0.07116, ctc_loss=0.1342, over 2435318.47 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:56:19,731 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.63 vs. limit=15.0
+2024-08-25 16:57:34,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=120656.0, ans=0.125
+2024-08-25 16:57:34,397 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.21 vs. limit=10.0
+2024-08-25 16:58:07,756 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.824e+02 2.064e+02 2.548e+02 6.143e+02, threshold=4.128e+02, percent-clipped=2.0
+2024-08-25 16:58:16,020 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=120762.66666666667, ans=0.1
+2024-08-25 16:58:18,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120762.66666666667, ans=0.125
+2024-08-25 16:58:33,002 INFO [train.py:1114] (2/4) Epoch 10, batch 250, loss[loss=0.2739, simple_loss=0.3213, pruned_loss=0.08282, ctc_loss=0.1522, over 19422.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.2986, pruned_loss=0.07032, ctc_loss=0.1326, over 2755755.23 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:59:07,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=120922.66666666667, ans=0.025
+2024-08-25 16:59:42,633 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.37 vs. limit=15.0
+2024-08-25 16:59:53,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=120976.0, ans=0.125
+2024-08-25 16:59:54,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=120976.0, ans=0.04949747468305833
+2024-08-25 17:00:02,354 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.31 vs. limit=12.0
+2024-08-25 17:00:08,743 INFO [train.py:1114] (2/4) Epoch 10, batch 300, loss[loss=0.2622, simple_loss=0.312, pruned_loss=0.07772, ctc_loss=0.1426, over 19522.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.2977, pruned_loss=0.06963, ctc_loss=0.1311, over 2999790.65 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:00:13,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=121082.66666666667, ans=0.0
+2024-08-25 17:00:22,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=121082.66666666667, ans=0.125
+2024-08-25 17:00:33,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=121136.0, ans=0.125
+2024-08-25 17:01:01,167 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.908e+02 2.186e+02 2.769e+02 4.118e+02, threshold=4.372e+02, percent-clipped=0.0
+2024-08-25 17:01:06,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=121296.0, ans=0.0
+2024-08-25 17:01:11,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=121296.0, ans=0.2
+2024-08-25 17:02:40,279 INFO [train.py:1114] (2/4) Epoch 10, batch 350, loss[loss=0.2296, simple_loss=0.2774, pruned_loss=0.06621, ctc_loss=0.1233, over 19752.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.2977, pruned_loss=0.06953, ctc_loss=0.1307, over 3189245.73 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:02:57,698 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=2.592e-03
+2024-08-25 17:03:07,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=121456.0, ans=0.0
+2024-08-25 17:03:21,559 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.76 vs. limit=6.0
+2024-08-25 17:03:24,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=121509.33333333333, ans=0.1
+2024-08-25 17:03:42,419 INFO [train.py:1114] (2/4) Epoch 10, batch 400, loss[loss=0.229, simple_loss=0.3023, pruned_loss=0.05634, ctc_loss=0.1075, over 19490.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2969, pruned_loss=0.06881, ctc_loss=0.1293, over 3340173.43 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:03:47,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=121616.0, ans=0.025
+2024-08-25 17:03:49,247 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=15.70 vs. limit=22.5
+2024-08-25 17:03:53,891 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.60 vs. limit=12.0
+2024-08-25 17:04:23,039 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=121776.0, ans=0.1
+2024-08-25 17:04:28,319 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=121776.0, ans=0.025
+2024-08-25 17:04:33,752 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 1.874e+02 2.151e+02 2.761e+02 4.102e+02, threshold=4.302e+02, percent-clipped=0.0
+2024-08-25 17:04:34,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=121776.0, ans=0.0
+2024-08-25 17:04:50,468 INFO [train.py:1114] (2/4) Epoch 10, batch 450, loss[loss=0.2551, simple_loss=0.3137, pruned_loss=0.071, ctc_loss=0.1365, over 19622.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.298, pruned_loss=0.06957, ctc_loss=0.1306, over 3449928.55 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:07:43,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=121989.33333333333, ans=0.125
+2024-08-25 17:07:59,004 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=122042.66666666667, ans=0.1
+2024-08-25 17:08:34,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=122096.0, ans=15.0
+2024-08-25 17:09:04,034 INFO [train.py:1114] (2/4) Epoch 10, batch 500, loss[loss=0.2555, simple_loss=0.3126, pruned_loss=0.07282, ctc_loss=0.1321, over 19669.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2969, pruned_loss=0.06885, ctc_loss=0.1291, over 3545733.99 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:09:43,924 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=122256.0, ans=0.125
+2024-08-25 17:10:07,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=122256.0, ans=0.0
+2024-08-25 17:10:20,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=122309.33333333333, ans=0.0
+2024-08-25 17:10:36,239 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.797e+02 2.290e+02 2.870e+02 3.920e+02, threshold=4.579e+02, percent-clipped=0.0
+2024-08-25 17:10:37,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=122309.33333333333, ans=0.1
+2024-08-25 17:10:38,780 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-25 17:10:43,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=122362.66666666667, ans=0.0
+2024-08-25 17:10:51,448 INFO [train.py:1114] (2/4) Epoch 10, batch 550, loss[loss=0.2667, simple_loss=0.3155, pruned_loss=0.07907, ctc_loss=0.1494, over 19210.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2969, pruned_loss=0.06882, ctc_loss=0.1292, over 3607666.55 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:11:05,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=122416.0, ans=0.1
+2024-08-25 17:11:10,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=122416.0, ans=0.0
+2024-08-25 17:11:51,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=122469.33333333333, ans=0.0
+2024-08-25 17:11:57,399 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=122469.33333333333, ans=0.125
+2024-08-25 17:13:39,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=122522.66666666667, ans=0.1
+2024-08-25 17:13:40,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=122522.66666666667, ans=0.2
+2024-08-25 17:13:45,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=122522.66666666667, ans=0.0
+2024-08-25 17:14:20,770 INFO [train.py:1114] (2/4) Epoch 10, batch 600, loss[loss=0.2783, simple_loss=0.3251, pruned_loss=0.08431, ctc_loss=0.1573, over 19421.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.2974, pruned_loss=0.06915, ctc_loss=0.1299, over 3665635.06 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:14:33,449 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=122736.0, ans=0.025
+2024-08-25 17:14:59,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=122842.66666666667, ans=0.125
+2024-08-25 17:15:07,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=122842.66666666667, ans=0.0
+2024-08-25 17:15:08,639 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 2.061e+02 2.496e+02 4.365e+02, threshold=4.122e+02, percent-clipped=0.0
+2024-08-25 17:15:11,360 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.21 vs. limit=15.0
+2024-08-25 17:15:23,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=122949.33333333333, ans=0.125
+2024-08-25 17:15:24,813 INFO [train.py:1114] (2/4) Epoch 10, batch 650, loss[loss=0.2138, simple_loss=0.2828, pruned_loss=0.05273, ctc_loss=0.0981, over 19758.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2965, pruned_loss=0.06869, ctc_loss=0.1291, over 3715687.27 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:15:35,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=123002.66666666667, ans=0.2
+2024-08-25 17:16:01,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=123109.33333333333, ans=10.0
+2024-08-25 17:16:04,875 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.44 vs. limit=22.5
+2024-08-25 17:16:33,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=123216.0, ans=0.125
+2024-08-25 17:16:34,666 INFO [train.py:1114] (2/4) Epoch 10, batch 700, loss[loss=0.2411, simple_loss=0.2892, pruned_loss=0.06964, ctc_loss=0.1343, over 19725.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2971, pruned_loss=0.06901, ctc_loss=0.1295, over 3748664.78 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:17:33,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=123216.0, ans=0.125
+2024-08-25 17:17:39,846 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=123269.33333333333, ans=0.0
+2024-08-25 17:17:56,361 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=123322.66666666667, ans=0.125
+2024-08-25 17:18:00,972 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=123322.66666666667, ans=0.125
+2024-08-25 17:18:04,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=123376.0, ans=0.0
+2024-08-25 17:18:05,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=123376.0, ans=0.125
+2024-08-25 17:18:13,484 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 1.934e+02 2.276e+02 3.026e+02 5.626e+02, threshold=4.552e+02, percent-clipped=3.0
+2024-08-25 17:18:17,269 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=123429.33333333333, ans=0.0
+2024-08-25 17:18:26,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=123429.33333333333, ans=0.05
+2024-08-25 17:18:28,246 INFO [train.py:1114] (2/4) Epoch 10, batch 750, loss[loss=0.229, simple_loss=0.294, pruned_loss=0.05981, ctc_loss=0.1112, over 19517.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2964, pruned_loss=0.0686, ctc_loss=0.1287, over 3775255.51 frames. ], batch size: 54, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:18:31,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=123482.66666666667, ans=0.1
+2024-08-25 17:18:47,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=123536.0, ans=0.125
+2024-08-25 17:19:07,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=123589.33333333333, ans=0.1
+2024-08-25 17:19:10,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=123642.66666666667, ans=0.1
+2024-08-25 17:19:23,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=123696.0, ans=0.0
+2024-08-25 17:19:32,723 INFO [train.py:1114] (2/4) Epoch 10, batch 800, loss[loss=0.2221, simple_loss=0.2664, pruned_loss=0.06592, ctc_loss=0.115, over 19798.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2956, pruned_loss=0.06797, ctc_loss=0.1277, over 3797522.83 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:19:58,487 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=123749.33333333333, ans=0.2
+2024-08-25 17:20:05,595 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.20 vs. limit=15.0
+2024-08-25 17:20:08,040 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.46 vs. limit=15.0
+2024-08-25 17:20:17,629 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=5.388e-03
+2024-08-25 17:20:29,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=123909.33333333333, ans=0.2
+2024-08-25 17:20:33,027 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.887e+02 2.136e+02 2.736e+02 3.984e+02, threshold=4.273e+02, percent-clipped=0.0
+2024-08-25 17:20:47,976 INFO [train.py:1114] (2/4) Epoch 10, batch 850, loss[loss=0.2324, simple_loss=0.3015, pruned_loss=0.05974, ctc_loss=0.1096, over 19643.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2957, pruned_loss=0.06804, ctc_loss=0.1279, over 3814987.05 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:50,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=124016.0, ans=0.125
+2024-08-25 17:20:51,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=124016.0, ans=0.125
+2024-08-25 17:20:52,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=124016.0, ans=0.125
+2024-08-25 17:20:55,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=124016.0, ans=0.0
+2024-08-25 17:21:02,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=124069.33333333333, ans=0.0
+2024-08-25 17:21:08,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=124069.33333333333, ans=0.1
+2024-08-25 17:21:12,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=124122.66666666667, ans=0.125
+2024-08-25 17:21:30,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=124176.0, ans=0.025
+2024-08-25 17:22:28,571 INFO [train.py:1114] (2/4) Epoch 10, batch 900, loss[loss=0.2143, simple_loss=0.2666, pruned_loss=0.05823, ctc_loss=0.114, over 19427.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.2956, pruned_loss=0.06833, ctc_loss=0.1285, over 3818467.02 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:22:48,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=124336.0, ans=0.125
+2024-08-25 17:23:13,943 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.854e+02 2.167e+02 2.763e+02 5.395e+02, threshold=4.333e+02, percent-clipped=2.0
+2024-08-25 17:23:19,683 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.02 vs. limit=22.5
+2024-08-25 17:23:23,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=124496.0, ans=0.0
+2024-08-25 17:23:30,317 INFO [train.py:1114] (2/4) Epoch 10, batch 950, loss[loss=0.2552, simple_loss=0.301, pruned_loss=0.0756, ctc_loss=0.1454, over 19486.00 frames. ], tot_loss[loss=0.242, simple_loss=0.296, pruned_loss=0.06832, ctc_loss=0.1286, over 3819321.92 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:23:35,092 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:24:01,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=124656.0, ans=0.2
+2024-08-25 17:24:08,754 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.84 vs. limit=15.0
+2024-08-25 17:24:09,867 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.56 vs. limit=22.5
+2024-08-25 17:24:34,417 INFO [train.py:1114] (2/4) Epoch 10, batch 1000, loss[loss=0.219, simple_loss=0.2846, pruned_loss=0.05609, ctc_loss=0.1033, over 19840.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2973, pruned_loss=0.06907, ctc_loss=0.1301, over 3814320.55 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:24:45,544 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=124869.33333333333, ans=0.125
+2024-08-25 17:25:04,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=124922.66666666667, ans=0.0
+2024-08-25 17:25:18,039 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 1.797e+02 2.069e+02 2.553e+02 4.130e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-25 17:25:27,921 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.49 vs. limit=15.0
+2024-08-25 17:25:33,303 INFO [train.py:1114] (2/4) Epoch 10, batch 1050, loss[loss=0.2932, simple_loss=0.3399, pruned_loss=0.08973, ctc_loss=0.1679, over 19855.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.2968, pruned_loss=0.06901, ctc_loss=0.1299, over 3821281.08 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:25:33,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=125082.66666666667, ans=0.0
+2024-08-25 17:25:33,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=125082.66666666667, ans=0.0
+2024-08-25 17:26:11,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=125242.66666666667, ans=0.125
+2024-08-25 17:26:15,170 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=125242.66666666667, ans=0.1
+2024-08-25 17:26:24,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=125296.0, ans=0.2
+2024-08-25 17:26:32,051 INFO [train.py:1114] (2/4) Epoch 10, batch 1100, loss[loss=0.2329, simple_loss=0.2957, pruned_loss=0.06182, ctc_loss=0.1164, over 19592.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2965, pruned_loss=0.06898, ctc_loss=0.1299, over 3828470.94 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:26:54,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=125402.66666666667, ans=0.125
+2024-08-25 17:27:18,174 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.787e+02 2.060e+02 2.560e+02 4.808e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 17:27:28,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=125562.66666666667, ans=0.125
+2024-08-25 17:27:33,346 INFO [train.py:1114] (2/4) Epoch 10, batch 1150, loss[loss=0.2652, simple_loss=0.3071, pruned_loss=0.08131, ctc_loss=0.1516, over 19580.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2966, pruned_loss=0.06911, ctc_loss=0.1303, over 3829001.93 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:28:38,469 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=125829.33333333333, ans=0.125
+2024-08-25 17:28:41,357 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.30 vs. limit=22.5
+2024-08-25 17:28:44,448 INFO [train.py:1114] (2/4) Epoch 10, batch 1200, loss[loss=0.2628, simple_loss=0.3117, pruned_loss=0.07787, ctc_loss=0.1455, over 19841.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2972, pruned_loss=0.06919, ctc_loss=0.1305, over 3825625.47 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:28:55,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=125936.0, ans=0.0
+2024-08-25 17:29:28,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=126042.66666666667, ans=0.0
+2024-08-25 17:29:29,737 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.42 vs. limit=6.0
+2024-08-25 17:29:30,099 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.823e+02 2.047e+02 2.358e+02 4.051e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 17:29:45,850 INFO [train.py:1114] (2/4) Epoch 10, batch 1250, loss[loss=0.2641, simple_loss=0.3141, pruned_loss=0.07821, ctc_loss=0.1442, over 19509.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.2969, pruned_loss=0.06853, ctc_loss=0.1292, over 3843280.40 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:29:46,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=126149.33333333333, ans=0.025
+2024-08-25 17:29:52,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=126149.33333333333, ans=0.0
+2024-08-25 17:30:18,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=126256.0, ans=0.125
+2024-08-25 17:30:47,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=126362.66666666667, ans=0.07
+2024-08-25 17:30:55,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=126362.66666666667, ans=0.0
+2024-08-25 17:30:56,998 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.34 vs. limit=6.0
+2024-08-25 17:30:59,766 INFO [train.py:1114] (2/4) Epoch 10, batch 1300, loss[loss=0.2687, simple_loss=0.3083, pruned_loss=0.08352, ctc_loss=0.1553, over 18891.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2963, pruned_loss=0.06817, ctc_loss=0.1282, over 3846876.52 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:31:18,407 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=126416.0, ans=0.125
+2024-08-25 17:31:22,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=126416.0, ans=0.0
+2024-08-25 17:32:04,475 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.07 vs. limit=15.0
+2024-08-25 17:32:13,114 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.900e+02 2.303e+02 2.970e+02 5.096e+02, threshold=4.606e+02, percent-clipped=7.0
+2024-08-25 17:32:23,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=126629.33333333333, ans=0.125
+2024-08-25 17:32:28,199 INFO [train.py:1114] (2/4) Epoch 10, batch 1350, loss[loss=0.2132, simple_loss=0.2819, pruned_loss=0.05284, ctc_loss=0.09704, over 19765.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2959, pruned_loss=0.06799, ctc_loss=0.1277, over 3857709.68 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:32:45,623 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=126736.0, ans=0.125
+2024-08-25 17:32:46,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=126736.0, ans=0.025
+2024-08-25 17:32:48,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=126736.0, ans=0.1
+2024-08-25 17:32:53,381 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=126789.33333333333, ans=0.0
+2024-08-25 17:33:06,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=126842.66666666667, ans=0.125
+2024-08-25 17:33:23,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=126896.0, ans=0.0
+2024-08-25 17:33:30,493 INFO [train.py:1114] (2/4) Epoch 10, batch 1400, loss[loss=0.2142, simple_loss=0.2657, pruned_loss=0.05915, ctc_loss=0.1108, over 19682.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2956, pruned_loss=0.06789, ctc_loss=0.1273, over 3865118.71 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:33:38,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=126949.33333333333, ans=0.0
+2024-08-25 17:33:41,215 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=127002.66666666667, ans=0.125
+2024-08-25 17:34:34,278 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.46 vs. limit=15.0
+2024-08-25 17:34:37,167 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=127109.33333333333, ans=0.125
+2024-08-25 17:34:42,451 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.856e+02 2.167e+02 2.631e+02 4.500e+02, threshold=4.335e+02, percent-clipped=0.0
+2024-08-25 17:34:52,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=127162.66666666667, ans=0.125
+2024-08-25 17:35:02,157 INFO [train.py:1114] (2/4) Epoch 10, batch 1450, loss[loss=0.2497, simple_loss=0.3111, pruned_loss=0.06952, ctc_loss=0.123, over 19658.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2961, pruned_loss=0.06803, ctc_loss=0.1277, over 3863069.26 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:35:11,315 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.30 vs. limit=15.0
+2024-08-25 17:35:32,636 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.55 vs. limit=12.0
+2024-08-25 17:35:36,104 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127322.66666666667, ans=0.1
+2024-08-25 17:36:02,103 INFO [train.py:1114] (2/4) Epoch 10, batch 1500, loss[loss=0.2613, simple_loss=0.3102, pruned_loss=0.07701, ctc_loss=0.1459, over 19582.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2967, pruned_loss=0.06839, ctc_loss=0.1281, over 3862044.66 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:36:09,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=127482.66666666667, ans=0.0
+2024-08-25 17:36:11,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=127482.66666666667, ans=0.95
+2024-08-25 17:36:14,498 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=127536.0, ans=0.125
+2024-08-25 17:36:33,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=127589.33333333333, ans=10.0
+2024-08-25 17:36:37,979 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.32 vs. limit=6.0
+2024-08-25 17:36:38,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=127589.33333333333, ans=0.0
+2024-08-25 17:36:55,675 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 1.877e+02 2.186e+02 2.626e+02 4.478e+02, threshold=4.372e+02, percent-clipped=1.0
+2024-08-25 17:37:03,528 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.78 vs. limit=15.0
+2024-08-25 17:37:06,777 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.63 vs. limit=15.0
+2024-08-25 17:37:24,239 INFO [train.py:1114] (2/4) Epoch 10, batch 1550, loss[loss=0.2517, simple_loss=0.306, pruned_loss=0.07211, ctc_loss=0.1331, over 19608.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2971, pruned_loss=0.06882, ctc_loss=0.1293, over 3845947.00 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-25 17:37:42,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=127802.66666666667, ans=0.0
+2024-08-25 17:37:44,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=127802.66666666667, ans=0.035
+2024-08-25 17:38:00,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127856.0, ans=0.1
+2024-08-25 17:38:54,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=127909.33333333333, ans=0.0
+2024-08-25 17:39:51,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=127962.66666666667, ans=0.0
+2024-08-25 17:39:52,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=127962.66666666667, ans=0.2
+2024-08-25 17:41:06,795 INFO [train.py:1114] (2/4) Epoch 10, batch 1600, loss[loss=0.2298, simple_loss=0.2991, pruned_loss=0.05804, ctc_loss=0.111, over 19828.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2966, pruned_loss=0.06876, ctc_loss=0.1293, over 3834494.88 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:43:00,356 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=128122.66666666667, ans=0.0
+2024-08-25 17:43:05,807 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.37 vs. limit=22.5
+2024-08-25 17:43:14,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.67 vs. limit=10.0
+2024-08-25 17:43:24,235 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 1.849e+02 2.080e+02 2.733e+02 5.175e+02, threshold=4.161e+02, percent-clipped=4.0
+2024-08-25 17:43:38,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=128229.33333333333, ans=0.125
+2024-08-25 17:43:57,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=128229.33333333333, ans=0.0
+2024-08-25 17:44:00,800 INFO [train.py:1114] (2/4) Epoch 10, batch 1650, loss[loss=0.2318, simple_loss=0.2943, pruned_loss=0.06087, ctc_loss=0.119, over 19667.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2962, pruned_loss=0.06861, ctc_loss=0.129, over 3831066.69 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:44:11,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=128336.0, ans=0.0
+2024-08-25 17:44:30,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=128336.0, ans=0.0
+2024-08-25 17:44:35,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=128336.0, ans=0.5
+2024-08-25 17:44:42,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=128389.33333333333, ans=0.125
+2024-08-25 17:45:07,975 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=128442.66666666667, ans=0.2
+2024-08-25 17:45:12,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=128496.0, ans=0.125
+2024-08-25 17:45:15,709 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:45:19,125 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=128496.0, ans=10.0
+2024-08-25 17:45:46,329 INFO [train.py:1114] (2/4) Epoch 10, batch 1700, loss[loss=0.1979, simple_loss=0.2558, pruned_loss=0.05235, ctc_loss=0.0879, over 19668.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.2953, pruned_loss=0.06771, ctc_loss=0.1272, over 3845478.45 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:45:47,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=128549.33333333333, ans=0.2
+2024-08-25 17:46:34,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=128602.66666666667, ans=0.0
+2024-08-25 17:46:42,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=128602.66666666667, ans=0.025
+2024-08-25 17:46:43,634 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.22 vs. limit=15.0
+2024-08-25 17:46:44,769 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.35 vs. limit=15.0
+2024-08-25 17:46:45,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=128602.66666666667, ans=0.0
+2024-08-25 17:46:49,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=128656.0, ans=0.1
+2024-08-25 17:46:56,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=128656.0, ans=0.125
+2024-08-25 17:47:10,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=128709.33333333333, ans=0.1
+2024-08-25 17:47:11,342 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.773e+02 2.059e+02 2.527e+02 4.467e+02, threshold=4.119e+02, percent-clipped=1.0
+2024-08-25 17:48:12,452 INFO [train.py:1114] (2/4) Epoch 10, batch 1750, loss[loss=0.2286, simple_loss=0.2756, pruned_loss=0.06602, ctc_loss=0.124, over 19644.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.2941, pruned_loss=0.06693, ctc_loss=0.1257, over 3850000.57 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:48:23,399 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=128816.0, ans=0.125
+2024-08-25 17:48:41,328 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.47 vs. limit=12.0
+2024-08-25 17:48:44,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=128922.66666666667, ans=0.125
+2024-08-25 17:48:44,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=128922.66666666667, ans=0.2
+2024-08-25 17:48:45,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=128922.66666666667, ans=0.125
+2024-08-25 17:48:52,886 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=128976.0, ans=0.125
+2024-08-25 17:49:07,591 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=25.14 vs. limit=22.5
+2024-08-25 17:49:11,954 INFO [train.py:1114] (2/4) Epoch 10, batch 1800, loss[loss=0.2408, simple_loss=0.2996, pruned_loss=0.06642, ctc_loss=0.1229, over 19621.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.2944, pruned_loss=0.06704, ctc_loss=0.1258, over 3852826.67 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 17:49:18,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=129082.66666666667, ans=0.125
+2024-08-25 17:50:11,884 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.36 vs. limit=15.0
+2024-08-25 17:51:01,883 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.74 vs. limit=15.0
+2024-08-25 18:04:15,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=129189.33333333333, ans=0.025
+2024-08-25 18:11:17,742 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.930e+02 2.270e+02 3.115e+02 5.695e+02, threshold=4.540e+02, percent-clipped=10.0
+2024-08-25 18:14:48,020 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=129296.0, ans=0.07
+2024-08-25 18:19:59,309 INFO [train.py:1114] (2/4) Epoch 10, batch 1850, loss[loss=0.2797, simple_loss=0.3267, pruned_loss=0.08389, ctc_loss=0.162, over 19576.00 frames. ], tot_loss[loss=0.241, simple_loss=0.2951, pruned_loss=0.06797, ctc_loss=0.1273, over 3855910.98 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:24:28,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=129402.66666666667, ans=0.0
+2024-08-25 18:25:40,964 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=129402.66666666667, ans=0.0
+2024-08-25 18:29:52,838 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=129509.33333333333, ans=0.0
+2024-08-25 18:32:27,918 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=129616.0, ans=0.0
+2024-08-25 18:32:37,448 INFO [train.py:1114] (2/4) Epoch 10, batch 1900, loss[loss=0.2249, simple_loss=0.2924, pruned_loss=0.05678, ctc_loss=0.1097, over 19650.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2958, pruned_loss=0.06805, ctc_loss=0.1274, over 3860739.14 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:36:34,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=129722.66666666667, ans=0.0
+2024-08-25 18:36:36,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=129722.66666666667, ans=0.2
+2024-08-25 18:36:40,992 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.76 vs. limit=15.0
+2024-08-25 18:37:43,375 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.882e+02 2.156e+02 2.772e+02 4.689e+02, threshold=4.313e+02, percent-clipped=1.0
+2024-08-25 18:37:57,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=129829.33333333333, ans=0.125
+2024-08-25 18:38:10,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=129829.33333333333, ans=0.125
+2024-08-25 18:38:20,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=129829.33333333333, ans=0.0
+2024-08-25 18:38:48,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129882.66666666667, ans=0.1
+2024-08-25 18:38:48,697 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.38 vs. limit=10.0
+2024-08-25 18:38:51,205 INFO [train.py:1114] (2/4) Epoch 10, batch 1950, loss[loss=0.2314, simple_loss=0.288, pruned_loss=0.06345, ctc_loss=0.1196, over 19575.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2963, pruned_loss=0.06766, ctc_loss=0.1269, over 3869785.17 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:38:59,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=129882.66666666667, ans=0.125
+2024-08-25 18:39:16,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=129882.66666666667, ans=0.09899494936611666
+2024-08-25 18:40:12,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=129936.0, ans=0.0
+2024-08-25 18:41:32,650 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.44 vs. limit=10.0
+2024-08-25 18:42:12,111 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=130042.66666666667, ans=0.09899494936611666
+2024-08-25 18:44:04,322 INFO [train.py:1114] (2/4) Epoch 10, batch 2000, loss[loss=0.2306, simple_loss=0.2708, pruned_loss=0.06969, ctc_loss=0.1274, over 19625.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.2974, pruned_loss=0.06839, ctc_loss=0.1285, over 3855609.89 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:44:41,709 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.24 vs. limit=10.0
+2024-08-25 18:47:32,409 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.882e+02 2.262e+02 2.707e+02 4.864e+02, threshold=4.523e+02, percent-clipped=1.0
+2024-08-25 18:48:39,775 INFO [train.py:1114] (2/4) Epoch 10, batch 2050, loss[loss=0.2005, simple_loss=0.2631, pruned_loss=0.04943, ctc_loss=0.09763, over 19751.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2964, pruned_loss=0.06846, ctc_loss=0.1285, over 3851886.73 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:49:59,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=130469.33333333333, ans=0.2
+2024-08-25 18:50:02,438 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.05 vs. limit=15.0
+2024-08-25 18:50:28,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=130522.66666666667, ans=0.125
+2024-08-25 18:51:57,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=130682.66666666667, ans=0.125
+2024-08-25 18:52:20,501 INFO [train.py:1114] (2/4) Epoch 10, batch 2100, loss[loss=0.2137, simple_loss=0.277, pruned_loss=0.05548, ctc_loss=0.09852, over 19774.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2955, pruned_loss=0.06794, ctc_loss=0.1276, over 3858696.97 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 16.0
+2024-08-25 18:52:25,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=130682.66666666667, ans=0.125
+2024-08-25 18:52:29,935 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.19 vs. limit=22.5
+2024-08-25 18:52:56,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=130736.0, ans=0.025
+2024-08-25 18:52:57,402 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=130736.0, ans=0.125
+2024-08-25 18:53:29,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=130789.33333333333, ans=0.125
+2024-08-25 18:53:46,382 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=130842.66666666667, ans=0.1
+2024-08-25 18:53:48,071 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.09 vs. limit=22.5
+2024-08-25 18:53:49,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=130842.66666666667, ans=0.125
+2024-08-25 18:53:56,452 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.73 vs. limit=15.0
+2024-08-25 18:53:58,207 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.839e+02 2.296e+02 2.721e+02 6.154e+02, threshold=4.593e+02, percent-clipped=3.0
+2024-08-25 18:54:23,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=130896.0, ans=0.0
+2024-08-25 18:54:37,554 INFO [train.py:1114] (2/4) Epoch 10, batch 2150, loss[loss=0.2019, simple_loss=0.2696, pruned_loss=0.04862, ctc_loss=0.09217, over 19872.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.295, pruned_loss=0.0677, ctc_loss=0.1271, over 3869140.22 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:54:53,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=130949.33333333333, ans=0.125
+2024-08-25 18:55:54,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=131109.33333333334, ans=0.125
+2024-08-25 18:56:32,993 INFO [train.py:1114] (2/4) Epoch 10, batch 2200, loss[loss=0.2906, simple_loss=0.3352, pruned_loss=0.09019, ctc_loss=0.1644, over 19591.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2953, pruned_loss=0.06779, ctc_loss=0.1273, over 3867343.91 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:56:44,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=131216.0, ans=0.5
+2024-08-25 18:56:46,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=131216.0, ans=0.5
+2024-08-25 18:57:09,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=131269.33333333334, ans=0.2
+2024-08-25 18:57:20,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=131322.66666666666, ans=0.05
+2024-08-25 18:57:36,510 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.70 vs. limit=22.5
+2024-08-25 18:57:49,136 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.15 vs. limit=15.0
+2024-08-25 18:57:49,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=131376.0, ans=0.2
+2024-08-25 18:57:51,453 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.773e+02 2.006e+02 2.540e+02 3.937e+02, threshold=4.013e+02, percent-clipped=0.0
+2024-08-25 18:57:59,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=131429.33333333334, ans=0.125
+2024-08-25 18:58:07,805 INFO [train.py:1114] (2/4) Epoch 10, batch 2250, loss[loss=0.2596, simple_loss=0.3145, pruned_loss=0.07363, ctc_loss=0.1434, over 19614.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.295, pruned_loss=0.0675, ctc_loss=0.1268, over 3867782.49 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:58:12,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=131482.66666666666, ans=0.0
+2024-08-25 18:58:56,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=131696.0, ans=0.125
+2024-08-25 18:58:59,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=131696.0, ans=0.125
+2024-08-25 18:59:03,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=131696.0, ans=0.125
+2024-08-25 18:59:05,060 INFO [train.py:1114] (2/4) Epoch 10, batch 2300, loss[loss=0.2235, simple_loss=0.2778, pruned_loss=0.06199, ctc_loss=0.1129, over 19489.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.2947, pruned_loss=0.06789, ctc_loss=0.1276, over 3861314.54 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:59:15,109 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.19 vs. limit=15.0
+2024-08-25 18:59:56,959 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.26 vs. limit=15.0
+2024-08-25 19:00:00,735 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.815e+02 2.310e+02 2.961e+02 4.661e+02, threshold=4.621e+02, percent-clipped=5.0
+2024-08-25 19:00:00,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131962.66666666666, ans=0.1
+2024-08-25 19:00:03,375 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.14 vs. limit=6.0
+2024-08-25 19:00:12,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131962.66666666666, ans=0.1
+2024-08-25 19:00:14,656 INFO [train.py:1114] (2/4) Epoch 10, batch 2350, loss[loss=0.2708, simple_loss=0.3256, pruned_loss=0.07929, ctc_loss=0.1434, over 19675.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.2949, pruned_loss=0.06807, ctc_loss=0.128, over 3863755.80 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:00:18,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=132016.0, ans=0.0
+2024-08-25 19:00:23,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=132016.0, ans=0.035
+2024-08-25 19:00:25,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=132069.33333333334, ans=0.0
+2024-08-25 19:00:27,667 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:00:33,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=132069.33333333334, ans=0.2
+2024-08-25 19:00:39,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=132122.66666666666, ans=10.0
+2024-08-25 19:00:46,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=132122.66666666666, ans=0.025
+2024-08-25 19:00:49,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=132176.0, ans=0.0
+2024-08-25 19:01:13,184 INFO [train.py:1114] (2/4) Epoch 10, batch 2400, loss[loss=0.2424, simple_loss=0.3, pruned_loss=0.06585, ctc_loss=0.1328, over 19302.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2966, pruned_loss=0.0684, ctc_loss=0.1284, over 3858059.15 frames. ], batch size: 71, lr: 1.46e-02, grad_scale: 32.0
+2024-08-25 19:01:19,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=132282.66666666666, ans=0.125
+2024-08-25 19:01:27,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.99 vs. limit=15.0
+2024-08-25 19:01:29,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=132336.0, ans=0.04949747468305833
+2024-08-25 19:01:42,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=132389.33333333334, ans=0.125
+2024-08-25 19:01:47,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=132389.33333333334, ans=0.0
+2024-08-25 19:01:51,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=132442.66666666666, ans=0.125
+2024-08-25 19:01:52,093 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=132442.66666666666, ans=0.125
+2024-08-25 19:02:10,727 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.986e+02 2.279e+02 2.618e+02 8.799e+02, threshold=4.558e+02, percent-clipped=0.0
+2024-08-25 19:02:16,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132496.0, ans=0.1
+2024-08-25 19:02:22,024 INFO [train.py:1114] (2/4) Epoch 10, batch 2450, loss[loss=0.275, simple_loss=0.3092, pruned_loss=0.08885, ctc_loss=0.1578, over 13563.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3015, pruned_loss=0.07226, ctc_loss=0.1359, over 3731945.56 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:02:37,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=132602.66666666666, ans=0.07
+2024-08-25 19:03:06,174 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=2.520e-03
+2024-08-25 19:04:28,602 INFO [train.py:1114] (2/4) Epoch 11, batch 0, loss[loss=0.2587, simple_loss=0.3022, pruned_loss=0.07923, ctc_loss=0.142, over 19782.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3022, pruned_loss=0.07923, ctc_loss=0.142, over 19782.00 frames. ], batch size: 49, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:04:28,602 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 19:04:55,858 INFO [train.py:1146] (2/4) Epoch 11, validation: loss=0.2031, simple_loss=0.2887, pruned_loss=0.04339, ctc_loss=0.0768, over 944034.00 frames.
+2024-08-25 19:04:55,859 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 19:04:58,784 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.89 vs. limit=22.5
+2024-08-25 19:05:11,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=132810.66666666666, ans=0.0
+2024-08-25 19:05:45,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=132917.33333333334, ans=0.0
+2024-08-25 19:05:58,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=132970.66666666666, ans=0.125
+2024-08-25 19:06:01,699 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.76 vs. limit=15.0
+2024-08-25 19:06:02,247 INFO [train.py:1114] (2/4) Epoch 11, batch 50, loss[loss=0.2113, simple_loss=0.2664, pruned_loss=0.05708, ctc_loss=0.1048, over 19726.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.2988, pruned_loss=0.0697, ctc_loss=0.1318, over 844464.80 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:06:03,364 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.050e+02 2.234e+02 2.552e+02 4.359e+02, threshold=4.468e+02, percent-clipped=1.0
+2024-08-25 19:06:06,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=133024.0, ans=0.125
+2024-08-25 19:06:09,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=133024.0, ans=0.125
+2024-08-25 19:06:09,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=133024.0, ans=0.125
+2024-08-25 19:06:48,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=133184.0, ans=0.125
+2024-08-25 19:07:30,912 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=133237.33333333334, ans=0.0
+2024-08-25 19:07:42,580 INFO [train.py:1114] (2/4) Epoch 11, batch 100, loss[loss=0.2159, simple_loss=0.2801, pruned_loss=0.05561, ctc_loss=0.101, over 19710.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2985, pruned_loss=0.06873, ctc_loss=0.1298, over 1499152.40 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:08:03,730 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:08:15,191 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.15 vs. limit=15.0
+2024-08-25 19:08:44,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=133450.66666666666, ans=0.025
+2024-08-25 19:08:51,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=133504.0, ans=0.125
+2024-08-25 19:08:57,315 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.10 vs. limit=12.0
+2024-08-25 19:09:10,095 INFO [train.py:1114] (2/4) Epoch 11, batch 150, loss[loss=0.2527, simple_loss=0.2858, pruned_loss=0.07956, ctc_loss=0.1512, over 19685.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.296, pruned_loss=0.06753, ctc_loss=0.1276, over 2028078.50 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:09:12,933 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.747e+02 2.015e+02 2.344e+02 3.708e+02, threshold=4.031e+02, percent-clipped=0.0
+2024-08-25 19:09:15,082 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=16.23 vs. limit=22.5
+2024-08-25 19:09:17,364 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.43 vs. limit=10.0
+2024-08-25 19:09:18,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=133557.33333333334, ans=0.1
+2024-08-25 19:09:49,846 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=133717.33333333334, ans=0.0
+2024-08-25 19:09:51,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=133717.33333333334, ans=0.2
+2024-08-25 19:10:00,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=133770.66666666666, ans=0.125
+2024-08-25 19:10:21,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=133770.66666666666, ans=0.0
+2024-08-25 19:10:34,572 INFO [train.py:1114] (2/4) Epoch 11, batch 200, loss[loss=0.2429, simple_loss=0.3026, pruned_loss=0.06619, ctc_loss=0.1273, over 18112.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.2946, pruned_loss=0.06664, ctc_loss=0.1259, over 2434418.27 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:10:45,800 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.93 vs. limit=15.0
+2024-08-25 19:11:00,325 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.02 vs. limit=22.5
+2024-08-25 19:11:40,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=133984.0, ans=0.125
+2024-08-25 19:11:45,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=133984.0, ans=0.1
+2024-08-25 19:11:51,079 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.98 vs. limit=15.0
+2024-08-25 19:11:55,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=134037.33333333334, ans=0.125
+2024-08-25 19:12:01,108 INFO [train.py:1114] (2/4) Epoch 11, batch 250, loss[loss=0.2451, simple_loss=0.3032, pruned_loss=0.06876, ctc_loss=0.1238, over 19405.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.295, pruned_loss=0.06701, ctc_loss=0.1264, over 2754380.29 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:12:02,129 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.716e+02 2.023e+02 2.469e+02 5.021e+02, threshold=4.046e+02, percent-clipped=3.0
+2024-08-25 19:12:09,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=134090.66666666666, ans=0.125
+2024-08-25 19:12:22,143 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.36 vs. limit=15.0
+2024-08-25 19:12:32,523 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=134197.33333333334, ans=0.2
+2024-08-25 19:12:40,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=134250.66666666666, ans=0.2
+2024-08-25 19:12:41,242 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=134250.66666666666, ans=0.0
+2024-08-25 19:12:42,484 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=134250.66666666666, ans=0.2
+2024-08-25 19:13:03,635 INFO [train.py:1114] (2/4) Epoch 11, batch 300, loss[loss=0.2722, simple_loss=0.3141, pruned_loss=0.08392, ctc_loss=0.1561, over 19517.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.2937, pruned_loss=0.06635, ctc_loss=0.125, over 2998972.64 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:13:03,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=134357.33333333334, ans=0.2
+2024-08-25 19:13:07,597 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.27 vs. limit=15.0
+2024-08-25 19:13:08,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=134357.33333333334, ans=0.2
+2024-08-25 19:13:20,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=134410.66666666666, ans=0.125
+2024-08-25 19:13:50,762 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.68 vs. limit=15.0
+2024-08-25 19:14:00,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=134570.66666666666, ans=0.025
+2024-08-25 19:14:07,016 INFO [train.py:1114] (2/4) Epoch 11, batch 350, loss[loss=0.2289, simple_loss=0.2755, pruned_loss=0.06651, ctc_loss=0.1232, over 19767.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.294, pruned_loss=0.0665, ctc_loss=0.1253, over 3189034.92 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:14:08,115 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.838e+02 2.258e+02 2.898e+02 4.827e+02, threshold=4.516e+02, percent-clipped=2.0
+2024-08-25 19:14:40,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=134677.33333333334, ans=0.2
+2024-08-25 19:15:12,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=134784.0, ans=0.125
+2024-08-25 19:15:45,724 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=134837.33333333334, ans=0.2
+2024-08-25 19:15:57,919 INFO [train.py:1114] (2/4) Epoch 11, batch 400, loss[loss=0.2225, simple_loss=0.2854, pruned_loss=0.05771, ctc_loss=0.1102, over 19512.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.293, pruned_loss=0.06599, ctc_loss=0.1243, over 3342037.91 frames. ], batch size: 54, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:16:03,679 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:16:12,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=134944.0, ans=0.125
+2024-08-25 19:16:44,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=134997.33333333334, ans=0.0
+2024-08-25 19:16:48,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=135050.66666666666, ans=0.07
+2024-08-25 19:16:49,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=135050.66666666666, ans=0.015
+2024-08-25 19:16:59,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=135050.66666666666, ans=0.125
+2024-08-25 19:17:01,217 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.72 vs. limit=15.0
+2024-08-25 19:17:04,575 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=135104.0, ans=0.125
+2024-08-25 19:17:22,193 INFO [train.py:1114] (2/4) Epoch 11, batch 450, loss[loss=0.2598, simple_loss=0.3149, pruned_loss=0.07479, ctc_loss=0.1375, over 19623.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.2932, pruned_loss=0.0662, ctc_loss=0.1245, over 3450391.50 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:17:31,731 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.841e+02 2.102e+02 2.681e+02 4.407e+02, threshold=4.204e+02, percent-clipped=0.0
+2024-08-25 19:17:34,649 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.83 vs. limit=15.0
+2024-08-25 19:17:41,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=135210.66666666666, ans=0.07
+2024-08-25 19:17:59,764 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=135264.0, ans=0.125
+2024-08-25 19:18:12,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=135317.33333333334, ans=0.125
+2024-08-25 19:18:34,753 INFO [train.py:1114] (2/4) Epoch 11, batch 500, loss[loss=0.2278, simple_loss=0.2931, pruned_loss=0.05968, ctc_loss=0.1082, over 19673.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.292, pruned_loss=0.0657, ctc_loss=0.1235, over 3545705.63 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:18:36,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=135424.0, ans=0.125
+2024-08-25 19:18:53,570 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.50 vs. limit=10.0
+2024-08-25 19:19:34,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=135584.0, ans=0.2
+2024-08-25 19:19:37,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=135584.0, ans=0.125
+2024-08-25 19:19:52,906 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=135637.33333333334, ans=0.0
+2024-08-25 19:20:17,219 INFO [train.py:1114] (2/4) Epoch 11, batch 550, loss[loss=0.2472, simple_loss=0.3028, pruned_loss=0.07013, ctc_loss=0.1282, over 19228.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2922, pruned_loss=0.06584, ctc_loss=0.1238, over 3609068.07 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:20:18,400 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.822e+02 2.069e+02 2.386e+02 4.149e+02, threshold=4.137e+02, percent-clipped=0.0
+2024-08-25 19:20:25,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=135690.66666666666, ans=0.2
+2024-08-25 19:20:36,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=135744.0, ans=0.025
+2024-08-25 19:20:42,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=135744.0, ans=0.125
+2024-08-25 19:20:47,515 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=135744.0, ans=0.125
+2024-08-25 19:20:58,154 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=135797.33333333334, ans=0.0
+2024-08-25 19:21:30,819 INFO [train.py:1114] (2/4) Epoch 11, batch 600, loss[loss=0.2543, simple_loss=0.3083, pruned_loss=0.07296, ctc_loss=0.136, over 19405.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2926, pruned_loss=0.06573, ctc_loss=0.1236, over 3666460.41 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:21:32,445 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=135957.33333333334, ans=0.1
+2024-08-25 19:22:26,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=136010.66666666666, ans=0.125
+2024-08-25 19:22:30,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=136010.66666666666, ans=0.125
+2024-08-25 19:22:34,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=136064.0, ans=0.05
+2024-08-25 19:22:47,003 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=136064.0, ans=0.125
+2024-08-25 19:22:49,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=136064.0, ans=0.125
+2024-08-25 19:23:41,691 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.63 vs. limit=15.0
+2024-08-25 19:23:54,560 INFO [train.py:1114] (2/4) Epoch 11, batch 650, loss[loss=0.2411, simple_loss=0.2943, pruned_loss=0.06968, ctc_loss=0.1214, over 19769.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2917, pruned_loss=0.06522, ctc_loss=0.1225, over 3716733.32 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:23:55,631 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.913e+02 2.094e+02 2.738e+02 4.984e+02, threshold=4.187e+02, percent-clipped=5.0
+2024-08-25 19:24:28,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=136277.33333333334, ans=0.125
+2024-08-25 19:24:50,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=136384.0, ans=0.035
+2024-08-25 19:24:59,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=136384.0, ans=0.125
+2024-08-25 19:25:34,149 INFO [train.py:1114] (2/4) Epoch 11, batch 700, loss[loss=0.2506, simple_loss=0.2923, pruned_loss=0.07472, ctc_loss=0.1486, over 19736.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2913, pruned_loss=0.06465, ctc_loss=0.1216, over 3748573.45 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:25:53,544 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.27 vs. limit=22.5
+2024-08-25 19:26:32,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=136544.0, ans=0.125
+2024-08-25 19:26:42,892 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=136597.33333333334, ans=0.0
+2024-08-25 19:27:40,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=136650.66666666666, ans=0.1
+2024-08-25 19:28:07,172 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.36 vs. limit=22.5
+2024-08-25 19:28:10,010 INFO [train.py:1114] (2/4) Epoch 11, batch 750, loss[loss=0.2302, simple_loss=0.2818, pruned_loss=0.06478, ctc_loss=0.1227, over 19502.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2913, pruned_loss=0.06483, ctc_loss=0.1219, over 3773497.42 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:28:25,946 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.821e+02 2.028e+02 2.720e+02 4.524e+02, threshold=4.057e+02, percent-clipped=2.0
+2024-08-25 19:28:57,728 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=6.355e-03
+2024-08-25 19:28:59,539 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.33 vs. limit=22.5
+2024-08-25 19:29:04,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=136864.0, ans=0.2
+2024-08-25 19:32:08,683 INFO [train.py:1114] (2/4) Epoch 11, batch 800, loss[loss=0.2061, simple_loss=0.2589, pruned_loss=0.05573, ctc_loss=0.1044, over 19800.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2906, pruned_loss=0.06457, ctc_loss=0.1215, over 3795681.07 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:32:13,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=137024.0, ans=0.125
+2024-08-25 19:32:59,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=137077.33333333334, ans=0.0
+2024-08-25 19:33:38,620 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=137237.33333333334, ans=0.015
+2024-08-25 19:33:49,223 INFO [train.py:1114] (2/4) Epoch 11, batch 850, loss[loss=0.2347, simple_loss=0.3036, pruned_loss=0.05975, ctc_loss=0.1158, over 19636.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2901, pruned_loss=0.06433, ctc_loss=0.1211, over 3814999.06 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:33:50,257 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.863e+02 2.065e+02 2.415e+02 4.305e+02, threshold=4.130e+02, percent-clipped=1.0
+2024-08-25 19:33:52,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.37 vs. limit=12.0
+2024-08-25 19:34:29,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=137397.33333333334, ans=0.0
+2024-08-25 19:34:42,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=137450.66666666666, ans=0.025
+2024-08-25 19:35:05,245 INFO [train.py:1114] (2/4) Epoch 11, batch 900, loss[loss=0.2041, simple_loss=0.2568, pruned_loss=0.05526, ctc_loss=0.1021, over 19422.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2913, pruned_loss=0.06542, ctc_loss=0.1229, over 3820246.27 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:35:20,368 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.13 vs. limit=6.0
+2024-08-25 19:35:35,933 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.50 vs. limit=15.0
+2024-08-25 19:36:06,050 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.32 vs. limit=22.5
+2024-08-25 19:36:11,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=137717.33333333334, ans=0.125
+2024-08-25 19:36:12,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=137717.33333333334, ans=0.2
+2024-08-25 19:36:14,302 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.48 vs. limit=15.0
+2024-08-25 19:36:47,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137770.66666666666, ans=0.1
+2024-08-25 19:37:18,563 INFO [train.py:1114] (2/4) Epoch 11, batch 950, loss[loss=0.2466, simple_loss=0.2907, pruned_loss=0.07224, ctc_loss=0.1451, over 19493.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.292, pruned_loss=0.06558, ctc_loss=0.1234, over 3820822.53 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:37:19,703 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.805e+02 2.081e+02 2.536e+02 4.211e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-25 19:37:39,141 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.68 vs. limit=15.0
+2024-08-25 19:38:13,527 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.42 vs. limit=10.0
+2024-08-25 19:38:16,794 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=138037.33333333334, ans=0.125
+2024-08-25 19:38:33,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=138037.33333333334, ans=0.025
+2024-08-25 19:38:48,994 INFO [train.py:1114] (2/4) Epoch 11, batch 1000, loss[loss=0.2291, simple_loss=0.289, pruned_loss=0.06125, ctc_loss=0.1169, over 19845.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2926, pruned_loss=0.06581, ctc_loss=0.1239, over 3816113.47 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:39:37,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=138197.33333333334, ans=0.025
+2024-08-25 19:40:03,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=138304.0, ans=0.125
+2024-08-25 19:40:14,840 INFO [train.py:1114] (2/4) Epoch 11, batch 1050, loss[loss=0.2624, simple_loss=0.3178, pruned_loss=0.07502, ctc_loss=0.1422, over 19836.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2924, pruned_loss=0.06576, ctc_loss=0.1238, over 3822419.89 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:40:16,858 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.874e+02 2.329e+02 2.645e+02 4.211e+02, threshold=4.658e+02, percent-clipped=2.0
+2024-08-25 19:40:27,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=138410.66666666666, ans=0.125
+2024-08-25 19:40:35,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=138410.66666666666, ans=0.0
+2024-08-25 19:40:38,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=138464.0, ans=0.125
+2024-08-25 19:40:44,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=138464.0, ans=0.0
+2024-08-25 19:41:00,979 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.38 vs. limit=15.0
+2024-08-25 19:41:19,771 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.23 vs. limit=15.0
+2024-08-25 19:41:25,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=138624.0, ans=0.025
+2024-08-25 19:41:26,115 INFO [train.py:1114] (2/4) Epoch 11, batch 1100, loss[loss=0.2379, simple_loss=0.2898, pruned_loss=0.06713, ctc_loss=0.1295, over 19600.00 frames. ], tot_loss[loss=0.236, simple_loss=0.2919, pruned_loss=0.06545, ctc_loss=0.1233, over 3829270.27 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:41:26,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=138624.0, ans=0.025
+2024-08-25 19:41:55,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=138624.0, ans=10.0
+2024-08-25 19:42:17,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=138677.33333333334, ans=0.07
+2024-08-25 19:42:19,461 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.55 vs. limit=15.0
+2024-08-25 19:43:01,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=138730.66666666666, ans=0.125
+2024-08-25 19:43:14,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=138784.0, ans=0.125
+2024-08-25 19:43:21,696 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.31 vs. limit=15.0
+2024-08-25 19:43:35,984 INFO [train.py:1114] (2/4) Epoch 11, batch 1150, loss[loss=0.2199, simple_loss=0.2844, pruned_loss=0.05689, ctc_loss=0.104, over 19572.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2916, pruned_loss=0.0652, ctc_loss=0.1229, over 3828256.24 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:43:37,191 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.797e+02 2.039e+02 2.453e+02 4.580e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-25 19:44:21,544 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.57 vs. limit=6.0
+2024-08-25 19:44:34,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=139104.0, ans=0.125
+2024-08-25 19:44:41,854 INFO [train.py:1114] (2/4) Epoch 11, batch 1200, loss[loss=0.2514, simple_loss=0.3083, pruned_loss=0.07009, ctc_loss=0.136, over 19840.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.2931, pruned_loss=0.066, ctc_loss=0.1244, over 3823999.86 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:44:47,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=139157.33333333334, ans=0.125
+2024-08-25 19:44:57,225 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.26 vs. limit=6.0
+2024-08-25 19:45:09,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=139210.66666666666, ans=0.125
+2024-08-25 19:45:39,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=139264.0, ans=0.0
+2024-08-25 19:46:15,671 INFO [train.py:1114] (2/4) Epoch 11, batch 1250, loss[loss=0.2598, simple_loss=0.3129, pruned_loss=0.0757, ctc_loss=0.1384, over 19537.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.2932, pruned_loss=0.06576, ctc_loss=0.1239, over 3842558.34 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:46:16,709 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.769e+02 1.992e+02 2.545e+02 3.633e+02, threshold=3.984e+02, percent-clipped=0.0
+2024-08-25 19:46:17,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=139424.0, ans=0.125
+2024-08-25 19:46:57,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=139530.66666666666, ans=0.125
+2024-08-25 19:47:40,592 INFO [train.py:1114] (2/4) Epoch 11, batch 1300, loss[loss=0.2689, simple_loss=0.3143, pruned_loss=0.08176, ctc_loss=0.1498, over 18926.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.2925, pruned_loss=0.06532, ctc_loss=0.123, over 3846410.19 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:48:31,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=139850.66666666666, ans=0.125
+2024-08-25 19:48:31,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=139850.66666666666, ans=0.0
+2024-08-25 19:48:59,268 INFO [train.py:1114] (2/4) Epoch 11, batch 1350, loss[loss=0.2165, simple_loss=0.285, pruned_loss=0.05329, ctc_loss=0.1036, over 19745.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2917, pruned_loss=0.06495, ctc_loss=0.1222, over 3857898.02 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:49:01,651 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.851e+02 2.124e+02 2.742e+02 4.665e+02, threshold=4.248e+02, percent-clipped=3.0
+2024-08-25 19:49:13,722 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=140010.66666666666, ans=0.125
+2024-08-25 19:49:14,919 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=140010.66666666666, ans=0.0
+2024-08-25 19:49:15,243 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.84 vs. limit=15.0
+2024-08-25 19:49:31,421 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=140064.0, ans=0.125
+2024-08-25 19:49:49,131 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=140117.33333333334, ans=0.1
+2024-08-25 19:50:07,177 INFO [train.py:1114] (2/4) Epoch 11, batch 1400, loss[loss=0.2115, simple_loss=0.2651, pruned_loss=0.0574, ctc_loss=0.108, over 19688.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2916, pruned_loss=0.06511, ctc_loss=0.1222, over 3865014.61 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:50:13,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_na.min_abs, batch_count=140224.0, ans=0.02
+2024-08-25 19:50:13,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=140224.0, ans=0.125
+2024-08-25 19:50:14,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=140224.0, ans=0.0
+2024-08-25 19:50:17,234 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=140224.0, ans=0.2
+2024-08-25 19:50:47,111 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=140277.33333333334, ans=0.025
+2024-08-25 19:50:52,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=140330.66666666666, ans=0.0
+2024-08-25 19:51:02,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=140330.66666666666, ans=0.1
+2024-08-25 19:51:03,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=140330.66666666666, ans=0.2
+2024-08-25 19:51:14,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=140330.66666666666, ans=0.1
+2024-08-25 19:51:38,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=140437.33333333334, ans=0.05
+2024-08-25 19:51:39,610 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=140437.33333333334, ans=0.125
+2024-08-25 19:51:42,658 INFO [train.py:1114] (2/4) Epoch 11, batch 1450, loss[loss=0.224, simple_loss=0.2923, pruned_loss=0.05619, ctc_loss=0.1083, over 19682.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.2923, pruned_loss=0.06521, ctc_loss=0.1228, over 3862777.01 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:51:44,219 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=140490.66666666666, ans=0.1
+2024-08-25 19:51:45,015 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.813e+02 2.052e+02 2.523e+02 4.896e+02, threshold=4.103e+02, percent-clipped=2.0
+2024-08-25 19:51:47,674 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.41 vs. limit=12.0
+2024-08-25 19:51:58,524 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=140544.0, ans=22.5
+2024-08-25 19:52:06,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=140544.0, ans=0.1
+2024-08-25 19:52:07,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=140544.0, ans=0.025
+2024-08-25 19:52:07,978 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.89 vs. limit=15.0
+2024-08-25 19:52:15,121 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=140597.33333333334, ans=0.125
+2024-08-25 19:52:26,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=140597.33333333334, ans=0.025
+2024-08-25 19:52:51,964 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=140650.66666666666, ans=0.125
+2024-08-25 19:53:19,915 INFO [train.py:1114] (2/4) Epoch 11, batch 1500, loss[loss=0.2271, simple_loss=0.3035, pruned_loss=0.05378, ctc_loss=0.1077, over 19580.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2927, pruned_loss=0.06523, ctc_loss=0.1229, over 3861628.27 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:53:24,375 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140757.33333333334, ans=0.125
+2024-08-25 19:53:24,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=140757.33333333334, ans=0.125
+2024-08-25 19:53:27,523 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.26 vs. limit=22.5
+2024-08-25 19:53:49,141 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:53:54,281 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=140810.66666666666, ans=0.0
+2024-08-25 19:54:15,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=140864.0, ans=0.125
+2024-08-25 19:55:07,099 INFO [train.py:1114] (2/4) Epoch 11, batch 1550, loss[loss=0.2759, simple_loss=0.3236, pruned_loss=0.08434, ctc_loss=0.1488, over 19619.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.2929, pruned_loss=0.06585, ctc_loss=0.1242, over 3845590.94 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:55:10,749 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 1.804e+02 2.014e+02 2.422e+02 4.168e+02, threshold=4.028e+02, percent-clipped=1.0
+2024-08-25 19:55:37,454 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=141024.0, ans=0.0
+2024-08-25 19:56:10,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=141130.66666666666, ans=0.125
+2024-08-25 19:56:59,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=141184.0, ans=0.2
+2024-08-25 19:57:02,243 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.59 vs. limit=8.0
+2024-08-25 19:57:12,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=141237.33333333334, ans=0.125
+2024-08-25 19:57:19,250 INFO [train.py:1114] (2/4) Epoch 11, batch 1600, loss[loss=0.2602, simple_loss=0.3239, pruned_loss=0.07158, ctc_loss=0.1336, over 19841.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2927, pruned_loss=0.06571, ctc_loss=0.1241, over 3834471.70 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:58:10,249 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.24 vs. limit=15.0
+2024-08-25 19:58:15,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=141397.33333333334, ans=0.04949747468305833
+2024-08-25 19:58:41,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=141504.0, ans=0.0
+2024-08-25 19:59:26,357 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=141557.33333333334, ans=0.125
+2024-08-25 19:59:27,422 INFO [train.py:1114] (2/4) Epoch 11, batch 1650, loss[loss=0.228, simple_loss=0.2901, pruned_loss=0.06027, ctc_loss=0.1133, over 19663.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.2923, pruned_loss=0.06556, ctc_loss=0.1239, over 3830731.41 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:59:29,884 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.768e+02 1.990e+02 2.303e+02 4.438e+02, threshold=3.979e+02, percent-clipped=2.0
+2024-08-25 19:59:43,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=141557.33333333334, ans=0.09899494936611666
+2024-08-25 19:59:45,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=141557.33333333334, ans=0.0
+2024-08-25 20:00:00,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=141610.66666666666, ans=0.125
+2024-08-25 20:00:00,596 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=141610.66666666666, ans=0.0
+2024-08-25 20:00:30,963 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff3.min_abs, batch_count=141717.33333333334, ans=0.2
+2024-08-25 20:00:33,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.79 vs. limit=15.0
+2024-08-25 20:00:41,992 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=141770.66666666666, ans=0.125
+2024-08-25 20:01:05,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=141770.66666666666, ans=0.0
+2024-08-25 20:01:12,651 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.82 vs. limit=15.0
+2024-08-25 20:01:17,961 INFO [train.py:1114] (2/4) Epoch 11, batch 1700, loss[loss=0.1996, simple_loss=0.2583, pruned_loss=0.05101, ctc_loss=0.09732, over 19670.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2917, pruned_loss=0.0652, ctc_loss=0.1231, over 3845271.98 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:01:38,878 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.29 vs. limit=15.0
+2024-08-25 20:01:46,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=141930.66666666666, ans=0.125
+2024-08-25 20:01:55,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=141984.0, ans=0.1
+2024-08-25 20:02:16,948 INFO [train.py:1114] (2/4) Epoch 11, batch 1750, loss[loss=0.1829, simple_loss=0.2441, pruned_loss=0.04398, ctc_loss=0.08453, over 19655.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.2915, pruned_loss=0.06491, ctc_loss=0.1224, over 3850929.04 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:02:20,525 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.814e+02 2.107e+02 2.366e+02 3.890e+02, threshold=4.214e+02, percent-clipped=0.0
+2024-08-25 20:02:40,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=142090.66666666666, ans=0.0
+2024-08-25 20:02:46,731 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.82 vs. limit=15.0
+2024-08-25 20:03:06,473 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=142144.0, ans=0.125
+2024-08-25 20:03:06,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=142144.0, ans=0.0
+2024-08-25 20:03:17,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=142197.33333333334, ans=0.0
+2024-08-25 20:03:28,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=142250.66666666666, ans=0.0
+2024-08-25 20:03:58,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=142250.66666666666, ans=0.125
+2024-08-25 20:04:02,624 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=142304.0, ans=0.0
+2024-08-25 20:04:24,924 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=142304.0, ans=0.125
+2024-08-25 20:04:27,053 INFO [train.py:1114] (2/4) Epoch 11, batch 1800, loss[loss=0.2466, simple_loss=0.3039, pruned_loss=0.0687, ctc_loss=0.1295, over 19597.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2914, pruned_loss=0.06478, ctc_loss=0.1222, over 3852758.36 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:05:03,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=142410.66666666666, ans=0.125
+2024-08-25 20:05:23,563 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.00 vs. limit=15.0
+2024-08-25 20:05:30,152 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.35 vs. limit=15.0
+2024-08-25 20:05:32,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=142517.33333333334, ans=0.125
+2024-08-25 20:05:41,131 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.64 vs. limit=5.0
+2024-08-25 20:06:15,204 INFO [train.py:1114] (2/4) Epoch 11, batch 1850, loss[loss=0.2118, simple_loss=0.2922, pruned_loss=0.04666, ctc_loss=0.09512, over 19561.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2907, pruned_loss=0.06419, ctc_loss=0.1211, over 3856960.27 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:06:18,513 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.849e+02 2.256e+02 2.966e+02 5.642e+02, threshold=4.511e+02, percent-clipped=6.0
+2024-08-25 20:06:25,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=142677.33333333334, ans=0.125
+2024-08-25 20:07:20,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=142784.0, ans=0.035
+2024-08-25 20:07:38,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=142837.33333333334, ans=0.07
+2024-08-25 20:07:51,845 INFO [train.py:1114] (2/4) Epoch 11, batch 1900, loss[loss=0.2654, simple_loss=0.3237, pruned_loss=0.07497, ctc_loss=0.143, over 19655.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2914, pruned_loss=0.06429, ctc_loss=0.121, over 3861403.51 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:07:56,851 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.95 vs. limit=22.5
+2024-08-25 20:12:09,764 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=142997.33333333334, ans=0.125
+2024-08-25 20:37:10,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=143050.66666666666, ans=0.125
+2024-08-25 20:37:10,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=143050.66666666666, ans=0.025
+2024-08-25 20:39:08,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=143104.0, ans=0.0
+2024-08-25 20:55:30,027 INFO [train.py:1114] (2/4) Epoch 11, batch 1950, loss[loss=0.2467, simple_loss=0.2992, pruned_loss=0.07177, ctc_loss=0.1268, over 19592.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2925, pruned_loss=0.06473, ctc_loss=0.1217, over 3870394.31 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 21:03:39,808 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.850e+02 2.123e+02 2.695e+02 5.282e+02, threshold=4.246e+02, percent-clipped=2.0
+2024-08-25 21:15:08,854 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.88 vs. limit=22.5
+2024-08-25 21:22:29,344 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.82 vs. limit=15.0
+2024-08-25 21:28:58,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=143264.0, ans=0.125
+2024-08-25 21:32:13,539 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=143317.33333333334, ans=0.2
+2024-08-25 21:32:13,900 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.64 vs. limit=15.0
+2024-08-25 21:38:45,259 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-25 21:44:03,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=143370.66666666666, ans=0.0
+2024-08-25 21:46:38,196 INFO [train.py:1114] (2/4) Epoch 11, batch 2000, loss[loss=0.2161, simple_loss=0.2609, pruned_loss=0.06247, ctc_loss=0.1157, over 19610.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.2931, pruned_loss=0.06526, ctc_loss=0.1227, over 3855528.82 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 21:57:15,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=143424.0, ans=0.1
+2024-08-25 22:03:02,216 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.53 vs. limit=10.0
+2024-08-25 22:06:23,651 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.25 vs. limit=12.0
+2024-08-25 22:09:03,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=143584.0, ans=0.1
+2024-08-25 22:15:14,852 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.19 vs. limit=22.5
+2024-08-25 22:19:42,827 INFO [train.py:1114] (2/4) Epoch 11, batch 2050, loss[loss=0.2174, simple_loss=0.2687, pruned_loss=0.06091, ctc_loss=0.1107, over 19726.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2915, pruned_loss=0.06478, ctc_loss=0.1216, over 3851246.06 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:20:13,483 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.838e+02 2.216e+02 2.724e+02 4.008e+02, threshold=4.432e+02, percent-clipped=0.0
+2024-08-25 22:27:00,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=143797.33333333334, ans=0.125
+2024-08-25 22:28:46,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=143850.66666666666, ans=0.125
+2024-08-25 22:32:23,629 INFO [train.py:1114] (2/4) Epoch 11, batch 2100, loss[loss=0.2403, simple_loss=0.2968, pruned_loss=0.0666, ctc_loss=0.1264, over 19773.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2912, pruned_loss=0.06459, ctc_loss=0.1213, over 3858179.99 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:33:48,814 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.24 vs. limit=22.5
+2024-08-25 22:34:13,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=143957.33333333334, ans=0.0
+2024-08-25 22:34:13,809 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 22:34:13,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=143957.33333333334, ans=0.2
+2024-08-25 22:34:32,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=143957.33333333334, ans=0.2
+2024-08-25 22:34:32,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=143957.33333333334, ans=0.1
+2024-08-25 22:35:39,441 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.75 vs. limit=15.0
+2024-08-25 22:35:47,823 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.22 vs. limit=15.0
+2024-08-25 22:36:29,783 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=24.45 vs. limit=22.5
+2024-08-25 22:36:59,923 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=144117.33333333334, ans=0.0
+2024-08-25 22:37:44,586 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=144117.33333333334, ans=0.2
+2024-08-25 22:37:46,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=144170.66666666666, ans=0.04949747468305833
+2024-08-25 22:39:07,578 INFO [train.py:1114] (2/4) Epoch 11, batch 2150, loss[loss=0.2315, simple_loss=0.292, pruned_loss=0.0617, ctc_loss=0.1191, over 19856.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2901, pruned_loss=0.06378, ctc_loss=0.1199, over 3869964.16 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:39:51,935 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 1.804e+02 2.068e+02 2.942e+02 5.639e+02, threshold=4.136e+02, percent-clipped=4.0
+2024-08-25 22:40:53,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=144277.33333333334, ans=0.125
+2024-08-25 22:42:57,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=144384.0, ans=0.1
+2024-08-25 22:42:57,787 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.67 vs. limit=15.0
+2024-08-25 22:43:53,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=144437.33333333334, ans=0.025
+2024-08-25 22:44:02,560 INFO [train.py:1114] (2/4) Epoch 11, batch 2200, loss[loss=0.2492, simple_loss=0.3004, pruned_loss=0.07214, ctc_loss=0.1343, over 19595.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2901, pruned_loss=0.06379, ctc_loss=0.1199, over 3868361.65 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:44:52,357 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.47 vs. limit=22.5
+2024-08-25 22:46:43,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=144597.33333333334, ans=0.125
+2024-08-25 22:47:10,877 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.51 vs. limit=15.0
+2024-08-25 22:47:21,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=144650.66666666666, ans=0.125
+2024-08-25 22:48:34,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=144704.0, ans=0.1
+2024-08-25 22:48:34,577 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.23 vs. limit=15.0
+2024-08-25 22:48:35,130 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 22:48:51,281 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=144704.0, ans=0.125
+2024-08-25 22:49:03,012 INFO [train.py:1114] (2/4) Epoch 11, batch 2250, loss[loss=0.2271, simple_loss=0.2952, pruned_loss=0.0566, ctc_loss=0.1142, over 19626.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2901, pruned_loss=0.0635, ctc_loss=0.1193, over 3867766.00 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:49:04,121 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=144757.33333333334, ans=0.2
+2024-08-25 22:49:05,163 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.42 vs. limit=10.0
+2024-08-25 22:49:09,619 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.818e+02 2.110e+02 2.782e+02 6.628e+02, threshold=4.220e+02, percent-clipped=3.0
+2024-08-25 22:49:34,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=144810.66666666666, ans=0.125
+2024-08-25 22:50:08,869 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.36 vs. limit=15.0
+2024-08-25 22:50:35,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=144970.66666666666, ans=0.125
+2024-08-25 22:50:46,943 INFO [train.py:1114] (2/4) Epoch 11, batch 2300, loss[loss=0.2071, simple_loss=0.2727, pruned_loss=0.05177, ctc_loss=0.09501, over 19502.00 frames. ], tot_loss[loss=0.233, simple_loss=0.2899, pruned_loss=0.06397, ctc_loss=0.1202, over 3860445.86 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:51:07,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=145024.0, ans=0.1
+2024-08-25 22:51:09,884 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.52 vs. limit=10.0
+2024-08-25 22:51:17,745 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.77 vs. limit=10.0
+2024-08-25 22:51:24,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=145077.33333333334, ans=0.125
+2024-08-25 22:51:38,927 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145130.66666666666, ans=0.1
+2024-08-25 22:51:40,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=145130.66666666666, ans=0.125
+2024-08-25 22:52:10,079 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.30 vs. limit=15.0
+2024-08-25 22:52:34,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=145184.0, ans=0.125
+2024-08-25 22:52:55,275 INFO [train.py:1114] (2/4) Epoch 11, batch 2350, loss[loss=0.2579, simple_loss=0.3097, pruned_loss=0.07451, ctc_loss=0.1425, over 19690.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2902, pruned_loss=0.06413, ctc_loss=0.1205, over 3862862.35 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:53:01,234 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.317e+02 1.788e+02 2.141e+02 2.380e+02 3.835e+02, threshold=4.282e+02, percent-clipped=0.0
+2024-08-25 22:53:05,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=145290.66666666666, ans=0.2
+2024-08-25 22:53:08,125 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=145290.66666666666, ans=0.05
+2024-08-25 22:53:52,573 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=145450.66666666666, ans=0.0
+2024-08-25 22:53:55,732 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=145450.66666666666, ans=0.125
+2024-08-25 22:54:01,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=145450.66666666666, ans=0.0
+2024-08-25 22:54:18,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=145504.0, ans=22.5
+2024-08-25 22:54:25,997 INFO [train.py:1114] (2/4) Epoch 11, batch 2400, loss[loss=0.2677, simple_loss=0.3155, pruned_loss=0.07804, ctc_loss=0.1595, over 19284.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.293, pruned_loss=0.06521, ctc_loss=0.1224, over 3857542.92 frames. ], batch size: 71, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:54:28,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=145557.33333333334, ans=0.125
+2024-08-25 22:55:10,173 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=145717.33333333334, ans=0.0
+2024-08-25 22:55:17,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=145717.33333333334, ans=0.025
+2024-08-25 22:55:20,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=145717.33333333334, ans=0.0
+2024-08-25 22:55:36,681 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.16 vs. limit=15.0
+2024-08-25 22:55:44,080 INFO [train.py:1114] (2/4) Epoch 11, batch 2450, loss[loss=0.3178, simple_loss=0.3364, pruned_loss=0.1084, ctc_loss=0.2058, over 12991.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2976, pruned_loss=0.06893, ctc_loss=0.1294, over 3729308.10 frames. ], batch size: 142, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:56:00,766 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.910e+02 2.208e+02 2.594e+02 5.356e+02, threshold=4.415e+02, percent-clipped=1.0
+2024-08-25 22:56:03,435 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.96 vs. limit=15.0
+2024-08-25 22:56:27,306 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=145877.33333333334, ans=0.2
+2024-08-25 22:56:33,625 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.95 vs. limit=15.0
+2024-08-25 22:56:51,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=145930.66666666666, ans=0.125
+2024-08-25 22:57:16,047 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.85 vs. limit=15.0
+2024-08-25 22:57:26,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=145984.0, ans=0.0
+2024-08-25 22:58:44,107 INFO [train.py:1114] (2/4) Epoch 12, batch 0, loss[loss=0.2337, simple_loss=0.2767, pruned_loss=0.06915, ctc_loss=0.1313, over 19829.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.2767, pruned_loss=0.06915, ctc_loss=0.1313, over 19829.00 frames. ], batch size: 49, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 22:58:44,108 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 23:00:02,926 INFO [train.py:1146] (2/4) Epoch 12, validation: loss=0.1972, simple_loss=0.2841, pruned_loss=0.04086, ctc_loss=0.07109, over 944034.00 frames.
+2024-08-25 23:00:02,926 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-25 23:00:03,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=146032.0, ans=0.1
+2024-08-25 23:00:06,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=146032.0, ans=0.125
+2024-08-25 23:00:15,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=146032.0, ans=0.125
+2024-08-25 23:00:20,776 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.00 vs. limit=12.0
+2024-08-25 23:00:23,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=146085.33333333334, ans=0.125
+2024-08-25 23:00:33,487 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=146138.66666666666, ans=0.125
+2024-08-25 23:01:07,247 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=146298.66666666666, ans=0.1
+2024-08-25 23:01:08,429 INFO [train.py:1114] (2/4) Epoch 12, batch 50, loss[loss=0.2179, simple_loss=0.2717, pruned_loss=0.05926, ctc_loss=0.1139, over 19714.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.2948, pruned_loss=0.0655, ctc_loss=0.1244, over 844495.10 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:01:17,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=146298.66666666666, ans=0.125
+2024-08-25 23:01:22,912 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=146352.0, ans=0.07
+2024-08-25 23:01:25,992 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=146352.0, ans=10.0
+2024-08-25 23:01:26,712 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=146352.0, ans=0.125
+2024-08-25 23:01:27,719 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.810e+02 2.073e+02 2.436e+02 4.057e+02, threshold=4.147e+02, percent-clipped=0.0
+2024-08-25 23:01:42,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=146405.33333333334, ans=0.1
+2024-08-25 23:01:43,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=146405.33333333334, ans=0.125
+2024-08-25 23:02:22,993 INFO [train.py:1114] (2/4) Epoch 12, batch 100, loss[loss=0.2072, simple_loss=0.273, pruned_loss=0.05184, ctc_loss=0.09412, over 19727.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2965, pruned_loss=0.06664, ctc_loss=0.1259, over 1498946.70 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:02:34,411 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.04 vs. limit=15.0
+2024-08-25 23:02:36,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=146565.33333333334, ans=0.04949747468305833
+2024-08-25 23:02:54,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=146618.66666666666, ans=0.05
+2024-08-25 23:03:23,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=146725.33333333334, ans=0.0
+2024-08-25 23:03:23,477 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=146725.33333333334, ans=0.125
+2024-08-25 23:03:39,247 INFO [train.py:1114] (2/4) Epoch 12, batch 150, loss[loss=0.235, simple_loss=0.2798, pruned_loss=0.06925, ctc_loss=0.1295, over 19733.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.2921, pruned_loss=0.06427, ctc_loss=0.1216, over 2027009.71 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:03:57,609 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.23 vs. limit=15.0
+2024-08-25 23:04:04,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=146885.33333333334, ans=0.125
+2024-08-25 23:04:09,868 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.659e+02 1.880e+02 2.314e+02 3.650e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-25 23:04:38,999 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=146938.66666666666, ans=0.0
+2024-08-25 23:04:55,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=147045.33333333334, ans=0.125
+2024-08-25 23:05:07,058 INFO [train.py:1114] (2/4) Epoch 12, batch 200, loss[loss=0.2655, simple_loss=0.3094, pruned_loss=0.07936, ctc_loss=0.1574, over 18276.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2896, pruned_loss=0.06325, ctc_loss=0.1196, over 2434759.55 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:06:49,930 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.87 vs. limit=22.5
+2024-08-25 23:07:01,821 INFO [train.py:1114] (2/4) Epoch 12, batch 250, loss[loss=0.2413, simple_loss=0.3012, pruned_loss=0.06589, ctc_loss=0.1238, over 19430.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.29, pruned_loss=0.06332, ctc_loss=0.1194, over 2755371.50 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:07:22,628 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 1.825e+02 2.154e+02 2.499e+02 3.884e+02, threshold=4.307e+02, percent-clipped=2.0
+2024-08-25 23:07:48,003 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=147525.33333333334, ans=0.125
+2024-08-25 23:07:52,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=147525.33333333334, ans=0.125
+2024-08-25 23:08:05,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=147578.66666666666, ans=0.0
+2024-08-25 23:08:13,959 INFO [train.py:1114] (2/4) Epoch 12, batch 300, loss[loss=0.2744, simple_loss=0.3165, pruned_loss=0.08425, ctc_loss=0.1594, over 19497.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2895, pruned_loss=0.06323, ctc_loss=0.1193, over 3001501.23 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:08:19,946 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=147632.0, ans=0.125
+2024-08-25 23:08:33,821 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:08:34,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=147685.33333333334, ans=0.125
+2024-08-25 23:08:51,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=147792.0, ans=0.125
+2024-08-25 23:08:59,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=147792.0, ans=0.0
+2024-08-25 23:09:03,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=147845.33333333334, ans=0.0
+2024-08-25 23:09:11,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=147845.33333333334, ans=0.0
+2024-08-25 23:09:17,494 INFO [train.py:1114] (2/4) Epoch 12, batch 350, loss[loss=0.2161, simple_loss=0.2762, pruned_loss=0.05651, ctc_loss=0.1074, over 19756.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2901, pruned_loss=0.06326, ctc_loss=0.1193, over 3191725.20 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:09:25,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=147898.66666666666, ans=0.125
+2024-08-25 23:09:27,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=147952.0, ans=0.125
+2024-08-25 23:09:36,446 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.749e+02 2.047e+02 2.740e+02 4.170e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 23:09:39,933 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=147952.0, ans=0.1
+2024-08-25 23:09:44,894 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:09:56,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=148058.66666666666, ans=0.04949747468305833
+2024-08-25 23:10:10,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=148058.66666666666, ans=0.1
+2024-08-25 23:10:21,656 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.89 vs. limit=15.0
+2024-08-25 23:10:25,921 INFO [train.py:1114] (2/4) Epoch 12, batch 400, loss[loss=0.2202, simple_loss=0.2847, pruned_loss=0.05535, ctc_loss=0.1126, over 19493.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.2891, pruned_loss=0.06258, ctc_loss=0.1179, over 3343684.26 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:10:44,701 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:11:30,671 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=148378.66666666666, ans=0.1
+2024-08-25 23:12:05,900 INFO [train.py:1114] (2/4) Epoch 12, batch 450, loss[loss=0.221, simple_loss=0.2884, pruned_loss=0.05611, ctc_loss=0.1035, over 19628.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2895, pruned_loss=0.06309, ctc_loss=0.119, over 3452496.09 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:12:28,367 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.830e+02 2.201e+02 2.765e+02 4.484e+02, threshold=4.403e+02, percent-clipped=1.0
+2024-08-25 23:12:37,599 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:12:54,580 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:13:21,168 INFO [train.py:1114] (2/4) Epoch 12, batch 500, loss[loss=0.2282, simple_loss=0.2882, pruned_loss=0.06214, ctc_loss=0.1096, over 19702.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2879, pruned_loss=0.06222, ctc_loss=0.1172, over 3547344.25 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:13:23,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=148698.66666666666, ans=0.125
+2024-08-25 23:13:33,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=148698.66666666666, ans=0.05
+2024-08-25 23:13:58,129 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=148805.33333333334, ans=0.025
+2024-08-25 23:14:02,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=148805.33333333334, ans=0.0
+2024-08-25 23:14:06,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=148805.33333333334, ans=0.125
+2024-08-25 23:14:31,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=148858.66666666666, ans=0.0
+2024-08-25 23:14:43,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148912.0, ans=0.1
+2024-08-25 23:14:53,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=148912.0, ans=0.1
+2024-08-25 23:14:58,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=148965.33333333334, ans=0.0
+2024-08-25 23:14:58,313 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=148965.33333333334, ans=0.0
+2024-08-25 23:14:59,255 INFO [train.py:1114] (2/4) Epoch 12, batch 550, loss[loss=0.2535, simple_loss=0.3118, pruned_loss=0.06976, ctc_loss=0.1391, over 19297.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2883, pruned_loss=0.0623, ctc_loss=0.1174, over 3608911.18 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:15:03,293 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.25 vs. limit=15.0
+2024-08-25 23:15:17,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=148965.33333333334, ans=0.04949747468305833
+2024-08-25 23:15:41,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=149018.66666666666, ans=0.125
+2024-08-25 23:15:42,257 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.692e+02 2.049e+02 2.499e+02 4.022e+02, threshold=4.098e+02, percent-clipped=0.0
+2024-08-25 23:16:10,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=149125.33333333334, ans=0.125
+2024-08-25 23:16:25,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149125.33333333334, ans=0.1
+2024-08-25 23:16:54,711 INFO [train.py:1114] (2/4) Epoch 12, batch 600, loss[loss=0.2629, simple_loss=0.3103, pruned_loss=0.07813, ctc_loss=0.1478, over 19397.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.289, pruned_loss=0.06248, ctc_loss=0.1179, over 3665652.38 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:19:18,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=149445.33333333334, ans=0.125
+2024-08-25 23:19:22,637 INFO [train.py:1114] (2/4) Epoch 12, batch 650, loss[loss=0.2137, simple_loss=0.2851, pruned_loss=0.05204, ctc_loss=0.09533, over 19779.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2886, pruned_loss=0.06235, ctc_loss=0.1175, over 3716151.89 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:19:36,313 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=149498.66666666666, ans=0.125
+2024-08-25 23:19:42,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=149552.0, ans=0.0
+2024-08-25 23:19:48,496 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.911e+02 2.346e+02 2.911e+02 5.072e+02, threshold=4.691e+02, percent-clipped=6.0
+2024-08-25 23:20:32,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=149658.66666666666, ans=0.125
+2024-08-25 23:20:34,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=149658.66666666666, ans=0.125
+2024-08-25 23:20:36,691 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=11.30 vs. limit=15.0
+2024-08-25 23:20:41,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=149712.0, ans=0.125
+2024-08-25 23:20:47,435 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=149712.0, ans=0.0
+2024-08-25 23:20:49,425 INFO [train.py:1114] (2/4) Epoch 12, batch 700, loss[loss=0.2433, simple_loss=0.2915, pruned_loss=0.07119, ctc_loss=0.1314, over 19718.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.2891, pruned_loss=0.06267, ctc_loss=0.1181, over 3749713.72 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:21:19,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=149872.0, ans=0.2
+2024-08-25 23:21:27,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=149925.33333333334, ans=0.125
+2024-08-25 23:21:30,381 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.33 vs. limit=15.0
+2024-08-25 23:21:51,378 INFO [train.py:1114] (2/4) Epoch 12, batch 750, loss[loss=0.2312, simple_loss=0.2895, pruned_loss=0.06259, ctc_loss=0.1193, over 19501.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2884, pruned_loss=0.06228, ctc_loss=0.1171, over 3775081.97 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:22:20,742 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.992e+02 2.563e+02 3.460e+02 5.252e+02, threshold=5.125e+02, percent-clipped=3.0
+2024-08-25 23:22:23,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=150085.33333333334, ans=0.125
+2024-08-25 23:22:42,256 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.81 vs. limit=15.0
+2024-08-25 23:22:52,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=150192.0, ans=0.0
+2024-08-25 23:23:10,644 INFO [train.py:1114] (2/4) Epoch 12, batch 800, loss[loss=0.2028, simple_loss=0.2613, pruned_loss=0.05325, ctc_loss=0.09463, over 19432.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2885, pruned_loss=0.06237, ctc_loss=0.1175, over 3796040.55 frames. ], batch size: 48, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:23:22,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=150352.0, ans=0.125
+2024-08-25 23:23:30,131 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=150352.0, ans=0.2
+2024-08-25 23:23:43,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=150458.66666666666, ans=0.125
+2024-08-25 23:24:01,026 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.56 vs. limit=10.0
+2024-08-25 23:24:06,616 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.98 vs. limit=6.0
+2024-08-25 23:24:07,604 INFO [train.py:1114] (2/4) Epoch 12, batch 850, loss[loss=0.2331, simple_loss=0.2965, pruned_loss=0.06032, ctc_loss=0.1229, over 19668.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.288, pruned_loss=0.06184, ctc_loss=0.1167, over 3814711.63 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:24:15,595 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=150565.33333333334, ans=0.125
+2024-08-25 23:24:30,654 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.376e+02 1.732e+02 2.149e+02 2.756e+02 4.869e+02, threshold=4.297e+02, percent-clipped=0.0
+2024-08-25 23:24:50,183 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=150672.0, ans=0.125
+2024-08-25 23:25:06,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=150725.33333333334, ans=0.125
+2024-08-25 23:25:07,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_na.min_abs, batch_count=150725.33333333334, ans=0.02
+2024-08-25 23:25:09,932 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=150725.33333333334, ans=0.0
+2024-08-25 23:25:23,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=150778.66666666666, ans=0.0
+2024-08-25 23:25:25,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=150778.66666666666, ans=0.0
+2024-08-25 23:25:27,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=150778.66666666666, ans=0.125
+2024-08-25 23:25:39,230 INFO [train.py:1114] (2/4) Epoch 12, batch 900, loss[loss=0.2102, simple_loss=0.2666, pruned_loss=0.05507, ctc_loss=0.1092, over 19437.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2883, pruned_loss=0.06238, ctc_loss=0.1175, over 3818187.22 frames. ], batch size: 48, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:27:22,010 INFO [train.py:1114] (2/4) Epoch 12, batch 950, loss[loss=0.2505, simple_loss=0.2954, pruned_loss=0.07499, ctc_loss=0.1388, over 19500.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.2891, pruned_loss=0.06297, ctc_loss=0.1186, over 3819957.49 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:27:47,795 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 1.727e+02 2.047e+02 2.468e+02 3.873e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-08-25 23:28:33,745 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=151258.66666666666, ans=0.125
+2024-08-25 23:28:42,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=151312.0, ans=0.0
+2024-08-25 23:28:46,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=151312.0, ans=0.125
+2024-08-25 23:28:55,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=151365.33333333334, ans=0.125
+2024-08-25 23:28:55,275 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.58 vs. limit=22.5
+2024-08-25 23:28:55,980 INFO [train.py:1114] (2/4) Epoch 12, batch 1000, loss[loss=0.2046, simple_loss=0.2714, pruned_loss=0.05042, ctc_loss=0.09228, over 19826.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.2902, pruned_loss=0.06358, ctc_loss=0.1198, over 3816165.31 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:30:28,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=151472.0, ans=0.2
+2024-08-25 23:30:34,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.97 vs. limit=22.5
+2024-08-25 23:30:39,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=151525.33333333334, ans=0.125
+2024-08-25 23:30:48,326 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=151578.66666666666, ans=0.125
+2024-08-25 23:30:55,440 INFO [train.py:1114] (2/4) Epoch 12, batch 1050, loss[loss=0.2489, simple_loss=0.303, pruned_loss=0.07082, ctc_loss=0.133, over 19819.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2891, pruned_loss=0.06334, ctc_loss=0.1191, over 3822284.84 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:31:07,473 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=151685.33333333334, ans=0.0
+2024-08-25 23:31:14,270 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.748e+02 2.222e+02 2.883e+02 4.562e+02, threshold=4.445e+02, percent-clipped=3.0
+2024-08-25 23:31:32,725 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.69 vs. limit=15.0
+2024-08-25 23:31:35,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=151792.0, ans=0.0
+2024-08-25 23:31:45,423 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=151845.33333333334, ans=0.125
+2024-08-25 23:32:07,886 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=151845.33333333334, ans=0.2
+2024-08-25 23:32:14,328 INFO [train.py:1114] (2/4) Epoch 12, batch 1100, loss[loss=0.2353, simple_loss=0.2926, pruned_loss=0.06348, ctc_loss=0.1276, over 19567.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2885, pruned_loss=0.06268, ctc_loss=0.1179, over 3830763.85 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:32:14,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=151898.66666666666, ans=0.0
+2024-08-25 23:32:38,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=151952.0, ans=0.0
+2024-08-25 23:33:00,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=152058.66666666666, ans=0.0
+2024-08-25 23:33:32,430 INFO [train.py:1114] (2/4) Epoch 12, batch 1150, loss[loss=0.2246, simple_loss=0.2814, pruned_loss=0.06008, ctc_loss=0.1193, over 19596.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2882, pruned_loss=0.06257, ctc_loss=0.118, over 3828319.46 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:33:32,731 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=152165.33333333334, ans=0.07
+2024-08-25 23:33:35,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=152165.33333333334, ans=0.09899494936611666
+2024-08-25 23:34:06,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=152218.66666666666, ans=0.125
+2024-08-25 23:34:07,237 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.763e+02 2.002e+02 2.335e+02 5.298e+02, threshold=4.005e+02, percent-clipped=1.0
+2024-08-25 23:34:08,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=152218.66666666666, ans=0.125
+2024-08-25 23:34:19,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=152272.0, ans=0.125
+2024-08-25 23:34:23,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=152272.0, ans=0.0
+2024-08-25 23:34:28,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=152325.33333333334, ans=0.2
+2024-08-25 23:34:49,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=152378.66666666666, ans=0.1
+2024-08-25 23:34:58,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=152432.0, ans=0.025
+2024-08-25 23:34:59,034 INFO [train.py:1114] (2/4) Epoch 12, batch 1200, loss[loss=0.2615, simple_loss=0.3184, pruned_loss=0.07369, ctc_loss=0.1431, over 19823.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.2897, pruned_loss=0.06338, ctc_loss=0.1201, over 3823869.62 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:35:07,670 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=152432.0, ans=0.2
+2024-08-25 23:35:29,574 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.13 vs. limit=12.0
+2024-08-25 23:35:36,902 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.60 vs. limit=10.0
+2024-08-25 23:35:52,300 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:35:58,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=152645.33333333334, ans=0.125
+2024-08-25 23:36:09,948 INFO [train.py:1114] (2/4) Epoch 12, batch 1250, loss[loss=0.2711, simple_loss=0.3231, pruned_loss=0.07995, ctc_loss=0.1482, over 19530.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2899, pruned_loss=0.06335, ctc_loss=0.1197, over 3842096.02 frames. ], batch size: 61, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:36:34,022 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.907e+02 2.265e+02 2.785e+02 4.753e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 23:36:37,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=152752.0, ans=0.025
+2024-08-25 23:36:38,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=152752.0, ans=0.1
+2024-08-25 23:36:47,073 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=152805.33333333334, ans=15.0
+2024-08-25 23:36:52,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=152805.33333333334, ans=0.125
+2024-08-25 23:37:18,929 INFO [train.py:1114] (2/4) Epoch 12, batch 1300, loss[loss=0.2528, simple_loss=0.3009, pruned_loss=0.07371, ctc_loss=0.1432, over 18846.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2889, pruned_loss=0.06284, ctc_loss=0.1187, over 3844930.74 frames. ], batch size: 76, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:37:34,868 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=153018.66666666666, ans=0.0
+2024-08-25 23:37:45,071 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=153018.66666666666, ans=0.0
+2024-08-25 23:38:11,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=153125.33333333334, ans=0.125
+2024-08-25 23:38:21,961 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.50 vs. limit=15.0
+2024-08-25 23:38:29,006 INFO [train.py:1114] (2/4) Epoch 12, batch 1350, loss[loss=0.2257, simple_loss=0.2825, pruned_loss=0.06083, ctc_loss=0.118, over 19773.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2885, pruned_loss=0.06237, ctc_loss=0.1177, over 3857406.97 frames. ], batch size: 54, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:38:45,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=153285.33333333334, ans=0.125
+2024-08-25 23:38:46,279 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.707e+02 2.039e+02 2.408e+02 4.402e+02, threshold=4.078e+02, percent-clipped=0.0
+2024-08-25 23:39:16,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=153392.0, ans=0.0
+2024-08-25 23:39:41,529 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.44 vs. limit=6.0
+2024-08-25 23:39:43,101 INFO [train.py:1114] (2/4) Epoch 12, batch 1400, loss[loss=0.1803, simple_loss=0.2412, pruned_loss=0.04315, ctc_loss=0.08264, over 19659.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2883, pruned_loss=0.06241, ctc_loss=0.1177, over 3864657.87 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:39:58,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=153552.0, ans=0.125
+2024-08-25 23:40:26,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=153658.66666666666, ans=0.07
+2024-08-25 23:40:46,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=153658.66666666666, ans=0.0
+2024-08-25 23:41:07,351 INFO [train.py:1114] (2/4) Epoch 12, batch 1450, loss[loss=0.2401, simple_loss=0.2985, pruned_loss=0.06619, ctc_loss=0.1233, over 19637.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2888, pruned_loss=0.06246, ctc_loss=0.1174, over 3863077.00 frames. ], batch size: 63, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:41:12,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=153765.33333333334, ans=0.125
+2024-08-25 23:41:27,995 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 1.773e+02 2.135e+02 2.639e+02 4.435e+02, threshold=4.270e+02, percent-clipped=2.0
+2024-08-25 23:41:35,629 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.50 vs. limit=15.0
+2024-08-25 23:41:55,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=153872.0, ans=0.125
+2024-08-25 23:42:07,708 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.74 vs. limit=15.0
+2024-08-25 23:42:27,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=153978.66666666666, ans=0.0
+2024-08-25 23:42:43,046 INFO [train.py:1114] (2/4) Epoch 12, batch 1500, loss[loss=0.2334, simple_loss=0.3008, pruned_loss=0.06017, ctc_loss=0.114, over 19566.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2893, pruned_loss=0.06239, ctc_loss=0.1174, over 3863064.16 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:42:56,981 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.93 vs. limit=15.0
+2024-08-25 23:43:12,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=154085.33333333334, ans=0.125
+2024-08-25 23:43:24,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=154138.66666666666, ans=0.2
+2024-08-25 23:43:26,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=154192.0, ans=0.1
+2024-08-25 23:43:46,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=154192.0, ans=0.125
+2024-08-25 23:43:53,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=154192.0, ans=0.0
+2024-08-25 23:44:00,940 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.56 vs. limit=12.0
+2024-08-25 23:44:01,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=154245.33333333334, ans=0.125
+2024-08-25 23:44:08,821 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=154298.66666666666, ans=0.1
+2024-08-25 23:44:09,907 INFO [train.py:1114] (2/4) Epoch 12, batch 1550, loss[loss=0.2373, simple_loss=0.3027, pruned_loss=0.06216, ctc_loss=0.1188, over 19609.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.2897, pruned_loss=0.06296, ctc_loss=0.1187, over 3847709.59 frames. ], batch size: 60, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:44:19,047 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.07 vs. limit=6.0
+2024-08-25 23:44:21,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=154352.0, ans=0.015
+2024-08-25 23:44:43,873 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.860e+02 2.194e+02 2.828e+02 4.590e+02, threshold=4.388e+02, percent-clipped=1.0
+2024-08-25 23:44:46,313 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=154352.0, ans=0.125
+2024-08-25 23:45:14,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=154405.33333333334, ans=0.09899494936611666
+2024-08-25 23:46:36,679 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.45 vs. limit=22.5
+2024-08-25 23:46:37,536 INFO [train.py:1114] (2/4) Epoch 12, batch 1600, loss[loss=0.2224, simple_loss=0.2911, pruned_loss=0.05592, ctc_loss=0.1045, over 19841.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2899, pruned_loss=0.06319, ctc_loss=0.119, over 3835769.24 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:46:55,195 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.75 vs. limit=22.5
+2024-08-25 23:47:19,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=154618.66666666666, ans=0.125
+2024-08-25 23:47:24,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=154618.66666666666, ans=0.5
+2024-08-25 23:47:28,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=154618.66666666666, ans=0.125
+2024-08-25 23:47:34,108 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.87 vs. limit=22.5
+2024-08-25 23:47:38,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=154672.0, ans=0.125
+2024-08-25 23:47:41,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=154672.0, ans=0.125
+2024-08-25 23:47:52,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=154725.33333333334, ans=0.05
+2024-08-25 23:48:12,940 INFO [train.py:1114] (2/4) Epoch 12, batch 1650, loss[loss=0.2293, simple_loss=0.287, pruned_loss=0.0622, ctc_loss=0.1179, over 19655.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2887, pruned_loss=0.06237, ctc_loss=0.1176, over 3832969.62 frames. ], batch size: 59, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:48:32,978 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.751e+02 2.060e+02 2.481e+02 4.497e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 23:48:35,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=154885.33333333334, ans=0.0
+2024-08-25 23:49:02,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=154992.0, ans=0.2
+2024-08-25 23:49:10,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=155045.33333333334, ans=0.0
+2024-08-25 23:49:19,224 INFO [train.py:1114] (2/4) Epoch 12, batch 1700, loss[loss=0.1994, simple_loss=0.2551, pruned_loss=0.05178, ctc_loss=0.1004, over 19677.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2883, pruned_loss=0.06198, ctc_loss=0.1166, over 3846956.48 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:49:31,993 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=155152.0, ans=0.0
+2024-08-25 23:49:56,568 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.58 vs. limit=6.0
+2024-08-25 23:50:36,445 INFO [train.py:1114] (2/4) Epoch 12, batch 1750, loss[loss=0.2287, simple_loss=0.279, pruned_loss=0.06467, ctc_loss=0.1229, over 19695.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.2876, pruned_loss=0.06166, ctc_loss=0.1159, over 3852611.75 frames. ], batch size: 45, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:51:12,451 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.691e+02 1.944e+02 2.310e+02 4.068e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-25 23:51:21,890 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.60 vs. limit=15.0
+2024-08-25 23:51:34,418 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:51:45,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=155525.33333333334, ans=0.0
+2024-08-25 23:52:01,279 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=155578.66666666666, ans=0.1
+2024-08-25 23:52:03,843 INFO [train.py:1114] (2/4) Epoch 12, batch 1800, loss[loss=0.2368, simple_loss=0.2965, pruned_loss=0.06326, ctc_loss=0.1263, over 19613.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.2877, pruned_loss=0.06153, ctc_loss=0.1158, over 3854669.63 frames. ], batch size: 55, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:52:15,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=155632.0, ans=0.125
+2024-08-25 23:52:32,420 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.78 vs. limit=22.5
+2024-08-25 23:53:01,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=155738.66666666666, ans=0.125
+2024-08-25 23:53:28,643 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=155792.0, ans=0.125
+2024-08-25 23:53:37,088 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.33 vs. limit=22.5
+2024-08-25 23:53:58,521 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=155845.33333333334, ans=10.0
+2024-08-25 23:54:05,050 INFO [train.py:1114] (2/4) Epoch 12, batch 1850, loss[loss=0.2382, simple_loss=0.2994, pruned_loss=0.0638, ctc_loss=0.1236, over 19610.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.2872, pruned_loss=0.06124, ctc_loss=0.1152, over 3858666.05 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:54:25,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=155898.66666666666, ans=0.1
+2024-08-25 23:54:33,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=155898.66666666666, ans=0.0
+2024-08-25 23:54:41,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_na.min_abs, batch_count=155952.0, ans=0.02
+2024-08-25 23:54:44,941 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 1.785e+02 2.050e+02 2.712e+02 4.249e+02, threshold=4.100e+02, percent-clipped=1.0
+2024-08-25 23:55:35,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=156112.0, ans=0.2
+2024-08-25 23:56:02,277 INFO [train.py:1114] (2/4) Epoch 12, batch 1900, loss[loss=0.2371, simple_loss=0.2974, pruned_loss=0.0631, ctc_loss=0.1264, over 19657.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.2878, pruned_loss=0.06137, ctc_loss=0.1152, over 3862714.37 frames. ], batch size: 59, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:56:24,365 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.21 vs. limit=15.0
+2024-08-25 23:56:33,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=156218.66666666666, ans=0.95
+2024-08-25 23:57:02,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=156272.0, ans=0.025
+2024-08-25 23:57:03,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=156272.0, ans=0.2
+2024-08-25 23:57:15,585 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=156325.33333333334, ans=0.1
+2024-08-25 23:57:17,814 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=156325.33333333334, ans=0.0
+2024-08-25 23:58:03,641 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=156378.66666666666, ans=0.0
+2024-08-25 23:58:09,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=156378.66666666666, ans=0.0
+2024-08-25 23:58:28,736 INFO [train.py:1114] (2/4) Epoch 12, batch 1950, loss[loss=0.2397, simple_loss=0.2874, pruned_loss=0.06965, ctc_loss=0.1318, over 19590.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2879, pruned_loss=0.06088, ctc_loss=0.1143, over 3870854.82 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:59:02,885 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:59:03,814 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 1.700e+02 2.031e+02 2.417e+02 3.778e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-08-25 23:59:07,312 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=156485.33333333334, ans=0.125
+2024-08-25 23:59:21,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=156538.66666666666, ans=0.125
+2024-08-25 23:59:47,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=156645.33333333334, ans=0.125
+2024-08-25 23:59:51,576 INFO [train.py:1114] (2/4) Epoch 12, batch 2000, loss[loss=0.2557, simple_loss=0.2929, pruned_loss=0.0792, ctc_loss=0.15, over 19677.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.289, pruned_loss=0.06162, ctc_loss=0.1158, over 3854634.36 frames. ], batch size: 45, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:00:02,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=156752.0, ans=0.0
+2024-08-26 00:00:08,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=156752.0, ans=0.0
+2024-08-26 00:00:15,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=156805.33333333334, ans=0.125
+2024-08-26 00:01:25,982 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.57 vs. limit=15.0
+2024-08-26 00:01:28,937 INFO [train.py:1114] (2/4) Epoch 12, batch 2050, loss[loss=0.2029, simple_loss=0.2577, pruned_loss=0.05464, ctc_loss=0.09721, over 19728.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.2878, pruned_loss=0.0617, ctc_loss=0.1159, over 3850292.88 frames. ], batch size: 47, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:01:46,857 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.778e+02 1.977e+02 2.412e+02 4.440e+02, threshold=3.953e+02, percent-clipped=1.0
+2024-08-26 00:01:52,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=157072.0, ans=0.0
+2024-08-26 00:01:54,581 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.63 vs. limit=15.0
+2024-08-26 00:01:59,615 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.09 vs. limit=12.0
+2024-08-26 00:02:19,500 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.28 vs. limit=22.5
+2024-08-26 00:02:21,580 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.99 vs. limit=15.0
+2024-08-26 00:02:27,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=157125.33333333334, ans=0.0
+2024-08-26 00:02:28,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_ff2.min_abs, batch_count=157125.33333333334, ans=0.1
+2024-08-26 00:02:36,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=157125.33333333334, ans=0.125
+2024-08-26 00:02:55,211 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 00:02:56,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 00:02:58,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=157178.66666666666, ans=0.5
+2024-08-26 00:03:00,085 INFO [train.py:1114] (2/4) Epoch 12, batch 2100, loss[loss=0.2289, simple_loss=0.2876, pruned_loss=0.06256, ctc_loss=0.1127, over 19773.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.2875, pruned_loss=0.06151, ctc_loss=0.1155, over 3857594.39 frames. ], batch size: 54, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:08:51,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=157232.0, ans=0.125
+2024-08-26 00:27:53,771 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.41 vs. limit=10.0
+2024-08-26 00:48:05,722 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=157445.33333333334, ans=0.0
+2024-08-26 00:54:25,949 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=157445.33333333334, ans=0.125
+2024-08-26 00:56:07,936 INFO [train.py:1114] (2/4) Epoch 12, batch 2150, loss[loss=0.2029, simple_loss=0.2668, pruned_loss=0.05093, ctc_loss=0.09285, over 19868.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.287, pruned_loss=0.06127, ctc_loss=0.115, over 3868522.75 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 01:08:01,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=157552.0, ans=0.125
+2024-08-26 01:09:53,319 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.788e+02 2.174e+02 2.705e+02 6.148e+02, threshold=4.348e+02, percent-clipped=11.0
+2024-08-26 01:21:15,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=157605.33333333334, ans=0.0
+2024-08-26 01:25:03,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=157658.66666666666, ans=0.0
+2024-08-26 01:28:45,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=157658.66666666666, ans=0.0
+2024-08-26 01:37:35,704 INFO [train.py:1114] (2/4) Epoch 12, batch 2200, loss[loss=0.2349, simple_loss=0.2988, pruned_loss=0.06338, ctc_loss=0.1109, over 19607.00 frames. ], tot_loss[loss=0.227, simple_loss=0.2865, pruned_loss=0.06088, ctc_loss=0.1143, over 3866806.24 frames. ], batch size: 57, lr: 1.23e-02, grad_scale: 16.0
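+
+grad_scale in these summaries is the mixed-precision loss scale: it halved from 32.0 to
+16.0 here after an overflow was detected and drifts back up once steps are stable again
+(it is 32.0 again by batch 2400 below). A minimal loop showing where such a value comes
+from, assuming the standard torch.cuda.amp.GradScaler rather than anything recipe-specific:
+
+import torch
+
+scaler = torch.cuda.amp.GradScaler(init_scale=32.0)  # init_scale chosen to match the log
+
+def training_step(model, batch, optimizer, compute_loss):
+    optimizer.zero_grad()
+    with torch.cuda.amp.autocast():           # forward pass in mixed precision
+        loss = compute_loss(model, batch)
+    scaler.scale(loss).backward()             # backprop the scaled loss
+    scaler.step(optimizer)                    # skipped if the grads overflowed
+    scaler.update()                           # halve on overflow, grow when stable
+    return loss.detach(), scaler.get_scale()  # get_scale() is the logged grad_scale
+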
+2024-08-26 01:38:03,515 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=157765.33333333334, ans=0.0
+2024-08-26 01:46:52,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157872.0, ans=0.1
+2024-08-26 01:49:48,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157872.0, ans=0.1
+2024-08-26 01:56:52,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=157978.66666666666, ans=0.125
+2024-08-26 01:57:30,325 INFO [train.py:1114] (2/4) Epoch 12, batch 2250, loss[loss=0.2448, simple_loss=0.302, pruned_loss=0.06736, ctc_loss=0.1319, over 19601.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.2871, pruned_loss=0.06132, ctc_loss=0.1151, over 3867192.81 frames. ], batch size: 55, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 01:58:21,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158032.0, ans=0.1
+2024-08-26 02:02:02,366 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.19 vs. limit=15.0
+2024-08-26 02:02:03,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158032.0, ans=0.1
+2024-08-26 02:03:20,448 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=158085.33333333334, ans=0.0
+2024-08-26 02:04:28,486 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.839e+02 2.199e+02 2.577e+02 6.358e+02, threshold=4.399e+02, percent-clipped=1.0
+2024-08-26 02:07:31,986 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=158138.66666666666, ans=0.125
+2024-08-26 02:08:01,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=158192.0, ans=0.125
+2024-08-26 02:10:34,837 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.32 vs. limit=15.0
+2024-08-26 02:11:55,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158245.33333333334, ans=0.1
+2024-08-26 02:13:20,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=158298.66666666666, ans=0.09899494936611666
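+
+The scaling.py:214 lines trace ScheduledFloat hyperparameters: each named knob (balancer
+probabilities, skip rates, bypass scales, dropout rates) is interpolated piecewise-linearly
+against batch_count, and ans is its current value. The idea, sketched as a plain function
+(an illustration of the scheduling, not icefall's actual class):
+
+def scheduled_float(batch_count, points):
+    """points: sorted list of (batch_count, value) breakpoints."""
+    b0, v0 = points[0]
+    if batch_count <= b0:
+        return v0
+    for b1, v1 in points[1:]:
+        if batch_count <= b1:
+            # linear interpolation between the two surrounding breakpoints
+            t = (batch_count - b0) / (b1 - b0)
+            return v0 + t * (v1 - v0)
+        b0, v0 = b1, v1
+    return v0  # past the last breakpoint: hold the final value
+
+# e.g. a skip-rate decaying from 0.2 to 0.0 over the first 20k batches:
+print(scheduled_float(157125, [(0, 0.2), (20000, 0.0)]))  # -> 0.0
+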
+2024-08-26 02:13:21,320 INFO [train.py:1114] (2/4) Epoch 12, batch 2300, loss[loss=0.2315, simple_loss=0.2882, pruned_loss=0.06361, ctc_loss=0.119, over 19501.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.2864, pruned_loss=0.06159, ctc_loss=0.1156, over 3860788.69 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:13:39,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=158298.66666666666, ans=0.125
+2024-08-26 02:16:18,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=158405.33333333334, ans=0.125
+2024-08-26 02:16:56,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=158405.33333333334, ans=0.125
+2024-08-26 02:22:37,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=158565.33333333334, ans=0.0
+2024-08-26 02:22:39,617 INFO [train.py:1114] (2/4) Epoch 12, batch 2350, loss[loss=0.2391, simple_loss=0.2992, pruned_loss=0.0653, ctc_loss=0.1209, over 19678.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.286, pruned_loss=0.06162, ctc_loss=0.1156, over 3863829.97 frames. ], batch size: 63, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:24:54,974 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.28 vs. limit=15.0
+2024-08-26 02:25:18,450 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.991e+02 2.536e+02 3.183e+02 5.552e+02, threshold=5.072e+02, percent-clipped=5.0
+2024-08-26 02:27:03,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=158725.33333333334, ans=0.0
+2024-08-26 02:28:05,484 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=158725.33333333334, ans=0.125
+2024-08-26 02:28:12,094 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.66 vs. limit=10.0
+2024-08-26 02:28:29,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158778.66666666666, ans=0.1
+2024-08-26 02:30:27,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=158778.66666666666, ans=0.125
+2024-08-26 02:30:58,400 INFO [train.py:1114] (2/4) Epoch 12, batch 2400, loss[loss=0.2765, simple_loss=0.3198, pruned_loss=0.08412, ctc_loss=0.1624, over 19328.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.289, pruned_loss=0.0628, ctc_loss=0.1177, over 3858772.25 frames. ], batch size: 71, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:31:29,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=158885.33333333334, ans=0.125
+2024-08-26 02:36:35,846 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=158992.0, ans=0.125
+2024-08-26 02:37:04,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=158992.0, ans=0.125
+2024-08-26 02:37:51,191 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.48 vs. limit=15.0
+2024-08-26 02:38:02,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=159045.33333333334, ans=0.125
+2024-08-26 02:38:22,397 INFO [train.py:1114] (2/4) Epoch 12, batch 2450, loss[loss=0.3385, simple_loss=0.338, pruned_loss=0.1205, ctc_loss=0.2451, over 13609.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.2932, pruned_loss=0.06602, ctc_loss=0.1241, over 3731400.11 frames. ], batch size: 140, lr: 1.22e-02, grad_scale: 32.0
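+
+The spike above reads like an end-of-epoch artifact of duration-bucketed sampling: 140
+utterances in only 13,609 frames is roughly 97 frames, about one second, per utterance
+(assuming the usual 10 ms frame shift), so this final batch is likely draining a bucket of
+very short cuts, which typically score worse and also pull down the tot_loss frame count.
+The arithmetic:
+
+frames, utterances = 13_609, 140
+frames_per_utt = frames / utterances    # ~97.2 frames per utterance
+seconds_per_utt = frames_per_utt / 100  # ~0.97 s at 100 frames/s (10 ms shift, assumed)
+print(f"{frames_per_utt:.1f} frames ~= {seconds_per_utt:.2f} s per utterance")
+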
+2024-08-26 02:39:04,626 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=159098.66666666666, ans=0.125
+2024-08-26 02:39:14,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=159152.0, ans=0.125
+2024-08-26 02:39:42,316 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.859e+02 2.162e+02 2.447e+02 4.124e+02, threshold=4.324e+02, percent-clipped=0.0
+2024-08-26 02:40:01,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=159152.0, ans=0.025
+2024-08-26 02:40:38,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=159205.33333333334, ans=0.07
+2024-08-26 02:40:53,015 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.92 vs. limit=22.5
+2024-08-26 02:41:13,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=159258.66666666666, ans=0.125
+2024-08-26 02:41:13,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=159258.66666666666, ans=0.125
+2024-08-26 02:43:45,623 INFO [train.py:1114] (2/4) Epoch 13, batch 0, loss[loss=0.2146, simple_loss=0.2728, pruned_loss=0.05671, ctc_loss=0.1077, over 19813.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2728, pruned_loss=0.05671, ctc_loss=0.1077, over 19813.00 frames. ], batch size: 49, lr: 1.18e-02, grad_scale: 32.0
+2024-08-26 02:43:45,624 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 02:45:27,908 INFO [train.py:1146] (2/4) Epoch 13, validation: loss=0.1972, simple_loss=0.2835, pruned_loss=0.04113, ctc_loss=0.07151, over 944034.00 frames.
+2024-08-26 02:45:27,909 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
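+
+Each epoch opens with this three-line validation block (train.py:1137/1146/1147): a full
+pass over the held-out cuts (~944k frames), whose loss of 0.1972 sits well below the running
+training loss, followed by the peak CUDA memory. The memory line reads like a report of
+torch.cuda.max_memory_allocated(); a sketch using that stock PyTorch API (the attribution to
+this exact call is an assumption, the API itself is standard):
+
+import torch
+
+# After validation, report peak GPU memory in MB as in the line above.
+# max_memory_allocated() returns bytes allocated since the last reset.
+peak_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
+print(f"Maximum memory allocated so far is {peak_mb}MB")
+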
+2024-08-26 02:45:29,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=159306.66666666666, ans=0.025
+2024-08-26 02:45:31,683 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.74 vs. limit=15.0
+2024-08-26 02:45:44,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=159306.66666666666, ans=0.125
+2024-08-26 02:46:11,546 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=159360.0, ans=0.125
+2024-08-26 02:46:20,055 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.11 vs. limit=22.5
+2024-08-26 02:46:24,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=159413.33333333334, ans=0.125
+2024-08-26 02:46:53,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
+2024-08-26 02:46:54,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=159466.66666666666, ans=0.2
+2024-08-26 02:48:06,383 INFO [train.py:1114] (2/4) Epoch 13, batch 50, loss[loss=0.1943, simple_loss=0.2539, pruned_loss=0.0482, ctc_loss=0.09562, over 19726.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.2895, pruned_loss=0.06243, ctc_loss=0.1182, over 844763.42 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:48:17,659 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.32 vs. limit=12.0
+2024-08-26 02:48:30,545 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.30 vs. limit=22.5
+2024-08-26 02:48:32,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=159626.66666666666, ans=0.1
+2024-08-26 02:48:33,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=159626.66666666666, ans=0.0
+2024-08-26 02:48:38,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=159680.0, ans=0.125
+2024-08-26 02:48:55,468 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.822e+02 2.122e+02 2.766e+02 5.339e+02, threshold=4.244e+02, percent-clipped=3.0
+2024-08-26 02:49:01,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=159733.33333333334, ans=0.2
+2024-08-26 02:49:26,910 INFO [train.py:1114] (2/4) Epoch 13, batch 100, loss[loss=0.2208, simple_loss=0.283, pruned_loss=0.05801, ctc_loss=0.1067, over 19715.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.2903, pruned_loss=0.06182, ctc_loss=0.1173, over 1499351.95 frames. ], batch size: 51, lr: 1.17e-02, grad_scale: 16.0
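+
+The frame counts attached to tot_loss show how the running statistics are kept: not a plain
+cumulative sum but an exponentially decayed one. From 19,813 frames at batch 0 the count
+climbs to ~845k by batch 50 and ~1.50M by batch 100, then plateaus near 3.85M mid-epoch;
+that is the signature of tot <- tot * (1 - 1/N) + batch_frames with N ~ 200 and ~19.3k
+frames per batch (N x 19.3k ~ 3.86M). The constants below are deduced from these numbers,
+not read from the training code:
+
+N, frames_per_batch = 200, 19_300
+tot = 0.0
+for batch in range(2001):
+    tot = tot * (1 - 1 / N) + frames_per_batch  # decayed running frame count
+    if batch in (0, 50, 100, 2000):
+        print(batch, f"{tot:,.0f}")
+# batch 0: 19,300; 50: ~872k; 100: ~1.53M; 2000: ~3.86M -- the same ramp and
+# plateau as the 'over N frames' counts in the summaries above.
+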
+2024-08-26 02:49:29,930 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=159840.0, ans=0.125
+2024-08-26 02:49:35,109 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.64 vs. limit=15.0
+2024-08-26 02:49:40,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=159840.0, ans=0.125
+2024-08-26 02:49:54,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=159893.33333333334, ans=0.2
+2024-08-26 02:50:00,767 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=159946.66666666666, ans=0.125
+2024-08-26 02:50:25,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=160000.0, ans=0.125
+2024-08-26 02:50:58,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=160000.0, ans=0.125
+2024-08-26 02:51:13,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 02:51:16,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 02:51:27,230 INFO [train.py:1114] (2/4) Epoch 13, batch 150, loss[loss=0.2065, simple_loss=0.2592, pruned_loss=0.05585, ctc_loss=0.1053, over 19689.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.2873, pruned_loss=0.06045, ctc_loss=0.114, over 2027622.75 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:52:39,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=160213.33333333334, ans=0.125
+2024-08-26 02:52:48,524 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.693e+02 1.889e+02 2.276e+02 3.515e+02, threshold=3.778e+02, percent-clipped=0.0
+2024-08-26 02:53:09,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160266.66666666666, ans=0.1
+2024-08-26 02:53:36,256 INFO [train.py:1114] (2/4) Epoch 13, batch 200, loss[loss=0.2841, simple_loss=0.3246, pruned_loss=0.0871, ctc_loss=0.1736, over 18259.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2863, pruned_loss=0.06006, ctc_loss=0.1133, over 2434814.35 frames. ], batch size: 85, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:54:07,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=160426.66666666666, ans=0.025
+2024-08-26 02:54:09,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=160426.66666666666, ans=0.125
+2024-08-26 02:54:11,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=160480.0, ans=0.5
+2024-08-26 02:55:04,400 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.33 vs. limit=12.0
+2024-08-26 02:55:15,707 INFO [train.py:1114] (2/4) Epoch 13, batch 250, loss[loss=0.2621, simple_loss=0.3092, pruned_loss=0.07815, ctc_loss=0.1469, over 19360.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.286, pruned_loss=0.05979, ctc_loss=0.1131, over 2754451.47 frames. ], batch size: 67, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:55:18,118 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=160640.0, ans=0.125
+2024-08-26 02:55:37,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=160746.66666666666, ans=0.125
+2024-08-26 02:55:40,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=160746.66666666666, ans=0.1
+2024-08-26 02:55:47,663 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.754e+02 2.188e+02 2.577e+02 4.403e+02, threshold=4.375e+02, percent-clipped=2.0
+2024-08-26 02:55:48,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=160746.66666666666, ans=0.2
+2024-08-26 02:56:01,573 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.91 vs. limit=15.0
+2024-08-26 02:56:10,355 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=160853.33333333334, ans=0.0
+2024-08-26 02:56:15,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=160853.33333333334, ans=0.07
+2024-08-26 02:56:43,563 INFO [train.py:1114] (2/4) Epoch 13, batch 300, loss[loss=0.243, simple_loss=0.3006, pruned_loss=0.06819, ctc_loss=0.1227, over 19511.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2851, pruned_loss=0.05938, ctc_loss=0.1123, over 2999845.44 frames. ], batch size: 61, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:57:24,390 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.50 vs. limit=6.0
+2024-08-26 02:57:36,731 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:57:42,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_ff2.min_abs, batch_count=161120.0, ans=0.1
+2024-08-26 02:57:46,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=161120.0, ans=0.1
+2024-08-26 02:57:47,824 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.80 vs. limit=15.0
+2024-08-26 02:57:50,481 INFO [train.py:1114] (2/4) Epoch 13, batch 350, loss[loss=0.2167, simple_loss=0.2675, pruned_loss=0.06074, ctc_loss=0.1112, over 19755.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2851, pruned_loss=0.05926, ctc_loss=0.1118, over 3189809.24 frames. ], batch size: 48, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:58:04,801 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.39 vs. limit=22.5
+2024-08-26 02:58:13,161 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.24 vs. limit=15.0
+2024-08-26 02:58:13,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=161226.66666666666, ans=0.125
+2024-08-26 02:58:25,611 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.772e+02 2.039e+02 2.354e+02 3.759e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-26 02:58:36,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=161333.33333333334, ans=0.0
+2024-08-26 02:59:24,150 INFO [train.py:1114] (2/4) Epoch 13, batch 400, loss[loss=0.2183, simple_loss=0.2878, pruned_loss=0.05329, ctc_loss=0.1055, over 19504.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2846, pruned_loss=0.05922, ctc_loss=0.1117, over 3340937.66 frames. ], batch size: 54, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 02:59:55,909 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=161493.33333333334, ans=0.0
+2024-08-26 02:59:56,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=161493.33333333334, ans=0.1
+2024-08-26 02:59:56,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=161493.33333333334, ans=0.0
+2024-08-26 03:00:32,909 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.35 vs. limit=15.0
+2024-08-26 03:00:41,435 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=161600.0, ans=0.025
+2024-08-26 03:00:41,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=161600.0, ans=0.125
+2024-08-26 03:01:53,844 INFO [train.py:1114] (2/4) Epoch 13, batch 450, loss[loss=0.217, simple_loss=0.288, pruned_loss=0.05206, ctc_loss=0.1045, over 19610.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.285, pruned_loss=0.05951, ctc_loss=0.1122, over 3448536.75 frames. ], batch size: 55, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:01:55,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=161706.66666666666, ans=0.0
+2024-08-26 03:01:56,659 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.21 vs. limit=15.0
+2024-08-26 03:02:43,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=161760.0, ans=0.0
+2024-08-26 03:03:10,112 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.726e+02 2.085e+02 2.754e+02 4.301e+02, threshold=4.170e+02, percent-clipped=3.0
+2024-08-26 03:03:23,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=161920.0, ans=0.1
+2024-08-26 03:03:41,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=161920.0, ans=0.0
+2024-08-26 03:03:42,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=161920.0, ans=0.0
+2024-08-26 03:03:51,743 INFO [train.py:1114] (2/4) Epoch 13, batch 500, loss[loss=0.2653, simple_loss=0.3156, pruned_loss=0.07838, ctc_loss=0.1454, over 19656.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2847, pruned_loss=0.05939, ctc_loss=0.1119, over 3544900.86 frames. ], batch size: 63, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:04:14,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=161973.33333333334, ans=0.125
+2024-08-26 03:04:16,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=161973.33333333334, ans=0.125
+2024-08-26 03:04:44,670 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=162026.66666666666, ans=0.1
+2024-08-26 03:05:20,930 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.64 vs. limit=15.0
+2024-08-26 03:05:28,949 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=162133.33333333334, ans=0.125
+2024-08-26 03:05:42,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=162186.66666666666, ans=0.125
+2024-08-26 03:06:03,078 INFO [train.py:1114] (2/4) Epoch 13, batch 550, loss[loss=0.2592, simple_loss=0.315, pruned_loss=0.07291, ctc_loss=0.1441, over 19232.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2848, pruned_loss=0.05945, ctc_loss=0.1121, over 3608365.90 frames. ], batch size: 71, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:06:04,407 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=162240.0, ans=10.0
+2024-08-26 03:06:27,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=162293.33333333334, ans=0.025
+2024-08-26 03:06:47,052 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 1.758e+02 1.954e+02 2.485e+02 4.688e+02, threshold=3.908e+02, percent-clipped=2.0
+2024-08-26 03:07:12,129 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=162453.33333333334, ans=0.125
+2024-08-26 03:07:14,442 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162453.33333333334, ans=0.1
+2024-08-26 03:07:24,257 INFO [train.py:1114] (2/4) Epoch 13, batch 600, loss[loss=0.2636, simple_loss=0.3144, pruned_loss=0.07767, ctc_loss=0.1437, over 19429.00 frames. ], tot_loss[loss=0.225, simple_loss=0.2854, pruned_loss=0.05977, ctc_loss=0.1125, over 3664877.48 frames. ], batch size: 67, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:07:36,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=162506.66666666666, ans=0.0
+2024-08-26 03:07:44,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=162560.0, ans=0.125
+2024-08-26 03:07:50,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162560.0, ans=0.1
+2024-08-26 03:08:16,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=162613.33333333334, ans=0.1
+2024-08-26 03:08:29,444 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:08:35,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=162666.66666666666, ans=0.0
+2024-08-26 03:08:37,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=162666.66666666666, ans=0.0
+2024-08-26 03:09:14,956 INFO [train.py:1114] (2/4) Epoch 13, batch 650, loss[loss=0.229, simple_loss=0.2906, pruned_loss=0.06073, ctc_loss=0.115, over 19758.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2851, pruned_loss=0.05963, ctc_loss=0.1125, over 3715505.86 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:09:15,252 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=162773.33333333334, ans=0.0
+2024-08-26 03:09:22,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=162773.33333333334, ans=0.125
+2024-08-26 03:09:25,671 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.13 vs. limit=15.0
+2024-08-26 03:09:44,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=162826.66666666666, ans=0.2
+2024-08-26 03:09:45,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=162826.66666666666, ans=0.0
+2024-08-26 03:09:57,794 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.29 vs. limit=15.0
+2024-08-26 03:10:09,872 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.755e+02 2.119e+02 2.960e+02 5.119e+02, threshold=4.237e+02, percent-clipped=6.0
+2024-08-26 03:10:30,534 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=162986.66666666666, ans=0.2
+2024-08-26 03:10:39,731 INFO [train.py:1114] (2/4) Epoch 13, batch 700, loss[loss=0.1995, simple_loss=0.2662, pruned_loss=0.04774, ctc_loss=0.09344, over 19719.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2849, pruned_loss=0.0595, ctc_loss=0.1118, over 3747011.77 frames. ], batch size: 51, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:10:56,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=163093.33333333334, ans=0.125
+2024-08-26 03:10:58,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=163093.33333333334, ans=0.125
+2024-08-26 03:11:37,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=163200.0, ans=0.2
+2024-08-26 03:11:45,438 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.39 vs. limit=15.0
+2024-08-26 03:12:00,818 INFO [train.py:1114] (2/4) Epoch 13, batch 750, loss[loss=0.2506, simple_loss=0.3073, pruned_loss=0.07125, ctc_loss=0.1286, over 19490.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2845, pruned_loss=0.0594, ctc_loss=0.1114, over 3773780.70 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:12:09,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=163306.66666666666, ans=0.2
+2024-08-26 03:12:30,501 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.93 vs. limit=15.0
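+
+The Whitening lines from scaling.py:1024 appear when a module's activations approach their
+scheduled limit: the metric gauges how far the feature covariance is from white (isotropic),
+with 1.0 meaning perfectly white, and here it has just crossed its limit (15.93 vs 15.0),
+the point at which the Whiten module starts correcting through the gradient. One plausible
+such metric, the dispersion of the covariance spectrum, offered as an assumption about what
+is measured rather than icefall's exact formula:
+
+import torch
+
+def whitening_metric(x: torch.Tensor) -> torch.Tensor:
+    # x: (frames, channels). Returns d * sum(eig^2) / sum(eig)^2 of the
+    # covariance: 1.0 iff all eigenvalues are equal (white), larger otherwise.
+    x = x - x.mean(dim=0, keepdim=True)
+    cov = (x.T @ x) / x.shape[0]
+    d = cov.shape[0]
+    # trace(cov @ cov) = sum of squared eigenvalues; trace(cov) = their sum
+    return (cov @ cov).diagonal().sum() * d / cov.diagonal().sum() ** 2
+
+x = torch.randn(1000, 384)         # near-white sample: metric lands near 1
+print(whitening_metric(x).item())  # ~1.4 here; correlated features push it up
+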
+2024-08-26 03:12:36,515 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=163413.33333333334, ans=0.0
+2024-08-26 03:12:36,806 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys.whitening_limit, batch_count=163413.33333333334, ans=6.0
+2024-08-26 03:12:37,698 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=163413.33333333334, ans=0.125
+2024-08-26 03:12:43,037 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.800e+02 2.310e+02 2.882e+02 4.749e+02, threshold=4.619e+02, percent-clipped=2.0
+2024-08-26 03:12:51,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=163466.66666666666, ans=0.125
+2024-08-26 03:13:31,946 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=163520.0, ans=0.0
+2024-08-26 03:13:37,927 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=163520.0, ans=0.125
+2024-08-26 03:13:53,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=163573.33333333334, ans=0.1
+2024-08-26 03:13:56,370 INFO [train.py:1114] (2/4) Epoch 13, batch 800, loss[loss=0.2063, simple_loss=0.2668, pruned_loss=0.05306, ctc_loss=0.09921, over 19410.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2849, pruned_loss=0.05981, ctc_loss=0.1122, over 3794107.70 frames. ], batch size: 48, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:14:05,670 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=163573.33333333334, ans=0.0
+2024-08-26 03:14:11,929 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.07 vs. limit=22.5
+2024-08-26 03:14:12,391 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.73 vs. limit=22.5
+2024-08-26 03:14:14,059 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=163573.33333333334, ans=0.125
+2024-08-26 03:14:39,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=163680.0, ans=0.2
+2024-08-26 03:14:52,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=163733.33333333334, ans=0.0
+2024-08-26 03:14:56,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=163733.33333333334, ans=0.0
+2024-08-26 03:15:12,884 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=163840.0, ans=0.0
+2024-08-26 03:15:13,896 INFO [train.py:1114] (2/4) Epoch 13, batch 850, loss[loss=0.2136, simple_loss=0.2934, pruned_loss=0.04787, ctc_loss=0.09498, over 19639.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2842, pruned_loss=0.05914, ctc_loss=0.1113, over 3813447.23 frames. ], batch size: 59, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:15:18,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=163840.0, ans=0.125
+2024-08-26 03:15:56,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=163946.66666666666, ans=0.0
+2024-08-26 03:16:06,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=163946.66666666666, ans=0.125
+2024-08-26 03:16:11,619 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.727e+02 1.948e+02 2.271e+02 3.773e+02, threshold=3.897e+02, percent-clipped=0.0
+2024-08-26 03:16:12,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=163946.66666666666, ans=0.1
+2024-08-26 03:16:26,854 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=164053.33333333334, ans=0.125
+2024-08-26 03:16:39,637 INFO [train.py:1114] (2/4) Epoch 13, batch 900, loss[loss=0.1888, simple_loss=0.2553, pruned_loss=0.04427, ctc_loss=0.08421, over 19813.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2845, pruned_loss=0.05956, ctc_loss=0.1119, over 3818229.55 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:16:40,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=164106.66666666666, ans=0.125
+2024-08-26 03:16:42,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=164106.66666666666, ans=0.1
+2024-08-26 03:16:57,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=164160.0, ans=0.2
+2024-08-26 03:17:00,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=164160.0, ans=0.125
+2024-08-26 03:17:05,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=164213.33333333334, ans=0.0
+2024-08-26 03:17:39,435 INFO [train.py:1114] (2/4) Epoch 13, batch 950, loss[loss=0.2111, simple_loss=0.2664, pruned_loss=0.05591, ctc_loss=0.11, over 19505.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.284, pruned_loss=0.05939, ctc_loss=0.1117, over 3818816.00 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:17:39,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=164373.33333333334, ans=0.07
+2024-08-26 03:17:43,193 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:17:57,979 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:18:34,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=164426.66666666666, ans=0.125
+2024-08-26 03:18:52,290 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.763e+02 2.081e+02 2.549e+02 5.575e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-26 03:19:07,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=164533.33333333334, ans=0.04949747468305833
+2024-08-26 03:19:10,806 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=164533.33333333334, ans=0.125
+2024-08-26 03:19:29,068 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=164640.0, ans=0.0
+2024-08-26 03:19:29,891 INFO [train.py:1114] (2/4) Epoch 13, batch 1000, loss[loss=0.2237, simple_loss=0.2844, pruned_loss=0.05862, ctc_loss=0.1145, over 19842.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2857, pruned_loss=0.06018, ctc_loss=0.1132, over 3814582.96 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:19:45,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=164693.33333333334, ans=0.125
+2024-08-26 03:19:48,980 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:20:23,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=164853.33333333334, ans=0.0
+2024-08-26 03:20:24,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=164853.33333333334, ans=0.1
+2024-08-26 03:20:26,735 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=164853.33333333334, ans=0.0
+2024-08-26 03:20:35,692 INFO [train.py:1114] (2/4) Epoch 13, batch 1050, loss[loss=0.2264, simple_loss=0.2973, pruned_loss=0.05568, ctc_loss=0.1103, over 19838.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2851, pruned_loss=0.06021, ctc_loss=0.1132, over 3821495.00 frames. ], batch size: 57, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:20:44,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=164906.66666666666, ans=0.125
+2024-08-26 03:20:46,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=164960.0, ans=0.125
+2024-08-26 03:20:46,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=164960.0, ans=0.09899494936611666
+2024-08-26 03:20:48,603 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.37 vs. limit=15.0
+2024-08-26 03:20:54,918 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=164960.0, ans=0.125
+2024-08-26 03:21:08,096 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.698e+02 1.997e+02 2.318e+02 3.616e+02, threshold=3.994e+02, percent-clipped=0.0
+2024-08-26 03:21:24,348 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=165066.66666666666, ans=0.125
+2024-08-26 03:21:31,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=165120.0, ans=0.1
+2024-08-26 03:21:32,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=165120.0, ans=0.2
+2024-08-26 03:21:42,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=165120.0, ans=0.1
+2024-08-26 03:21:44,513 INFO [train.py:1114] (2/4) Epoch 13, batch 1100, loss[loss=0.208, simple_loss=0.2777, pruned_loss=0.05078, ctc_loss=0.09204, over 19589.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2845, pruned_loss=0.05988, ctc_loss=0.1126, over 3830301.92 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:21:49,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=165173.33333333334, ans=0.0
+2024-08-26 03:22:45,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.38 vs. limit=15.0
+2024-08-26 03:22:57,622 INFO [train.py:1114] (2/4) Epoch 13, batch 1150, loss[loss=0.2159, simple_loss=0.2737, pruned_loss=0.05742, ctc_loss=0.1082, over 19614.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2838, pruned_loss=0.0596, ctc_loss=0.112, over 3830394.96 frames. ], batch size: 52, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:23:02,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=165440.0, ans=0.0
+2024-08-26 03:23:10,111 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=165493.33333333334, ans=0.125
+2024-08-26 03:23:19,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=165493.33333333334, ans=0.125
+2024-08-26 03:23:24,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=165493.33333333334, ans=0.125
+2024-08-26 03:23:38,673 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.729e+02 2.006e+02 2.456e+02 7.202e+02, threshold=4.012e+02, percent-clipped=3.0
+2024-08-26 03:23:51,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=165600.0, ans=0.2
+2024-08-26 03:23:54,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=165653.33333333334, ans=0.5
+2024-08-26 03:23:57,277 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:23:59,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=165653.33333333334, ans=0.0
+2024-08-26 03:24:03,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=165653.33333333334, ans=0.125
+2024-08-26 03:24:11,580 INFO [train.py:1114] (2/4) Epoch 13, batch 1200, loss[loss=0.2124, simple_loss=0.2803, pruned_loss=0.05303, ctc_loss=0.09605, over 19834.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2845, pruned_loss=0.05973, ctc_loss=0.1122, over 3826231.54 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:24:19,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=165706.66666666666, ans=0.2
+2024-08-26 03:24:30,154 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=165706.66666666666, ans=0.125
+2024-08-26 03:24:54,947 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=165813.33333333334, ans=0.0
+2024-08-26 03:25:15,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=165813.33333333334, ans=0.04949747468305833
+2024-08-26 03:25:23,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=165866.66666666666, ans=0.1
+2024-08-26 03:25:58,794 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=165866.66666666666, ans=0.2
+2024-08-26 03:26:20,507 INFO [train.py:1114] (2/4) Epoch 13, batch 1250, loss[loss=0.2469, simple_loss=0.3022, pruned_loss=0.06892, ctc_loss=0.1342, over 19497.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.285, pruned_loss=0.05974, ctc_loss=0.1121, over 3844509.72 frames. ], batch size: 61, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:26:20,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=165973.33333333334, ans=0.125
+2024-08-26 03:26:32,651 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.05 vs. limit=15.0
+2024-08-26 03:26:45,880 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=166026.66666666666, ans=0.125
+2024-08-26 03:27:02,876 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=166080.0, ans=0.125
+2024-08-26 03:27:23,465 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.311e+02 1.715e+02 1.869e+02 2.285e+02 3.930e+02, threshold=3.738e+02, percent-clipped=0.0
+2024-08-26 03:27:38,041 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.14 vs. limit=6.0
+2024-08-26 03:27:39,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=166133.33333333334, ans=0.2
+2024-08-26 03:28:00,525 INFO [train.py:1114] (2/4) Epoch 13, batch 1300, loss[loss=0.2667, simple_loss=0.3093, pruned_loss=0.08235, ctc_loss=0.1486, over 18884.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2836, pruned_loss=0.05898, ctc_loss=0.1109, over 3848264.69 frames. ], batch size: 76, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:28:00,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=166240.0, ans=0.0
+2024-08-26 03:28:38,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=166293.33333333334, ans=0.125
+2024-08-26 03:30:08,053 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:30:18,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=166506.66666666666, ans=0.0
+2024-08-26 03:30:19,038 INFO [train.py:1114] (2/4) Epoch 13, batch 1350, loss[loss=0.2249, simple_loss=0.2885, pruned_loss=0.05803, ctc_loss=0.1134, over 19774.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.2835, pruned_loss=0.05875, ctc_loss=0.1104, over 3859343.46 frames. ], batch size: 54, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:30:46,465 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.76 vs. limit=22.5
+2024-08-26 03:31:08,778 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.736e+02 2.053e+02 2.622e+02 5.263e+02, threshold=4.106e+02, percent-clipped=6.0
+2024-08-26 03:31:20,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=166666.66666666666, ans=0.0
+2024-08-26 03:31:36,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=166720.0, ans=0.0
+2024-08-26 03:31:40,380 INFO [train.py:1114] (2/4) Epoch 13, batch 1400, loss[loss=0.2044, simple_loss=0.2645, pruned_loss=0.05185, ctc_loss=0.1013, over 19667.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2831, pruned_loss=0.05841, ctc_loss=0.1098, over 3865710.98 frames. ], batch size: 46, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:31:53,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=166773.33333333334, ans=0.0
+2024-08-26 03:32:14,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=166773.33333333334, ans=0.125
+2024-08-26 03:32:31,744 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.42 vs. limit=15.0
+2024-08-26 03:32:36,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=166880.0, ans=0.5
+2024-08-26 03:32:55,123 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff3.min_abs, batch_count=166933.33333333334, ans=0.2
+2024-08-26 03:33:16,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=166986.66666666666, ans=0.125
+2024-08-26 03:33:21,478 INFO [train.py:1114] (2/4) Epoch 13, batch 1450, loss[loss=0.2403, simple_loss=0.2961, pruned_loss=0.06772, ctc_loss=0.1227, over 19671.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2839, pruned_loss=0.05883, ctc_loss=0.1106, over 3863673.82 frames. ], batch size: 63, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:33:33,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=167093.33333333334, ans=0.0
+2024-08-26 03:33:44,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=167093.33333333334, ans=0.0
+2024-08-26 03:33:59,239 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.756e+02 1.937e+02 2.380e+02 3.895e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-26 03:34:33,954 INFO [train.py:1114] (2/4) Epoch 13, batch 1500, loss[loss=0.2403, simple_loss=0.2974, pruned_loss=0.06641, ctc_loss=0.1258, over 19606.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2851, pruned_loss=0.05954, ctc_loss=0.1121, over 3862842.30 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:34:54,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=167360.0, ans=0.125
+2024-08-26 03:35:43,267 INFO [train.py:1114] (2/4) Epoch 13, batch 1550, loss[loss=0.2204, simple_loss=0.2919, pruned_loss=0.05532, ctc_loss=0.09591, over 19635.00 frames. ], tot_loss[loss=0.225, simple_loss=0.2854, pruned_loss=0.05975, ctc_loss=0.1128, over 3848410.62 frames. ], batch size: 60, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:35:59,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=167626.66666666666, ans=0.125
+2024-08-26 03:36:22,751 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.33 vs. limit=10.0
+2024-08-26 03:36:35,367 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.778e+02 2.054e+02 2.767e+02 5.252e+02, threshold=4.108e+02, percent-clipped=7.0
+2024-08-26 03:36:41,377 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.44 vs. limit=15.0
+2024-08-26 03:37:04,414 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=167840.0, ans=0.0
+2024-08-26 03:37:05,323 INFO [train.py:1114] (2/4) Epoch 13, batch 1600, loss[loss=0.2266, simple_loss=0.2956, pruned_loss=0.05739, ctc_loss=0.107, over 19844.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2853, pruned_loss=0.05966, ctc_loss=0.1127, over 3837793.79 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:37:16,874 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=167840.0, ans=0.2
+2024-08-26 03:37:42,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=167946.66666666666, ans=0.1
+2024-08-26 03:38:21,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=168000.0, ans=0.0
+2024-08-26 03:38:35,984 INFO [train.py:1114] (2/4) Epoch 13, batch 1650, loss[loss=0.199, simple_loss=0.2718, pruned_loss=0.04636, ctc_loss=0.08361, over 19668.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2851, pruned_loss=0.0598, ctc_loss=0.1128, over 3833703.32 frames. ], batch size: 59, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:38:36,546 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.26 vs. limit=12.0
+2024-08-26 03:38:44,671 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=168106.66666666666, ans=0.1
+2024-08-26 03:38:47,725 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=168106.66666666666, ans=0.0
+2024-08-26 03:38:58,494 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.51 vs. limit=10.0
+2024-08-26 03:39:06,360 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.35 vs. limit=15.0
+2024-08-26 03:39:20,047 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.825e+02 2.209e+02 2.614e+02 4.167e+02, threshold=4.418e+02, percent-clipped=2.0
+2024-08-26 03:39:28,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=168266.66666666666, ans=0.125
+2024-08-26 03:39:56,780 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=168320.0, ans=0.0
+2024-08-26 03:40:00,077 INFO [train.py:1114] (2/4) Epoch 13, batch 1700, loss[loss=0.203, simple_loss=0.2607, pruned_loss=0.05303, ctc_loss=0.0978, over 19655.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2852, pruned_loss=0.05992, ctc_loss=0.1128, over 3847716.76 frames. ], batch size: 46, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:40:24,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=168426.66666666666, ans=0.125
+2024-08-26 03:40:35,039 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=168426.66666666666, ans=0.0
+2024-08-26 03:40:42,528 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.82 vs. limit=15.0
+2024-08-26 03:40:47,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=168533.33333333334, ans=0.2
+2024-08-26 03:41:02,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=168586.66666666666, ans=0.0
+2024-08-26 03:41:17,849 INFO [train.py:1114] (2/4) Epoch 13, batch 1750, loss[loss=0.1786, simple_loss=0.2406, pruned_loss=0.04139, ctc_loss=0.08448, over 19679.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2844, pruned_loss=0.05925, ctc_loss=0.1117, over 3852034.96 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:41:19,636 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.14 vs. limit=15.0
+2024-08-26 03:41:34,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=22.79 vs. limit=22.5
+2024-08-26 03:41:42,626 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=168693.33333333334, ans=0.1
+2024-08-26 03:41:48,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=168693.33333333334, ans=0.125
+2024-08-26 03:41:58,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=168746.66666666666, ans=0.0
+2024-08-26 03:41:59,753 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=16.17 vs. limit=22.5
+2024-08-26 03:42:01,095 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.702e+02 2.065e+02 2.813e+02 5.109e+02, threshold=4.129e+02, percent-clipped=2.0
+2024-08-26 03:42:11,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=168746.66666666666, ans=0.125
+2024-08-26 03:42:42,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=168853.33333333334, ans=0.125
+2024-08-26 03:42:43,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=168853.33333333334, ans=0.025
+2024-08-26 03:42:45,934 INFO [train.py:1114] (2/4) Epoch 13, batch 1800, loss[loss=0.2249, simple_loss=0.2881, pruned_loss=0.05769, ctc_loss=0.1155, over 19617.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2846, pruned_loss=0.05935, ctc_loss=0.1118, over 3853948.22 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:42:57,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=168960.0, ans=0.125
+2024-08-26 03:42:58,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=168960.0, ans=0.125
+2024-08-26 03:43:01,397 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.69 vs. limit=22.5
+2024-08-26 03:43:04,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=168960.0, ans=0.125
+2024-08-26 03:43:16,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=169013.33333333334, ans=0.0
+2024-08-26 03:43:26,509 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.52 vs. limit=10.0
+2024-08-26 03:43:36,402 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=169066.66666666666, ans=0.125
+2024-08-26 03:43:47,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=169120.0, ans=0.125
+2024-08-26 03:43:53,525 INFO [train.py:1114] (2/4) Epoch 13, batch 1850, loss[loss=0.2189, simple_loss=0.289, pruned_loss=0.05442, ctc_loss=0.09984, over 19599.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2841, pruned_loss=0.05878, ctc_loss=0.1105, over 3857857.64 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:44:05,142 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.29 vs. limit=10.0
+2024-08-26 03:44:17,195 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.38 vs. limit=15.0
+2024-08-26 03:44:29,687 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.936e+02 2.666e+02 3.402e+02 5.252e+02, threshold=5.332e+02, percent-clipped=13.0
+2024-08-26 03:44:43,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=169386.66666666666, ans=0.125
+2024-08-26 03:44:57,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=169386.66666666666, ans=0.0
+2024-08-26 03:45:07,843 INFO [train.py:1114] (2/4) Epoch 13, batch 1900, loss[loss=0.227, simple_loss=0.2928, pruned_loss=0.0573, ctc_loss=0.1164, over 19655.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.2846, pruned_loss=0.05882, ctc_loss=0.1105, over 3862789.69 frames. ], batch size: 59, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:45:09,023 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=169440.0, ans=0.025
+2024-08-26 03:45:20,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=169493.33333333334, ans=0.125
+2024-08-26 03:45:28,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=169493.33333333334, ans=0.0
+2024-08-26 03:45:43,477 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=169546.66666666666, ans=0.04949747468305833
+2024-08-26 03:45:51,693 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.26 vs. limit=5.0
+2024-08-26 03:46:04,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=169600.0, ans=0.2
+2024-08-26 03:46:22,593 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.08 vs. limit=6.0
+2024-08-26 03:46:24,453 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=169706.66666666666, ans=0.0
+2024-08-26 03:46:29,183 INFO [train.py:1114] (2/4) Epoch 13, batch 1950, loss[loss=0.2233, simple_loss=0.2818, pruned_loss=0.06087, ctc_loss=0.1077, over 19586.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2858, pruned_loss=0.05918, ctc_loss=0.111, over 3871322.34 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:46:29,335 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=169706.66666666666, ans=0.1
+2024-08-26 03:46:29,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=169706.66666666666, ans=0.125
+2024-08-26 03:46:54,110 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=169760.0, ans=0.125
+2024-08-26 03:46:59,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=169813.33333333334, ans=0.1
+2024-08-26 03:50:25,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=169813.33333333334, ans=0.0
+2024-08-26 03:50:26,627 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.795e+02 2.018e+02 2.323e+02 3.502e+02, threshold=4.036e+02, percent-clipped=0.0
+2024-08-26 03:54:17,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=169866.66666666666, ans=0.125
+2024-08-26 04:20:39,931 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.14 vs. limit=22.5
+2024-08-26 04:22:39,304 INFO [train.py:1114] (2/4) Epoch 13, batch 2000, loss[loss=0.1778, simple_loss=0.2411, pruned_loss=0.04243, ctc_loss=0.07417, over 19663.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2865, pruned_loss=0.05975, ctc_loss=0.112, over 3856689.37 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 04:39:36,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=170026.66666666666, ans=0.125
+2024-08-26 05:07:53,312 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=170186.66666666666, ans=0.125
+2024-08-26 05:13:23,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=170186.66666666666, ans=0.125
+2024-08-26 05:17:15,382 INFO [train.py:1114] (2/4) Epoch 13, batch 2050, loss[loss=0.1956, simple_loss=0.2558, pruned_loss=0.04949, ctc_loss=0.09092, over 19705.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2853, pruned_loss=0.05947, ctc_loss=0.1116, over 3851357.49 frames. ], batch size: 47, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:18:37,069 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.29 vs. limit=6.0
+2024-08-26 05:25:20,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=170293.33333333334, ans=0.125
+2024-08-26 05:25:21,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=170293.33333333334, ans=0.125
+2024-08-26 05:34:32,866 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.739e+02 2.095e+02 2.592e+02 3.598e+02, threshold=4.189e+02, percent-clipped=0.0
+2024-08-26 05:34:44,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=170400.0, ans=0.125
+2024-08-26 05:43:17,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=170453.33333333334, ans=0.04949747468305833
+2024-08-26 05:45:21,769 INFO [train.py:1114] (2/4) Epoch 13, batch 2100, loss[loss=0.2535, simple_loss=0.3039, pruned_loss=0.07408, ctc_loss=0.1373, over 19768.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2843, pruned_loss=0.05896, ctc_loss=0.1107, over 3858507.93 frames. ], batch size: 54, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:50:50,951 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 05:51:19,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=170560.0, ans=0.125
+2024-08-26 05:52:06,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=170613.33333333334, ans=0.0
+2024-08-26 05:53:15,374 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.98 vs. limit=15.0
+2024-08-26 05:57:05,746 INFO [train.py:1114] (2/4) Epoch 13, batch 2150, loss[loss=0.1881, simple_loss=0.2537, pruned_loss=0.04417, ctc_loss=0.08574, over 19851.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2838, pruned_loss=0.05872, ctc_loss=0.1105, over 3869917.21 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:58:18,348 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=170773.33333333334, ans=0.125
+2024-08-26 06:01:05,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=170880.0, ans=0.0
+2024-08-26 06:01:18,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=170880.0, ans=0.125
+2024-08-26 06:02:10,744 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.801e+02 2.071e+02 2.646e+02 5.963e+02, threshold=4.141e+02, percent-clipped=6.0
+2024-08-26 06:03:10,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.57 vs. limit=22.5
+2024-08-26 06:03:39,253 INFO [train.py:1114] (2/4) Epoch 13, batch 2200, loss[loss=0.2476, simple_loss=0.3103, pruned_loss=0.06696, ctc_loss=0.1276, over 19581.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2841, pruned_loss=0.05882, ctc_loss=0.1107, over 3867915.10 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 06:05:01,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=171146.66666666666, ans=0.125
+2024-08-26 06:05:27,774 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=171200.0, ans=0.07
+2024-08-26 06:05:56,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=171253.33333333334, ans=0.0
+2024-08-26 06:06:26,731 INFO [train.py:1114] (2/4) Epoch 13, batch 2250, loss[loss=0.2113, simple_loss=0.2813, pruned_loss=0.05129, ctc_loss=0.097, over 19610.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2844, pruned_loss=0.05882, ctc_loss=0.1106, over 3867059.55 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 06:06:30,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=171306.66666666666, ans=0.025
+2024-08-26 06:08:13,722 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=171413.33333333334, ans=0.125
+2024-08-26 06:08:30,395 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.765e+02 2.070e+02 2.599e+02 3.761e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-26 06:08:30,690 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=171466.66666666666, ans=0.125
+2024-08-26 06:09:00,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=171466.66666666666, ans=0.125
+2024-08-26 06:09:43,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=171520.0, ans=0.1
+2024-08-26 06:10:19,750 INFO [train.py:1114] (2/4) Epoch 13, batch 2300, loss[loss=0.2254, simple_loss=0.2872, pruned_loss=0.05972, ctc_loss=0.1106, over 19507.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2837, pruned_loss=0.05899, ctc_loss=0.111, over 3860537.43 frames. ], batch size: 49, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:10:27,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=171573.33333333334, ans=0.125
+2024-08-26 06:10:30,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=171573.33333333334, ans=0.2
+2024-08-26 06:10:43,120 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=171626.66666666666, ans=0.125
+2024-08-26 06:10:43,276 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-26 06:10:52,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=171626.66666666666, ans=0.1
+2024-08-26 06:11:02,462 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.67 vs. limit=15.0
+2024-08-26 06:11:19,266 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.27 vs. limit=10.0
+2024-08-26 06:11:43,348 INFO [train.py:1114] (2/4) Epoch 13, batch 2350, loss[loss=0.2523, simple_loss=0.3092, pruned_loss=0.0705, ctc_loss=0.1358, over 19662.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2843, pruned_loss=0.05935, ctc_loss=0.1114, over 3863155.69 frames. ], batch size: 63, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:11:48,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=171840.0, ans=0.2
+2024-08-26 06:11:52,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=171840.0, ans=0.1
+2024-08-26 06:12:10,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=171946.66666666666, ans=0.025
+2024-08-26 06:12:16,618 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.773e+02 2.247e+02 3.255e+02 4.983e+02, threshold=4.494e+02, percent-clipped=2.0
+2024-08-26 06:12:18,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=172000.0, ans=0.1
+2024-08-26 06:12:19,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=172000.0, ans=0.0
+2024-08-26 06:12:27,453 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.45 vs. limit=22.5
+2024-08-26 06:12:46,281 INFO [train.py:1114] (2/4) Epoch 13, batch 2400, loss[loss=0.2384, simple_loss=0.2967, pruned_loss=0.06553, ctc_loss=0.1227, over 19359.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2865, pruned_loss=0.06016, ctc_loss=0.1127, over 3857901.38 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:13:13,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=172160.0, ans=0.07
+2024-08-26 06:13:40,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=172266.66666666666, ans=0.125
+2024-08-26 06:14:08,368 INFO [train.py:1114] (2/4) Epoch 13, batch 2450, loss[loss=0.3115, simple_loss=0.3348, pruned_loss=0.1036, ctc_loss=0.2028, over 13446.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.2901, pruned_loss=0.06268, ctc_loss=0.118, over 3732248.48 frames. ], batch size: 140, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:14:08,586 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=172373.33333333334, ans=0.125
+2024-08-26 06:14:24,558 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=23.26 vs. limit=22.5
+2024-08-26 06:14:37,459 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.88 vs. limit=15.0
+2024-08-26 06:14:41,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=172480.0, ans=0.025
+2024-08-26 06:14:42,013 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.83 vs. limit=15.0
+2024-08-26 06:14:43,308 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.935e+02 2.072e+02 2.350e+02 4.711e+02, threshold=4.143e+02, percent-clipped=2.0
+2024-08-26 06:14:44,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=172533.33333333334, ans=0.125
+2024-08-26 06:15:44,201 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=172581.33333333334, ans=0.04949747468305833
+2024-08-26 06:16:27,496 INFO [train.py:1114] (2/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:16:27,497 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 06:17:53,188 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.1962, 3.1745, 3.7702, 2.8743], device='cuda:2')
+2024-08-26 06:17:58,793 INFO [train.py:1146] (2/4) Epoch 14, validation: loss=0.1898, simple_loss=0.2778, pruned_loss=0.03769, ctc_loss=0.06578, over 944034.00 frames.
+2024-08-26 06:18:12,591 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 14234MB
+2024-08-26 06:18:22,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=172634.66666666666, ans=0.125
+2024-08-26 06:19:03,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=172688.0, ans=0.0
+2024-08-26 06:19:53,743 INFO [train.py:1114] (2/4) Epoch 14, batch 50, loss[loss=0.2167, simple_loss=0.2747, pruned_loss=0.05789, ctc_loss=0.1076, over 19751.00 frames. ], tot_loss[loss=0.228, simple_loss=0.2879, pruned_loss=0.06106, ctc_loss=0.1151, over 845084.53 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:20:17,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=172848.0, ans=0.0
+2024-08-26 06:20:27,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-26 06:20:52,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=172954.66666666666, ans=0.125
+2024-08-26 06:21:17,218 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.738e+02 2.047e+02 2.487e+02 4.948e+02, threshold=4.095e+02, percent-clipped=4.0
+2024-08-26 06:21:21,524 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:21:51,866 INFO [train.py:1114] (2/4) Epoch 14, batch 100, loss[loss=0.2066, simple_loss=0.271, pruned_loss=0.05254, ctc_loss=0.09275, over 19711.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2881, pruned_loss=0.06079, ctc_loss=0.1145, over 1499046.71 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:22:29,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=173221.33333333334, ans=0.125
+2024-08-26 06:23:33,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=173381.33333333334, ans=0.2
+2024-08-26 06:23:38,125 INFO [train.py:1114] (2/4) Epoch 14, batch 150, loss[loss=0.1981, simple_loss=0.2593, pruned_loss=0.04973, ctc_loss=0.09348, over 19692.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2846, pruned_loss=0.05869, ctc_loss=0.1106, over 2028604.36 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:23:54,849 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.50 vs. limit=12.0
+2024-08-26 06:24:33,108 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.66 vs. limit=15.0
+2024-08-26 06:24:35,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=173541.33333333334, ans=0.05
+2024-08-26 06:24:49,764 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.676e+02 1.898e+02 2.213e+02 4.155e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 06:24:57,011 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:24:59,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=173648.0, ans=0.025
+2024-08-26 06:25:00,487 INFO [train.py:1114] (2/4) Epoch 14, batch 200, loss[loss=0.2392, simple_loss=0.2954, pruned_loss=0.06596, ctc_loss=0.1277, over 18454.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2835, pruned_loss=0.0583, ctc_loss=0.1099, over 2436843.10 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:25:23,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=173701.33333333334, ans=0.1
+2024-08-26 06:25:50,004 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=173808.0, ans=0.025
+2024-08-26 06:25:50,013 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=173808.0, ans=0.07
+2024-08-26 06:25:58,192 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.23 vs. limit=12.0
+2024-08-26 06:26:05,446 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=173861.33333333334, ans=0.125
+2024-08-26 06:26:07,084 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.17 vs. limit=15.0
+2024-08-26 06:26:10,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=173861.33333333334, ans=0.125
+2024-08-26 06:26:16,064 INFO [train.py:1114] (2/4) Epoch 14, batch 250, loss[loss=0.2497, simple_loss=0.3098, pruned_loss=0.06896, ctc_loss=0.1292, over 19384.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.284, pruned_loss=0.05877, ctc_loss=0.1107, over 2757067.39 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:26:16,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=173914.66666666666, ans=0.1
+2024-08-26 06:26:17,445 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=173914.66666666666, ans=0.0
+2024-08-26 06:26:54,333 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=174021.33333333334, ans=0.0
+2024-08-26 06:27:18,007 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.683e+02 2.061e+02 2.648e+02 4.927e+02, threshold=4.123e+02, percent-clipped=4.0
+2024-08-26 06:27:20,829 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.16 vs. limit=15.0
+2024-08-26 06:27:25,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=174128.0, ans=0.125
+2024-08-26 06:27:28,130 INFO [train.py:1114] (2/4) Epoch 14, batch 300, loss[loss=0.2478, simple_loss=0.3065, pruned_loss=0.06916, ctc_loss=0.1272, over 19542.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2837, pruned_loss=0.05847, ctc_loss=0.11, over 3001239.55 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:27:50,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=174288.0, ans=0.0
+2024-08-26 06:28:31,235 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.26 vs. limit=15.0
+2024-08-26 06:28:34,461 INFO [train.py:1114] (2/4) Epoch 14, batch 350, loss[loss=0.2057, simple_loss=0.2669, pruned_loss=0.05264, ctc_loss=0.09775, over 19738.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.2836, pruned_loss=0.05844, ctc_loss=0.1098, over 3191462.47 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:28:40,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174448.0, ans=0.1
+2024-08-26 06:28:54,881 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.16 vs. limit=15.0
+2024-08-26 06:29:06,125 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=174554.66666666666, ans=0.0
+2024-08-26 06:29:11,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-26 06:29:20,299 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.75 vs. limit=22.5
+2024-08-26 06:29:20,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=174608.0, ans=0.0
+2024-08-26 06:29:32,485 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.657e+02 1.894e+02 2.440e+02 4.007e+02, threshold=3.787e+02, percent-clipped=0.0
+2024-08-26 06:29:42,968 INFO [train.py:1114] (2/4) Epoch 14, batch 400, loss[loss=0.1951, simple_loss=0.2685, pruned_loss=0.04362, ctc_loss=0.0862, over 19482.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2821, pruned_loss=0.0575, ctc_loss=0.108, over 3343716.94 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:29:51,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=174714.66666666666, ans=0.125
+2024-08-26 06:29:55,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=174768.0, ans=0.0
+2024-08-26 06:30:07,564 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=174768.0, ans=0.0
+2024-08-26 06:30:08,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=174768.0, ans=0.125
+2024-08-26 06:30:58,898 INFO [train.py:1114] (2/4) Epoch 14, batch 450, loss[loss=0.2307, simple_loss=0.2876, pruned_loss=0.0632, ctc_loss=0.1187, over 19609.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2822, pruned_loss=0.05744, ctc_loss=0.1079, over 3449888.47 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:32:03,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=175088.0, ans=10.0
+2024-08-26 06:32:20,203 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.13 vs. limit=15.0
+2024-08-26 06:32:24,923 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:32:25,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=175141.33333333334, ans=0.125
+2024-08-26 06:32:32,623 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.702e+02 1.875e+02 2.205e+02 3.904e+02, threshold=3.749e+02, percent-clipped=2.0
+2024-08-26 06:32:51,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=175194.66666666666, ans=0.1
+2024-08-26 06:32:59,839 INFO [train.py:1114] (2/4) Epoch 14, batch 500, loss[loss=0.2622, simple_loss=0.3175, pruned_loss=0.07478, ctc_loss=0.1435, over 19681.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2817, pruned_loss=0.05738, ctc_loss=0.108, over 3545845.53 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:33:26,937 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.20 vs. limit=22.5
+2024-08-26 06:33:34,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=175301.33333333334, ans=0.0
+2024-08-26 06:33:36,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=175301.33333333334, ans=0.95
+2024-08-26 06:33:38,277 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=175301.33333333334, ans=0.2
+2024-08-26 06:34:03,172 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=175408.0, ans=0.1
+2024-08-26 06:34:13,945 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:34:32,907 INFO [train.py:1114] (2/4) Epoch 14, batch 550, loss[loss=0.2312, simple_loss=0.2901, pruned_loss=0.06335, ctc_loss=0.1143, over 19311.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2819, pruned_loss=0.05759, ctc_loss=0.1086, over 3607179.29 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:34:36,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=175514.66666666666, ans=0.0
+2024-08-26 06:35:08,592 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=175621.33333333334, ans=0.125
+2024-08-26 06:35:14,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=175674.66666666666, ans=0.2
+2024-08-26 06:35:25,204 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=175674.66666666666, ans=0.125
+2024-08-26 06:35:36,408 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.729e+02 1.957e+02 2.291e+02 4.042e+02, threshold=3.913e+02, percent-clipped=2.0
+2024-08-26 06:36:16,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=175728.0, ans=0.2
+2024-08-26 06:36:18,836 INFO [train.py:1114] (2/4) Epoch 14, batch 600, loss[loss=0.2534, simple_loss=0.3137, pruned_loss=0.07132, ctc_loss=0.1262, over 19341.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2819, pruned_loss=0.05751, ctc_loss=0.1084, over 3663666.03 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:37:45,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=175834.66666666666, ans=0.0
+2024-08-26 06:38:24,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-26 06:38:31,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-26 06:38:31,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-26 06:38:36,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=175888.0, ans=0.0
+2024-08-26 06:39:00,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=175941.33333333334, ans=0.125
+2024-08-26 06:39:25,932 INFO [train.py:1114] (2/4) Epoch 14, batch 650, loss[loss=0.2078, simple_loss=0.2682, pruned_loss=0.05443, ctc_loss=0.09625, over 19778.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.281, pruned_loss=0.05695, ctc_loss=0.1073, over 3714475.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:40:33,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=176101.33333333334, ans=0.0
+2024-08-26 06:40:37,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=176101.33333333334, ans=0.0
+2024-08-26 06:40:54,089 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=176154.66666666666, ans=0.0
+2024-08-26 06:40:58,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=176154.66666666666, ans=0.0
+2024-08-26 06:41:30,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=176261.33333333334, ans=0.125
+2024-08-26 06:41:31,383 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.772e+02 2.123e+02 2.635e+02 4.354e+02, threshold=4.247e+02, percent-clipped=3.0
+2024-08-26 06:41:34,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=176261.33333333334, ans=0.0
+2024-08-26 06:41:45,010 INFO [train.py:1114] (2/4) Epoch 14, batch 700, loss[loss=0.2119, simple_loss=0.2791, pruned_loss=0.0526, ctc_loss=0.09863, over 19746.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2814, pruned_loss=0.05702, ctc_loss=0.1076, over 3746176.45 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:41:52,315 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=176314.66666666666, ans=0.125
+2024-08-26 06:41:53,550 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=176314.66666666666, ans=0.125
+2024-08-26 06:41:58,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=176368.0, ans=0.025
+2024-08-26 06:42:35,891 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.55 vs. limit=6.0
+2024-08-26 06:42:38,287 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.21 vs. limit=12.0
+2024-08-26 06:42:46,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176528.0, ans=0.1
+2024-08-26 06:42:47,078 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.74 vs. limit=12.0
+2024-08-26 06:42:48,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=176528.0, ans=0.025
+2024-08-26 06:42:51,117 INFO [train.py:1114] (2/4) Epoch 14, batch 750, loss[loss=0.2168, simple_loss=0.2863, pruned_loss=0.05314, ctc_loss=0.1029, over 19499.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2813, pruned_loss=0.05693, ctc_loss=0.1073, over 3773023.39 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:43:03,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=176581.33333333334, ans=0.0
+2024-08-26 06:43:03,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 06:44:15,626 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.10 vs. limit=15.0
+2024-08-26 06:44:27,362 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.803e+02 2.358e+02 3.080e+02 4.835e+02, threshold=4.715e+02, percent-clipped=7.0
+2024-08-26 06:44:41,972 INFO [train.py:1114] (2/4) Epoch 14, batch 800, loss[loss=0.2336, simple_loss=0.2824, pruned_loss=0.06663, ctc_loss=0.1288, over 19393.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2818, pruned_loss=0.05697, ctc_loss=0.1075, over 3795191.57 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:45:26,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177008.0, ans=0.1
+2024-08-26 06:45:47,356 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=177061.33333333334, ans=0.0
+2024-08-26 06:45:52,067 INFO [train.py:1114] (2/4) Epoch 14, batch 850, loss[loss=0.2289, simple_loss=0.2926, pruned_loss=0.0599, ctc_loss=0.1137, over 19643.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2818, pruned_loss=0.05734, ctc_loss=0.1081, over 3814842.26 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:46:19,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=177114.66666666666, ans=0.2
+2024-08-26 06:46:33,252 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=177168.0, ans=0.125
+2024-08-26 06:46:35,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177221.33333333334, ans=0.1
+2024-08-26 06:47:02,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=177274.66666666666, ans=0.2
+2024-08-26 06:47:08,507 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=177274.66666666666, ans=0.2
+2024-08-26 06:47:11,733 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.690e+02 1.974e+02 2.351e+02 3.908e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 06:47:21,322 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=177328.0, ans=0.1
+2024-08-26 06:47:24,596 INFO [train.py:1114] (2/4) Epoch 14, batch 900, loss[loss=0.2134, simple_loss=0.2618, pruned_loss=0.05975, ctc_loss=0.1137, over 19417.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2818, pruned_loss=0.05751, ctc_loss=0.1085, over 3817936.02 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:47:49,462 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.90 vs. limit=12.0
+2024-08-26 06:48:00,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-26 06:48:09,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177541.33333333334, ans=0.1
+2024-08-26 06:48:29,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=177594.66666666666, ans=0.125
+2024-08-26 06:48:31,392 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=177594.66666666666, ans=0.125
+2024-08-26 06:48:38,066 INFO [train.py:1114] (2/4) Epoch 14, batch 950, loss[loss=0.1939, simple_loss=0.2579, pruned_loss=0.04706, ctc_loss=0.08947, over 19488.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2823, pruned_loss=0.05786, ctc_loss=0.1092, over 3819119.82 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:48:53,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=177701.33333333334, ans=0.09899494936611666
+2024-08-26 06:48:59,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-26 06:49:04,731 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=177701.33333333334, ans=0.2
+2024-08-26 06:49:06,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177754.66666666666, ans=0.1
+2024-08-26 06:49:20,230 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.33 vs. limit=22.5
+2024-08-26 06:49:21,457 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.96 vs. limit=15.0
+2024-08-26 06:49:30,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=177808.0, ans=0.0
+2024-08-26 06:49:35,204 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=177861.33333333334, ans=0.2
+2024-08-26 06:49:36,177 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.810e+02 2.092e+02 2.519e+02 4.035e+02, threshold=4.185e+02, percent-clipped=1.0
+2024-08-26 06:50:06,704 INFO [train.py:1114] (2/4) Epoch 14, batch 1000, loss[loss=0.1986, simple_loss=0.2649, pruned_loss=0.04755, ctc_loss=0.09288, over 19852.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.283, pruned_loss=0.05816, ctc_loss=0.1098, over 3815772.92 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:50:08,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=177914.66666666666, ans=0.0
+2024-08-26 06:50:08,234 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177914.66666666666, ans=0.1
+2024-08-26 06:51:23,238 INFO [train.py:1114] (2/4) Epoch 14, batch 1050, loss[loss=0.2383, simple_loss=0.3009, pruned_loss=0.0645, ctc_loss=0.1169, over 19852.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2821, pruned_loss=0.05788, ctc_loss=0.109, over 3822703.55 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:51:29,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=178181.33333333334, ans=0.0
+2024-08-26 06:51:51,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=178288.0, ans=0.125
+2024-08-26 06:52:03,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=178341.33333333334, ans=0.07
+2024-08-26 06:52:03,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=178341.33333333334, ans=0.125
+2024-08-26 06:52:17,044 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 1.767e+02 2.034e+02 2.568e+02 4.426e+02, threshold=4.067e+02, percent-clipped=2.0
+2024-08-26 06:52:39,172 INFO [train.py:1114] (2/4) Epoch 14, batch 1100, loss[loss=0.1891, simple_loss=0.2586, pruned_loss=0.043, ctc_loss=0.08402, over 19588.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2815, pruned_loss=0.05725, ctc_loss=0.1078, over 3830526.86 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:53:11,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=178554.66666666666, ans=0.025
+2024-08-26 06:53:16,594 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=20.26 vs. limit=22.5
+2024-08-26 06:53:18,058 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=178608.0, ans=0.0
+2024-08-26 06:53:42,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=178661.33333333334, ans=0.125
+2024-08-26 06:53:49,716 INFO [train.py:1114] (2/4) Epoch 14, batch 1150, loss[loss=0.2121, simple_loss=0.2786, pruned_loss=0.05272, ctc_loss=0.1004, over 19586.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2819, pruned_loss=0.05759, ctc_loss=0.1084, over 3831181.97 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:53:52,936 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.90 vs. limit=22.5
+2024-08-26 06:53:56,173 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=178714.66666666666, ans=0.125
+2024-08-26 06:54:15,530 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.25 vs. limit=12.0
+2024-08-26 06:54:23,069 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.03 vs. limit=10.0
+2024-08-26 06:54:24,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=178821.33333333334, ans=0.2
+2024-08-26 06:54:32,979 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=178821.33333333334, ans=0.125
+2024-08-26 06:54:38,219 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.20 vs. limit=22.5
+2024-08-26 06:54:47,785 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.255e+02 1.672e+02 1.916e+02 2.259e+02 4.129e+02, threshold=3.832e+02, percent-clipped=1.0
+2024-08-26 06:54:58,220 INFO [train.py:1114] (2/4) Epoch 14, batch 1200, loss[loss=0.2267, simple_loss=0.2924, pruned_loss=0.058, ctc_loss=0.1125, over 19837.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2826, pruned_loss=0.0578, ctc_loss=0.1088, over 3826923.42 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:55:04,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=178981.33333333334, ans=0.2
+2024-08-26 06:55:05,374 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:55:09,195 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.10 vs. limit=15.0
+2024-08-26 06:55:14,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 06:55:20,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 06:55:27,822 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.82 vs. limit=15.0
+2024-08-26 06:55:52,889 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=179194.66666666666, ans=10.0
+2024-08-26 06:56:28,019 INFO [train.py:1114] (2/4) Epoch 14, batch 1250, loss[loss=0.2374, simple_loss=0.3013, pruned_loss=0.0639, ctc_loss=0.1144, over 19523.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2833, pruned_loss=0.05811, ctc_loss=0.1091, over 3844708.95 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:56:38,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff2.min_abs, batch_count=179248.0, ans=0.1
+2024-08-26 06:56:39,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_ff3.min_abs, batch_count=179248.0, ans=0.2
+2024-08-26 06:57:34,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=179408.0, ans=0.125
+2024-08-26 06:57:38,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=179408.0, ans=0.0
+2024-08-26 06:58:13,353 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.864e+02 2.134e+02 2.537e+02 3.723e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-08-26 06:58:15,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=179461.33333333334, ans=0.0
+2024-08-26 06:58:23,018 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=179461.33333333334, ans=0.125
+2024-08-26 06:58:27,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=179461.33333333334, ans=0.02
+2024-08-26 06:58:29,355 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=179461.33333333334, ans=0.025
+2024-08-26 06:58:30,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-26 06:58:31,297 INFO [train.py:1114] (2/4) Epoch 14, batch 1300, loss[loss=0.2668, simple_loss=0.3179, pruned_loss=0.07862, ctc_loss=0.1463, over 18859.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2826, pruned_loss=0.05783, ctc_loss=0.1085, over 3848027.07 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:58:37,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=179514.66666666666, ans=0.2
+2024-08-26 06:59:10,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=179568.0, ans=0.0
+2024-08-26 06:59:36,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179621.33333333334, ans=0.125
+2024-08-26 07:00:16,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179728.0, ans=0.125
+2024-08-26 07:00:33,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179728.0, ans=0.125
+2024-08-26 07:00:35,388 INFO [train.py:1114] (2/4) Epoch 14, batch 1350, loss[loss=0.2403, simple_loss=0.2901, pruned_loss=0.069, ctc_loss=0.1313, over 19790.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2821, pruned_loss=0.05768, ctc_loss=0.1082, over 3857906.85 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:00:38,729 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:00:46,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=179781.33333333334, ans=0.0
+2024-08-26 07:00:57,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=179781.33333333334, ans=0.0
+2024-08-26 07:01:26,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=179834.66666666666, ans=0.0
+2024-08-26 07:01:32,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=179888.0, ans=0.125
+2024-08-26 07:02:12,324 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.91 vs. limit=15.0
+2024-08-26 07:02:21,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=179994.66666666666, ans=0.125
+2024-08-26 07:02:26,032 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.690e+02 1.870e+02 2.214e+02 3.706e+02, threshold=3.740e+02, percent-clipped=0.0
+2024-08-26 07:02:27,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=179994.66666666666, ans=0.2
+2024-08-26 07:02:47,345 INFO [train.py:1114] (2/4) Epoch 14, batch 1400, loss[loss=0.1956, simple_loss=0.2562, pruned_loss=0.04947, ctc_loss=0.08989, over 19660.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2815, pruned_loss=0.05764, ctc_loss=0.1078, over 3865083.37 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:02:53,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.41 vs. limit=15.0
+2024-08-26 07:03:10,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=180048.0, ans=0.025
+2024-08-26 07:03:28,050 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180101.33333333334, ans=0.125
+2024-08-26 07:03:34,043 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=180154.66666666666, ans=0.125
+2024-08-26 07:03:55,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.35 vs. limit=22.5
+2024-08-26 07:04:09,183 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=180261.33333333334, ans=0.125
+2024-08-26 07:04:13,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=180261.33333333334, ans=0.2
+2024-08-26 07:04:13,306 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=180261.33333333334, ans=0.0
+2024-08-26 07:04:25,279 INFO [train.py:1114] (2/4) Epoch 14, batch 1450, loss[loss=0.2631, simple_loss=0.3147, pruned_loss=0.07806, ctc_loss=0.1382, over 19663.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2827, pruned_loss=0.05818, ctc_loss=0.1089, over 3861354.24 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:04:38,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=180314.66666666666, ans=0.125
+2024-08-26 07:05:03,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=180368.0, ans=0.1
+2024-08-26 07:05:14,676 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=180421.33333333334, ans=0.0
+2024-08-26 07:05:15,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=180421.33333333334, ans=0.125
+2024-08-26 07:05:15,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=180421.33333333334, ans=10.0
+2024-08-26 07:05:28,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-26 07:05:40,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=180528.0, ans=0.0
+2024-08-26 07:05:41,174 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.716e+02 1.963e+02 2.339e+02 6.137e+02, threshold=3.925e+02, percent-clipped=1.0
+2024-08-26 07:05:57,996 INFO [train.py:1114] (2/4) Epoch 14, batch 1500, loss[loss=0.2123, simple_loss=0.2836, pruned_loss=0.05208, ctc_loss=0.09211, over 19592.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2826, pruned_loss=0.05748, ctc_loss=0.1077, over 3861121.37 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:06:04,429 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=180581.33333333334, ans=0.125
+2024-08-26 07:06:07,016 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.21 vs. limit=15.0
+2024-08-26 07:06:36,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=180634.66666666666, ans=0.125
+2024-08-26 07:06:58,754 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180688.0, ans=0.1
+2024-08-26 07:06:59,208 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.39 vs. limit=22.5
+2024-08-26 07:07:02,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=180741.33333333334, ans=0.125
+2024-08-26 07:07:02,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=180741.33333333334, ans=0.125
+2024-08-26 07:07:23,917 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.12 vs. limit=12.0
+2024-08-26 07:07:24,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180794.66666666666, ans=0.1
+2024-08-26 07:07:26,414 INFO [train.py:1114] (2/4) Epoch 14, batch 1550, loss[loss=0.2455, simple_loss=0.3054, pruned_loss=0.0678, ctc_loss=0.1251, over 19599.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2827, pruned_loss=0.0577, ctc_loss=0.1082, over 3844900.86 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:07:37,751 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=180848.0, ans=0.125
+2024-08-26 07:07:39,767 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=180848.0, ans=0.0
+2024-08-26 07:08:03,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=180954.66666666666, ans=0.0
+2024-08-26 07:08:18,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=181061.33333333334, ans=0.05
+2024-08-26 07:08:20,836 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.735e+02 1.996e+02 2.323e+02 4.332e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-26 07:08:46,920 INFO [train.py:1114] (2/4) Epoch 14, batch 1600, loss[loss=0.2351, simple_loss=0.3016, pruned_loss=0.06205, ctc_loss=0.1111, over 19846.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2825, pruned_loss=0.05768, ctc_loss=0.1081, over 3833861.75 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:08:48,624 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.92 vs. limit=15.0
+2024-08-26 07:08:51,860 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=181114.66666666666, ans=0.125
+2024-08-26 07:08:58,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=181114.66666666666, ans=0.0
+2024-08-26 07:09:19,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=181221.33333333334, ans=0.125
+2024-08-26 07:09:21,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=181221.33333333334, ans=0.025
+2024-08-26 07:09:55,722 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=181274.66666666666, ans=0.1
+2024-08-26 07:10:12,185 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=181328.0, ans=0.125
+2024-08-26 07:10:20,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=181328.0, ans=0.125
+2024-08-26 07:10:22,384 INFO [train.py:1114] (2/4) Epoch 14, batch 1650, loss[loss=0.2333, simple_loss=0.2992, pruned_loss=0.06024, ctc_loss=0.1174, over 19648.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2823, pruned_loss=0.05772, ctc_loss=0.1082, over 3830692.64 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:11:10,762 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.857e+02 2.243e+02 2.957e+02 5.258e+02, threshold=4.486e+02, percent-clipped=5.0
+2024-08-26 07:11:28,232 INFO [train.py:1114] (2/4) Epoch 14, batch 1700, loss[loss=0.1672, simple_loss=0.2317, pruned_loss=0.0372, ctc_loss=0.07066, over 19674.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2818, pruned_loss=0.05735, ctc_loss=0.1075, over 3844833.60 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:11:29,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=181648.0, ans=0.0
+2024-08-26 07:11:48,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=181701.33333333334, ans=0.125
+2024-08-26 07:11:54,676 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.56 vs. limit=15.0
+2024-08-26 07:12:08,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=181808.0, ans=0.125
+2024-08-26 07:12:13,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=181861.33333333334, ans=0.2
+2024-08-26 07:12:14,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=181861.33333333334, ans=0.0
+2024-08-26 07:12:20,252 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=181861.33333333334, ans=0.0
+2024-08-26 07:12:24,393 INFO [train.py:1114] (2/4) Epoch 14, batch 1750, loss[loss=0.2026, simple_loss=0.2542, pruned_loss=0.05471, ctc_loss=0.1038, over 19631.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2813, pruned_loss=0.05702, ctc_loss=0.107, over 3850031.43 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:12:31,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=181914.66666666666, ans=10.0
+2024-08-26 07:13:16,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=182021.33333333334, ans=0.125
+2024-08-26 07:13:30,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=182074.66666666666, ans=0.0
+2024-08-26 07:13:35,899 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.769e+02 2.123e+02 2.747e+02 4.234e+02, threshold=4.245e+02, percent-clipped=0.0
+2024-08-26 07:13:51,682 INFO [train.py:1114] (2/4) Epoch 14, batch 1800, loss[loss=0.2475, simple_loss=0.3034, pruned_loss=0.06934, ctc_loss=0.1326, over 19616.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2814, pruned_loss=0.05719, ctc_loss=0.1075, over 3851925.79 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:14:10,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182234.66666666666, ans=0.1
+2024-08-26 07:14:10,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=182234.66666666666, ans=0.1
+2024-08-26 07:14:19,706 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.81 vs. limit=15.0
+2024-08-26 07:14:25,238 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.30 vs. limit=15.0
+2024-08-26 07:14:32,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=182341.33333333334, ans=0.0
+2024-08-26 07:14:33,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=182341.33333333334, ans=0.125
+2024-08-26 07:14:49,564 INFO [train.py:1114] (2/4) Epoch 14, batch 1850, loss[loss=0.2182, simple_loss=0.2853, pruned_loss=0.05494, ctc_loss=0.1031, over 19581.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2811, pruned_loss=0.05696, ctc_loss=0.1071, over 3855220.69 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:15:03,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=182501.33333333334, ans=0.07
+2024-08-26 07:15:09,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=182501.33333333334, ans=0.125
+2024-08-26 07:15:35,879 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.755e+02 2.000e+02 2.500e+02 5.147e+02, threshold=4.001e+02, percent-clipped=3.0
+2024-08-26 07:15:46,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=182661.33333333334, ans=0.0
+2024-08-26 07:15:47,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=182661.33333333334, ans=0.1
+2024-08-26 07:15:52,251 INFO [train.py:1114] (2/4) Epoch 14, batch 1900, loss[loss=0.2098, simple_loss=0.2823, pruned_loss=0.04953, ctc_loss=0.09546, over 19652.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2816, pruned_loss=0.05687, ctc_loss=0.107, over 3859926.01 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:15:55,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=182714.66666666666, ans=0.0
+2024-08-26 07:16:08,004 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=182768.0, ans=0.0
+2024-08-26 07:16:17,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=182821.33333333334, ans=0.125
+2024-08-26 07:16:23,382 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=182821.33333333334, ans=0.125
+2024-08-26 07:16:56,703 INFO [train.py:1114] (2/4) Epoch 14, batch 1950, loss[loss=0.2198, simple_loss=0.2835, pruned_loss=0.05702, ctc_loss=0.1053, over 19597.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2833, pruned_loss=0.05746, ctc_loss=0.108, over 3869013.35 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:17:24,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=183034.66666666666, ans=0.125
+2024-08-26 07:17:24,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=183034.66666666666, ans=0.0
+2024-08-26 07:17:38,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=183088.0, ans=0.0
+2024-08-26 07:17:51,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=183141.33333333334, ans=0.0
+2024-08-26 07:17:51,296 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=183141.33333333334, ans=0.125
+2024-08-26 07:17:55,523 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.666e+02 1.941e+02 2.281e+02 4.229e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-26 07:18:06,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183194.66666666666, ans=0.1
+2024-08-26 07:18:08,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=183194.66666666666, ans=0.125
+2024-08-26 07:18:14,087 INFO [train.py:1114] (2/4) Epoch 14, batch 2000, loss[loss=0.2163, simple_loss=0.2616, pruned_loss=0.06125, ctc_loss=0.121, over 19640.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2839, pruned_loss=0.05783, ctc_loss=0.1087, over 3853811.42 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 64.0
+2024-08-26 07:18:23,277 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff2.min_abs, batch_count=183248.0, ans=0.1
+2024-08-26 07:19:11,490 INFO [train.py:1114] (2/4) Epoch 14, batch 2050, loss[loss=0.2132, simple_loss=0.2672, pruned_loss=0.05788, ctc_loss=0.1088, over 19736.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2832, pruned_loss=0.05788, ctc_loss=0.1087, over 3849695.00 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:19:12,671 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=183514.66666666666, ans=0.125
+2024-08-26 07:19:43,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=183621.33333333334, ans=0.2
+2024-08-26 07:19:54,330 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.04 vs. limit=8.0
+2024-08-26 07:20:51,590 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.705e+02 1.994e+02 2.461e+02 3.917e+02, threshold=3.988e+02, percent-clipped=1.0
+2024-08-26 07:23:16,204 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=183728.0, ans=0.125
+2024-08-26 07:24:48,251 INFO [train.py:1114] (2/4) Epoch 14, batch 2100, loss[loss=0.206, simple_loss=0.2731, pruned_loss=0.05027, ctc_loss=0.09588, over 19785.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2822, pruned_loss=0.05744, ctc_loss=0.1077, over 3857710.45 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:26:26,037 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.46 vs. limit=6.0
+2024-08-26 07:26:28,439 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.34 vs. limit=15.0
+2024-08-26 07:28:51,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=183781.33333333334, ans=0.125
+2024-08-26 07:31:06,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=183781.33333333334, ans=0.1
+2024-08-26 07:43:35,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=183834.66666666666, ans=0.2
+2024-08-26 07:45:05,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.05 vs. limit=6.0
+2024-08-26 07:50:00,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=183888.0, ans=0.125
+2024-08-26 07:50:01,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=183888.0, ans=0.125
+2024-08-26 08:00:56,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=183941.33333333334, ans=0.07
+2024-08-26 08:13:15,356 INFO [train.py:1114] (2/4) Epoch 14, batch 2150, loss[loss=0.1965, simple_loss=0.2658, pruned_loss=0.04592, ctc_loss=0.08843, over 19866.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2815, pruned_loss=0.05714, ctc_loss=0.1071, over 3868069.69 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 08:20:19,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184101.33333333334, ans=0.1
+2024-08-26 08:27:10,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=184101.33333333334, ans=0.125
+2024-08-26 08:27:10,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=184101.33333333334, ans=0.125
+2024-08-26 08:59:37,608 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.765e+02 2.052e+02 2.784e+02 6.261e+02, threshold=4.104e+02, percent-clipped=7.0
+2024-08-26 09:03:09,761 INFO [train.py:1114] (2/4) Epoch 14, batch 2200, loss[loss=0.2297, simple_loss=0.2943, pruned_loss=0.06055, ctc_loss=0.1099, over 19569.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2818, pruned_loss=0.05726, ctc_loss=0.1073, over 3866659.28 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 09:07:05,923 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=184314.66666666666, ans=0.0
+2024-08-26 09:10:29,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184368.0, ans=0.1
+2024-08-26 09:11:10,017 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 09:18:48,166 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=184474.66666666666, ans=0.0
+2024-08-26 09:19:01,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=184474.66666666666, ans=0.0
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-3
new file mode 100644
index 0000000000000000000000000000000000000000..17477cab5f5c6136e1c75e5c7ec9bba25414b1cd
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-25-02-23-27-3
@@ -0,0 +1,4906 @@
+2024-08-25 02:23:27,610 INFO [train.py:1182] (3/4) Training started
+2024-08-25 02:23:27,611 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-25 02:23:27,697 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2654.int.cedar.computecanada.ca', 'IP address': '172.16.146.91'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-25 02:23:27,697 INFO [train.py:1212] (3/4) About to create model
+2024-08-25 02:23:29,322 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-25 02:23:29,428 INFO [train.py:1231] (3/4) Using DDP
+2024-08-25 02:23:51,124 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-25 02:23:51,497 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-25 02:23:53,043 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-25 02:23:53,051 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-25 02:23:53,293 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-25 02:23:53,346 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-25 02:23:53,653 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-25 02:23:53,654 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 02:27:50,705 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12271MB
+2024-08-25 02:27:52,174 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12346MB
+2024-08-25 02:28:01,912 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12346MB
+2024-08-25 02:28:03,370 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12346MB
+2024-08-25 02:28:25,875 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=256, metric=45.71 vs. limit=7.5
+2024-08-25 02:28:26,152 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12346MB
+2024-08-25 02:28:27,777 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12543MB
+2024-08-25 02:29:16,119 INFO [train.py:1114] (3/4) Epoch 1, batch 0, loss[loss=8.814, simple_loss=7.18, pruned_loss=6.734, ctc_loss=4.794, over 19817.00 frames. ], tot_loss[loss=8.814, simple_loss=7.18, pruned_loss=6.734, ctc_loss=4.794, over 19817.00 frames. ], batch size: 49, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 02:29:16,120 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 02:29:29,433 INFO [train.py:1146] (3/4) Epoch 1, validation: loss=8.973, simple_loss=7.311, pruned_loss=6.819, ctc_loss=4.895, over 944034.00 frames.
+2024-08-25 02:29:29,434 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 12543MB
+2024-08-25 02:29:31,369 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.80 vs. limit=7.5
+2024-08-25 02:29:38,211 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=0.0, ans=0.5
+2024-08-25 02:30:01,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=0.0, ans=0.2
+2024-08-25 02:30:14,648 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.62 vs. limit=5.013333333333334
+2024-08-25 02:30:23,438 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.714e+03 3.750e+03 4.817e+03 5.615e+03 6.551e+03, threshold=1.927e+04, percent-clipped=0.0
+2024-08-25 02:30:48,434 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=492.70 vs. limit=7.54
+2024-08-25 02:30:48,682 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.98 vs. limit=7.52
+2024-08-25 02:32:26,054 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.867e+02 1.019e+03 3.714e+03 5.063e+03 6.846e+03, threshold=1.486e+04, percent-clipped=0.0
+2024-08-25 02:32:37,481 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=63.16 vs. limit=5.026666666666666
+2024-08-25 02:32:51,802 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=349.16 vs. limit=7.56
+2024-08-25 02:32:52,214 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.09 vs. limit=4.064
+2024-08-25 02:33:26,774 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=139.28 vs. limit=7.66
+2024-08-25 02:33:35,945 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=52.92 vs. limit=7.58
+2024-08-25 02:33:36,811 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.544e+02 7.649e+02 1.076e+03 3.731e+03 6.846e+03, threshold=4.304e+03, percent-clipped=0.0
+2024-08-25 02:34:04,702 INFO [train.py:1114] (3/4) Epoch 1, batch 50, loss[loss=1.449, simple_loss=1.079, pruned_loss=1.264, ctc_loss=1.142, over 19710.00 frames. ], tot_loss[loss=3.551, simple_loss=2.933, pruned_loss=2.55, ctc_loss=1.777, over 845595.77 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 02:34:06,446 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=10.88 vs. limit=4.1066666666666665
+2024-08-25 02:34:15,547 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=87.90 vs. limit=7.6
+2024-08-25 02:34:22,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=266.6666666666667, ans=0.09833333333333334
+2024-08-25 02:34:26,198 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=320.0, ans=0.485
+2024-08-25 02:34:30,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=320.0, ans=0.049
+2024-08-25 02:35:07,300 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=31.08 vs. limit=7.78
+2024-08-25 02:35:08,683 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=375.51 vs. limit=7.64
+2024-08-25 02:35:14,210 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=373.3333333333333, ans=0.4825
+2024-08-25 02:35:17,627 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=127.77 vs. limit=7.64
+2024-08-25 02:35:28,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.whiten.whitening_limit, batch_count=426.6666666666667, ans=4.1706666666666665
+2024-08-25 02:35:30,933 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=65.72 vs. limit=7.66
+2024-08-25 02:37:30,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=480.0, ans=0.223
+2024-08-25 02:37:30,768 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=20.83 vs. limit=7.68
+2024-08-25 02:37:42,456 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=40.75 vs. limit=7.86
+2024-08-25 02:37:47,607 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=480.0, ans=0.4775
+2024-08-25 02:37:50,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=533.3333333333334, ans=0.475
+2024-08-25 02:37:51,544 INFO [train.py:1114] (3/4) Epoch 1, batch 100, loss[loss=1.321, simple_loss=0.9457, pruned_loss=1.205, ctc_loss=1.121, over 19721.00 frames. ], tot_loss[loss=2.416, simple_loss=1.918, pruned_loss=1.865, ctc_loss=1.473, over 1499372.19 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 02:37:55,738 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.639e+01 1.517e+02 3.832e+02 1.019e+03 9.054e+03, threshold=7.665e+02, percent-clipped=2.0
+2024-08-25 02:38:04,173 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=82.33 vs. limit=7.7
+2024-08-25 02:38:04,539 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=25.01 vs. limit=7.7
+2024-08-25 02:38:08,733 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 02:38:09,263 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=37.86 vs. limit=7.94
+2024-08-25 02:38:24,633 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=85.48 vs. limit=5.1466666666666665
+2024-08-25 02:38:35,188 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=38.13 vs. limit=7.72
+2024-08-25 02:38:36,218 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=77.19 vs. limit=5.32
+2024-08-25 02:38:36,704 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=55.71 vs. limit=7.74
+2024-08-25 02:38:41,824 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=140.28 vs. limit=5.32
+2024-08-25 02:38:52,551 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=35.68 vs. limit=7.98
+2024-08-25 02:38:53,881 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=78.17 vs. limit=7.76
+2024-08-25 02:39:03,345 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=693.3333333333334, ans=0.4675
+2024-08-25 02:39:05,324 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=71.19 vs. limit=7.76
+2024-08-25 02:39:07,283 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn1.whiten.whitening_limit, batch_count=693.3333333333334, ans=8.02
+2024-08-25 02:39:09,234 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=746.6666666666666, ans=0.208
+2024-08-25 02:39:09,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=746.6666666666666, ans=0.46499999999999997
+2024-08-25 02:39:11,152 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=34.00 vs. limit=7.78
+2024-08-25 02:39:13,715 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=68.01 vs. limit=7.78
+2024-08-25 02:39:16,479 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=11.15 vs. limit=5.1866666666666665
+2024-08-25 02:39:17,993 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=68.11 vs. limit=7.78
+2024-08-25 02:39:22,878 INFO [train.py:1114] (3/4) Epoch 1, batch 150, loss[loss=1.161, simple_loss=0.8052, pruned_loss=1.019, ctc_loss=1.075, over 19706.00 frames. ], tot_loss[loss=1.943, simple_loss=1.492, pruned_loss=1.564, ctc_loss=1.344, over 2028131.03 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 02:39:39,098 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=19.92 vs. limit=7.82
+2024-08-25 02:39:39,202 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=72.71 vs. limit=5.426666666666667
+2024-08-25 02:39:51,412 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=32.54 vs. limit=8.18
+2024-08-25 02:39:54,167 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=33.23 vs. limit=5.453333333333333
+2024-08-25 02:39:58,139 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=19.61 vs. limit=7.84
+2024-08-25 02:40:04,186 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=14.35 vs. limit=4.384
+2024-08-25 02:40:08,301 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=96.49 vs. limit=5.48
+2024-08-25 02:40:12,008 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=960.0, ans=0.8664000000000001
+2024-08-25 02:40:12,468 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=380.50 vs. limit=7.86
+2024-08-25 02:40:17,848 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=46.32 vs. limit=7.88
+2024-08-25 02:40:17,883 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=26.09 vs. limit=7.88
+2024-08-25 02:40:19,454 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=25.01 vs. limit=7.88
+2024-08-25 02:40:32,726 INFO [train.py:1114] (3/4) Epoch 1, batch 200, loss[loss=1.242, simple_loss=0.8555, pruned_loss=0.9993, ctc_loss=1.196, over 18388.00 frames. ], tot_loss[loss=1.689, simple_loss=1.263, pruned_loss=1.374, ctc_loss=1.279, over 2436267.97 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 02:40:32,952 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=1066.6666666666667, ans=0.19
+2024-08-25 02:40:33,209 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=64.14 vs. limit=7.9
+2024-08-25 02:40:35,794 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 02:40:36,946 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.587e+01 1.185e+02 1.545e+02 1.999e+02 4.229e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-25 02:41:05,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=1120.0, ans=0.0748
+2024-08-25 02:41:05,999 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=10.26 vs. limit=5.28
+2024-08-25 02:41:11,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1120.0, ans=0.2888
+2024-08-25 02:41:15,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=1120.0, ans=0.4475
+2024-08-25 02:41:20,875 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 02:41:22,248 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=1173.3333333333333, ans=0.445
+2024-08-25 02:41:22,493 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.58 vs. limit=3.176
+2024-08-25 02:41:31,174 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=14.97 vs. limit=5.293333333333333
+2024-08-25 02:41:31,244 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=40.64 vs. limit=7.94
+2024-08-25 02:41:31,421 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=25.17 vs. limit=8.38
+2024-08-25 02:41:34,982 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=1173.3333333333333, ans=0.28826666666666667
+2024-08-25 02:41:35,557 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=16.76 vs. limit=5.586666666666667
+2024-08-25 02:41:42,370 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=15.52 vs. limit=4.490666666666667
+2024-08-25 02:41:54,952 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1226.6666666666667, ans=0.28773333333333334
+2024-08-25 02:41:57,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=1280.0, ans=0.2872
+2024-08-25 02:42:01,661 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=48.02 vs. limit=7.98
+2024-08-25 02:42:07,878 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=1280.0, ans=0.44
+2024-08-25 02:42:11,852 INFO [train.py:1114] (3/4) Epoch 1, batch 250, loss[loss=1.204, simple_loss=0.8178, pruned_loss=0.9562, ctc_loss=1.172, over 19410.00 frames. ], tot_loss[loss=1.536, simple_loss=1.124, pruned_loss=1.249, ctc_loss=1.244, over 2756308.20 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 02:42:12,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=1333.3333333333333, ans=0.8533333333333334
+2024-08-25 02:42:17,052 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.89 vs. limit=8.5
+2024-08-25 02:42:29,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=1386.6666666666667, ans=0.28613333333333335
+2024-08-25 02:42:40,511 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=79.17 vs. limit=8.54
+2024-08-25 02:42:45,891 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=93.90 vs. limit=8.04
+2024-08-25 02:42:47,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=1440.0, ans=0.8496
+2024-08-25 02:42:52,947 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=25.29 vs. limit=8.58
+2024-08-25 02:42:59,648 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=132.28 vs. limit=8.06
+2024-08-25 02:42:59,837 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=17.41 vs. limit=5.373333333333333
+2024-08-25 02:43:01,033 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=219.38 vs. limit=8.06
+2024-08-25 02:43:04,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1493.3333333333333, ans=0.28506666666666663
+2024-08-25 02:43:07,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=1493.3333333333333, ans=0.43
+2024-08-25 02:43:10,560 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=99.45 vs. limit=8.08
+2024-08-25 02:43:11,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=1546.6666666666667, ans=0.4275
+2024-08-25 02:43:12,806 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=1546.6666666666667, ans=0.142
+2024-08-25 02:43:13,573 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=49.82 vs. limit=8.08
+2024-08-25 02:43:18,689 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=1546.6666666666667, ans=0.4275
+2024-08-25 02:43:21,291 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=1546.6666666666667, ans=0.142
+2024-08-25 02:43:23,908 INFO [train.py:1114] (3/4) Epoch 1, batch 300, loss[loss=1.262, simple_loss=0.8449, pruned_loss=0.9881, ctc_loss=1.239, over 19532.00 frames. ], tot_loss[loss=1.435, simple_loss=1.03, pruned_loss=1.159, ctc_loss=1.22, over 3001332.80 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 02:43:24,350 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=15.73 vs. limit=4.64
+2024-08-25 02:43:25,443 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1600.0, ans=0.284
+2024-08-25 02:43:27,477 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.84 vs. limit=5.4
+2024-08-25 02:43:27,973 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 8.516e+01 1.281e+02 1.784e+02 2.457e+02 1.092e+03, threshold=3.568e+02, percent-clipped=12.0
+2024-08-25 02:43:33,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=1600.0, ans=0.425
+2024-08-25 02:43:41,023 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=131.44 vs. limit=8.12
+2024-08-25 02:43:42,303 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=39.90 vs. limit=8.74
+2024-08-25 02:44:00,592 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=31.17 vs. limit=8.78
+2024-08-25 02:44:16,649 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=11.52 vs. limit=4.704
+2024-08-25 02:44:22,371 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.06 vs. limit=8.82
+2024-08-25 02:44:24,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=1813.3333333333333, ans=0.08866666666666667
+2024-08-25 02:44:27,488 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=198.81 vs. limit=8.18
+2024-08-25 02:44:31,769 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=59.97 vs. limit=8.18
+2024-08-25 02:44:35,132 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=149.78 vs. limit=8.18
+2024-08-25 02:44:38,846 INFO [train.py:1114] (3/4) Epoch 1, batch 350, loss[loss=1.112, simple_loss=0.7322, pruned_loss=0.8619, ctc_loss=1.109, over 19760.00 frames. ], tot_loss[loss=1.368, simple_loss=0.9653, pruned_loss=1.096, ctc_loss=1.207, over 3191955.11 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 02:44:45,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=1866.6666666666667, ans=0.4125
+2024-08-25 02:44:48,863 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=18.14 vs. limit=5.933333333333334
+2024-08-25 02:44:52,571 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=28.47 vs. limit=8.22
+2024-08-25 02:44:53,986 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=10.86 vs. limit=5.48
+2024-08-25 02:44:58,362 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.82 vs. limit=8.94
+2024-08-25 02:45:03,599 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=105.44 vs. limit=8.22
+2024-08-25 02:45:18,696 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=55.98 vs. limit=8.24
+2024-08-25 02:45:20,009 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=89.70 vs. limit=8.24
+2024-08-25 02:45:21,141 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=110.33 vs. limit=8.24
+2024-08-25 02:45:21,406 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=35.53 vs. limit=8.24
+2024-08-25 02:45:22,372 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=1973.3333333333333, ans=0.8309333333333333
+2024-08-25 02:45:22,373 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=1973.3333333333333, ans=0.4075
+2024-08-25 02:45:23,943 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=28.72 vs. limit=8.26
+2024-08-25 02:45:25,411 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.83 vs. limit=9.02
+2024-08-25 02:45:27,840 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.61 vs. limit=9.02
+2024-08-25 02:45:32,312 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=50.93 vs. limit=8.26
+2024-08-25 02:45:33,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=2026.6666666666667, ans=0.08733333333333333
+2024-08-25 02:45:38,916 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=81.20 vs. limit=8.28
+2024-08-25 02:45:39,242 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=182.31 vs. limit=8.28
+2024-08-25 02:46:54,020 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=2080.0, ans=0.8272
+2024-08-25 02:47:06,886 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=14.16 vs. limit=6.04
+2024-08-25 02:47:08,400 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=41.59 vs. limit=8.3
+2024-08-25 02:47:09,645 INFO [train.py:1114] (3/4) Epoch 1, batch 400, loss[loss=1.163, simple_loss=0.7652, pruned_loss=0.8849, ctc_loss=1.133, over 19503.00 frames. ], tot_loss[loss=1.317, simple_loss=0.9148, pruned_loss=1.044, ctc_loss=1.191, over 3344538.75 frames. ], batch size: 54, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 02:47:13,855 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 9.873e+01 1.501e+02 1.913e+02 2.464e+02 6.763e+02, threshold=3.826e+02, percent-clipped=7.0
+2024-08-25 02:47:25,636 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.68 vs. limit=5.546666666666667
+2024-08-25 02:47:37,720 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=2186.6666666666665, ans=0.8234666666666667
+2024-08-25 02:47:38,036 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=2186.6666666666665, ans=9.14
+2024-08-25 02:47:41,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=2240.0, ans=0.0496
+2024-08-25 02:47:44,969 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=27.93 vs. limit=8.34
+2024-08-25 02:47:49,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=2240.0, ans=0.395
+2024-08-25 02:47:59,525 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=85.83 vs. limit=8.36
+2024-08-25 02:47:59,678 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=18.35 vs. limit=8.36
+2024-08-25 02:48:00,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=2293.3333333333335, ans=0.3925
+2024-08-25 02:48:00,718 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=47.90 vs. limit=8.36
+2024-08-25 02:48:01,795 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=2293.3333333333335, ans=0.3925
+2024-08-25 02:48:04,968 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=60.18 vs. limit=8.36
+2024-08-25 02:48:19,324 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=182.79 vs. limit=8.38
+2024-08-25 02:48:21,725 INFO [train.py:1114] (3/4) Epoch 1, batch 450, loss[loss=1.206, simple_loss=0.7863, pruned_loss=0.9028, ctc_loss=1.176, over 19616.00 frames. ], tot_loss[loss=1.282, simple_loss=0.8796, pruned_loss=1.006, ctc_loss=1.179, over 3453240.13 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 02:48:22,367 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=17.89 vs. limit=8.4
+2024-08-25 02:48:22,458 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.29 vs. limit=9.3
+2024-08-25 02:48:23,619 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=15.54 vs. limit=8.4
+2024-08-25 02:48:25,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=2400.0, ans=0.3875
+2024-08-25 02:48:28,931 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=2400.0, ans=0.23600000000000002
+2024-08-25 02:48:38,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.23 vs. limit=9.34
+2024-08-25 02:48:39,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=2453.3333333333335, ans=0.385
+2024-08-25 02:48:46,599 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=12.03 vs. limit=9.34
+2024-08-25 02:48:48,316 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=7.10 vs. limit=5.0
+2024-08-25 02:48:49,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=2506.6666666666665, ans=0.3825
+2024-08-25 02:48:57,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=2506.6666666666665, ans=0.3825
+2024-08-25 02:49:05,452 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=11.31 vs. limit=6.28
+2024-08-25 02:49:08,184 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.34 vs. limit=9.42
+2024-08-25 02:49:08,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=2560.0, ans=0.38
+2024-08-25 02:49:09,130 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.15 vs. limit=8.46
+2024-08-25 02:49:12,103 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.27 vs. limit=9.42
+2024-08-25 02:49:19,638 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=2613.3333333333335, ans=0.041199999999999994
+2024-08-25 02:49:28,641 INFO [train.py:1114] (3/4) Epoch 1, batch 500, loss[loss=1.208, simple_loss=0.7978, pruned_loss=0.8577, ctc_loss=1.154, over 19686.00 frames. ], tot_loss[loss=1.25, simple_loss=0.8494, pruned_loss=0.9661, ctc_loss=1.16, over 3547250.01 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:49:30,941 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=12.23 vs. limit=8.5
+2024-08-25 02:49:32,567 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.093e+02 1.834e+02 2.411e+02 2.968e+02 6.409e+02, threshold=4.822e+02, percent-clipped=7.0
+2024-08-25 02:49:45,479 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.94 vs. limit=9.54
+2024-08-25 02:49:48,116 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=17.38 vs. limit=8.52
+2024-08-25 02:49:50,844 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.31 vs. limit=9.54
+2024-08-25 02:50:09,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=2826.6666666666665, ans=0.094
+2024-08-25 02:50:11,675 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.04 vs. limit=9.620000000000001
+2024-08-25 02:50:15,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=2826.6666666666665, ans=0.3675
+2024-08-25 02:50:19,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=2826.6666666666665, ans=0.094
+2024-08-25 02:50:19,527 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.04 vs. limit=9.620000000000001
+2024-08-25 02:50:20,939 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.14 vs. limit=6.413333333333333
+2024-08-25 02:50:21,248 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=20.79 vs. limit=8.56
+2024-08-25 02:50:30,372 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.71 vs. limit=9.66
+2024-08-25 02:50:30,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=2880.0, ans=8.58
+2024-08-25 02:50:32,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_na.min_abs, batch_count=2880.0, ans=0.015519999999999999
+2024-08-25 02:50:39,214 INFO [train.py:1114] (3/4) Epoch 1, batch 550, loss[loss=1.167, simple_loss=0.7934, pruned_loss=0.7574, ctc_loss=1.096, over 19216.00 frames. ], tot_loss[loss=1.22, simple_loss=0.8261, pruned_loss=0.9176, ctc_loss=1.138, over 3608044.28 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:50:50,358 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=2933.3333333333335, ans=0.3625
+2024-08-25 02:51:08,548 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=2986.6666666666665, ans=0.36
+2024-08-25 02:51:11,114 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.769e+01
+2024-08-25 02:51:12,613 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=3040.0, ans=0.0316
+2024-08-25 02:51:13,820 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=3040.0, ans=6.9
+2024-08-25 02:51:15,244 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=3040.0, ans=0.7936000000000001
+2024-08-25 02:51:34,160 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.86 vs. limit=9.82
+2024-08-25 02:51:44,471 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.34 vs. limit=8.68
+2024-08-25 02:51:53,823 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=3200.0, ans=0.268
+2024-08-25 02:51:55,117 INFO [train.py:1114] (3/4) Epoch 1, batch 600, loss[loss=1.042, simple_loss=0.715, pruned_loss=0.6269, ctc_loss=1.01, over 19385.00 frames. ], tot_loss[loss=1.175, simple_loss=0.7973, pruned_loss=0.8511, ctc_loss=1.102, over 3666354.70 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:51:55,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=3200.0, ans=0.268
+2024-08-25 02:51:59,173 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.677e+02 3.553e+02 4.456e+02 9.241e+02, threshold=7.106e+02, percent-clipped=18.0
+2024-08-25 02:51:59,952 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.41 vs. limit=8.7
+2024-08-25 02:52:00,523 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=3200.0, ans=0.35
+2024-08-25 02:52:02,446 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.73 vs. limit=9.9
+2024-08-25 02:52:07,919 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.35 vs. limit=9.94
+2024-08-25 02:52:20,444 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.78 vs. limit=9.94
+2024-08-25 02:52:21,618 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=9.42 vs. limit=5.826666666666666
+2024-08-25 02:52:42,302 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.98 vs. limit=6.68
+2024-08-25 02:52:58,068 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.63 vs. limit=5.8533333333333335
+2024-08-25 02:53:01,026 INFO [train.py:1114] (3/4) Epoch 1, batch 650, loss[loss=0.8302, simple_loss=0.5855, pruned_loss=0.4569, ctc_loss=0.8022, over 19771.00 frames. ], tot_loss[loss=1.112, simple_loss=0.7586, pruned_loss=0.7743, ctc_loss=1.048, over 3716874.53 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 4.0
+2024-08-25 02:53:14,884 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.05 vs. limit=5.88
+2024-08-25 02:53:19,454 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_na.min_abs, batch_count=3520.0, ans=0.01808
+2024-08-25 02:53:48,686 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.12 vs. limit=8.86
+2024-08-25 02:53:52,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=3680.0, ans=0.03999999999999998
+2024-08-25 02:53:52,876 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.15 vs. limit=8.879999999999999
+2024-08-25 02:53:53,751 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=3680.0, ans=0.017199999999999993
+2024-08-25 02:53:54,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 02:54:09,016 INFO [train.py:1114] (3/4) Epoch 1, batch 700, loss[loss=0.8421, simple_loss=0.5906, pruned_loss=0.4705, ctc_loss=0.7974, over 19732.00 frames. ], tot_loss[loss=1.051, simple_loss=0.7226, pruned_loss=0.7019, ctc_loss=0.993, over 3748893.67 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:54:14,192 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.975e+02 3.878e+02 5.385e+02 1.936e+03, threshold=7.756e+02, percent-clipped=10.0
+2024-08-25 02:54:16,357 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.32 vs. limit=8.9
+2024-08-25 02:54:21,644 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=10.70 vs. limit=10.34
+2024-08-25 02:54:36,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=3840.0, ans=0.013600000000000001
+2024-08-25 02:54:53,599 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=10.56 vs. limit=10.42
+2024-08-25 02:54:57,484 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=11.79 vs. limit=10.42
+2024-08-25 02:55:16,797 INFO [train.py:1114] (3/4) Epoch 1, batch 750, loss[loss=0.8315, simple_loss=0.6012, pruned_loss=0.4291, ctc_loss=0.7765, over 19497.00 frames. ], tot_loss[loss=0.9888, simple_loss=0.6859, pruned_loss=0.6337, ctc_loss=0.9341, over 3776162.02 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 02:55:50,923 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.13 vs. limit=10.58
+2024-08-25 02:55:56,240 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.51 vs. limit=10.58
+2024-08-25 02:56:07,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=4160.0, ans=0.0
+2024-08-25 02:56:20,916 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=4213.333333333333, ans=0.3025
+2024-08-25 02:56:24,860 INFO [train.py:1114] (3/4) Epoch 1, batch 800, loss[loss=0.7114, simple_loss=0.5194, pruned_loss=0.3592, ctc_loss=0.6532, over 19823.00 frames. ], tot_loss[loss=0.9304, simple_loss=0.6523, pruned_loss=0.5724, ctc_loss=0.8747, over 3797711.47 frames. ], batch size: 49, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:56:26,844 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.09 vs. limit=9.1
+2024-08-25 02:56:29,873 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.945e+02 3.956e+02 5.210e+02 9.107e+02, threshold=7.913e+02, percent-clipped=4.0
+2024-08-25 02:56:35,583 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=12.05 vs. limit=10.7
+2024-08-25 02:56:49,456 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.35 vs. limit=10.74
+2024-08-25 02:56:50,427 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.36 vs. limit=9.14
+2024-08-25 02:57:01,806 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=4373.333333333333, ans=0.29500000000000004
+2024-08-25 02:57:04,413 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.95 vs. limit=9.16
+2024-08-25 02:57:13,053 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.63 vs. limit=5.770666666666667
+2024-08-25 02:57:19,333 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.86 vs. limit=10.86
+2024-08-25 02:57:27,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=4480.0, ans=0.7432000000000001
+2024-08-25 02:57:30,595 INFO [train.py:1114] (3/4) Epoch 1, batch 850, loss[loss=0.7144, simple_loss=0.5373, pruned_loss=0.3422, ctc_loss=0.631, over 19649.00 frames. ], tot_loss[loss=0.8764, simple_loss=0.622, pruned_loss=0.518, ctc_loss=0.8169, over 3815877.14 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 02:57:33,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=4533.333333333333, ans=0.7413333333333334
+2024-08-25 02:57:49,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=4586.666666666667, ans=0.26880000000000004
+2024-08-25 02:58:07,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=4640.0, ans=0.2825
+2024-08-25 02:58:42,811 INFO [train.py:1114] (3/4) Epoch 1, batch 900, loss[loss=0.6219, simple_loss=0.4747, pruned_loss=0.2905, ctc_loss=0.5369, over 19807.00 frames. ], tot_loss[loss=0.8294, simple_loss=0.5959, pruned_loss=0.4722, ctc_loss=0.7652, over 3820209.74 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 02:58:44,673 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.00 vs. limit=11.1
+2024-08-25 02:58:48,908 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.783e+02 3.682e+02 4.971e+02 1.764e+03, threshold=7.364e+02, percent-clipped=6.0
+2024-08-25 02:58:50,555 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=4800.0, ans=0.732
+2024-08-25 02:58:55,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=4853.333333333333, ans=0.00981449275362319
+2024-08-25 02:59:04,247 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=4853.333333333333, ans=0.09899494936611666
+2024-08-25 02:59:29,863 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=4960.0, ans=0.009791304347826088
+2024-08-25 02:59:38,098 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.49 vs. limit=6.253333333333333
+2024-08-25 02:59:50,553 INFO [train.py:1114] (3/4) Epoch 1, batch 950, loss[loss=0.5815, simple_loss=0.4529, pruned_loss=0.2567, ctc_loss=0.5036, over 19512.00 frames. ], tot_loss[loss=0.7887, simple_loss=0.5736, pruned_loss=0.4332, ctc_loss=0.7197, over 3821989.98 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 02:59:50,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=5066.666666666667, ans=0.2625
+2024-08-25 02:59:55,824 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=5.323e-03
+2024-08-25 03:00:04,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=5120.0, ans=0.26
+2024-08-25 03:00:06,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=5120.0, ans=0.26
+2024-08-25 03:00:33,226 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=5226.666666666667, ans=0.04488888888888889
+2024-08-25 03:00:45,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=5280.0, ans=0.2525
+2024-08-25 03:00:53,124 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 03:00:54,642 INFO [train.py:1114] (3/4) Epoch 1, batch 1000, loss[loss=0.5876, simple_loss=0.462, pruned_loss=0.2558, ctc_loss=0.5019, over 19852.00 frames. ], tot_loss[loss=0.7529, simple_loss=0.5544, pruned_loss=0.3998, ctc_loss=0.6785, over 3818261.56 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:00:56,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 03:01:00,350 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 03:01:01,324 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.847e+02 3.463e+02 4.611e+02 9.717e+02, threshold=6.926e+02, percent-clipped=4.0
+2024-08-25 03:01:10,466 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=5386.666666666667, ans=0.2475
+2024-08-25 03:01:10,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=5386.666666666667, ans=0.07
+2024-08-25 03:01:13,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=5386.666666666667, ans=0.24613333333333332
+2024-08-25 03:01:28,001 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.84 vs. limit=7.720000000000001
+2024-08-25 03:01:33,113 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.40 vs. limit=6.36
+2024-08-25 03:01:33,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=5440.0, ans=0.245
+2024-08-25 03:01:48,101 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=5493.333333333333, ans=0.2425
+2024-08-25 03:02:07,680 INFO [train.py:1114] (3/4) Epoch 1, batch 1050, loss[loss=0.6458, simple_loss=0.5106, pruned_loss=0.285, ctc_loss=0.5304, over 19830.00 frames. ], tot_loss[loss=0.7148, simple_loss=0.5338, pruned_loss=0.3669, ctc_loss=0.6352, over 3824917.30 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:02:09,525 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten.whitening_limit, batch_count=5600.0, ans=11.7
+2024-08-25 03:02:15,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=5600.0, ans=0.2375
+2024-08-25 03:02:42,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=5706.666666666667, ans=0.23249999999999998
+2024-08-25 03:02:51,718 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5760.0, ans=0.2424
+2024-08-25 03:03:13,746 INFO [train.py:1114] (3/4) Epoch 1, batch 1100, loss[loss=0.5029, simple_loss=0.4111, pruned_loss=0.2055, ctc_loss=0.413, over 19577.00 frames. ], tot_loss[loss=0.6832, simple_loss=0.517, pruned_loss=0.3399, ctc_loss=0.5987, over 3832258.35 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 8.0
+2024-08-25 03:03:14,270 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.01 vs. limit=11.9
+2024-08-25 03:03:17,701 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=5866.666666666667, ans=0.0
+2024-08-25 03:03:20,123 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.626e+02 3.754e+02 4.559e+02 6.965e+02, threshold=7.509e+02, percent-clipped=1.0
+2024-08-25 03:03:34,366 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=5920.0, ans=0.009582608695652174
+2024-08-25 03:03:37,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=5920.0, ans=0.6928000000000001
+2024-08-25 03:03:51,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=6026.666666666667, ans=0.041555555555555554
+2024-08-25 03:03:54,751 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=6026.666666666667, ans=0.031166666666666665
+2024-08-25 03:04:14,727 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=6080.0, ans=0.04133333333333333
+2024-08-25 03:04:17,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=6133.333333333333, ans=0.04111111111111111
+2024-08-25 03:04:18,508 INFO [train.py:1114] (3/4) Epoch 1, batch 1150, loss[loss=0.5375, simple_loss=0.4394, pruned_loss=0.2262, ctc_loss=0.4237, over 19574.00 frames. ], tot_loss[loss=0.6591, simple_loss=0.5045, pruned_loss=0.3192, ctc_loss=0.5702, over 3829761.48 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:04:27,201 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.71 vs. limit=8.066666666666666
+2024-08-25 03:04:42,089 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=6186.666666666667, ans=0.04088888888888889
+2024-08-25 03:05:04,956 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.62 vs. limit=6.573333333333333
+2024-08-25 03:05:23,317 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=6400.0, ans=0.2
+2024-08-25 03:05:24,507 INFO [train.py:1114] (3/4) Epoch 1, batch 1200, loss[loss=0.5894, simple_loss=0.48, pruned_loss=0.2459, ctc_loss=0.4801, over 19835.00 frames. ], tot_loss[loss=0.6376, simple_loss=0.4939, pruned_loss=0.3008, ctc_loss=0.5452, over 3825141.66 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 03:05:30,706 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.702e+02 3.344e+02 4.028e+02 1.038e+03, threshold=6.687e+02, percent-clipped=4.0
+2024-08-25 03:05:48,672 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.14 vs. limit=12.34
+2024-08-25 03:06:30,996 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=6613.333333333333, ans=0.03911111111111111
+2024-08-25 03:06:33,173 INFO [train.py:1114] (3/4) Epoch 1, batch 1250, loss[loss=0.5828, simple_loss=0.4744, pruned_loss=0.2438, ctc_loss=0.4758, over 19546.00 frames. ], tot_loss[loss=0.6158, simple_loss=0.4831, pruned_loss=0.2833, ctc_loss=0.5193, over 3843098.91 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:06:59,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=6773.333333333333, ans=0.1825
+2024-08-25 03:07:02,788 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=6773.333333333333, ans=0.1825
+2024-08-25 03:07:08,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=6773.333333333333, ans=0.1825
+2024-08-25 03:07:20,843 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=7.004e-02
+2024-08-25 03:07:31,680 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=6880.0, ans=0.07
+2024-08-25 03:07:53,434 INFO [train.py:1114] (3/4) Epoch 1, batch 1300, loss[loss=0.6086, simple_loss=0.4867, pruned_loss=0.2655, ctc_loss=0.4911, over 18922.00 frames. ], tot_loss[loss=0.5979, simple_loss=0.474, pruned_loss=0.2695, ctc_loss=0.4982, over 3847791.71 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 8.0
+2024-08-25 03:07:55,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=6933.333333333333, ans=0.175
+2024-08-25 03:07:58,696 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=6933.333333333333, ans=0.175
+2024-08-25 03:08:00,267 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.44 vs. limit=10.1
+2024-08-25 03:08:00,989 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.595e+02 3.171e+02 4.007e+02 5.829e+02, threshold=6.342e+02, percent-clipped=0.0
+2024-08-25 03:08:29,701 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2.whitening_limit, batch_count=7040.0, ans=8.52
+2024-08-25 03:08:50,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=7146.666666666667, ans=0.16499999999999998
+2024-08-25 03:08:54,454 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.29 vs. limit=10.18
+2024-08-25 03:09:00,189 INFO [train.py:1114] (3/4) Epoch 1, batch 1350, loss[loss=0.4972, simple_loss=0.4234, pruned_loss=0.1963, ctc_loss=0.3893, over 19754.00 frames. ], tot_loss[loss=0.5814, simple_loss=0.4658, pruned_loss=0.2572, ctc_loss=0.4792, over 3858533.61 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:10:25,730 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:10:30,800 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=7306.666666666667, ans=0.0
+2024-08-25 03:12:10,363 INFO [train.py:1114] (3/4) Epoch 1, batch 1400, loss[loss=0.4248, simple_loss=0.3704, pruned_loss=0.1633, ctc_loss=0.3244, over 19661.00 frames. ], tot_loss[loss=0.5644, simple_loss=0.4573, pruned_loss=0.2451, ctc_loss=0.46, over 3865756.22 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:12:15,003 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.29 vs. limit=10.3
+2024-08-25 03:12:32,379 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.490e+02 2.974e+02 4.034e+02 6.918e+02, threshold=5.948e+02, percent-clipped=1.0
+2024-08-25 03:12:55,049 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.73 vs. limit=10.34
+2024-08-25 03:13:02,033 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.10 vs. limit=7.029333333333334
+2024-08-25 03:13:13,315 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.10 vs. limit=8.813333333333333
+2024-08-25 03:13:18,918 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=1.624e-02
+2024-08-25 03:13:21,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=7680.0, ans=0.14
+2024-08-25 03:13:28,401 INFO [train.py:1114] (3/4) Epoch 1, batch 1450, loss[loss=0.5137, simple_loss=0.4464, pruned_loss=0.2009, ctc_loss=0.39, over 19644.00 frames. ], tot_loss[loss=0.5523, simple_loss=0.4515, pruned_loss=0.2366, ctc_loss=0.4456, over 3863304.18 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:13:47,115 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=7786.666666666667, ans=0.025
+2024-08-25 03:13:56,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=7840.0, ans=0.1325
+2024-08-25 03:13:56,463 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.43 vs. limit=13.379999999999999
+2024-08-25 03:14:09,550 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=7893.333333333333, ans=0.04949747468305833
+2024-08-25 03:14:10,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=7893.333333333333, ans=0.13
+2024-08-25 03:14:11,285 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.76 vs. limit=7.157333333333334
+2024-08-25 03:14:23,650 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.43 vs. limit=10.48
+2024-08-25 03:14:29,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=8000.0, ans=0.125
+2024-08-25 03:14:30,703 INFO [train.py:1114] (3/4) Epoch 1, batch 1500, loss[loss=0.5283, simple_loss=0.4556, pruned_loss=0.2093, ctc_loss=0.4066, over 19599.00 frames. ], tot_loss[loss=0.5432, simple_loss=0.4479, pruned_loss=0.2298, ctc_loss=0.4342, over 3863668.54 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 8.0
+2024-08-25 03:14:38,510 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.576e+02 3.382e+02 4.091e+02 7.597e+02, threshold=6.763e+02, percent-clipped=6.0
+2024-08-25 03:14:42,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=8053.333333333333, ans=0.09899494936611666
+2024-08-25 03:14:52,672 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.62 vs. limit=7.013333333333334
+2024-08-25 03:15:11,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=8160.0, ans=0.125
+2024-08-25 03:15:13,989 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.69 vs. limit=13.620000000000001
+2024-08-25 03:15:19,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff2.min_abs, batch_count=8160.0, ans=0.1
+2024-08-25 03:15:40,015 INFO [train.py:1114] (3/4) Epoch 1, batch 1550, loss[loss=0.5335, simple_loss=0.4589, pruned_loss=0.2132, ctc_loss=0.4116, over 19615.00 frames. ], tot_loss[loss=0.5328, simple_loss=0.4431, pruned_loss=0.223, ctc_loss=0.4226, over 3847809.76 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 8.0
+2024-08-25 03:15:46,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=8266.666666666666, ans=0.025
+2024-08-25 03:16:11,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=8373.333333333334, ans=0.00904927536231884
+2024-08-25 03:16:26,119 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.68 vs. limit=13.82
+2024-08-25 03:16:27,474 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.00 vs. limit=9.213333333333333
+2024-08-25 03:16:41,380 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=8480.0, ans=0.00902608695652174
+2024-08-25 03:16:43,725 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.17 vs. limit=10.68
+2024-08-25 03:16:46,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=8480.0, ans=0.125
+2024-08-25 03:16:49,342 INFO [train.py:1114] (3/4) Epoch 1, batch 1600, loss[loss=0.5179, simple_loss=0.4443, pruned_loss=0.2092, ctc_loss=0.3994, over 19840.00 frames. ], tot_loss[loss=0.5244, simple_loss=0.4391, pruned_loss=0.2178, ctc_loss=0.413, over 3837198.14 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:16:59,533 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 2.604e+02 3.125e+02 4.170e+02 2.617e+03, threshold=6.251e+02, percent-clipped=7.0
+2024-08-25 03:17:04,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=8586.666666666666, ans=0.09899494936611666
+2024-08-25 03:17:09,832 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=8586.666666666666, ans=0.125
+2024-08-25 03:17:10,954 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=2.598e-03
+2024-08-25 03:17:22,690 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.40 vs. limit=13.98
+2024-08-25 03:17:26,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=8640.0, ans=0.2136
+2024-08-25 03:17:37,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=8693.333333333334, ans=0.125
+2024-08-25 03:18:57,667 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.30 vs. limit=14.059999999999999
+2024-08-25 03:18:58,913 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.18 vs. limit=14.059999999999999
+2024-08-25 03:19:09,301 INFO [train.py:1114] (3/4) Epoch 1, batch 1650, loss[loss=0.5449, simple_loss=0.4644, pruned_loss=0.22, ctc_loss=0.4336, over 19670.00 frames. ], tot_loss[loss=0.5151, simple_loss=0.4351, pruned_loss=0.2119, ctc_loss=0.403, over 3834030.75 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 16.0
+2024-08-25 03:19:17,147 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=8800.0, ans=0.125
+2024-08-25 03:19:19,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8800.0, ans=0.212
+2024-08-25 03:19:26,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=8853.333333333334, ans=0.029777777777777778
+2024-08-25 03:19:37,892 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.84 vs. limit=10.84
+2024-08-25 03:19:52,367 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.08 vs. limit=4.344
+2024-08-25 03:19:53,665 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.08 vs. limit=14.219999999999999
+2024-08-25 03:19:59,191 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=9013.333333333334, ans=0.8401333333333333
+2024-08-25 03:20:12,475 INFO [train.py:1114] (3/4) Epoch 1, batch 1700, loss[loss=0.3802, simple_loss=0.3528, pruned_loss=0.1412, ctc_loss=0.2761, over 19677.00 frames. ], tot_loss[loss=0.5042, simple_loss=0.4304, pruned_loss=0.2052, ctc_loss=0.3912, over 3848340.67 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:20:19,814 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.828e+02 2.395e+02 2.888e+02 3.702e+02 8.491e+02, threshold=5.776e+02, percent-clipped=2.0
+2024-08-25 03:20:22,387 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=9066.666666666666, ans=0.20933333333333334
+2024-08-25 03:20:26,461 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.14 vs. limit=10.92
+2024-08-25 03:20:35,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=9120.0, ans=0.5808
+2024-08-25 03:20:41,415 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.39 vs. limit=10.94
+2024-08-25 03:20:52,443 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:22:14,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=9226.666666666666, ans=0.20773333333333333
+2024-08-25 03:22:23,698 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=9280.0, ans=0.025
+2024-08-25 03:22:33,848 INFO [train.py:1114] (3/4) Epoch 1, batch 1750, loss[loss=0.4259, simple_loss=0.3779, pruned_loss=0.1699, ctc_loss=0.3132, over 19672.00 frames. ], tot_loss[loss=0.4942, simple_loss=0.4257, pruned_loss=0.1996, ctc_loss=0.3813, over 3853438.43 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 16.0
+2024-08-25 03:22:42,239 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=9333.333333333334, ans=0.20666666666666667
+2024-08-25 03:22:49,949 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=9386.666666666666, ans=0.025
+2024-08-25 03:22:50,087 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=9386.666666666666, ans=0.125
+2024-08-25 03:22:56,921 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=9440.0, ans=0.125
+2024-08-25 03:23:02,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=9440.0, ans=0.027333333333333334
+2024-08-25 03:23:07,467 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.39 vs. limit=14.620000000000001
+2024-08-25 03:23:27,059 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.61 vs. limit=11.08
+2024-08-25 03:23:29,183 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=9546.666666666666, ans=0.125
+2024-08-25 03:23:31,431 INFO [train.py:1114] (3/4) Epoch 1, batch 1800, loss[loss=0.4637, simple_loss=0.4305, pruned_loss=0.1753, ctc_loss=0.3371, over 19613.00 frames. ], tot_loss[loss=0.4855, simple_loss=0.4221, pruned_loss=0.1946, ctc_loss=0.372, over 3854221.20 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 8.0
+2024-08-25 03:23:38,476 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=9600.0, ans=0.125
+2024-08-25 03:23:39,410 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.646e+02 3.473e+02 4.220e+02 8.344e+02, threshold=6.945e+02, percent-clipped=3.0
+2024-08-25 03:24:14,198 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=9760.0, ans=0.5584
+2024-08-25 03:24:35,841 INFO [train.py:1114] (3/4) Epoch 1, batch 1850, loss[loss=0.4616, simple_loss=0.4149, pruned_loss=0.1825, ctc_loss=0.3424, over 19592.00 frames. ], tot_loss[loss=0.4766, simple_loss=0.4183, pruned_loss=0.1897, ctc_loss=0.3632, over 3859030.19 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:24:43,635 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=9866.666666666666, ans=0.008724637681159421
+2024-08-25 03:24:52,561 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=9920.0, ans=0.125
+2024-08-25 03:25:12,710 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.34 vs. limit=9.986666666666668
+2024-08-25 03:25:15,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=10026.666666666666, ans=0.125
+2024-08-25 03:25:30,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=10080.0, ans=0.125
+2024-08-25 03:25:32,821 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.27 vs. limit=15.059999999999999
+2024-08-25 03:25:40,384 INFO [train.py:1114] (3/4) Epoch 1, batch 1900, loss[loss=0.4538, simple_loss=0.4204, pruned_loss=0.1713, ctc_loss=0.3477, over 19655.00 frames. ], tot_loss[loss=0.4735, simple_loss=0.4178, pruned_loss=0.188, ctc_loss=0.3601, over 3863302.01 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:25:48,462 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.873e+02 2.554e+02 2.990e+02 4.033e+02 8.041e+02, threshold=5.979e+02, percent-clipped=3.0
+2024-08-25 03:26:02,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=10240.0, ans=0.125
+2024-08-25 03:26:07,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=10240.0, ans=0.5416000000000001
+2024-08-25 03:26:14,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=10293.333333333334, ans=11.36
+2024-08-25 03:26:18,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=10293.333333333334, ans=0.35440000000000005
+2024-08-25 03:26:21,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=10293.333333333334, ans=0.008631884057971015
+2024-08-25 03:26:24,236 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=10293.333333333334, ans=0.35440000000000005
+2024-08-25 03:26:38,014 INFO [train.py:1114] (3/4) Epoch 1, batch 1950, loss[loss=0.4127, simple_loss=0.3907, pruned_loss=0.1565, ctc_loss=0.2976, over 19608.00 frames. ], tot_loss[loss=0.4682, simple_loss=0.4167, pruned_loss=0.1849, ctc_loss=0.3545, over 3871559.95 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 8.0
+2024-08-25 03:26:39,933 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.22 vs. limit=15.3
+2024-08-25 03:27:02,655 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=10506.666666666666, ans=0.125
+2024-08-25 03:27:19,133 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.63 vs. limit=4.584
+2024-08-25 03:27:23,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=10613.333333333334, ans=0.02244444444444444
+2024-08-25 03:27:32,836 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=10613.333333333334, ans=0.02244444444444444
+2024-08-25 03:27:36,524 INFO [train.py:1114] (3/4) Epoch 1, batch 2000, loss[loss=0.398, simple_loss=0.3674, pruned_loss=0.1543, ctc_loss=0.3001, over 19619.00 frames. ], tot_loss[loss=0.4638, simple_loss=0.4155, pruned_loss=0.1827, ctc_loss=0.3505, over 3857002.57 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:27:44,894 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.508e+02 3.011e+02 3.695e+02 6.472e+02, threshold=6.022e+02, percent-clipped=1.0
+2024-08-25 03:27:46,175 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=10666.666666666666, ans=0.5266666666666667
+2024-08-25 03:28:01,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=10773.333333333334, ans=0.021777777777777774
+2024-08-25 03:28:08,108 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.52 vs. limit=11.54
+2024-08-25 03:28:13,915 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.77 vs. limit=11.559999999999999
+2024-08-25 03:28:19,914 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.36 vs. limit=4.6240000000000006
+2024-08-25 03:28:41,983 INFO [train.py:1114] (3/4) Epoch 1, batch 2050, loss[loss=0.3962, simple_loss=0.366, pruned_loss=0.1551, ctc_loss=0.2908, over 19731.00 frames. ], tot_loss[loss=0.4567, simple_loss=0.4121, pruned_loss=0.1794, ctc_loss=0.3437, over 3852270.95 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:28:43,759 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.20 vs. limit=11.6
+2024-08-25 03:28:53,200 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=10933.333333333334, ans=0.5173333333333334
+2024-08-25 03:29:13,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=10986.666666666666, ans=0.07
+2024-08-25 03:30:20,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=11093.333333333334, ans=0.0
+2024-08-25 03:30:34,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=11146.666666666666, ans=10.0
+2024-08-25 03:30:37,005 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=11146.666666666666, ans=0.020222222222222228
+2024-08-25 03:30:37,317 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.40 vs. limit=7.786666666666667
+2024-08-25 03:31:02,649 INFO [train.py:1114] (3/4) Epoch 1, batch 2100, loss[loss=0.3888, simple_loss=0.3831, pruned_loss=0.1421, ctc_loss=0.2757, over 19783.00 frames. ], tot_loss[loss=0.4496, simple_loss=0.4088, pruned_loss=0.1757, ctc_loss=0.3375, over 3858651.34 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 16.0
+2024-08-25 03:31:08,843 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11200.0, ans=0.188
+2024-08-25 03:31:19,369 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 2.443e+02 2.901e+02 4.101e+02 7.108e+02, threshold=5.802e+02, percent-clipped=5.0
+2024-08-25 03:31:25,415 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=11.34 vs. limit=10.626666666666667
+2024-08-25 03:31:29,583 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.42 vs. limit=11.719999999999999
+2024-08-25 03:31:39,436 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.82 vs. limit=10.626666666666667
+2024-08-25 03:31:41,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=11306.666666666666, ans=0.05
+2024-08-25 03:32:07,050 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=11360.0, ans=0.019333333333333338
+2024-08-25 03:32:10,766 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.63 vs. limit=4.712
+2024-08-25 03:32:26,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=11413.333333333334, ans=0.0
+2024-08-25 03:32:32,794 INFO [train.py:1114] (3/4) Epoch 1, batch 2150, loss[loss=0.4278, simple_loss=0.4032, pruned_loss=0.164, ctc_loss=0.311, over 19848.00 frames. ], tot_loss[loss=0.4442, simple_loss=0.4067, pruned_loss=0.1729, ctc_loss=0.3319, over 3870799.03 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:32:42,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=11466.666666666666, ans=0.018888888888888893
+2024-08-25 03:33:34,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=11626.666666666666, ans=0.18373333333333333
+2024-08-25 03:33:39,405 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.02 vs. limit=11.86
+2024-08-25 03:33:45,667 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.73 vs. limit=11.879999999999999
+2024-08-25 03:33:57,385 INFO [train.py:1114] (3/4) Epoch 1, batch 2200, loss[loss=0.4565, simple_loss=0.4271, pruned_loss=0.1755, ctc_loss=0.3374, over 19589.00 frames. ], tot_loss[loss=0.4407, simple_loss=0.4054, pruned_loss=0.1711, ctc_loss=0.3289, over 3868780.41 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 8.0
+2024-08-25 03:33:58,934 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=11733.333333333334, ans=0.025
+2024-08-25 03:34:06,409 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=11733.333333333334, ans=0.18266666666666664
+2024-08-25 03:34:08,401 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.628e+02 3.380e+02 4.438e+02 7.655e+02, threshold=6.760e+02, percent-clipped=12.0
+2024-08-25 03:34:09,931 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=11786.666666666666, ans=0.125
+2024-08-25 03:34:40,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=11893.333333333334, ans=0.125
+2024-08-25 03:35:03,300 INFO [train.py:1114] (3/4) Epoch 1, batch 2250, loss[loss=0.4221, simple_loss=0.4117, pruned_loss=0.1572, ctc_loss=0.2952, over 19623.00 frames. ], tot_loss[loss=0.437, simple_loss=0.4043, pruned_loss=0.1691, ctc_loss=0.3244, over 3868154.38 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:35:04,599 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12000.0, ans=0.18
+2024-08-25 03:35:10,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=12000.0, ans=0.01666666666666667
+2024-08-25 03:35:34,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=12106.666666666666, ans=0.016222222222222228
+2024-08-25 03:35:35,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=12106.666666666666, ans=0.025
+2024-08-25 03:35:39,088 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12160.0, ans=0.1784
+2024-08-25 03:35:50,698 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=12213.333333333334, ans=0.025
+2024-08-25 03:35:54,905 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.11 vs. limit=8.053333333333335
+2024-08-25 03:36:01,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=12213.333333333334, ans=0.008214492753623188
+2024-08-25 03:36:03,060 INFO [train.py:1114] (3/4) Epoch 1, batch 2300, loss[loss=0.3666, simple_loss=0.3645, pruned_loss=0.132, ctc_loss=0.2619, over 19494.00 frames. ], tot_loss[loss=0.432, simple_loss=0.4014, pruned_loss=0.1667, ctc_loss=0.3195, over 3861800.96 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:36:06,793 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=12266.666666666666, ans=0.05
+2024-08-25 03:36:12,293 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.546e+02 3.099e+02 3.956e+02 8.242e+02, threshold=6.199e+02, percent-clipped=6.0
+2024-08-25 03:36:22,689 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.20 vs. limit=8.928
+2024-08-25 03:36:29,106 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.75 vs. limit=4.856
+2024-08-25 03:36:40,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=12426.666666666666, ans=0.125
+2024-08-25 03:36:57,944 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.79 vs. limit=12.18
+2024-08-25 03:37:00,703 INFO [train.py:1114] (3/4) Epoch 1, batch 2350, loss[loss=0.4845, simple_loss=0.4408, pruned_loss=0.1913, ctc_loss=0.3639, over 19678.00 frames. ], tot_loss[loss=0.4282, simple_loss=0.3999, pruned_loss=0.1645, ctc_loss=0.316, over 3863961.48 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 8.0
+2024-08-25 03:37:16,424 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=12586.666666666666, ans=0.125
+2024-08-25 03:37:29,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=12640.0, ans=10.0
+2024-08-25 03:37:41,543 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=12693.333333333334, ans=0.125
+2024-08-25 03:37:41,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=12693.333333333334, ans=0.125
+2024-08-25 03:37:47,928 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.15 vs. limit=17.060000000000002
+2024-08-25 03:37:53,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=12746.666666666666, ans=0.008098550724637681
+2024-08-25 03:37:55,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=12746.666666666666, ans=0.008098550724637681
+2024-08-25 03:37:59,414 INFO [train.py:1114] (3/4) Epoch 1, batch 2400, loss[loss=0.4501, simple_loss=0.4235, pruned_loss=0.1723, ctc_loss=0.3306, over 19283.00 frames. ], tot_loss[loss=0.4309, simple_loss=0.4028, pruned_loss=0.1656, ctc_loss=0.3177, over 3858589.54 frames. ], batch size: 71, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:38:08,240 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.522e+02 3.053e+02 3.990e+02 1.210e+03, threshold=6.106e+02, percent-clipped=3.0
+2024-08-25 03:38:12,934 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=12853.333333333334, ans=0.125
+2024-08-25 03:38:19,684 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=20.84 vs. limit=12.32
+2024-08-25 03:38:20,560 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=12906.666666666666, ans=0.125
+2024-08-25 03:38:31,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=12906.666666666666, ans=0.00806376811594203
+2024-08-25 03:38:47,815 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=22.13 vs. limit=12.379999999999999
+2024-08-25 03:38:53,242 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.43 vs. limit=12.379999999999999
+2024-08-25 03:39:03,891 INFO [train.py:1114] (3/4) Epoch 1, batch 2450, loss[loss=0.5241, simple_loss=0.4447, pruned_loss=0.2163, ctc_loss=0.4273, over 13283.00 frames. ], tot_loss[loss=0.4386, simple_loss=0.4073, pruned_loss=0.1696, ctc_loss=0.3247, over 3730669.53 frames. ], batch size: 141, lr: 4.39e-02, grad_scale: 16.0
+2024-08-25 03:39:07,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=13066.666666666666, ans=0.012222222222222225
+2024-08-25 03:39:16,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=13120.0, ans=0.125
+2024-08-25 03:39:17,972 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.68 vs. limit=12.42
+2024-08-25 03:39:23,828 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.48 vs. limit=8.28
+2024-08-25 03:39:25,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=13120.0, ans=0.125
+2024-08-25 03:39:38,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=13226.666666666666, ans=0.125
+2024-08-25 03:40:43,720 INFO [train.py:1114] (3/4) Epoch 2, batch 0, loss[loss=0.4414, simple_loss=0.3996, pruned_loss=0.1745, ctc_loss=0.3355, over 19819.00 frames. ], tot_loss[loss=0.4414, simple_loss=0.3996, pruned_loss=0.1745, ctc_loss=0.3355, over 19819.00 frames. ], batch size: 49, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 03:40:43,721 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 03:40:51,833 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.5.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.7546, 3.5714, 2.7950, 3.5909], device='cuda:3')
+2024-08-25 03:40:53,928 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.5933, 3.0181, 3.3461, 3.1235], device='cuda:3')
+2024-08-25 03:40:55,166 INFO [train.py:1146] (3/4) Epoch 2, validation: loss=0.3317, simple_loss=0.3718, pruned_loss=0.1058, ctc_loss=0.2, over 944034.00 frames.
+2024-08-25 03:40:55,167 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 03:41:12,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=13333.333333333334, ans=0.007971014492753623
+2024-08-25 03:41:17,114 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.388e+02 2.818e+02 3.444e+02 6.577e+02, threshold=5.636e+02, percent-clipped=3.0
+2024-08-25 03:41:21,104 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.96 vs. limit=9.354666666666667
+2024-08-25 03:41:22,016 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=13386.666666666666, ans=0.4008
+2024-08-25 03:41:24,521 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.96 vs. limit=5.008
+2024-08-25 03:41:29,553 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=12.52
+2024-08-25 03:41:57,958 INFO [train.py:1114] (3/4) Epoch 2, batch 50, loss[loss=0.3573, simple_loss=0.3566, pruned_loss=0.1298, ctc_loss=0.2458, over 19694.00 frames. ], tot_loss[loss=0.4201, simple_loss=0.3989, pruned_loss=0.1595, ctc_loss=0.3058, over 844976.60 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:42:08,841 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.17 vs. limit=8.386666666666667
+2024-08-25 03:43:15,622 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=13706.666666666666, ans=0.007889855072463769
+2024-08-25 03:43:18,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=13706.666666666666, ans=0.007889855072463769
+2024-08-25 03:43:36,873 INFO [train.py:1114] (3/4) Epoch 2, batch 100, loss[loss=0.4122, simple_loss=0.3896, pruned_loss=0.1577, ctc_loss=0.2983, over 19724.00 frames. ], tot_loss[loss=0.4193, simple_loss=0.3996, pruned_loss=0.1588, ctc_loss=0.3037, over 1499358.22 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:43:42,234 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=13813.333333333334, ans=0.125
+2024-08-25 03:44:02,831 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.500e+02 2.916e+02 3.893e+02 6.295e+02, threshold=5.832e+02, percent-clipped=2.0
+2024-08-25 03:44:19,358 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.85 vs. limit=17.98
+2024-08-25 03:44:20,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=13973.333333333334, ans=0.07
+2024-08-25 03:44:34,089 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.17 vs. limit=8.506666666666666
+2024-08-25 03:44:38,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14026.666666666666, ans=0.15973333333333334
+2024-08-25 03:44:42,784 INFO [train.py:1114] (3/4) Epoch 2, batch 150, loss[loss=0.3652, simple_loss=0.3575, pruned_loss=0.1352, ctc_loss=0.2564, over 19711.00 frames. ], tot_loss[loss=0.4102, simple_loss=0.3941, pruned_loss=0.1541, ctc_loss=0.2951, over 2027441.50 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 16.0
+2024-08-25 03:44:49,099 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=14080.0, ans=0.125
+2024-08-25 03:45:14,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=14186.666666666666, ans=0.025
+2024-08-25 03:45:18,703 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=14240.0, ans=0.15760000000000002
+2024-08-25 03:45:22,193 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=14240.0, ans=0.09899494936611666
+2024-08-25 03:45:34,142 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=14293.333333333334, ans=0.125
+2024-08-25 03:45:42,121 INFO [train.py:1114] (3/4) Epoch 2, batch 200, loss[loss=0.44, simple_loss=0.4145, pruned_loss=0.1677, ctc_loss=0.3252, over 18255.00 frames. ], tot_loss[loss=0.4037, simple_loss=0.3899, pruned_loss=0.1508, ctc_loss=0.2896, over 2435304.42 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:46:06,457 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.951e+02 2.445e+02 2.940e+02 3.728e+02 6.995e+02, threshold=5.880e+02, percent-clipped=3.0
+2024-08-25 03:46:06,664 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=14453.333333333334, ans=0.125
+2024-08-25 03:46:45,921 INFO [train.py:1114] (3/4) Epoch 2, batch 250, loss[loss=0.4225, simple_loss=0.4091, pruned_loss=0.1576, ctc_loss=0.3019, over 19406.00 frames. ], tot_loss[loss=0.4032, simple_loss=0.3895, pruned_loss=0.1507, ctc_loss=0.2887, over 2754696.94 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 16.0
+2024-08-25 03:46:55,673 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=14613.333333333334, ans=0.0057777777777777775
+2024-08-25 03:47:00,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=14666.666666666666, ans=0.125
+2024-08-25 03:47:19,367 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=13.02
+2024-08-25 03:47:33,027 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.24 vs. limit=13.04
+2024-08-25 03:47:50,845 INFO [train.py:1114] (3/4) Epoch 2, batch 300, loss[loss=0.3829, simple_loss=0.3838, pruned_loss=0.1366, ctc_loss=0.272, over 19500.00 frames. ], tot_loss[loss=0.4, simple_loss=0.3879, pruned_loss=0.149, ctc_loss=0.2854, over 2999500.08 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:47:52,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=14880.0, ans=0.004666666666666666
+2024-08-25 03:48:13,154 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.396e+02 2.818e+02 3.488e+02 8.647e+02, threshold=5.636e+02, percent-clipped=6.0
+2024-08-25 03:48:34,326 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.17 vs. limit=8.76
+2024-08-25 03:48:36,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=15040.0, ans=12.52
+2024-08-25 03:48:37,545 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=15093.333333333334, ans=0.37173333333333336
+2024-08-25 03:48:50,394 INFO [train.py:1114] (3/4) Epoch 2, batch 350, loss[loss=0.3854, simple_loss=0.3657, pruned_loss=0.1469, ctc_loss=0.2783, over 19780.00 frames. ], tot_loss[loss=0.3977, simple_loss=0.3871, pruned_loss=0.1476, ctc_loss=0.2831, over 3190079.29 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 16.0
+2024-08-25 03:49:37,324 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=15253.333333333334, ans=0.125
+2024-08-25 03:49:56,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=15306.666666666666, ans=0.14693333333333333
+2024-08-25 03:50:06,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=15360.0, ans=0.1464
+2024-08-25 03:50:17,354 INFO [train.py:1114] (3/4) Epoch 2, batch 400, loss[loss=0.3822, simple_loss=0.3915, pruned_loss=0.135, ctc_loss=0.2574, over 19496.00 frames. ], tot_loss[loss=0.3965, simple_loss=0.3863, pruned_loss=0.147, ctc_loss=0.2819, over 3342521.02 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:50:32,991 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:50:36,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.77 vs. limit=13.3
+2024-08-25 03:50:39,712 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.407e+02 2.984e+02 3.456e+02 5.488e+02, threshold=5.968e+02, percent-clipped=0.0
+2024-08-25 03:51:15,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=15680.0, ans=0.14320000000000002
+2024-08-25 03:51:19,335 INFO [train.py:1114] (3/4) Epoch 2, batch 450, loss[loss=0.3785, simple_loss=0.3751, pruned_loss=0.1387, ctc_loss=0.2613, over 19623.00 frames. ], tot_loss[loss=0.3952, simple_loss=0.3856, pruned_loss=0.1464, ctc_loss=0.28, over 3449587.75 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 03:51:34,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=15733.333333333334, ans=0.125
+2024-08-25 03:51:36,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=15733.333333333334, ans=0.125
+2024-08-25 03:51:40,293 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=15733.333333333334, ans=0.125
+2024-08-25 03:51:46,549 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.10 vs. limit=19.34
+2024-08-25 03:51:50,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=15786.666666666666, ans=0.125
+2024-08-25 03:51:51,144 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.18 vs. limit=13.42
+2024-08-25 03:52:14,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=15893.333333333334, ans=0.125
+2024-08-25 03:52:17,538 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:52:21,862 INFO [train.py:1114] (3/4) Epoch 2, batch 500, loss[loss=0.409, simple_loss=0.398, pruned_loss=0.1518, ctc_loss=0.2908, over 19682.00 frames. ], tot_loss[loss=0.3916, simple_loss=0.3833, pruned_loss=0.1446, ctc_loss=0.2768, over 3545523.35 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:53:00,741 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.29 vs. limit=13.0
+2024-08-25 03:53:11,992 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.425e+02 3.079e+02 3.995e+02 1.154e+03, threshold=6.159e+02, percent-clipped=13.0
+2024-08-25 03:53:33,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=16106.666666666666, ans=0.125
+2024-08-25 03:53:33,743 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=16106.666666666666, ans=0.3362666666666667
+2024-08-25 03:53:44,350 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=16160.0, ans=0.125
+2024-08-25 03:53:49,955 INFO [train.py:1114] (3/4) Epoch 2, batch 550, loss[loss=0.43, simple_loss=0.4179, pruned_loss=0.1602, ctc_loss=0.3043, over 19287.00 frames. ], tot_loss[loss=0.3902, simple_loss=0.3826, pruned_loss=0.1439, ctc_loss=0.275, over 3607067.87 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 16.0
+2024-08-25 03:53:51,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=16213.333333333334, ans=0.125
+2024-08-25 03:54:04,865 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 03:54:08,880 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.09 vs. limit=13.6
+2024-08-25 03:54:09,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=16266.666666666666, ans=10.0
+2024-08-25 03:54:14,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=16320.0, ans=0.007321739130434783
+2024-08-25 03:54:15,647 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=16320.0, ans=0.0
+2024-08-25 03:54:26,829 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=16373.333333333334, ans=0.125
+2024-08-25 03:54:31,387 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=16373.333333333334, ans=0.125
+2024-08-25 03:54:51,528 INFO [train.py:1114] (3/4) Epoch 2, batch 600, loss[loss=0.4684, simple_loss=0.4332, pruned_loss=0.1815, ctc_loss=0.3515, over 19377.00 frames. ], tot_loss[loss=0.3886, simple_loss=0.3819, pruned_loss=0.143, ctc_loss=0.2731, over 3665159.69 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:54:57,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=16480.0, ans=0.125
+2024-08-25 03:54:57,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=16480.0, ans=0.125
+2024-08-25 03:55:03,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=16533.333333333332, ans=0.025
+2024-08-25 03:55:04,607 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=16533.333333333332, ans=0.125
+2024-08-25 03:55:14,973 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.336e+02 2.753e+02 3.494e+02 8.105e+02, threshold=5.507e+02, percent-clipped=1.0
+2024-08-25 03:55:15,369 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=16586.666666666668, ans=0.125
+2024-08-25 03:55:20,383 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=16586.666666666668, ans=0.13413333333333333
+2024-08-25 03:55:46,962 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=11.85 vs. limit=13.759999999999998
+2024-08-25 03:55:56,157 INFO [train.py:1114] (3/4) Epoch 2, batch 650, loss[loss=0.3535, simple_loss=0.3639, pruned_loss=0.1241, ctc_loss=0.2372, over 19757.00 frames. ], tot_loss[loss=0.3854, simple_loss=0.3801, pruned_loss=0.1413, ctc_loss=0.27, over 3715297.44 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 16.0
+2024-08-25 03:56:45,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=16960.0, ans=0.13040000000000002
+2024-08-25 03:56:49,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=16960.0, ans=0.0
+2024-08-25 03:56:56,407 INFO [train.py:1114] (3/4) Epoch 2, batch 700, loss[loss=0.391, simple_loss=0.3847, pruned_loss=0.1444, ctc_loss=0.2712, over 19719.00 frames. ], tot_loss[loss=0.3845, simple_loss=0.38, pruned_loss=0.1407, ctc_loss=0.2689, over 3747694.77 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:56:57,213 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.94 vs. limit=13.879999999999999
+2024-08-25 03:57:00,350 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=17013.333333333332, ans=0.12986666666666669
+2024-08-25 03:57:03,878 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=17013.333333333332, ans=0.125
+2024-08-25 03:57:05,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=17013.333333333332, ans=0.125
+2024-08-25 03:57:12,991 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.94 vs. limit=13.9
+2024-08-25 03:57:23,237 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.519e+02 2.895e+02 3.628e+02 6.087e+02, threshold=5.790e+02, percent-clipped=2.0
+2024-08-25 03:57:28,008 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.01 vs. limit=20.34
+2024-08-25 03:57:53,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=17226.666666666668, ans=0.125
+2024-08-25 03:57:56,723 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=17226.666666666668, ans=0.125
+2024-08-25 03:58:01,089 INFO [train.py:1114] (3/4) Epoch 2, batch 750, loss[loss=0.3548, simple_loss=0.3697, pruned_loss=0.1219, ctc_loss=0.24, over 19518.00 frames. ], tot_loss[loss=0.3825, simple_loss=0.3787, pruned_loss=0.1397, ctc_loss=0.2671, over 3772835.56 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 16.0
+2024-08-25 03:58:19,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=17333.333333333332, ans=0.0
+2024-08-25 03:58:37,778 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=17440.0, ans=0.9243999999999999
+2024-08-25 03:58:40,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=17440.0, ans=0.125
+2024-08-25 04:00:11,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=17493.333333333332, ans=0.007066666666666667
+2024-08-25 04:00:16,087 INFO [train.py:1114] (3/4) Epoch 2, batch 800, loss[loss=0.3323, simple_loss=0.3423, pruned_loss=0.1174, ctc_loss=0.2188, over 19833.00 frames. ], tot_loss[loss=0.3819, simple_loss=0.3785, pruned_loss=0.1394, ctc_loss=0.2663, over 3794352.44 frames. ], batch size: 49, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:00:18,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=17546.666666666668, ans=0.2858666666666667
+2024-08-25 04:00:39,330 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.611e+02 3.088e+02 3.881e+02 9.768e+02, threshold=6.176e+02, percent-clipped=6.0
+2024-08-25 04:00:39,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=17653.333333333332, ans=0.0
+2024-08-25 04:01:02,578 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:01:12,809 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=17760.0, ans=0.0
+2024-08-25 04:01:15,042 INFO [train.py:1114] (3/4) Epoch 2, batch 850, loss[loss=0.4404, simple_loss=0.4167, pruned_loss=0.1681, ctc_loss=0.32, over 19660.00 frames. ], tot_loss[loss=0.3792, simple_loss=0.3768, pruned_loss=0.1381, ctc_loss=0.2637, over 3814929.35 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 04:01:32,458 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.94 vs. limit=11.146666666666668
+2024-08-25 04:02:11,398 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=18026.666666666668, ans=0.0
+2024-08-25 04:02:18,995 INFO [train.py:1114] (3/4) Epoch 2, batch 900, loss[loss=0.3678, simple_loss=0.3639, pruned_loss=0.1333, ctc_loss=0.2625, over 19419.00 frames. ], tot_loss[loss=0.38, simple_loss=0.3772, pruned_loss=0.1386, ctc_loss=0.2641, over 3818536.94 frames. ], batch size: 48, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:02:45,637 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.62 vs. limit=11.232
+2024-08-25 04:03:03,829 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.814e+02 2.530e+02 3.033e+02 3.602e+02 3.379e+03, threshold=6.066e+02, percent-clipped=6.0
+2024-08-25 04:03:06,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=18186.666666666668, ans=0.125
+2024-08-25 04:03:30,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=18293.333333333332, ans=0.025
+2024-08-25 04:03:34,906 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=18293.333333333332, ans=0.11706666666666668
+2024-08-25 04:03:36,918 INFO [train.py:1114] (3/4) Epoch 2, batch 950, loss[loss=0.3708, simple_loss=0.3672, pruned_loss=0.136, ctc_loss=0.2555, over 19495.00 frames. ], tot_loss[loss=0.379, simple_loss=0.3766, pruned_loss=0.138, ctc_loss=0.2629, over 3819408.56 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 04:04:01,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=18400.0, ans=0.256
+2024-08-25 04:04:09,918 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=18453.333333333332, ans=0.25413333333333343
+2024-08-25 04:04:11,740 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.23 vs. limit=11.381333333333334
+2024-08-25 04:04:12,220 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=18453.333333333332, ans=0.125
+2024-08-25 04:04:18,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=18506.666666666668, ans=0.25226666666666675
+2024-08-25 04:04:27,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=18560.0, ans=0.006834782608695652
+2024-08-25 04:04:33,047 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.40 vs. limit=21.42
+2024-08-25 04:04:39,319 INFO [train.py:1114] (3/4) Epoch 2, batch 1000, loss[loss=0.4116, simple_loss=0.3915, pruned_loss=0.1559, ctc_loss=0.3, over 19839.00 frames. ], tot_loss[loss=0.3792, simple_loss=0.377, pruned_loss=0.1381, ctc_loss=0.2627, over 3815288.34 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:05:05,784 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.321e+02 2.743e+02 3.485e+02 6.350e+02, threshold=5.486e+02, percent-clipped=2.0
+2024-08-25 04:05:06,745 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.15 vs. limit=14.52
+2024-08-25 04:05:09,142 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.23 vs. limit=5.808
+2024-08-25 04:05:30,651 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=20.22 vs. limit=21.619999999999997
+2024-08-25 04:05:41,812 INFO [train.py:1114] (3/4) Epoch 2, batch 1050, loss[loss=0.4339, simple_loss=0.4255, pruned_loss=0.1617, ctc_loss=0.2972, over 19854.00 frames. ], tot_loss[loss=0.3768, simple_loss=0.3757, pruned_loss=0.1369, ctc_loss=0.2603, over 3821712.04 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 04:06:34,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=19093.333333333332, ans=0.23173333333333346
+2024-08-25 04:06:38,481 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=19093.333333333332, ans=0.125
+2024-08-25 04:06:44,159 INFO [train.py:1114] (3/4) Epoch 2, batch 1100, loss[loss=0.3538, simple_loss=0.3589, pruned_loss=0.1249, ctc_loss=0.2472, over 19594.00 frames. ], tot_loss[loss=0.3757, simple_loss=0.3751, pruned_loss=0.1364, ctc_loss=0.259, over 3829183.81 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:07:00,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=19200.0, ans=0.125
+2024-08-25 04:07:11,083 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.465e+02 2.960e+02 4.039e+02 7.406e+02, threshold=5.919e+02, percent-clipped=11.0
+2024-08-25 04:08:08,080 INFO [train.py:1114] (3/4) Epoch 2, batch 1150, loss[loss=0.359, simple_loss=0.3671, pruned_loss=0.1285, ctc_loss=0.2344, over 19585.00 frames. ], tot_loss[loss=0.3757, simple_loss=0.3751, pruned_loss=0.1364, ctc_loss=0.2587, over 3829453.92 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 04:08:08,761 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.94 vs. limit=11.765333333333333
+2024-08-25 04:08:09,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=19413.333333333332, ans=0.125
+2024-08-25 04:08:13,548 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=19413.333333333332, ans=0.125
+2024-08-25 04:08:17,674 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.61 vs. limit=14.780000000000001
+2024-08-25 04:08:18,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=19413.333333333332, ans=0.125
+2024-08-25 04:08:26,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=19466.666666666668, ans=0.05533333333333329
+2024-08-25 04:08:29,653 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.20 vs. limit=22.1
+2024-08-25 04:08:50,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=19573.333333333332, ans=0.21493333333333342
+2024-08-25 04:08:55,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=19626.666666666668, ans=0.125
+2024-08-25 04:08:55,636 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=19626.666666666668, ans=0.025
+2024-08-25 04:08:58,225 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.83 vs. limit=14.86
+2024-08-25 04:09:01,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=19626.666666666668, ans=0.125
+2024-08-25 04:09:08,082 INFO [train.py:1114] (3/4) Epoch 2, batch 1200, loss[loss=0.3684, simple_loss=0.3832, pruned_loss=0.1276, ctc_loss=0.2464, over 19835.00 frames. ], tot_loss[loss=0.3769, simple_loss=0.3762, pruned_loss=0.1368, ctc_loss=0.2595, over 3825699.81 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 04:09:26,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=19733.333333333332, ans=0.125
+2024-08-25 04:09:32,229 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.02 vs. limit=14.9
+2024-08-25 04:09:34,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=19786.666666666668, ans=0.0065681159420289854
+2024-08-25 04:09:36,230 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.637e+02 3.065e+02 4.000e+02 6.600e+02, threshold=6.130e+02, percent-clipped=2.0
+2024-08-25 04:09:57,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=19840.0, ans=0.125
+2024-08-25 04:10:03,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=19893.333333333332, ans=0.125
+2024-08-25 04:10:08,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=19893.333333333332, ans=0.125
+2024-08-25 04:10:11,975 INFO [train.py:1114] (3/4) Epoch 2, batch 1250, loss[loss=0.4067, simple_loss=0.4018, pruned_loss=0.1494, ctc_loss=0.2817, over 19560.00 frames. ], tot_loss[loss=0.3758, simple_loss=0.376, pruned_loss=0.1361, ctc_loss=0.2581, over 3843546.43 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:10:27,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=20000.0, ans=10.0
+2024-08-25 04:10:29,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=20000.0, ans=0.2
+2024-08-25 04:10:42,267 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.61 vs. limit=12.0
+2024-08-25 04:10:52,886 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.05 vs. limit=15.0
+2024-08-25 04:11:07,594 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=20160.0, ans=0.125
+2024-08-25 04:11:08,924 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=20160.0, ans=0.125
+2024-08-25 04:11:14,916 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=20213.333333333332, ans=0.025
+2024-08-25 04:11:15,946 INFO [train.py:1114] (3/4) Epoch 2, batch 1300, loss[loss=0.468, simple_loss=0.4275, pruned_loss=0.1864, ctc_loss=0.3392, over 18948.00 frames. ], tot_loss[loss=0.3742, simple_loss=0.375, pruned_loss=0.1353, ctc_loss=0.2566, over 3847154.83 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 04:11:28,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=20266.666666666668, ans=0.125
+2024-08-25 04:11:32,960 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=20266.666666666668, ans=0.1
+2024-08-25 04:11:36,348 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=20266.666666666668, ans=0.2
+2024-08-25 04:11:41,996 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.187e+02 2.429e+02 2.931e+02 4.736e+02, threshold=4.858e+02, percent-clipped=0.0
+2024-08-25 04:11:48,331 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:11:55,960 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=20373.333333333332, ans=0.1
+2024-08-25 04:11:57,351 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.28 vs. limit=15.0
+2024-08-25 04:11:59,778 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=18.89 vs. limit=22.5
+2024-08-25 04:12:04,403 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.21 vs. limit=15.0
+2024-08-25 04:12:15,273 INFO [train.py:1114] (3/4) Epoch 2, batch 1350, loss[loss=0.3496, simple_loss=0.3623, pruned_loss=0.1202, ctc_loss=0.2413, over 19775.00 frames. ], tot_loss[loss=0.3707, simple_loss=0.373, pruned_loss=0.1336, ctc_loss=0.2531, over 3858684.74 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:12:39,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=20586.666666666668, ans=0.025
+2024-08-25 04:13:09,087 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:13:11,412 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=20693.333333333332, ans=0.0
+2024-08-25 04:13:12,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=20693.333333333332, ans=0.125
+2024-08-25 04:13:18,526 INFO [train.py:1114] (3/4) Epoch 2, batch 1400, loss[loss=0.2981, simple_loss=0.3192, pruned_loss=0.09802, ctc_loss=0.2026, over 19670.00 frames. ], tot_loss[loss=0.3679, simple_loss=0.3714, pruned_loss=0.1322, ctc_loss=0.2503, over 3865506.63 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 04:14:03,158 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.385e+02 2.674e+02 3.744e+02 6.684e+02, threshold=5.347e+02, percent-clipped=6.0
+2024-08-25 04:14:08,271 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=2.506e-03
+2024-08-25 04:14:08,586 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.19 vs. limit=15.0
+2024-08-25 04:14:11,048 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=20853.333333333332, ans=0.125
+2024-08-25 04:14:13,330 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=20906.666666666668, ans=0.0
+2024-08-25 04:14:19,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=20906.666666666668, ans=10.0
+2024-08-25 04:14:19,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20906.666666666668, ans=0.1
+2024-08-25 04:14:36,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=21013.333333333332, ans=0.125
+2024-08-25 04:14:37,938 INFO [train.py:1114] (3/4) Epoch 2, batch 1450, loss[loss=0.3879, simple_loss=0.391, pruned_loss=0.14, ctc_loss=0.262, over 19681.00 frames. ], tot_loss[loss=0.369, simple_loss=0.3722, pruned_loss=0.1327, ctc_loss=0.2512, over 3862745.91 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:14:42,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=21013.333333333332, ans=0.2
+2024-08-25 04:15:58,395 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.39 vs. limit=10.0
+2024-08-25 04:16:00,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=21120.0, ans=0.2
+2024-08-25 04:16:10,963 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=21173.333333333332, ans=0.125
+2024-08-25 04:16:11,013 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=21173.333333333332, ans=0.04949747468305833
+2024-08-25 04:16:13,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=21173.333333333332, ans=0.5
+2024-08-25 04:16:28,413 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=21226.666666666668, ans=0.125
+2024-08-25 04:16:30,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=21226.666666666668, ans=0.006255072463768116
+2024-08-25 04:16:33,077 INFO [train.py:1114] (3/4) Epoch 2, batch 1500, loss[loss=0.3892, simple_loss=0.3928, pruned_loss=0.1415, ctc_loss=0.2568, over 19574.00 frames. ], tot_loss[loss=0.3682, simple_loss=0.372, pruned_loss=0.1321, ctc_loss=0.2502, over 3862326.39 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 04:16:33,827 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.12 vs. limit=15.0
+2024-08-25 04:16:36,013 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=21280.0, ans=0.1
+2024-08-25 04:16:44,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=21333.333333333332, ans=0.125
+2024-08-25 04:17:08,007 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.509e+02 2.906e+02 4.274e+02 8.598e+02, threshold=5.813e+02, percent-clipped=13.0
+2024-08-25 04:17:19,228 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:17:21,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=21440.0, ans=0.125
+2024-08-25 04:17:36,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=21493.333333333332, ans=0.125
+2024-08-25 04:17:38,425 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=21493.333333333332, ans=0.125
+2024-08-25 04:17:42,728 INFO [train.py:1114] (3/4) Epoch 2, batch 1550, loss[loss=0.3934, simple_loss=0.3964, pruned_loss=0.143, ctc_loss=0.2606, over 19604.00 frames. ], tot_loss[loss=0.3698, simple_loss=0.3727, pruned_loss=0.1331, ctc_loss=0.2518, over 3847724.41 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 04:17:47,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=21546.666666666668, ans=10.0
+2024-08-25 04:18:03,026 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.64 vs. limit=15.0
+2024-08-25 04:18:33,015 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=21760.0, ans=0.125
+2024-08-25 04:18:44,957 INFO [train.py:1114] (3/4) Epoch 2, batch 1600, loss[loss=0.3975, simple_loss=0.3924, pruned_loss=0.1471, ctc_loss=0.2708, over 19836.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.3724, pruned_loss=0.133, ctc_loss=0.2519, over 3835939.44 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:18:56,278 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=21866.666666666668, ans=0.04949747468305833
+2024-08-25 04:19:13,741 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.370e+02 2.902e+02 3.664e+02 6.938e+02, threshold=5.803e+02, percent-clipped=2.0
+2024-08-25 04:19:42,156 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22026.666666666668, ans=0.1
+2024-08-25 04:19:42,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=22026.666666666668, ans=0.2
+2024-08-25 04:19:49,396 INFO [train.py:1114] (3/4) Epoch 2, batch 1650, loss[loss=0.3698, simple_loss=0.3857, pruned_loss=0.1282, ctc_loss=0.2438, over 19647.00 frames. ], tot_loss[loss=0.3688, simple_loss=0.3718, pruned_loss=0.1327, ctc_loss=0.2511, over 3832389.91 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 04:19:57,323 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.32 vs. limit=12.0
+2024-08-25 04:20:02,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=22133.333333333332, ans=0.2
+2024-08-25 04:20:27,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=22240.0, ans=0.125
+2024-08-25 04:20:38,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=22293.333333333332, ans=0.0
+2024-08-25 04:20:46,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=22293.333333333332, ans=0.125
+2024-08-25 04:20:48,535 INFO [train.py:1114] (3/4) Epoch 2, batch 1700, loss[loss=0.3243, simple_loss=0.3327, pruned_loss=0.1124, ctc_loss=0.228, over 19685.00 frames. ], tot_loss[loss=0.3662, simple_loss=0.3704, pruned_loss=0.1313, ctc_loss=0.2486, over 3846786.50 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:21:04,218 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:21:11,043 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=22400.0, ans=0.125
+2024-08-25 04:21:14,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=22453.333333333332, ans=0.125
+2024-08-25 04:21:16,630 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.264e+02 2.715e+02 3.253e+02 5.462e+02, threshold=5.430e+02, percent-clipped=0.0
+2024-08-25 04:21:29,804 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.60 vs. limit=22.5
+2024-08-25 04:21:31,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=22506.666666666668, ans=0.125
+2024-08-25 04:21:40,954 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.04 vs. limit=6.0
+2024-08-25 04:21:48,267 INFO [train.py:1114] (3/4) Epoch 2, batch 1750, loss[loss=0.3623, simple_loss=0.3546, pruned_loss=0.1357, ctc_loss=0.2466, over 19653.00 frames. ], tot_loss[loss=0.3652, simple_loss=0.3697, pruned_loss=0.1309, ctc_loss=0.2475, over 3852038.36 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 32.0
+2024-08-25 04:21:51,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=22613.333333333332, ans=0.125
+2024-08-25 04:21:58,714 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=22666.666666666668, ans=0.005942028985507246
+2024-08-25 04:22:12,829 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=22720.0, ans=0.125
+2024-08-25 04:22:13,808 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=22720.0, ans=0.125
+2024-08-25 04:22:28,792 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=22773.333333333332, ans=0.125
+2024-08-25 04:22:28,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=22773.333333333332, ans=0.0
+2024-08-25 04:22:53,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=22826.666666666668, ans=0.0
+2024-08-25 04:23:02,442 INFO [train.py:1114] (3/4) Epoch 2, batch 1800, loss[loss=0.3418, simple_loss=0.3637, pruned_loss=0.1153, ctc_loss=0.2233, over 19625.00 frames. ], tot_loss[loss=0.3646, simple_loss=0.3694, pruned_loss=0.1305, ctc_loss=0.247, over 3853550.70 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:23:08,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=22880.0, ans=0.2
+2024-08-25 04:23:28,005 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.473e+02 2.913e+02 3.585e+02 6.262e+02, threshold=5.825e+02, percent-clipped=5.0
+2024-08-25 04:23:35,271 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.94 vs. limit=22.5
+2024-08-25 04:23:40,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=23040.0, ans=0.125
+2024-08-25 04:23:48,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=23093.333333333332, ans=0.07
+2024-08-25 04:23:59,517 INFO [train.py:1114] (3/4) Epoch 2, batch 1850, loss[loss=0.3764, simple_loss=0.3855, pruned_loss=0.1322, ctc_loss=0.2575, over 19602.00 frames. ], tot_loss[loss=0.3636, simple_loss=0.3691, pruned_loss=0.1299, ctc_loss=0.2456, over 3856750.56 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 32.0
+2024-08-25 04:24:01,490 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=11.32 vs. limit=15.0
+2024-08-25 04:24:03,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23146.666666666668, ans=0.1
+2024-08-25 04:24:15,087 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=23200.0, ans=0.0
+2024-08-25 04:24:25,577 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.67 vs. limit=15.0
+2024-08-25 04:24:41,545 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=23306.666666666668, ans=0.0
+2024-08-25 04:24:56,425 INFO [train.py:1114] (3/4) Epoch 2, batch 1900, loss[loss=0.347, simple_loss=0.3708, pruned_loss=0.1172, ctc_loss=0.2218, over 19659.00 frames. ], tot_loss[loss=0.3618, simple_loss=0.3685, pruned_loss=0.1288, ctc_loss=0.2436, over 3861920.01 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 32.0
+2024-08-25 04:24:57,877 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=23413.333333333332, ans=0.0057797101449275365
+2024-08-25 04:25:07,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=23466.666666666668, ans=0.005768115942028985
+2024-08-25 04:25:12,590 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.65 vs. limit=22.5
+2024-08-25 04:25:14,835 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.61 vs. limit=15.0
+2024-08-25 04:25:16,726 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=23466.666666666668, ans=0.125
+2024-08-25 04:25:21,313 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.247e+02 2.781e+02 3.399e+02 7.136e+02, threshold=5.561e+02, percent-clipped=3.0
+2024-08-25 04:25:28,650 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=23520.0, ans=0.0
+2024-08-25 04:25:55,283 INFO [train.py:1114] (3/4) Epoch 2, batch 1950, loss[loss=0.3432, simple_loss=0.3551, pruned_loss=0.1218, ctc_loss=0.219, over 19583.00 frames. ], tot_loss[loss=0.3615, simple_loss=0.3692, pruned_loss=0.1284, ctc_loss=0.2427, over 3870854.51 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:26:09,994 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=23733.333333333332, ans=0.005710144927536232
+2024-08-25 04:26:10,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=23733.333333333332, ans=0.125
+2024-08-25 04:26:11,147 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=23733.333333333332, ans=0.025
+2024-08-25 04:26:24,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=23786.666666666668, ans=0.0
+2024-08-25 04:26:30,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=23840.0, ans=0.125
+2024-08-25 04:26:40,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=23840.0, ans=0.125
+2024-08-25 04:26:46,694 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=23893.333333333332, ans=0.125
+2024-08-25 04:26:54,475 INFO [train.py:1114] (3/4) Epoch 2, batch 2000, loss[loss=0.2948, simple_loss=0.3168, pruned_loss=0.0991, ctc_loss=0.1867, over 19653.00 frames. ], tot_loss[loss=0.3636, simple_loss=0.3702, pruned_loss=0.1296, ctc_loss=0.2446, over 3855664.61 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 04:26:59,245 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 04:27:20,444 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.625e+02 3.128e+02 3.968e+02 6.078e+02, threshold=6.255e+02, percent-clipped=2.0
+2024-08-25 04:27:26,078 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=24053.333333333332, ans=0.1
+2024-08-25 04:27:51,117 INFO [train.py:1114] (3/4) Epoch 2, batch 2050, loss[loss=0.303, simple_loss=0.3207, pruned_loss=0.1046, ctc_loss=0.19, over 19710.00 frames. ], tot_loss[loss=0.3612, simple_loss=0.3682, pruned_loss=0.1286, ctc_loss=0.2427, over 3851469.00 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:28:05,448 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.16 vs. limit=15.0
+2024-08-25 04:28:10,379 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=24266.666666666668, ans=0.125
+2024-08-25 04:28:13,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=24320.0, ans=0.005582608695652174
+2024-08-25 04:28:14,190 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=24320.0, ans=0.0
+2024-08-25 04:28:14,352 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=24320.0, ans=0.125
+2024-08-25 04:28:14,661 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.12 vs. limit=12.0
+2024-08-25 04:28:19,317 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.68 vs. limit=15.0
+2024-08-25 04:28:28,691 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.81 vs. limit=22.5
+2024-08-25 04:28:32,614 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=24373.333333333332, ans=0.2
+2024-08-25 04:28:47,789 INFO [train.py:1114] (3/4) Epoch 2, batch 2100, loss[loss=0.3409, simple_loss=0.3615, pruned_loss=0.1156, ctc_loss=0.2227, over 19771.00 frames. ], tot_loss[loss=0.3594, simple_loss=0.3669, pruned_loss=0.1278, ctc_loss=0.2411, over 3858121.59 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 04:28:48,039 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=24480.0, ans=0.125
+2024-08-25 04:28:57,414 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.91 vs. limit=22.5
+2024-08-25 04:29:04,242 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:29:14,130 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.311e+02 2.619e+02 3.137e+02 5.086e+02, threshold=5.238e+02, percent-clipped=0.0
+2024-08-25 04:29:14,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=24586.666666666668, ans=0.1
+2024-08-25 04:29:28,037 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.28 vs. limit=22.5
+2024-08-25 04:29:32,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=24693.333333333332, ans=0.005501449275362319
+2024-08-25 04:29:43,434 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=24746.666666666668, ans=0.125
+2024-08-25 04:29:44,331 INFO [train.py:1114] (3/4) Epoch 2, batch 2150, loss[loss=0.3222, simple_loss=0.3447, pruned_loss=0.1073, ctc_loss=0.2124, over 19847.00 frames. ], tot_loss[loss=0.3565, simple_loss=0.365, pruned_loss=0.1263, ctc_loss=0.2381, over 3870257.90 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 04:29:58,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=24800.0, ans=0.1
+2024-08-25 04:30:03,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=24800.0, ans=0.125
+2024-08-25 04:30:04,825 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:30:09,484 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 04:30:10,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 04:30:13,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 04:30:24,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=24906.666666666668, ans=0.0
+2024-08-25 04:30:32,380 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=24960.0, ans=0.1
+2024-08-25 04:30:35,797 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=24960.0, ans=0.07
+2024-08-25 04:30:40,040 INFO [train.py:1114] (3/4) Epoch 2, batch 2200, loss[loss=0.3701, simple_loss=0.3771, pruned_loss=0.1312, ctc_loss=0.2521, over 19587.00 frames. ], tot_loss[loss=0.3554, simple_loss=0.3646, pruned_loss=0.1257, ctc_loss=0.237, over 3869038.25 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:30:40,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=25013.333333333332, ans=0.125
+2024-08-25 04:30:46,960 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=25013.333333333332, ans=0.0
+2024-08-25 04:30:55,579 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=25066.666666666668, ans=0.125
+2024-08-25 04:31:06,340 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.398e+02 2.814e+02 3.505e+02 8.042e+02, threshold=5.628e+02, percent-clipped=3.0
+2024-08-25 04:31:17,473 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=25173.333333333332, ans=0.125
+2024-08-25 04:31:21,936 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=25173.333333333332, ans=0.95
+2024-08-25 04:31:23,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=25173.333333333332, ans=0.125
+2024-08-25 04:31:27,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25226.666666666668, ans=0.1
+2024-08-25 04:31:31,674 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=25226.666666666668, ans=0.0
+2024-08-25 04:31:37,462 INFO [train.py:1114] (3/4) Epoch 2, batch 2250, loss[loss=0.3619, simple_loss=0.3826, pruned_loss=0.1227, ctc_loss=0.2394, over 19606.00 frames. ], tot_loss[loss=0.3557, simple_loss=0.3648, pruned_loss=0.1258, ctc_loss=0.2375, over 3868519.86 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 04:31:42,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25280.0, ans=0.1
+2024-08-25 04:31:49,078 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.74 vs. limit=15.0
+2024-08-25 04:31:57,743 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=25333.333333333332, ans=0.0
+2024-08-25 04:32:02,798 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.33 vs. limit=6.0
+2024-08-25 04:32:08,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 04:32:16,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=25440.0, ans=0.0053391304347826084
+2024-08-25 04:32:21,529 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:32:29,329 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=25493.333333333332, ans=0.0
+2024-08-25 04:32:30,507 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=25493.333333333332, ans=0.2
+2024-08-25 04:32:33,457 INFO [train.py:1114] (3/4) Epoch 2, batch 2300, loss[loss=0.3435, simple_loss=0.3516, pruned_loss=0.1224, ctc_loss=0.2268, over 19513.00 frames. ], tot_loss[loss=0.3553, simple_loss=0.3639, pruned_loss=0.1259, ctc_loss=0.2371, over 3862000.82 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 16.0
+2024-08-25 04:32:45,800 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=25600.0, ans=0.025
+2024-08-25 04:32:47,527 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.65 vs. limit=5.0
+2024-08-25 04:32:51,576 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=25600.0, ans=0.04949747468305833
+2024-08-25 04:32:55,968 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.80 vs. limit=15.0
+2024-08-25 04:33:03,045 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.317e+02 2.709e+02 3.466e+02 6.027e+02, threshold=5.417e+02, percent-clipped=4.0
+2024-08-25 04:33:04,407 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=25653.333333333332, ans=0.0
+2024-08-25 04:33:04,820 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=25653.333333333332, ans=0.125
+2024-08-25 04:33:17,114 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=25706.666666666668, ans=0.04949747468305833
+2024-08-25 04:33:20,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=25760.0, ans=0.05
+2024-08-25 04:33:27,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=25760.0, ans=0.125
+2024-08-25 04:33:32,438 INFO [train.py:1114] (3/4) Epoch 2, batch 2350, loss[loss=0.3448, simple_loss=0.3661, pruned_loss=0.117, ctc_loss=0.2237, over 19655.00 frames. ], tot_loss[loss=0.3551, simple_loss=0.3636, pruned_loss=0.1259, ctc_loss=0.2369, over 3864149.94 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 16.0
+2024-08-25 04:33:34,118 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=25813.333333333332, ans=0.2
+2024-08-25 04:33:46,438 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=25866.666666666668, ans=0.1
+2024-08-25 04:34:11,562 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.58 vs. limit=15.0
+2024-08-25 04:34:25,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=26026.666666666668, ans=0.1
+2024-08-25 04:34:25,491 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.19 vs. limit=10.0
+2024-08-25 04:34:30,678 INFO [train.py:1114] (3/4) Epoch 2, batch 2400, loss[loss=0.3553, simple_loss=0.3678, pruned_loss=0.1233, ctc_loss=0.2405, over 19243.00 frames. ], tot_loss[loss=0.3588, simple_loss=0.3668, pruned_loss=0.1275, ctc_loss=0.2396, over 3858949.39 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 04:34:36,424 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=26080.0, ans=0.125
+2024-08-25 04:34:40,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26133.333333333332, ans=0.1
+2024-08-25 04:34:57,158 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.184e+02 2.505e+02 3.102e+02 8.045e+02, threshold=5.010e+02, percent-clipped=5.0
+2024-08-25 04:34:59,049 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.66 vs. limit=22.5
+2024-08-25 04:34:59,768 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=26186.666666666668, ans=0.125
+2024-08-25 04:35:00,859 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=26186.666666666668, ans=0.2
+2024-08-25 04:35:06,431 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=26240.0, ans=0.125
+2024-08-25 04:35:27,216 INFO [train.py:1114] (3/4) Epoch 2, batch 2450, loss[loss=0.4348, simple_loss=0.4045, pruned_loss=0.1706, ctc_loss=0.3098, over 13311.00 frames. ], tot_loss[loss=0.3686, simple_loss=0.3722, pruned_loss=0.1327, ctc_loss=0.2491, over 3728997.81 frames. ], batch size: 140, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 04:35:51,981 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=26453.333333333332, ans=0.125
+2024-08-25 04:36:50,859 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.83 vs. limit=6.0
+2024-08-25 04:36:55,752 INFO [train.py:1114] (3/4) Epoch 3, batch 0, loss[loss=0.3412, simple_loss=0.3479, pruned_loss=0.1227, ctc_loss=0.2227, over 19796.00 frames. ], tot_loss[loss=0.3412, simple_loss=0.3479, pruned_loss=0.1227, ctc_loss=0.2227, over 19796.00 frames. ], batch size: 49, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 04:36:55,753 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 04:37:08,021 INFO [train.py:1146] (3/4) Epoch 3, validation: loss=0.2847, simple_loss=0.3461, pruned_loss=0.08168, ctc_loss=0.1499, over 944034.00 frames.
+2024-08-25 04:37:08,022 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 04:37:28,854 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=26608.0, ans=0.125
+2024-08-25 04:37:45,284 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=26714.666666666668, ans=0.125
+2024-08-25 04:37:50,807 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.252e+02 2.580e+02 3.143e+02 6.401e+02, threshold=5.159e+02, percent-clipped=2.0
+2024-08-25 04:38:10,078 INFO [train.py:1114] (3/4) Epoch 3, batch 50, loss[loss=0.3261, simple_loss=0.3443, pruned_loss=0.1111, ctc_loss=0.2142, over 19705.00 frames. ], tot_loss[loss=0.3662, simple_loss=0.3716, pruned_loss=0.1309, ctc_loss=0.2474, over 844545.25 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:38:45,744 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=26874.666666666668, ans=0.125
+2024-08-25 04:38:48,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=26928.0, ans=0.125
+2024-08-25 04:38:51,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=26928.0, ans=0.125
+2024-08-25 04:39:07,082 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=26981.333333333332, ans=0.0
+2024-08-25 04:39:14,207 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=26981.333333333332, ans=0.2
+2024-08-25 04:39:28,423 INFO [train.py:1114] (3/4) Epoch 3, batch 100, loss[loss=0.3169, simple_loss=0.3371, pruned_loss=0.108, ctc_loss=0.2016, over 19696.00 frames. ], tot_loss[loss=0.3653, simple_loss=0.372, pruned_loss=0.1302, ctc_loss=0.2453, over 1498666.18 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 04:39:40,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=27141.333333333332, ans=0.5
+2024-08-25 04:39:46,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=27141.333333333332, ans=0.04949747468305833
+2024-08-25 04:39:54,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=27194.666666666668, ans=0.125
+2024-08-25 04:40:11,091 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.221e+02 2.583e+02 3.158e+02 4.904e+02, threshold=5.165e+02, percent-clipped=0.0
+2024-08-25 04:40:27,485 INFO [train.py:1114] (3/4) Epoch 3, batch 150, loss[loss=0.3696, simple_loss=0.3601, pruned_loss=0.1381, ctc_loss=0.2568, over 19710.00 frames. ], tot_loss[loss=0.3562, simple_loss=0.3654, pruned_loss=0.1261, ctc_loss=0.237, over 2028092.68 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 04:40:35,820 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=27354.666666666668, ans=0.0
+2024-08-25 04:40:57,672 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=27461.333333333332, ans=0.125
+2024-08-25 04:41:07,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=27514.666666666668, ans=0.004888115942028985
+2024-08-25 04:41:23,726 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=27568.0, ans=0.125
+2024-08-25 04:41:29,363 INFO [train.py:1114] (3/4) Epoch 3, batch 200, loss[loss=0.3931, simple_loss=0.3906, pruned_loss=0.1445, ctc_loss=0.2664, over 18470.00 frames. ], tot_loss[loss=0.3517, simple_loss=0.3623, pruned_loss=0.124, ctc_loss=0.233, over 2435922.79 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:41:33,339 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=27621.333333333332, ans=0.2
+2024-08-25 04:42:06,210 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27781.333333333332, ans=0.1
+2024-08-25 04:42:14,170 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.192e+02 2.550e+02 3.125e+02 5.269e+02, threshold=5.099e+02, percent-clipped=1.0
+2024-08-25 04:42:33,259 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.54 vs. limit=10.0
+2024-08-25 04:42:35,062 INFO [train.py:1114] (3/4) Epoch 3, batch 250, loss[loss=0.3965, simple_loss=0.3988, pruned_loss=0.1444, ctc_loss=0.2633, over 19425.00 frames. ], tot_loss[loss=0.3504, simple_loss=0.3617, pruned_loss=0.1233, ctc_loss=0.2314, over 2756123.64 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 04:42:59,918 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=27994.666666666668, ans=0.0
+2024-08-25 04:43:00,971 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=27994.666666666668, ans=0.125
+2024-08-25 04:43:02,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=27994.666666666668, ans=0.125
+2024-08-25 04:43:18,846 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=15.0
+2024-08-25 04:43:33,531 INFO [train.py:1114] (3/4) Epoch 3, batch 300, loss[loss=0.4106, simple_loss=0.4041, pruned_loss=0.1545, ctc_loss=0.2703, over 19505.00 frames. ], tot_loss[loss=0.3477, simple_loss=0.3599, pruned_loss=0.122, ctc_loss=0.2287, over 3001487.37 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:43:45,845 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=28208.0, ans=0.0
+2024-08-25 04:44:05,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 04:44:18,923 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.242e+02 2.624e+02 3.299e+02 5.169e+02, threshold=5.248e+02, percent-clipped=1.0
+2024-08-25 04:44:21,726 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 04:44:21,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 04:44:36,139 INFO [train.py:1114] (3/4) Epoch 3, batch 350, loss[loss=0.3032, simple_loss=0.3204, pruned_loss=0.1047, ctc_loss=0.1915, over 19746.00 frames. ], tot_loss[loss=0.3463, simple_loss=0.3594, pruned_loss=0.1211, ctc_loss=0.2274, over 3191503.78 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 04:44:49,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=28474.666666666668, ans=0.125
+2024-08-25 04:44:50,068 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=28474.666666666668, ans=0.2
+2024-08-25 04:45:00,816 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=28528.0, ans=0.1
+2024-08-25 04:45:36,890 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=28581.333333333332, ans=0.125
+2024-08-25 04:45:52,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=28634.666666666668, ans=0.00464463768115942
+2024-08-25 04:46:41,818 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=28634.666666666668, ans=0.95
+2024-08-25 04:46:55,918 INFO [train.py:1114] (3/4) Epoch 3, batch 400, loss[loss=0.3328, simple_loss=0.3601, pruned_loss=0.111, ctc_loss=0.2091, over 19500.00 frames. ], tot_loss[loss=0.3465, simple_loss=0.3595, pruned_loss=0.1211, ctc_loss=0.228, over 3341862.26 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 04:47:59,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=28848.0, ans=0.125
+2024-08-25 04:48:22,798 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.232e+02 2.568e+02 3.025e+02 1.134e+03, threshold=5.136e+02, percent-clipped=4.0
+2024-08-25 04:48:27,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=28848.0, ans=0.0
+2024-08-25 04:48:47,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=28954.666666666668, ans=0.125
+2024-08-25 04:48:48,325 INFO [train.py:1114] (3/4) Epoch 3, batch 450, loss[loss=0.3278, simple_loss=0.3502, pruned_loss=0.1095, ctc_loss=0.2157, over 19607.00 frames. ], tot_loss[loss=0.3462, simple_loss=0.3595, pruned_loss=0.1209, ctc_loss=0.2275, over 3449635.25 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:49:04,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=29008.0, ans=0.004563478260869565
+2024-08-25 04:49:07,113 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:49:08,326 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=29008.0, ans=0.125
+2024-08-25 04:49:12,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=29061.333333333332, ans=0.125
+2024-08-25 04:49:22,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29061.333333333332, ans=0.1
+2024-08-25 04:49:35,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=29114.666666666668, ans=0.0
+2024-08-25 04:50:09,364 INFO [train.py:1114] (3/4) Epoch 3, batch 500, loss[loss=0.3872, simple_loss=0.3863, pruned_loss=0.1406, ctc_loss=0.2674, over 19696.00 frames. ], tot_loss[loss=0.3455, simple_loss=0.3585, pruned_loss=0.1209, ctc_loss=0.227, over 3545514.73 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 04:50:12,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=29221.333333333332, ans=0.125
+2024-08-25 04:50:56,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=29328.0, ans=0.1
+2024-08-25 04:51:09,139 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.370e+02 2.734e+02 3.745e+02 5.336e+02, threshold=5.469e+02, percent-clipped=1.0
+2024-08-25 04:51:20,293 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=29434.666666666668, ans=0.0
+2024-08-25 04:51:28,436 INFO [train.py:1114] (3/4) Epoch 3, batch 550, loss[loss=0.3999, simple_loss=0.3967, pruned_loss=0.1447, ctc_loss=0.2838, over 19297.00 frames. ], tot_loss[loss=0.3442, simple_loss=0.3579, pruned_loss=0.1201, ctc_loss=0.2256, over 3608395.42 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:51:42,844 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 04:51:47,941 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.05 vs. limit=22.5
+2024-08-25 04:51:50,928 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=29594.666666666668, ans=0.125
+2024-08-25 04:51:55,653 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.07 vs. limit=15.0
+2024-08-25 04:52:54,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=29701.333333333332, ans=0.125
+2024-08-25 04:53:06,025 INFO [train.py:1114] (3/4) Epoch 3, batch 600, loss[loss=0.3771, simple_loss=0.3835, pruned_loss=0.1362, ctc_loss=0.2457, over 19344.00 frames. ], tot_loss[loss=0.3418, simple_loss=0.3566, pruned_loss=0.1189, ctc_loss=0.2231, over 3665281.45 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 32.0
+2024-08-25 04:53:06,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.14 vs. limit=15.0
+2024-08-25 04:53:12,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=29754.666666666668, ans=0.004401159420289855
+2024-08-25 04:53:13,783 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.49 vs. limit=15.0
+2024-08-25 04:53:17,340 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.25 vs. limit=15.0
+2024-08-25 04:53:24,289 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=29808.0, ans=0.5
+2024-08-25 04:53:45,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=29914.666666666668, ans=0.125
+2024-08-25 04:53:49,306 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.141e+02 2.536e+02 3.031e+02 6.622e+02, threshold=5.071e+02, percent-clipped=2.0
+2024-08-25 04:53:59,378 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=29968.0, ans=0.125
+2024-08-25 04:54:00,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=29968.0, ans=0.004354782608695653
+2024-08-25 04:54:05,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30021.333333333332, ans=0.1
+2024-08-25 04:54:06,081 INFO [train.py:1114] (3/4) Epoch 3, batch 650, loss[loss=0.3295, simple_loss=0.3517, pruned_loss=0.1113, ctc_loss=0.212, over 19747.00 frames. ], tot_loss[loss=0.3409, simple_loss=0.3558, pruned_loss=0.1185, ctc_loss=0.2225, over 3715143.58 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 32.0
+2024-08-25 04:54:13,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=30021.333333333332, ans=0.1
+2024-08-25 04:54:15,824 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=30021.333333333332, ans=0.09899494936611666
+2024-08-25 04:54:19,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:34,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 04:54:45,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30128.0, ans=0.1
+2024-08-25 04:54:51,031 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=30128.0, ans=0.0
+2024-08-25 04:54:51,091 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=30128.0, ans=0.125
+2024-08-25 04:54:51,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=30128.0, ans=0.125
+2024-08-25 04:54:57,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=30181.333333333332, ans=0.125
+2024-08-25 04:55:11,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30234.666666666668, ans=0.1
+2024-08-25 04:55:19,017 INFO [train.py:1114] (3/4) Epoch 3, batch 700, loss[loss=0.344, simple_loss=0.3571, pruned_loss=0.1205, ctc_loss=0.2251, over 19722.00 frames. ], tot_loss[loss=0.3424, simple_loss=0.3569, pruned_loss=0.1192, ctc_loss=0.2237, over 3747185.94 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:55:20,489 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=30288.0, ans=0.1
+2024-08-25 04:55:22,900 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=30288.0, ans=0.125
+2024-08-25 04:55:23,897 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=30288.0, ans=0.0
+2024-08-25 04:55:44,323 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=30394.666666666668, ans=0.025
+2024-08-25 04:55:45,513 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=30394.666666666668, ans=0.2
+2024-08-25 04:56:38,924 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.292e+02 2.520e+02 3.192e+02 5.203e+02, threshold=5.040e+02, percent-clipped=1.0
+2024-08-25 04:56:56,426 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30554.666666666668, ans=0.125
+2024-08-25 04:56:57,173 INFO [train.py:1114] (3/4) Epoch 3, batch 750, loss[loss=0.3526, simple_loss=0.3711, pruned_loss=0.1221, ctc_loss=0.2243, over 19504.00 frames. ], tot_loss[loss=0.3409, simple_loss=0.3558, pruned_loss=0.1185, ctc_loss=0.2225, over 3774363.66 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 32.0
+2024-08-25 04:56:57,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=30554.666666666668, ans=0.125
+2024-08-25 04:56:59,734 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=30554.666666666668, ans=0.125
+2024-08-25 04:57:11,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=30608.0, ans=0.125
+2024-08-25 04:57:37,567 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.69 vs. limit=22.5
+2024-08-25 04:57:59,145 INFO [train.py:1114] (3/4) Epoch 3, batch 800, loss[loss=0.3183, simple_loss=0.3336, pruned_loss=0.1106, ctc_loss=0.2047, over 19416.00 frames. ], tot_loss[loss=0.3398, simple_loss=0.3552, pruned_loss=0.1179, ctc_loss=0.2213, over 3794555.81 frames. ], batch size: 48, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:58:23,499 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=30928.0, ans=0.025
+2024-08-25 04:58:37,593 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.75 vs. limit=10.0
+2024-08-25 04:58:42,758 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.211e+02 2.622e+02 3.205e+02 5.257e+02, threshold=5.244e+02, percent-clipped=1.0
+2024-08-25 04:58:46,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=31034.666666666668, ans=0.2
+2024-08-25 04:58:54,236 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.98 vs. limit=22.5
+2024-08-25 04:58:59,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=31034.666666666668, ans=0.0
+2024-08-25 04:58:59,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=31034.666666666668, ans=0.2
+2024-08-25 04:59:01,979 INFO [train.py:1114] (3/4) Epoch 3, batch 850, loss[loss=0.3491, simple_loss=0.3694, pruned_loss=0.1185, ctc_loss=0.2299, over 19669.00 frames. ], tot_loss[loss=0.3402, simple_loss=0.3557, pruned_loss=0.1181, ctc_loss=0.2217, over 3814526.82 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 04:59:11,691 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=31088.0, ans=0.125
+2024-08-25 04:59:25,659 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.64 vs. limit=5.0
+2024-08-25 04:59:37,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=31248.0, ans=0.125
+2024-08-25 04:59:43,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=31248.0, ans=0.0
+2024-08-25 04:59:57,648 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31301.333333333332, ans=0.1
+2024-08-25 05:00:02,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=31301.333333333332, ans=0.125
+2024-08-25 05:00:04,399 INFO [train.py:1114] (3/4) Epoch 3, batch 900, loss[loss=0.3031, simple_loss=0.3222, pruned_loss=0.1033, ctc_loss=0.1937, over 19806.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3563, pruned_loss=0.1189, ctc_loss=0.2232, over 3817708.33 frames. ], batch size: 49, lr: 3.72e-02, grad_scale: 8.0
+2024-08-25 05:00:26,338 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.98 vs. limit=15.0
+2024-08-25 05:00:27,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=31408.0, ans=0.2
+2024-08-25 05:00:40,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=31461.333333333332, ans=0.125
+2024-08-25 05:00:47,483 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=31514.666666666668, ans=0.125
+2024-08-25 05:00:54,420 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.296e+02 2.736e+02 3.525e+02 1.528e+03, threshold=5.472e+02, percent-clipped=4.0
+2024-08-25 05:01:06,158 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=31568.0, ans=0.125
+2024-08-25 05:01:08,275 INFO [train.py:1114] (3/4) Epoch 3, batch 950, loss[loss=0.2841, simple_loss=0.3124, pruned_loss=0.0925, ctc_loss=0.1767, over 19519.00 frames. ], tot_loss[loss=0.3412, simple_loss=0.3561, pruned_loss=0.1186, ctc_loss=0.2228, over 3820935.01 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:01:09,882 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.48 vs. limit=22.5
+2024-08-25 05:01:10,652 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=31621.333333333332, ans=0.125
+2024-08-25 05:01:13,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=31621.333333333332, ans=0.125
+2024-08-25 05:01:14,801 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.28 vs. limit=22.5
+2024-08-25 05:01:19,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=31674.666666666668, ans=0.003983768115942029
+2024-08-25 05:01:52,042 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=31781.333333333332, ans=0.1
+2024-08-25 05:01:52,315 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.33 vs. limit=10.0
+2024-08-25 05:01:53,167 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=31781.333333333332, ans=0.015
+2024-08-25 05:01:55,739 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=31834.666666666668, ans=0.0
+2024-08-25 05:02:07,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=31888.0, ans=0.2
+2024-08-25 05:02:08,113 INFO [train.py:1114] (3/4) Epoch 3, batch 1000, loss[loss=0.3313, simple_loss=0.3407, pruned_loss=0.1182, ctc_loss=0.2139, over 19866.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3572, pruned_loss=0.1195, ctc_loss=0.2244, over 3817290.58 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 8.0
+2024-08-25 05:02:13,693 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.47 vs. limit=22.5
+2024-08-25 05:02:30,301 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.42 vs. limit=15.0
+2024-08-25 05:02:34,922 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=31994.666666666668, ans=0.1
+2024-08-25 05:02:41,385 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.25 vs. limit=22.5
+2024-08-25 05:02:56,471 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.163e+02 2.492e+02 3.027e+02 5.724e+02, threshold=4.983e+02, percent-clipped=1.0
+2024-08-25 05:03:06,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=32101.333333333332, ans=0.125
+2024-08-25 05:03:13,725 INFO [train.py:1114] (3/4) Epoch 3, batch 1050, loss[loss=0.3691, simple_loss=0.3704, pruned_loss=0.1356, ctc_loss=0.2415, over 19833.00 frames. ], tot_loss[loss=0.3424, simple_loss=0.3566, pruned_loss=0.1193, ctc_loss=0.2241, over 3823057.75 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:03:14,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=32154.666666666668, ans=0.1
+2024-08-25 05:03:23,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=32154.666666666668, ans=0.2
+2024-08-25 05:04:02,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=32208.0, ans=0.0
+2024-08-25 05:04:14,149 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=32261.333333333332, ans=0.0
+2024-08-25 05:05:02,461 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=32368.0, ans=0.125
+2024-08-25 05:05:04,405 INFO [train.py:1114] (3/4) Epoch 3, batch 1100, loss[loss=0.3154, simple_loss=0.3372, pruned_loss=0.1058, ctc_loss=0.2053, over 19594.00 frames. ], tot_loss[loss=0.3407, simple_loss=0.3558, pruned_loss=0.1183, ctc_loss=0.2226, over 3830355.15 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 8.0
+2024-08-25 05:06:00,567 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.355e+02 2.517e+02 3.019e+02 4.945e+02, threshold=5.033e+02, percent-clipped=0.0
+2024-08-25 05:06:03,172 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=32634.666666666668, ans=0.1
+2024-08-25 05:06:23,030 INFO [train.py:1114] (3/4) Epoch 3, batch 1150, loss[loss=0.3148, simple_loss=0.3414, pruned_loss=0.105, ctc_loss=0.1958, over 19570.00 frames. ], tot_loss[loss=0.3405, simple_loss=0.3554, pruned_loss=0.1183, ctc_loss=0.2223, over 3829999.35 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 8.0
+2024-08-25 05:06:53,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=32794.666666666664, ans=0.125
+2024-08-25 05:07:07,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=32848.0, ans=0.0037286956521739127
+2024-08-25 05:07:11,667 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.53 vs. limit=12.0
+2024-08-25 05:07:32,065 INFO [train.py:1114] (3/4) Epoch 3, batch 1200, loss[loss=0.3661, simple_loss=0.3807, pruned_loss=0.1275, ctc_loss=0.2411, over 19848.00 frames. ], tot_loss[loss=0.3409, simple_loss=0.3559, pruned_loss=0.1184, ctc_loss=0.2226, over 3825593.25 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:07:32,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=32954.666666666664, ans=0.0037055072463768124
+2024-08-25 05:07:34,884 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=32954.666666666664, ans=0.0
+2024-08-25 05:07:50,088 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=33008.0, ans=0.04949747468305833
+2024-08-25 05:08:01,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=33061.333333333336, ans=0.125
+2024-08-25 05:08:19,682 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.128e+02 2.359e+02 2.757e+02 6.653e+02, threshold=4.718e+02, percent-clipped=2.0
+2024-08-25 05:08:22,212 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=33168.0, ans=0.125
+2024-08-25 05:08:30,374 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.19 vs. limit=15.0
+2024-08-25 05:08:35,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=33168.0, ans=0.2
+2024-08-25 05:08:38,022 INFO [train.py:1114] (3/4) Epoch 3, batch 1250, loss[loss=0.3761, simple_loss=0.3821, pruned_loss=0.1361, ctc_loss=0.2444, over 19540.00 frames. ], tot_loss[loss=0.3403, simple_loss=0.3563, pruned_loss=0.1178, ctc_loss=0.2216, over 3843355.26 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 16.0
+2024-08-25 05:09:15,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=33328.0, ans=0.2
+2024-08-25 05:09:29,754 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.78 vs. limit=15.0
+2024-08-25 05:09:36,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=33434.666666666664, ans=0.125
+2024-08-25 05:09:36,718 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.09 vs. limit=10.0
+2024-08-25 05:09:42,106 INFO [train.py:1114] (3/4) Epoch 3, batch 1300, loss[loss=0.4156, simple_loss=0.4035, pruned_loss=0.1568, ctc_loss=0.285, over 18785.00 frames. ], tot_loss[loss=0.3389, simple_loss=0.355, pruned_loss=0.1172, ctc_loss=0.2205, over 3846466.03 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:09:44,549 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=33488.0, ans=0.035
+2024-08-25 05:09:49,145 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=33488.0, ans=0.125
+2024-08-25 05:09:58,685 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=33541.333333333336, ans=0.025
+2024-08-25 05:10:03,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=33541.333333333336, ans=0.0
+2024-08-25 05:10:04,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=33594.666666666664, ans=0.003566376811594203
+2024-08-25 05:10:05,060 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=21.30 vs. limit=22.5
+2024-08-25 05:10:29,945 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.58 vs. limit=10.0
+2024-08-25 05:10:30,082 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.16 vs. limit=10.0
+2024-08-25 05:10:34,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=33594.666666666664, ans=0.0
+2024-08-25 05:10:36,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=33594.666666666664, ans=0.09899494936611666
+2024-08-25 05:10:39,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=33648.0, ans=0.015
+2024-08-25 05:10:41,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=33648.0, ans=0.0035547826086956523
+2024-08-25 05:10:48,148 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.161e+02 2.525e+02 2.896e+02 5.464e+02, threshold=5.050e+02, percent-clipped=3.0
+2024-08-25 05:11:02,303 INFO [train.py:1114] (3/4) Epoch 3, batch 1350, loss[loss=0.2848, simple_loss=0.3271, pruned_loss=0.08652, ctc_loss=0.1739, over 19785.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3538, pruned_loss=0.1164, ctc_loss=0.2191, over 3858148.22 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 16.0
+2024-08-25 05:11:10,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=33754.666666666664, ans=0.125
+2024-08-25 05:11:18,988 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.72 vs. limit=6.0
+2024-08-25 05:11:40,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=33808.0, ans=0.0
+2024-08-25 05:11:48,311 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.68 vs. limit=22.5
+2024-08-25 05:11:56,188 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=33861.333333333336, ans=0.125
+2024-08-25 05:11:57,582 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=33861.333333333336, ans=0.025
+2024-08-25 05:12:02,733 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.99 vs. limit=15.0
+2024-08-25 05:12:06,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=33914.666666666664, ans=0.05
+2024-08-25 05:12:20,994 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.15 vs. limit=15.0
+2024-08-25 05:12:26,313 INFO [train.py:1114] (3/4) Epoch 3, batch 1400, loss[loss=0.3144, simple_loss=0.3285, pruned_loss=0.1088, ctc_loss=0.207, over 19685.00 frames. ], tot_loss[loss=0.3358, simple_loss=0.3532, pruned_loss=0.1157, ctc_loss=0.2176, over 3865726.13 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 16.0
+2024-08-25 05:12:30,157 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=34021.333333333336, ans=0.125
+2024-08-25 05:13:31,980 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.210e+02 2.531e+02 3.096e+02 9.067e+02, threshold=5.062e+02, percent-clipped=2.0
+2024-08-25 05:13:32,709 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.37 vs. limit=12.0
+2024-08-25 05:14:14,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34234.666666666664, ans=0.1
+2024-08-25 05:14:21,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=34234.666666666664, ans=0.125
+2024-08-25 05:14:24,485 INFO [train.py:1114] (3/4) Epoch 3, batch 1450, loss[loss=0.3823, simple_loss=0.3842, pruned_loss=0.1387, ctc_loss=0.2577, over 19664.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3543, pruned_loss=0.1163, ctc_loss=0.2184, over 3863914.65 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:14:47,313 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=34341.333333333336, ans=0.5
+2024-08-25 05:14:49,365 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=34341.333333333336, ans=0.125
+2024-08-25 05:15:15,780 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.03 vs. limit=15.0
+2024-08-25 05:15:32,910 INFO [train.py:1114] (3/4) Epoch 3, batch 1500, loss[loss=0.3522, simple_loss=0.3639, pruned_loss=0.1236, ctc_loss=0.2331, over 19574.00 frames. ], tot_loss[loss=0.3376, simple_loss=0.3548, pruned_loss=0.1165, ctc_loss=0.2189, over 3864371.48 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 16.0
+2024-08-25 05:15:54,230 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.75 vs. limit=15.0
+2024-08-25 05:16:38,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=34661.333333333336, ans=0.125
+2024-08-25 05:16:38,472 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=34661.333333333336, ans=0.125
+2024-08-25 05:16:51,231 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.151e+02 2.498e+02 3.151e+02 6.810e+02, threshold=4.996e+02, percent-clipped=2.0
+2024-08-25 05:19:31,703 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=34768.0, ans=0.125
+2024-08-25 05:20:00,632 INFO [train.py:1114] (3/4) Epoch 3, batch 1550, loss[loss=0.3347, simple_loss=0.3613, pruned_loss=0.1129, ctc_loss=0.2057, over 19610.00 frames. ], tot_loss[loss=0.3376, simple_loss=0.3546, pruned_loss=0.1165, ctc_loss=0.2189, over 3849402.66 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 16.0
+2024-08-25 05:20:19,237 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.81 vs. limit=15.0
+2024-08-25 05:20:19,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=34874.666666666664, ans=0.0
+2024-08-25 05:20:20,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=34874.666666666664, ans=0.125
+2024-08-25 05:20:32,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=34928.0, ans=0.125
+2024-08-25 05:21:29,593 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.11 vs. limit=22.5
+2024-08-25 05:22:04,968 INFO [train.py:1114] (3/4) Epoch 3, batch 1600, loss[loss=0.3409, simple_loss=0.3601, pruned_loss=0.1173, ctc_loss=0.218, over 19838.00 frames. ], tot_loss[loss=0.338, simple_loss=0.3545, pruned_loss=0.1169, ctc_loss=0.2194, over 3838381.03 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 05:22:12,863 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=35088.0, ans=0.125
+2024-08-25 05:22:57,555 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.61 vs. limit=15.0
+2024-08-25 05:23:43,086 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.193e+02 2.529e+02 3.233e+02 6.645e+02, threshold=5.059e+02, percent-clipped=2.0
+2024-08-25 05:23:43,480 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=35248.0, ans=0.125
+2024-08-25 05:24:22,997 INFO [train.py:1114] (3/4) Epoch 3, batch 1650, loss[loss=0.2871, simple_loss=0.3312, pruned_loss=0.08768, ctc_loss=0.1691, over 19671.00 frames. ], tot_loss[loss=0.3381, simple_loss=0.3545, pruned_loss=0.1169, ctc_loss=0.2195, over 3833842.25 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 05:24:31,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=35354.666666666664, ans=0.0
+2024-08-25 05:25:11,217 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=35408.0, ans=0.2
+2024-08-25 05:25:24,632 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=35461.333333333336, ans=0.0031605797101449274
+2024-08-25 05:25:27,828 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=35461.333333333336, ans=0.2
+2024-08-25 05:26:02,557 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.04 vs. limit=22.5
+2024-08-25 05:26:16,042 INFO [train.py:1114] (3/4) Epoch 3, batch 1700, loss[loss=0.3123, simple_loss=0.324, pruned_loss=0.1081, ctc_loss=0.2107, over 19677.00 frames. ], tot_loss[loss=0.3366, simple_loss=0.3536, pruned_loss=0.1162, ctc_loss=0.2177, over 3847148.22 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:26:18,017 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.59 vs. limit=15.0
+2024-08-25 05:26:20,053 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.98 vs. limit=22.5
+2024-08-25 05:26:26,392 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=35674.666666666664, ans=0.125
+2024-08-25 05:26:30,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=35674.666666666664, ans=0.2
+2024-08-25 05:27:02,478 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:27:10,191 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.342e+02 2.819e+02 3.429e+02 5.215e+02, threshold=5.637e+02, percent-clipped=1.0
+2024-08-25 05:27:10,376 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=35781.333333333336, ans=0.003091014492753622
+2024-08-25 05:27:23,555 INFO [train.py:1114] (3/4) Epoch 3, batch 1750, loss[loss=0.3217, simple_loss=0.3322, pruned_loss=0.1145, ctc_loss=0.2058, over 19625.00 frames. ], tot_loss[loss=0.3359, simple_loss=0.3533, pruned_loss=0.1158, ctc_loss=0.2171, over 3852016.43 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 05:27:25,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=35888.0, ans=0.125
+2024-08-25 05:27:25,556 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.58 vs. limit=22.5
+2024-08-25 05:27:40,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=35941.333333333336, ans=0.125
+2024-08-25 05:27:40,691 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.96 vs. limit=22.5
+2024-08-25 05:28:38,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36101.333333333336, ans=0.1
+2024-08-25 05:29:19,789 INFO [train.py:1114] (3/4) Epoch 3, batch 1800, loss[loss=0.3556, simple_loss=0.3749, pruned_loss=0.1215, ctc_loss=0.2333, over 19605.00 frames. ], tot_loss[loss=0.3357, simple_loss=0.3534, pruned_loss=0.1157, ctc_loss=0.217, over 3853205.65 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:31:33,855 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36208.0, ans=0.1
+2024-08-25 05:31:47,496 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=36314.666666666664, ans=0.125
+2024-08-25 05:31:58,650 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.106e+02 2.466e+02 3.299e+02 1.077e+03, threshold=4.933e+02, percent-clipped=1.0
+2024-08-25 05:32:10,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=36368.0, ans=22.5
+2024-08-25 05:32:11,706 INFO [train.py:1114] (3/4) Epoch 3, batch 1850, loss[loss=0.3554, simple_loss=0.3659, pruned_loss=0.1247, ctc_loss=0.239, over 19592.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3521, pruned_loss=0.115, ctc_loss=0.2158, over 3857174.83 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 16.0
+2024-08-25 05:32:14,519 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=36421.333333333336, ans=0.125
+2024-08-25 05:32:14,523 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=36421.333333333336, ans=0.125
+2024-08-25 05:32:19,326 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.14 vs. limit=22.5
+2024-08-25 05:32:19,557 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.33 vs. limit=12.0
+2024-08-25 05:33:05,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=36634.666666666664, ans=0.035
+2024-08-25 05:33:06,372 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=36634.666666666664, ans=0.002905507246376812
+2024-08-25 05:33:12,872 INFO [train.py:1114] (3/4) Epoch 3, batch 1900, loss[loss=0.3653, simple_loss=0.3783, pruned_loss=0.1276, ctc_loss=0.2429, over 19621.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.3531, pruned_loss=0.1156, ctc_loss=0.2169, over 3861414.14 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 16.0
+2024-08-25 05:33:15,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=36688.0, ans=0.125
+2024-08-25 05:33:40,801 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36794.666666666664, ans=0.1
+2024-08-25 05:34:05,261 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.260e+02 2.560e+02 3.105e+02 5.689e+02, threshold=5.120e+02, percent-clipped=2.0
+2024-08-25 05:34:05,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.whiten.whitening_limit, batch_count=36901.333333333336, ans=12.0
+2024-08-25 05:34:12,493 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=36901.333333333336, ans=0.125
+2024-08-25 05:34:47,823 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=36901.333333333336, ans=0.0
+2024-08-25 05:34:49,850 INFO [train.py:1114] (3/4) Epoch 3, batch 1950, loss[loss=0.333, simple_loss=0.3441, pruned_loss=0.1163, ctc_loss=0.223, over 19586.00 frames. ], tot_loss[loss=0.3369, simple_loss=0.3547, pruned_loss=0.1161, ctc_loss=0.2174, over 3869991.40 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 16.0
+2024-08-25 05:36:31,725 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.17 vs. limit=15.0
+2024-08-25 05:36:45,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37114.666666666664, ans=0.125
+2024-08-25 05:36:59,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=37168.0, ans=0.125
+2024-08-25 05:37:04,540 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=37168.0, ans=0.2
+2024-08-25 05:37:09,013 INFO [train.py:1114] (3/4) Epoch 3, batch 2000, loss[loss=0.2943, simple_loss=0.3121, pruned_loss=0.1008, ctc_loss=0.1875, over 19631.00 frames. ], tot_loss[loss=0.3377, simple_loss=0.3552, pruned_loss=0.1164, ctc_loss=0.2182, over 3854736.13 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 05:37:10,806 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.65 vs. limit=15.0
+2024-08-25 05:37:35,097 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=37328.0, ans=0.002754782608695652
+2024-08-25 05:37:45,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=37381.333333333336, ans=0.125
+2024-08-25 05:37:57,169 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.61 vs. limit=6.0
+2024-08-25 05:38:02,418 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.243e+02 2.650e+02 3.292e+02 1.299e+03, threshold=5.300e+02, percent-clipped=6.0
+2024-08-25 05:38:10,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=37434.666666666664, ans=15.0
+2024-08-25 05:38:13,916 INFO [train.py:1114] (3/4) Epoch 3, batch 2050, loss[loss=0.2891, simple_loss=0.315, pruned_loss=0.0966, ctc_loss=0.1752, over 19692.00 frames. ], tot_loss[loss=0.3357, simple_loss=0.3534, pruned_loss=0.1157, ctc_loss=0.2168, over 3850078.92 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:38:47,930 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.07 vs. limit=15.0
+2024-08-25 05:39:04,545 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=37701.333333333336, ans=0.025
+2024-08-25 05:39:07,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37701.333333333336, ans=0.125
+2024-08-25 05:39:40,803 INFO [train.py:1114] (3/4) Epoch 3, batch 2100, loss[loss=0.3438, simple_loss=0.3603, pruned_loss=0.1176, ctc_loss=0.2301, over 19758.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3529, pruned_loss=0.1153, ctc_loss=0.2163, over 3857945.32 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 05:40:00,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=37808.0, ans=0.002650434782608696
+2024-08-25 05:40:07,615 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=37808.0, ans=0.125
+2024-08-25 05:40:15,285 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=37808.0, ans=0.0
+2024-08-25 05:40:42,308 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.72 vs. limit=15.0
+2024-08-25 05:40:45,258 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=37861.333333333336, ans=0.0026388405797101453
+2024-08-25 05:40:58,553 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.072e+02 2.352e+02 2.718e+02 4.903e+02, threshold=4.703e+02, percent-clipped=0.0
+2024-08-25 05:41:00,069 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=37968.0, ans=0.125
+2024-08-25 05:41:05,154 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.61 vs. limit=22.5
+2024-08-25 05:41:05,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=37968.0, ans=0.125
+2024-08-25 05:41:10,091 INFO [train.py:1114] (3/4) Epoch 3, batch 2150, loss[loss=0.2877, simple_loss=0.324, pruned_loss=0.09031, ctc_loss=0.1769, over 19869.00 frames. ], tot_loss[loss=0.3338, simple_loss=0.352, pruned_loss=0.1148, ctc_loss=0.2152, over 3869275.29 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 05:41:14,797 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=38021.333333333336, ans=0.2
+2024-08-25 05:41:18,600 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.32 vs. limit=22.5
+2024-08-25 05:41:20,829 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.30 vs. limit=15.0
+2024-08-25 05:41:24,295 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.59 vs. limit=22.5
+2024-08-25 05:42:24,843 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=38181.333333333336, ans=0.125
+2024-08-25 05:42:42,871 INFO [train.py:1114] (3/4) Epoch 3, batch 2200, loss[loss=0.3519, simple_loss=0.3714, pruned_loss=0.1207, ctc_loss=0.2276, over 19596.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.3518, pruned_loss=0.1143, ctc_loss=0.2142, over 3867911.36 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:42:43,126 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=38288.0, ans=0.0025460869565217398
+2024-08-25 05:42:52,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=38288.0, ans=0.015
+2024-08-25 05:43:02,881 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=38341.333333333336, ans=0.07
+2024-08-25 05:43:11,762 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38394.666666666664, ans=0.1
+2024-08-25 05:43:23,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=38448.0, ans=0.0
+2024-08-25 05:43:29,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=38448.0, ans=0.09899494936611666
+2024-08-25 05:43:34,311 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.197e+02 2.629e+02 2.994e+02 6.107e+02, threshold=5.259e+02, percent-clipped=1.0
+2024-08-25 05:43:51,063 INFO [train.py:1114] (3/4) Epoch 3, batch 2250, loss[loss=0.319, simple_loss=0.3472, pruned_loss=0.1057, ctc_loss=0.1986, over 19614.00 frames. ], tot_loss[loss=0.3327, simple_loss=0.3514, pruned_loss=0.1142, ctc_loss=0.2139, over 3868294.23 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 05:43:51,711 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.90 vs. limit=6.0
+2024-08-25 05:44:16,923 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.44 vs. limit=22.5
+2024-08-25 05:44:20,818 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.80 vs. limit=6.0
+2024-08-25 05:44:34,709 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=38661.333333333336, ans=0.1
+2024-08-25 05:44:42,097 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=38661.333333333336, ans=0.0
+2024-08-25 05:44:50,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=38714.666666666664, ans=0.125
+2024-08-25 05:45:15,057 INFO [train.py:1114] (3/4) Epoch 3, batch 2300, loss[loss=0.2903, simple_loss=0.3166, pruned_loss=0.09609, ctc_loss=0.1796, over 19512.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3507, pruned_loss=0.1144, ctc_loss=0.2145, over 3860984.35 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:45:15,360 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=38821.333333333336, ans=0.0
+2024-08-25 05:45:19,913 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.81 vs. limit=15.0
+2024-08-25 05:45:23,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38821.333333333336, ans=0.1
+2024-08-25 05:46:12,500 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=38928.0, ans=0.2
+2024-08-25 05:46:21,583 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=38981.333333333336, ans=0.5
+2024-08-25 05:47:15,679 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.233e+02 2.542e+02 3.133e+02 7.552e+02, threshold=5.083e+02, percent-clipped=3.0
+2024-08-25 05:47:23,739 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=39034.666666666664, ans=0.05
+2024-08-25 05:47:27,930 INFO [train.py:1114] (3/4) Epoch 3, batch 2350, loss[loss=0.3271, simple_loss=0.3545, pruned_loss=0.1114, ctc_loss=0.1921, over 19691.00 frames. ], tot_loss[loss=0.3334, simple_loss=0.3511, pruned_loss=0.1149, ctc_loss=0.215, over 3863350.47 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 05:47:32,587 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=39088.0, ans=0.0
+2024-08-25 05:48:07,949 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=39248.0, ans=0.2
+2024-08-25 05:48:13,561 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=39301.333333333336, ans=0.125
+2024-08-25 05:48:21,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=39301.333333333336, ans=0.0
+2024-08-25 05:48:24,937 INFO [train.py:1114] (3/4) Epoch 3, batch 2400, loss[loss=0.3348, simple_loss=0.3608, pruned_loss=0.113, ctc_loss=0.2069, over 19154.00 frames. ], tot_loss[loss=0.3353, simple_loss=0.353, pruned_loss=0.1156, ctc_loss=0.216, over 3857217.27 frames. ], batch size: 71, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 05:48:47,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=39461.333333333336, ans=0.125
+2024-08-25 05:49:04,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=39514.666666666664, ans=0.125
+2024-08-25 05:49:04,844 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=39514.666666666664, ans=0.025
+2024-08-25 05:49:10,300 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 2.241e+02 2.672e+02 3.161e+02 5.607e+02, threshold=5.344e+02, percent-clipped=4.0
+2024-08-25 05:49:26,439 INFO [train.py:1114] (3/4) Epoch 3, batch 2450, loss[loss=0.4617, simple_loss=0.4144, pruned_loss=0.1857, ctc_loss=0.344, over 13418.00 frames. ], tot_loss[loss=0.3446, simple_loss=0.3584, pruned_loss=0.1204, ctc_loss=0.2251, over 3731394.84 frames. ], batch size: 141, lr: 3.53e-02, grad_scale: 32.0
+2024-08-25 05:50:01,549 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.25 vs. limit=12.0
+2024-08-25 05:50:52,399 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.69 vs. limit=12.0
+2024-08-25 05:51:05,723 INFO [train.py:1114] (3/4) Epoch 4, batch 0, loss[loss=0.342, simple_loss=0.3501, pruned_loss=0.1227, ctc_loss=0.2215, over 19391.00 frames. ], tot_loss[loss=0.342, simple_loss=0.3501, pruned_loss=0.1227, ctc_loss=0.2215, over 19391.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:51:05,724 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 05:51:35,399 INFO [train.py:1146] (3/4) Epoch 4, validation: loss=0.2629, simple_loss=0.3337, pruned_loss=0.07032, ctc_loss=0.1284, over 944034.00 frames.
+2024-08-25 05:51:35,400 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 05:52:07,171 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=39936.0, ans=0.125
+2024-08-25 05:52:41,488 INFO [train.py:1114] (3/4) Epoch 4, batch 50, loss[loss=0.2953, simple_loss=0.3134, pruned_loss=0.1028, ctc_loss=0.1789, over 19726.00 frames. ], tot_loss[loss=0.337, simple_loss=0.3541, pruned_loss=0.1166, ctc_loss=0.2169, over 843670.46 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-25 05:52:47,060 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.147e+02 2.483e+02 2.920e+02 4.932e+02, threshold=4.967e+02, percent-clipped=0.0
+2024-08-25 05:53:07,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=40149.333333333336, ans=0.125
+2024-08-25 05:53:38,366 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=40202.666666666664, ans=0.125
+2024-08-25 05:53:57,661 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=40309.333333333336, ans=0.125
+2024-08-25 05:54:08,135 INFO [train.py:1114] (3/4) Epoch 4, batch 100, loss[loss=0.2934, simple_loss=0.3325, pruned_loss=0.0942, ctc_loss=0.1646, over 19704.00 frames. ], tot_loss[loss=0.3364, simple_loss=0.355, pruned_loss=0.1156, ctc_loss=0.2164, over 1498193.89 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-25 05:54:08,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40362.666666666664, ans=0.1
+2024-08-25 05:54:09,517 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=40362.666666666664, ans=0.2
+2024-08-25 05:54:27,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=40362.666666666664, ans=0.125
+2024-08-25 05:54:35,339 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=40416.0, ans=0.2
+2024-08-25 05:55:14,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=40469.333333333336, ans=0.0
+2024-08-25 05:55:55,485 INFO [train.py:1114] (3/4) Epoch 4, batch 150, loss[loss=0.3126, simple_loss=0.3176, pruned_loss=0.1134, ctc_loss=0.2022, over 19710.00 frames. ], tot_loss[loss=0.331, simple_loss=0.3507, pruned_loss=0.1132, ctc_loss=0.2122, over 2026939.58 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:55:56,482 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.033e+02 2.286e+02 2.661e+02 4.118e+02, threshold=4.571e+02, percent-clipped=0.0
+2024-08-25 05:56:13,743 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=40682.666666666664, ans=0.125
+2024-08-25 05:56:20,104 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.41 vs. limit=15.0
+2024-08-25 05:56:22,004 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=40736.0, ans=0.125
+2024-08-25 05:56:31,863 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=40736.0, ans=0.2
+2024-08-25 05:56:49,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=40789.333333333336, ans=0.125
+2024-08-25 05:56:51,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=40789.333333333336, ans=0.2
+2024-08-25 05:56:51,506 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=40789.333333333336, ans=0.125
+2024-08-25 05:56:52,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=40842.666666666664, ans=0.125
+2024-08-25 05:57:00,550 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=40842.666666666664, ans=0.125
+2024-08-25 05:57:01,863 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.14 vs. limit=15.0
+2024-08-25 05:57:04,737 INFO [train.py:1114] (3/4) Epoch 4, batch 200, loss[loss=0.4021, simple_loss=0.395, pruned_loss=0.1498, ctc_loss=0.2743, over 18226.00 frames. ], tot_loss[loss=0.3277, simple_loss=0.3486, pruned_loss=0.1116, ctc_loss=0.2091, over 2434884.14 frames. ], batch size: 85, lr: 3.28e-02, grad_scale: 32.0
+2024-08-25 05:57:23,838 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.08 vs. limit=15.0
+2024-08-25 05:57:27,156 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.26 vs. limit=10.0
+2024-08-25 05:57:40,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=40949.333333333336, ans=0.125
+2024-08-25 05:57:42,844 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:58:06,985 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=41056.0, ans=0.125
+2024-08-25 05:58:51,172 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=41109.333333333336, ans=0.125
+2024-08-25 05:58:56,561 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=41109.333333333336, ans=0.125
+2024-08-25 05:59:03,061 INFO [train.py:1114] (3/4) Epoch 4, batch 250, loss[loss=0.3417, simple_loss=0.3625, pruned_loss=0.1184, ctc_loss=0.2103, over 19431.00 frames. ], tot_loss[loss=0.325, simple_loss=0.3469, pruned_loss=0.1102, ctc_loss=0.2069, over 2755249.50 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 05:59:04,093 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.098e+02 2.387e+02 2.939e+02 4.251e+02, threshold=4.774e+02, percent-clipped=0.0
+2024-08-25 05:59:26,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=41216.0, ans=0.125
+2024-08-25 05:59:34,252 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=41269.333333333336, ans=0.0
+2024-08-25 05:59:46,172 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=41269.333333333336, ans=0.125
+2024-08-25 05:59:48,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=41322.666666666664, ans=0.125
+2024-08-25 05:59:54,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=41322.666666666664, ans=0.0
+2024-08-25 06:00:05,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=41376.0, ans=0.1
+2024-08-25 06:00:10,352 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.93 vs. limit=10.0
+2024-08-25 06:00:14,169 INFO [train.py:1114] (3/4) Epoch 4, batch 300, loss[loss=0.3111, simple_loss=0.3348, pruned_loss=0.1047, ctc_loss=0.1952, over 19531.00 frames. ], tot_loss[loss=0.3232, simple_loss=0.3453, pruned_loss=0.1094, ctc_loss=0.2058, over 3000480.96 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-25 06:00:54,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-25 06:00:54,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-25 06:01:16,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=41642.666666666664, ans=0.0
+2024-08-25 06:01:19,390 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.30 vs. limit=15.0
+2024-08-25 06:01:27,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=41696.0, ans=0.1
+2024-08-25 06:01:35,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=41696.0, ans=0.1
+2024-08-25 06:01:36,621 INFO [train.py:1114] (3/4) Epoch 4, batch 350, loss[loss=0.2768, simple_loss=0.3085, pruned_loss=0.08896, ctc_loss=0.1678, over 19754.00 frames. ], tot_loss[loss=0.322, simple_loss=0.3449, pruned_loss=0.1087, ctc_loss=0.2046, over 3189928.60 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:01:37,789 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.143e+02 2.517e+02 2.887e+02 6.595e+02, threshold=5.034e+02, percent-clipped=1.0
+2024-08-25 06:01:39,379 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=41696.0, ans=0.05
+2024-08-25 06:01:46,500 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=41696.0, ans=0.1
+2024-08-25 06:01:49,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=41749.333333333336, ans=0.125
+2024-08-25 06:02:06,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=41802.666666666664, ans=0.0
+2024-08-25 06:02:07,500 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=41802.666666666664, ans=0.0
+2024-08-25 06:02:38,783 INFO [train.py:1114] (3/4) Epoch 4, batch 400, loss[loss=0.3066, simple_loss=0.3373, pruned_loss=0.1012, ctc_loss=0.1842, over 19504.00 frames. ], tot_loss[loss=0.3215, simple_loss=0.3448, pruned_loss=0.1083, ctc_loss=0.2037, over 3342217.39 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-25 06:02:48,158 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=41962.666666666664, ans=0.001747246376811595
+2024-08-25 06:03:06,469 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.02 vs. limit=22.5
+2024-08-25 06:03:14,950 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=42069.333333333336, ans=0.125
+2024-08-25 06:03:24,475 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=42122.666666666664, ans=0.125
+2024-08-25 06:04:04,056 INFO [train.py:1114] (3/4) Epoch 4, batch 450, loss[loss=0.351, simple_loss=0.3675, pruned_loss=0.1207, ctc_loss=0.2324, over 19615.00 frames. ], tot_loss[loss=0.3219, simple_loss=0.3449, pruned_loss=0.1087, ctc_loss=0.2041, over 3450482.58 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:04:06,524 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.107e+02 2.479e+02 2.897e+02 5.564e+02, threshold=4.958e+02, percent-clipped=2.0
+2024-08-25 06:04:12,709 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=42229.333333333336, ans=0.125
+2024-08-25 06:04:13,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42229.333333333336, ans=0.1
+2024-08-25 06:04:32,530 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=42336.0, ans=0.125
+2024-08-25 06:04:36,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=42336.0, ans=0.125
+2024-08-25 06:04:39,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=42336.0, ans=0.1
+2024-08-25 06:04:53,930 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=42336.0, ans=0.125
+2024-08-25 06:04:55,046 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=42389.333333333336, ans=0.0016544927536231869
+2024-08-25 06:05:02,016 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=42389.333333333336, ans=0.125
+2024-08-25 06:05:03,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42389.333333333336, ans=0.1
+2024-08-25 06:05:15,385 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.60 vs. limit=10.0
+2024-08-25 06:05:32,244 INFO [train.py:1114] (3/4) Epoch 4, batch 500, loss[loss=0.3389, simple_loss=0.3669, pruned_loss=0.1133, ctc_loss=0.2106, over 19663.00 frames. ], tot_loss[loss=0.3198, simple_loss=0.3435, pruned_loss=0.1077, ctc_loss=0.2021, over 3545452.44 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 32.0
+2024-08-25 06:05:42,036 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=42496.0, ans=0.125
+2024-08-25 06:05:49,417 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.98 vs. limit=22.5
+2024-08-25 06:05:57,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=42602.666666666664, ans=0.0
+2024-08-25 06:06:00,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=42602.666666666664, ans=0.1
+2024-08-25 06:06:28,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=42709.333333333336, ans=0.025
+2024-08-25 06:06:41,087 INFO [train.py:1114] (3/4) Epoch 4, batch 550, loss[loss=0.313, simple_loss=0.3414, pruned_loss=0.1038, ctc_loss=0.1923, over 19267.00 frames. ], tot_loss[loss=0.3197, simple_loss=0.3435, pruned_loss=0.1076, ctc_loss=0.202, over 3608723.52 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:06:44,779 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.027e+02 2.416e+02 2.881e+02 5.051e+02, threshold=4.833e+02, percent-clipped=1.0
+2024-08-25 06:06:45,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=42762.666666666664, ans=0.1
+2024-08-25 06:06:46,289 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=42762.666666666664, ans=0.125
+2024-08-25 06:06:55,649 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.16 vs. limit=15.0
+2024-08-25 06:07:07,181 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=42869.333333333336, ans=0.0
+2024-08-25 06:07:17,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=42869.333333333336, ans=0.07
+2024-08-25 06:07:18,327 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.03 vs. limit=15.0
+2024-08-25 06:07:23,976 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=42922.666666666664, ans=0.2
+2024-08-25 06:07:40,086 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=42976.0, ans=0.2
+2024-08-25 06:07:41,431 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=42976.0, ans=0.2
+2024-08-25 06:07:42,945 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.78 vs. limit=6.0
+2024-08-25 06:07:50,586 INFO [train.py:1114] (3/4) Epoch 4, batch 600, loss[loss=0.3903, simple_loss=0.385, pruned_loss=0.1452, ctc_loss=0.263, over 19423.00 frames. ], tot_loss[loss=0.3188, simple_loss=0.3432, pruned_loss=0.107, ctc_loss=0.2011, over 3665691.44 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 16.0
+2024-08-25 06:07:52,632 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.09 vs. limit=5.0
+2024-08-25 06:07:53,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=43029.333333333336, ans=0.125
+2024-08-25 06:08:15,985 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=43136.0, ans=0.0014921739130434788
+2024-08-25 06:08:43,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=43242.666666666664, ans=0.125
+2024-08-25 06:08:57,410 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=43242.666666666664, ans=0.125
+2024-08-25 06:09:00,685 INFO [train.py:1114] (3/4) Epoch 4, batch 650, loss[loss=0.3069, simple_loss=0.342, pruned_loss=0.09763, ctc_loss=0.1915, over 19783.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3421, pruned_loss=0.1063, ctc_loss=0.1995, over 3716000.01 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 16.0
+2024-08-25 06:09:15,859 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.140e+02 2.544e+02 3.023e+02 7.017e+02, threshold=5.088e+02, percent-clipped=9.0
+2024-08-25 06:09:23,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=43349.333333333336, ans=0.125
+2024-08-25 06:09:49,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=43402.666666666664, ans=0.125
+2024-08-25 06:10:14,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=43509.333333333336, ans=0.2
+2024-08-25 06:10:16,797 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=43509.333333333336, ans=0.2
+2024-08-25 06:10:18,932 INFO [train.py:1114] (3/4) Epoch 4, batch 700, loss[loss=0.2677, simple_loss=0.3108, pruned_loss=0.08223, ctc_loss=0.1506, over 19732.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3427, pruned_loss=0.1066, ctc_loss=0.2006, over 3748854.21 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:10:27,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=43562.666666666664, ans=0.0
+2024-08-25 06:10:34,565 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=10.39 vs. limit=15.0
+2024-08-25 06:10:37,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=43616.0, ans=0.125
+2024-08-25 06:10:50,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=43669.333333333336, ans=0.125
+2024-08-25 06:10:50,420 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=43669.333333333336, ans=0.0
+2024-08-25 06:10:53,038 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.35 vs. limit=22.5
+2024-08-25 06:11:09,209 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.68 vs. limit=12.0
+2024-08-25 06:11:18,647 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=43776.0, ans=0.125
+2024-08-25 06:11:21,640 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=43776.0, ans=0.2
+2024-08-25 06:11:23,834 INFO [train.py:1114] (3/4) Epoch 4, batch 750, loss[loss=0.3695, simple_loss=0.3745, pruned_loss=0.1322, ctc_loss=0.2506, over 19507.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3419, pruned_loss=0.1061, ctc_loss=0.1995, over 3775312.06 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 16.0
+2024-08-25 06:11:28,681 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.141e+02 2.481e+02 2.931e+02 4.472e+02, threshold=4.962e+02, percent-clipped=0.0
+2024-08-25 06:11:39,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=43882.666666666664, ans=0.125
+2024-08-25 06:11:39,821 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.90 vs. limit=6.0
+2024-08-25 06:11:52,098 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=43882.666666666664, ans=0.001329855072463769
+2024-08-25 06:12:05,503 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=43989.333333333336, ans=0.125
+2024-08-25 06:12:20,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=44042.666666666664, ans=0.2
+2024-08-25 06:12:27,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=44042.666666666664, ans=0.2
+2024-08-25 06:12:29,312 INFO [train.py:1114] (3/4) Epoch 4, batch 800, loss[loss=0.2853, simple_loss=0.3064, pruned_loss=0.09612, ctc_loss=0.18, over 19817.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3417, pruned_loss=0.106, ctc_loss=0.1994, over 3796139.58 frames. ], batch size: 49, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:12:29,457 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=44096.0, ans=0.0
+2024-08-25 06:12:39,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.54 vs. limit=6.0
+2024-08-25 06:12:50,889 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=44149.333333333336, ans=0.05
+2024-08-25 06:12:51,002 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=44149.333333333336, ans=0.125
+2024-08-25 06:12:55,796 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=44202.666666666664, ans=0.125
+2024-08-25 06:13:07,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=44256.0, ans=0.0012486956521739132
+2024-08-25 06:13:13,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=44256.0, ans=15.0
+2024-08-25 06:13:19,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=44309.333333333336, ans=0.125
+2024-08-25 06:13:28,037 INFO [train.py:1114] (3/4) Epoch 4, batch 850, loss[loss=0.3354, simple_loss=0.3603, pruned_loss=0.1127, ctc_loss=0.2128, over 19650.00 frames. ], tot_loss[loss=0.3161, simple_loss=0.3411, pruned_loss=0.1057, ctc_loss=0.1989, over 3815674.58 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 32.0
+2024-08-25 06:13:28,697 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.57 vs. limit=15.0
+2024-08-25 06:13:31,252 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.074e+02 2.402e+02 2.888e+02 5.555e+02, threshold=4.804e+02, percent-clipped=1.0
+2024-08-25 06:13:39,677 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=44416.0, ans=0.1
+2024-08-25 06:13:51,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=44469.333333333336, ans=15.0
+2024-08-25 06:14:00,623 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=44469.333333333336, ans=0.125
+2024-08-25 06:14:03,466 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.39 vs. limit=10.0
+2024-08-25 06:14:09,195 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.20 vs. limit=15.0
+2024-08-25 06:14:11,066 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=44522.666666666664, ans=0.2
+2024-08-25 06:14:32,266 INFO [train.py:1114] (3/4) Epoch 4, batch 900, loss[loss=0.3161, simple_loss=0.332, pruned_loss=0.1102, ctc_loss=0.1999, over 19805.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3421, pruned_loss=0.1066, ctc_loss=0.2006, over 3819998.93 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:14:33,613 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=44629.333333333336, ans=0.125
+2024-08-25 06:14:56,452 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:15:29,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=44842.666666666664, ans=0.07
+2024-08-25 06:15:35,279 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=44842.666666666664, ans=0.0
+2024-08-25 06:15:38,553 INFO [train.py:1114] (3/4) Epoch 4, batch 950, loss[loss=0.2728, simple_loss=0.315, pruned_loss=0.08352, ctc_loss=0.159, over 19522.00 frames. ], tot_loss[loss=0.318, simple_loss=0.3423, pruned_loss=0.1067, ctc_loss=0.2006, over 3822507.68 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 32.0
+2024-08-25 06:15:42,138 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.101e+02 2.364e+02 2.735e+02 6.196e+02, threshold=4.728e+02, percent-clipped=2.0
+2024-08-25 06:15:59,824 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.49 vs. limit=15.0
+2024-08-25 06:16:12,476 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=45002.666666666664, ans=0.125
+2024-08-25 06:16:21,072 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.79 vs. limit=15.0
+2024-08-25 06:16:21,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=45056.0, ans=0.125
+2024-08-25 06:16:35,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=45109.333333333336, ans=0.125
+2024-08-25 06:16:42,346 INFO [train.py:1114] (3/4) Epoch 4, batch 1000, loss[loss=0.3016, simple_loss=0.3237, pruned_loss=0.1024, ctc_loss=0.1866, over 19859.00 frames. ], tot_loss[loss=0.3198, simple_loss=0.3437, pruned_loss=0.1075, ctc_loss=0.202, over 3818724.57 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 32.0
+2024-08-25 06:17:06,952 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=45216.0, ans=0.0
+2024-08-25 06:17:48,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=45322.666666666664, ans=0.0010168115942028998
+2024-08-25 06:18:00,176 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=45376.0, ans=0.0
+2024-08-25 06:18:10,597 INFO [train.py:1114] (3/4) Epoch 4, batch 1050, loss[loss=0.348, simple_loss=0.3686, pruned_loss=0.1196, ctc_loss=0.2207, over 19842.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3424, pruned_loss=0.1069, ctc_loss=0.201, over 3824088.49 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-25 06:18:26,178 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.982e+02 2.200e+02 2.634e+02 5.388e+02, threshold=4.401e+02, percent-clipped=1.0
+2024-08-25 06:18:26,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45429.333333333336, ans=0.1
+2024-08-25 06:18:27,759 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=45429.333333333336, ans=0.1
+2024-08-25 06:18:31,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=45429.333333333336, ans=0.125
+2024-08-25 06:18:41,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=45482.666666666664, ans=0.125
+2024-08-25 06:18:44,949 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.45 vs. limit=10.0
+2024-08-25 06:18:53,838 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.52 vs. limit=6.0
+2024-08-25 06:19:04,937 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.47 vs. limit=12.0
+2024-08-25 06:19:30,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=45642.666666666664, ans=0.0
+2024-08-25 06:19:31,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45642.666666666664, ans=0.1
+2024-08-25 06:19:36,308 INFO [train.py:1114] (3/4) Epoch 4, batch 1100, loss[loss=0.2936, simple_loss=0.3243, pruned_loss=0.09547, ctc_loss=0.1798, over 19586.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.342, pruned_loss=0.1062, ctc_loss=0.1998, over 3832440.39 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:19:39,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=45696.0, ans=0.125
+2024-08-25 06:19:44,050 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.14 vs. limit=12.0
+2024-08-25 06:19:48,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=45696.0, ans=0.2
+2024-08-25 06:19:53,079 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=5.188e-03
+2024-08-25 06:19:57,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=45749.333333333336, ans=0.125
+2024-08-25 06:20:09,389 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.94 vs. limit=15.0
+2024-08-25 06:20:47,927 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.23 vs. limit=6.0
+2024-08-25 06:20:48,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=45909.333333333336, ans=0.2
+2024-08-25 06:20:52,241 INFO [train.py:1114] (3/4) Epoch 4, batch 1150, loss[loss=0.3327, simple_loss=0.3432, pruned_loss=0.1168, ctc_loss=0.2211, over 19569.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3422, pruned_loss=0.1069, ctc_loss=0.2009, over 3830740.64 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-25 06:20:57,033 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.122e+02 2.390e+02 2.706e+02 4.199e+02, threshold=4.779e+02, percent-clipped=0.0
+2024-08-25 06:21:31,145 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.96 vs. limit=15.0
+2024-08-25 06:21:32,942 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=46069.333333333336, ans=0.025
+2024-08-25 06:21:37,938 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=46122.666666666664, ans=0.125
+2024-08-25 06:21:43,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=46122.666666666664, ans=0.2
+2024-08-25 06:21:59,995 INFO [train.py:1114] (3/4) Epoch 4, batch 1200, loss[loss=0.3151, simple_loss=0.3409, pruned_loss=0.1047, ctc_loss=0.1995, over 19841.00 frames. ], tot_loss[loss=0.3201, simple_loss=0.3436, pruned_loss=0.1078, ctc_loss=0.2025, over 3825768.46 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:22:06,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=46229.333333333336, ans=0.125
+2024-08-25 06:22:14,930 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.35 vs. limit=5.0
+2024-08-25 06:22:38,264 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=46336.0, ans=0.125
+2024-08-25 06:22:55,999 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.20 vs. limit=15.0
+2024-08-25 06:23:21,322 INFO [train.py:1114] (3/4) Epoch 4, batch 1250, loss[loss=0.3157, simple_loss=0.3496, pruned_loss=0.1031, ctc_loss=0.1892, over 19502.00 frames. ], tot_loss[loss=0.3198, simple_loss=0.344, pruned_loss=0.1075, ctc_loss=0.2018, over 3844202.12 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-25 06:23:26,217 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.962e+02 2.225e+02 2.468e+02 3.508e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 06:23:30,925 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.08 vs. limit=6.0
+2024-08-25 06:23:35,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=46496.0, ans=0.2
+2024-08-25 06:23:50,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=46549.333333333336, ans=0.125
+2024-08-25 06:24:12,212 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=46602.666666666664, ans=0.0
+2024-08-25 06:24:36,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=46709.333333333336, ans=0.0007153623188405796
+2024-08-25 06:24:39,536 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=46709.333333333336, ans=0.125
+2024-08-25 06:24:43,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=46709.333333333336, ans=0.04949747468305833
+2024-08-25 06:24:48,924 INFO [train.py:1114] (3/4) Epoch 4, batch 1300, loss[loss=0.3391, simple_loss=0.359, pruned_loss=0.1157, ctc_loss=0.2198, over 18910.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3427, pruned_loss=0.1065, ctc_loss=0.2002, over 3847151.65 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:24:50,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=46762.666666666664, ans=0.125
+2024-08-25 06:25:15,167 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.47 vs. limit=15.0
+2024-08-25 06:25:26,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=46922.666666666664, ans=0.2
+2024-08-25 06:25:43,692 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=46976.0, ans=0.125
+2024-08-25 06:25:52,878 INFO [train.py:1114] (3/4) Epoch 4, batch 1350, loss[loss=0.3235, simple_loss=0.348, pruned_loss=0.1078, ctc_loss=0.2083, over 19767.00 frames. ], tot_loss[loss=0.3171, simple_loss=0.3424, pruned_loss=0.1061, ctc_loss=0.1991, over 3859051.87 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-25 06:26:07,746 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.269e+02 2.560e+02 3.229e+02 4.886e+02, threshold=5.120e+02, percent-clipped=5.0
+2024-08-25 06:26:26,309 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.11 vs. limit=15.0
+2024-08-25 06:26:32,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=47082.666666666664, ans=0.1
+2024-08-25 06:26:32,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=47082.666666666664, ans=0.025
+2024-08-25 06:26:38,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=47082.666666666664, ans=0.125
+2024-08-25 06:26:41,476 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.12 vs. limit=10.0
+2024-08-25 06:26:53,725 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=47136.0, ans=0.125
+2024-08-25 06:26:53,925 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.84 vs. limit=12.0
+2024-08-25 06:27:11,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=47242.666666666664, ans=0.1
+2024-08-25 06:27:20,738 INFO [train.py:1114] (3/4) Epoch 4, batch 1400, loss[loss=0.2977, simple_loss=0.3138, pruned_loss=0.1043, ctc_loss=0.1827, over 19683.00 frames. ], tot_loss[loss=0.3163, simple_loss=0.3418, pruned_loss=0.1056, ctc_loss=0.1987, over 3865983.97 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:27:39,127 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:27:45,648 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.49 vs. limit=22.5
+2024-08-25 06:28:02,597 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.37 vs. limit=22.5
+2024-08-25 06:28:10,918 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=47402.666666666664, ans=0.025
+2024-08-25 06:28:22,068 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=47456.0, ans=0.025
+2024-08-25 06:28:36,167 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=47509.333333333336, ans=0.125
+2024-08-25 06:28:43,704 INFO [train.py:1114] (3/4) Epoch 4, batch 1450, loss[loss=0.2829, simple_loss=0.3361, pruned_loss=0.08215, ctc_loss=0.1632, over 19707.00 frames. ], tot_loss[loss=0.3189, simple_loss=0.3435, pruned_loss=0.1069, ctc_loss=0.2012, over 3862963.78 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-25 06:28:45,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=47562.666666666664, ans=0.0
+2024-08-25 06:28:45,859 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=47562.666666666664, ans=0.0005298550724637686
+2024-08-25 06:28:48,584 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.026e+02 2.327e+02 2.659e+02 4.329e+02, threshold=4.654e+02, percent-clipped=0.0
+2024-08-25 06:29:08,192 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.18 vs. limit=15.0
+2024-08-25 06:29:10,528 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.06 vs. limit=22.5
+2024-08-25 06:29:11,434 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.29 vs. limit=12.0
+2024-08-25 06:29:28,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=47722.666666666664, ans=0.125
+2024-08-25 06:29:38,172 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.90 vs. limit=22.5
+2024-08-25 06:29:44,347 INFO [train.py:1114] (3/4) Epoch 4, batch 1500, loss[loss=0.2726, simple_loss=0.3284, pruned_loss=0.07739, ctc_loss=0.1549, over 19585.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3431, pruned_loss=0.1064, ctc_loss=0.2004, over 3862305.15 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:29:46,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=47829.333333333336, ans=0.00047188405797101395
+2024-08-25 06:30:01,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=47882.666666666664, ans=0.2
+2024-08-25 06:30:03,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=47882.666666666664, ans=0.1
+2024-08-25 06:30:05,990 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.49 vs. limit=15.0
+2024-08-25 06:30:13,139 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.80 vs. limit=15.0
+2024-08-25 06:30:38,171 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=48042.666666666664, ans=0.1
+2024-08-25 06:31:30,546 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=48042.666666666664, ans=0.09899494936611666
+2024-08-25 06:31:31,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=48042.666666666664, ans=0.2
+2024-08-25 06:31:38,021 INFO [train.py:1114] (3/4) Epoch 4, batch 1550, loss[loss=0.3299, simple_loss=0.3651, pruned_loss=0.1084, ctc_loss=0.195, over 19628.00 frames. ], tot_loss[loss=0.3194, simple_loss=0.3438, pruned_loss=0.1072, ctc_loss=0.2017, over 3846711.23 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 16.0
+2024-08-25 06:31:49,992 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.013e+02 2.262e+02 2.770e+02 1.090e+03, threshold=4.525e+02, percent-clipped=1.0
+2024-08-25 06:31:59,432 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.59 vs. limit=12.0
+2024-08-25 06:32:14,734 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=48149.333333333336, ans=0.125
+2024-08-25 06:32:15,719 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=48149.333333333336, ans=0.125
+2024-08-25 06:32:24,380 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=48202.666666666664, ans=0.125
+2024-08-25 06:32:24,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=48202.666666666664, ans=0.125
+2024-08-25 06:32:33,134 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=48256.0, ans=0.125
+2024-08-25 06:32:33,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=48256.0, ans=0.125
+2024-08-25 06:33:21,538 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=48362.666666666664, ans=0.125
+2024-08-25 06:33:26,250 INFO [train.py:1114] (3/4) Epoch 4, batch 1600, loss[loss=0.2705, simple_loss=0.3207, pruned_loss=0.07922, ctc_loss=0.1548, over 19850.00 frames. ], tot_loss[loss=0.3192, simple_loss=0.3434, pruned_loss=0.1072, ctc_loss=0.2014, over 3836199.50 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:33:32,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=48362.666666666664, ans=0.125
+2024-08-25 06:33:52,614 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.79 vs. limit=15.0
+2024-08-25 06:33:59,173 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.38 vs. limit=22.5
+2024-08-25 06:34:13,576 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=48469.333333333336, ans=0.125
+2024-08-25 06:35:15,009 INFO [train.py:1114] (3/4) Epoch 4, batch 1650, loss[loss=0.3283, simple_loss=0.3499, pruned_loss=0.1112, ctc_loss=0.2109, over 19672.00 frames. ], tot_loss[loss=0.319, simple_loss=0.343, pruned_loss=0.1072, ctc_loss=0.2015, over 3832501.89 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-25 06:35:21,181 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.079e+02 2.506e+02 2.996e+02 5.422e+02, threshold=5.011e+02, percent-clipped=2.0
+2024-08-25 06:36:19,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=48789.333333333336, ans=0.125
+2024-08-25 06:36:23,843 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=48789.333333333336, ans=0.0
+2024-08-25 06:36:37,432 INFO [train.py:1114] (3/4) Epoch 4, batch 1700, loss[loss=0.2967, simple_loss=0.3166, pruned_loss=0.09946, ctc_loss=0.1948, over 19656.00 frames. ], tot_loss[loss=0.318, simple_loss=0.3424, pruned_loss=0.1067, ctc_loss=0.2005, over 3846039.49 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-25 06:37:03,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=48949.333333333336, ans=0.125
+2024-08-25 06:37:15,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=49002.666666666664, ans=0.125
+2024-08-25 06:37:19,048 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.47 vs. limit=22.5
+2024-08-25 06:37:26,363 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=49002.666666666664, ans=0.125
+2024-08-25 06:37:39,744 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=49056.0, ans=0.0
+2024-08-25 06:37:43,144 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:37:58,650 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=49056.0, ans=0.0
+2024-08-25 06:38:18,580 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:38:25,885 INFO [train.py:1114] (3/4) Epoch 4, batch 1750, loss[loss=0.3124, simple_loss=0.3224, pruned_loss=0.1099, ctc_loss=0.2061, over 19646.00 frames. ], tot_loss[loss=0.3163, simple_loss=0.3411, pruned_loss=0.1059, ctc_loss=0.1991, over 3852194.08 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:38:33,086 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 1.987e+02 2.278e+02 2.713e+02 5.908e+02, threshold=4.555e+02, percent-clipped=1.0
+2024-08-25 06:38:55,259 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.17 vs. limit=15.0
+2024-08-25 06:38:56,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=49269.333333333336, ans=0.035
+2024-08-25 06:39:31,714 INFO [train.py:1114] (3/4) Epoch 4, batch 1800, loss[loss=0.3047, simple_loss=0.3404, pruned_loss=0.0989, ctc_loss=0.178, over 19617.00 frames. ], tot_loss[loss=0.3165, simple_loss=0.3413, pruned_loss=0.106, ctc_loss=0.199, over 3854640.81 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-25 06:39:33,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=49429.333333333336, ans=0.025
+2024-08-25 06:39:34,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=49429.333333333336, ans=0.125
+2024-08-25 06:40:18,829 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=49536.0, ans=0.2
+2024-08-25 06:40:25,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=49536.0, ans=0.04949747468305833
+2024-08-25 06:40:37,173 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.51 vs. limit=15.0
+2024-08-25 06:40:39,153 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=49589.333333333336, ans=0.125
+2024-08-25 06:40:54,714 INFO [train.py:1114] (3/4) Epoch 4, batch 1850, loss[loss=0.3613, simple_loss=0.3767, pruned_loss=0.1256, ctc_loss=0.2367, over 19608.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.3406, pruned_loss=0.1054, ctc_loss=0.1977, over 3857733.12 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:41:01,669 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.149e+02 2.307e+02 2.574e+02 4.619e+02, threshold=4.614e+02, percent-clipped=1.0
+2024-08-25 06:41:12,845 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.96 vs. limit=22.5
+2024-08-25 06:41:23,446 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=49802.666666666664, ans=0.0
+2024-08-25 06:41:59,174 INFO [train.py:1114] (3/4) Epoch 4, batch 1900, loss[loss=0.3494, simple_loss=0.3701, pruned_loss=0.1189, ctc_loss=0.2271, over 19650.00 frames. ], tot_loss[loss=0.3158, simple_loss=0.3414, pruned_loss=0.1055, ctc_loss=0.1978, over 3862698.23 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 32.0
+2024-08-25 06:42:58,559 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=50069.333333333336, ans=0.2
+2024-08-25 06:43:00,435 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:43:10,930 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:43:37,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=50176.0, ans=0.2
+2024-08-25 06:43:39,882 INFO [train.py:1114] (3/4) Epoch 4, batch 1950, loss[loss=0.3153, simple_loss=0.3383, pruned_loss=0.1064, ctc_loss=0.1987, over 19594.00 frames. ], tot_loss[loss=0.3163, simple_loss=0.3426, pruned_loss=0.1055, ctc_loss=0.1976, over 3871808.73 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:43:45,596 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.065e+02 2.259e+02 2.635e+02 4.732e+02, threshold=4.517e+02, percent-clipped=1.0
+2024-08-25 06:43:51,291 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=50282.666666666664, ans=0.125
+2024-08-25 06:43:55,753 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=50282.666666666664, ans=0.125
+2024-08-25 06:44:13,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=50389.333333333336, ans=0.125
+2024-08-25 06:44:27,809 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.33 vs. limit=15.0
+2024-08-25 06:44:42,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=50496.0, ans=0.125
+2024-08-25 06:44:48,799 INFO [train.py:1114] (3/4) Epoch 4, batch 2000, loss[loss=0.264, simple_loss=0.2976, pruned_loss=0.08286, ctc_loss=0.1616, over 19633.00 frames. ], tot_loss[loss=0.3168, simple_loss=0.3427, pruned_loss=0.1058, ctc_loss=0.1982, over 3856302.96 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-25 06:44:54,812 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=50496.0, ans=0.0
+2024-08-25 06:45:57,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=50602.666666666664, ans=0.025
+2024-08-25 06:46:02,181 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.18 vs. limit=22.5
+2024-08-25 06:46:12,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=50656.0, ans=0.125
+2024-08-25 06:46:13,859 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.33 vs. limit=15.0
+2024-08-25 06:46:35,030 INFO [train.py:1114] (3/4) Epoch 4, batch 2050, loss[loss=0.3002, simple_loss=0.3249, pruned_loss=0.1006, ctc_loss=0.1861, over 19726.00 frames. ], tot_loss[loss=0.3165, simple_loss=0.3418, pruned_loss=0.106, ctc_loss=0.1979, over 3852633.65 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:46:45,625 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.046e+02 2.338e+02 2.720e+02 4.537e+02, threshold=4.675e+02, percent-clipped=1.0
+2024-08-25 06:46:53,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=50816.0, ans=0.125
+2024-08-25 06:46:55,859 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=50816.0, ans=0.125
+2024-08-25 06:47:12,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=50869.333333333336, ans=0.0
+2024-08-25 06:47:37,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=50976.0, ans=0.1
+2024-08-25 06:47:44,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=50976.0, ans=0.025
+2024-08-25 06:47:47,562 INFO [train.py:1114] (3/4) Epoch 4, batch 2100, loss[loss=0.3393, simple_loss=0.3585, pruned_loss=0.1176, ctc_loss=0.2119, over 19752.00 frames. ], tot_loss[loss=0.3147, simple_loss=0.3406, pruned_loss=0.1051, ctc_loss=0.1967, over 3859963.60 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-25 06:47:51,158 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=51029.333333333336, ans=0.125
+2024-08-25 06:48:05,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=51029.333333333336, ans=0.125
+2024-08-25 06:48:39,334 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=51082.666666666664, ans=0.2
+2024-08-25 06:49:03,832 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=51189.333333333336, ans=0.1
+2024-08-25 06:49:06,178 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=51189.333333333336, ans=0.0
+2024-08-25 06:49:08,186 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.60 vs. limit=15.0
+2024-08-25 06:49:17,569 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=51242.666666666664, ans=0.0
+2024-08-25 06:49:45,533 INFO [train.py:1114] (3/4) Epoch 4, batch 2150, loss[loss=0.3061, simple_loss=0.3328, pruned_loss=0.1018, ctc_loss=0.1898, over 19850.00 frames. ], tot_loss[loss=0.3134, simple_loss=0.3394, pruned_loss=0.1045, ctc_loss=0.1958, over 3870785.76 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:49:54,448 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.035e+02 2.305e+02 2.639e+02 4.596e+02, threshold=4.610e+02, percent-clipped=0.0
+2024-08-25 06:50:41,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=51402.666666666664, ans=0.1
+2024-08-25 06:51:12,331 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=51509.333333333336, ans=0.125
+2024-08-25 06:51:14,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=51562.666666666664, ans=0.2
+2024-08-25 06:51:15,393 INFO [train.py:1114] (3/4) Epoch 4, batch 2200, loss[loss=0.3651, simple_loss=0.3702, pruned_loss=0.1319, ctc_loss=0.2403, over 19595.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3393, pruned_loss=0.1042, ctc_loss=0.1953, over 3868547.89 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-25 06:51:26,554 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=51616.0, ans=0.125
+2024-08-25 06:51:34,424 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=51616.0, ans=0.0
+2024-08-25 06:51:45,066 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.93 vs. limit=22.5
+2024-08-25 06:51:53,355 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=51669.333333333336, ans=0.125
+2024-08-25 06:52:05,826 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.55 vs. limit=15.0
+2024-08-25 06:52:18,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=51776.0, ans=0.0
+2024-08-25 06:52:24,910 INFO [train.py:1114] (3/4) Epoch 4, batch 2250, loss[loss=0.3042, simple_loss=0.3396, pruned_loss=0.09705, ctc_loss=0.1869, over 19616.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3392, pruned_loss=0.1039, ctc_loss=0.195, over 3868291.30 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:52:31,994 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.164e+02 2.622e+02 3.263e+02 6.940e+02, threshold=5.245e+02, percent-clipped=2.0
+2024-08-25 06:52:32,181 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=51829.333333333336, ans=0.025
+2024-08-25 06:52:50,686 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=51936.0, ans=0.125
+2024-08-25 06:52:53,388 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.08 vs. limit=15.0
+2024-08-25 06:52:57,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=51936.0, ans=0.2
+2024-08-25 06:52:58,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51989.333333333336, ans=0.1
+2024-08-25 06:52:59,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=51989.333333333336, ans=0.125
+2024-08-25 06:53:00,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=51989.333333333336, ans=0.125
+2024-08-25 06:53:05,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=51989.333333333336, ans=0.125
+2024-08-25 06:53:09,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=51989.333333333336, ans=0.0
+2024-08-25 06:53:10,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=52042.666666666664, ans=0.125
+2024-08-25 06:53:15,365 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=52042.666666666664, ans=0.125
+2024-08-25 06:53:26,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=52042.666666666664, ans=0.1
+2024-08-25 06:53:27,981 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=52042.666666666664, ans=0.0
+2024-08-25 06:53:30,931 INFO [train.py:1114] (3/4) Epoch 4, batch 2300, loss[loss=0.2712, simple_loss=0.3088, pruned_loss=0.08436, ctc_loss=0.1619, over 19494.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3381, pruned_loss=0.1035, ctc_loss=0.1944, over 3861732.83 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-25 06:53:41,086 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=52096.0, ans=0.0
+2024-08-25 06:53:44,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=52096.0, ans=0.05
+2024-08-25 06:54:15,033 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=52256.0, ans=0.125
+2024-08-25 06:54:23,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=52256.0, ans=0.0
+2024-08-25 06:54:33,424 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.08 vs. limit=6.0
+2024-08-25 06:54:53,355 INFO [train.py:1114] (3/4) Epoch 4, batch 2350, loss[loss=0.3277, simple_loss=0.3614, pruned_loss=0.1087, ctc_loss=0.1915, over 19687.00 frames. ], tot_loss[loss=0.311, simple_loss=0.3377, pruned_loss=0.1033, ctc_loss=0.1939, over 3864397.32 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 06:54:55,008 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=52362.666666666664, ans=15.0
+2024-08-25 06:54:58,714 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.121e+02 2.497e+02 3.048e+02 4.745e+02, threshold=4.995e+02, percent-clipped=0.0
+2024-08-25 06:55:38,782 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.93 vs. limit=10.0
+2024-08-25 06:56:03,690 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=52469.333333333336, ans=0.0
+2024-08-25 06:59:35,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=52522.666666666664, ans=0.0
+2024-08-25 07:00:47,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=52576.0, ans=0.125
+2024-08-25 07:00:48,082 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=52576.0, ans=15.0
+2024-08-25 07:07:21,823 INFO [train.py:1114] (3/4) Epoch 4, batch 2400, loss[loss=0.2933, simple_loss=0.3336, pruned_loss=0.0911, ctc_loss=0.177, over 19311.00 frames. ], tot_loss[loss=0.3141, simple_loss=0.3404, pruned_loss=0.1046, ctc_loss=0.1962, over 3858609.02 frames. ], batch size: 71, lr: 3.05e-02, grad_scale: 32.0
+2024-08-25 07:09:08,343 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=52629.333333333336, ans=0.125
+2024-08-25 07:19:37,662 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52736.0, ans=0.1
+2024-08-25 07:21:10,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=52736.0, ans=0.0
+2024-08-25 07:21:10,869 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=52736.0, ans=0.125
+2024-08-25 07:21:52,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=52736.0, ans=0.0
+2024-08-25 07:33:05,323 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 07:34:58,797 INFO [train.py:1114] (3/4) Epoch 4, batch 2450, loss[loss=0.3799, simple_loss=0.3676, pruned_loss=0.1421, ctc_loss=0.2703, over 13352.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.3454, pruned_loss=0.109, ctc_loss=0.2046, over 3729049.40 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-25 07:36:27,118 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.096e+02 2.355e+02 2.735e+02 5.246e+02, threshold=4.710e+02, percent-clipped=1.0
+2024-08-25 07:37:21,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=52949.333333333336, ans=0.2
+2024-08-25 07:40:48,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=53002.666666666664, ans=0.125
+2024-08-25 07:43:06,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=53002.666666666664, ans=0.125
+2024-08-25 07:46:30,891 INFO [train.py:1114] (3/4) Epoch 5, batch 0, loss[loss=0.3131, simple_loss=0.3271, pruned_loss=0.1096, ctc_loss=0.1994, over 19433.00 frames. ], tot_loss[loss=0.3131, simple_loss=0.3271, pruned_loss=0.1096, ctc_loss=0.1994, over 19433.00 frames. ], batch size: 48, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 07:46:30,891 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 07:49:02,123 INFO [train.py:1146] (3/4) Epoch 5, validation: loss=0.2543, simple_loss=0.3259, pruned_loss=0.06691, ctc_loss=0.1221, over 944034.00 frames.
+2024-08-25 07:49:02,124 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 07:54:37,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=53157.333333333336, ans=0.0
+2024-08-25 07:57:03,890 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=53210.666666666664, ans=0.2
+2024-08-25 07:58:10,509 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=53210.666666666664, ans=0.125
+2024-08-25 07:59:10,141 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.00 vs. limit=12.0
+2024-08-25 08:00:17,477 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53317.333333333336, ans=0.0
+2024-08-25 08:01:56,939 INFO [train.py:1114] (3/4) Epoch 5, batch 50, loss[loss=0.2714, simple_loss=0.3019, pruned_loss=0.08808, ctc_loss=0.1616, over 19704.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3418, pruned_loss=0.106, ctc_loss=0.1996, over 843823.62 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-25 08:03:51,548 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 1.984e+02 2.202e+02 2.522e+02 4.045e+02, threshold=4.404e+02, percent-clipped=0.0
+2024-08-25 08:04:58,847 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.78 vs. limit=15.0
+2024-08-25 08:05:13,500 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=53477.333333333336, ans=0.2
+2024-08-25 08:05:40,078 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=53477.333333333336, ans=0.025
+2024-08-25 08:07:22,869 INFO [train.py:1114] (3/4) Epoch 5, batch 100, loss[loss=0.281, simple_loss=0.3208, pruned_loss=0.08716, ctc_loss=0.1672, over 19698.00 frames. ], tot_loss[loss=0.3146, simple_loss=0.3418, pruned_loss=0.1044, ctc_loss=0.1964, over 1498517.54 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:07:24,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=53637.333333333336, ans=0.09899494936611666
+2024-08-25 08:08:14,873 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=53690.666666666664, ans=0.125
+2024-08-25 08:08:27,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=53690.666666666664, ans=0.1
+2024-08-25 08:08:44,363 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=53744.0, ans=0.0
+2024-08-25 08:08:46,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=53744.0, ans=0.125
+2024-08-25 08:09:42,245 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.06 vs. limit=15.0
+2024-08-25 08:09:54,741 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=53850.666666666664, ans=0.125
+2024-08-25 08:10:03,748 INFO [train.py:1114] (3/4) Epoch 5, batch 150, loss[loss=0.2503, simple_loss=0.2959, pruned_loss=0.07488, ctc_loss=0.1374, over 19709.00 frames. ], tot_loss[loss=0.31, simple_loss=0.3383, pruned_loss=0.1024, ctc_loss=0.1926, over 2027402.71 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-25 08:10:09,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=53904.0, ans=0.0
+2024-08-25 08:10:31,435 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=53957.333333333336, ans=0.0
+2024-08-25 08:10:40,320 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.115e+02 2.389e+02 2.764e+02 4.531e+02, threshold=4.777e+02, percent-clipped=1.0
+2024-08-25 08:10:42,970 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.93 vs. limit=6.0
+2024-08-25 08:11:09,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=54010.666666666664, ans=0.05
+2024-08-25 08:11:34,653 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=54064.0, ans=0.2
+2024-08-25 08:12:01,138 INFO [train.py:1114] (3/4) Epoch 5, batch 200, loss[loss=0.3733, simple_loss=0.3809, pruned_loss=0.1349, ctc_loss=0.2395, over 18289.00 frames. ], tot_loss[loss=0.3079, simple_loss=0.3367, pruned_loss=0.1015, ctc_loss=0.1904, over 2435589.46 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:15:43,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=54384.0, ans=0.0
+2024-08-25 08:15:56,680 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=54384.0, ans=0.015
+2024-08-25 08:15:58,937 INFO [train.py:1114] (3/4) Epoch 5, batch 250, loss[loss=0.3509, simple_loss=0.3635, pruned_loss=0.1248, ctc_loss=0.2218, over 19386.00 frames. ], tot_loss[loss=0.3051, simple_loss=0.335, pruned_loss=0.1001, ctc_loss=0.188, over 2755462.74 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:16:47,937 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 1.969e+02 2.164e+02 2.373e+02 3.326e+02, threshold=4.328e+02, percent-clipped=0.0
+2024-08-25 08:16:49,402 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=54490.666666666664, ans=0.125
+2024-08-25 08:16:59,715 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=54544.0, ans=0.0
+2024-08-25 08:17:02,192 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=54544.0, ans=0.0
+2024-08-25 08:17:09,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=54597.333333333336, ans=0.0
+2024-08-25 08:17:26,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=54650.666666666664, ans=0.125
+2024-08-25 08:17:32,568 INFO [train.py:1114] (3/4) Epoch 5, batch 300, loss[loss=0.3492, simple_loss=0.3663, pruned_loss=0.1215, ctc_loss=0.2229, over 19483.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3345, pruned_loss=0.09987, ctc_loss=0.1873, over 2998701.36 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-25 08:17:58,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54810.666666666664, ans=0.1
+2024-08-25 08:18:38,534 INFO [train.py:1114] (3/4) Epoch 5, batch 350, loss[loss=0.3095, simple_loss=0.324, pruned_loss=0.1078, ctc_loss=0.1987, over 19765.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.335, pruned_loss=0.1003, ctc_loss=0.1882, over 3188431.96 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 16.0
+2024-08-25 08:18:38,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=54970.666666666664, ans=0.125
+2024-08-25 08:18:53,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=55024.0, ans=0.125
+2024-08-25 08:19:00,000 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=55024.0, ans=0.0
+2024-08-25 08:19:08,646 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=55024.0, ans=0.125
+2024-08-25 08:19:10,175 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.65 vs. limit=15.0
+2024-08-25 08:19:10,799 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.967e+02 2.265e+02 2.794e+02 4.039e+02, threshold=4.529e+02, percent-clipped=0.0
+2024-08-25 08:19:51,988 INFO [train.py:1114] (3/4) Epoch 5, batch 400, loss[loss=0.3323, simple_loss=0.3573, pruned_loss=0.1112, ctc_loss=0.2122, over 19493.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3345, pruned_loss=0.09981, ctc_loss=0.1878, over 3340848.49 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-25 08:19:52,815 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=55237.333333333336, ans=0.125
+2024-08-25 08:20:14,725 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=55290.666666666664, ans=0.125
+2024-08-25 08:20:28,637 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=55344.0, ans=0.125
+2024-08-25 08:21:10,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=55450.666666666664, ans=0.125
+2024-08-25 08:21:27,044 INFO [train.py:1114] (3/4) Epoch 5, batch 450, loss[loss=0.3558, simple_loss=0.3694, pruned_loss=0.1245, ctc_loss=0.2335, over 19616.00 frames. ], tot_loss[loss=0.3053, simple_loss=0.3348, pruned_loss=0.1003, ctc_loss=0.1885, over 3449345.08 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:21:47,864 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.008e+02 2.249e+02 2.774e+02 4.428e+02, threshold=4.498e+02, percent-clipped=0.0
+2024-08-25 08:21:55,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=55610.666666666664, ans=0.0
+2024-08-25 08:22:01,358 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=55664.0, ans=0.125
+2024-08-25 08:22:01,435 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=55664.0, ans=0.0
+2024-08-25 08:22:02,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=55664.0, ans=0.2
+2024-08-25 08:22:04,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=55664.0, ans=0.09899494936611666
+2024-08-25 08:22:21,899 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.19 vs. limit=12.0
+2024-08-25 08:22:32,193 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=55717.333333333336, ans=0.04949747468305833
+2024-08-25 08:22:58,217 INFO [train.py:1114] (3/4) Epoch 5, batch 500, loss[loss=0.3398, simple_loss=0.3697, pruned_loss=0.1133, ctc_loss=0.2082, over 19658.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3333, pruned_loss=0.099, ctc_loss=0.1859, over 3544776.48 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 32.0
+2024-08-25 08:23:14,737 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=55770.666666666664, ans=0.0
+2024-08-25 08:23:14,826 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=55770.666666666664, ans=0.0
+2024-08-25 08:23:22,080 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=55824.0, ans=0.0
+2024-08-25 08:23:51,274 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=55877.333333333336, ans=0.2
+2024-08-25 08:23:52,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=55877.333333333336, ans=0.035
+2024-08-25 08:24:00,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=55930.666666666664, ans=0.2
+2024-08-25 08:24:11,487 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=55984.0, ans=0.95
+2024-08-25 08:24:21,967 INFO [train.py:1114] (3/4) Epoch 5, batch 550, loss[loss=0.289, simple_loss=0.3317, pruned_loss=0.08991, ctc_loss=0.1661, over 19241.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3333, pruned_loss=0.09896, ctc_loss=0.1862, over 3607987.84 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:24:47,069 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 1.991e+02 2.247e+02 2.867e+02 6.260e+02, threshold=4.494e+02, percent-clipped=1.0
+2024-08-25 08:24:48,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=56144.0, ans=0.125
+2024-08-25 08:25:26,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=56250.666666666664, ans=0.125
+2024-08-25 08:25:37,713 INFO [train.py:1114] (3/4) Epoch 5, batch 600, loss[loss=0.3262, simple_loss=0.3538, pruned_loss=0.109, ctc_loss=0.2014, over 19442.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.3328, pruned_loss=0.09811, ctc_loss=0.1845, over 3666424.63 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 32.0
+2024-08-25 08:25:52,785 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=56304.0, ans=0.125
+2024-08-25 08:26:03,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=56357.333333333336, ans=0.125
+2024-08-25 08:26:06,899 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=56410.666666666664, ans=0.2
+2024-08-25 08:26:21,379 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.87 vs. limit=22.5
+2024-08-25 08:26:38,907 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=56517.333333333336, ans=0.125
+2024-08-25 08:26:47,451 INFO [train.py:1114] (3/4) Epoch 5, batch 650, loss[loss=0.2607, simple_loss=0.3107, pruned_loss=0.0764, ctc_loss=0.1446, over 19771.00 frames. ], tot_loss[loss=0.2997, simple_loss=0.3315, pruned_loss=0.09727, ctc_loss=0.1835, over 3716413.52 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:27:13,346 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 1.957e+02 2.352e+02 2.685e+02 4.359e+02, threshold=4.704e+02, percent-clipped=0.0
+2024-08-25 08:27:15,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=56677.333333333336, ans=0.125
+2024-08-25 08:27:55,532 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.30 vs. limit=15.0
+2024-08-25 08:28:10,106 INFO [train.py:1114] (3/4) Epoch 5, batch 700, loss[loss=0.2729, simple_loss=0.3119, pruned_loss=0.08381, ctc_loss=0.1658, over 19719.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.3322, pruned_loss=0.09736, ctc_loss=0.1836, over 3748716.32 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:28:26,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=56890.666666666664, ans=0.0
+2024-08-25 08:28:42,365 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=56890.666666666664, ans=0.0
+2024-08-25 08:29:20,192 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=56997.333333333336, ans=0.0
+2024-08-25 08:29:29,190 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=57050.666666666664, ans=0.125
+2024-08-25 08:29:41,359 INFO [train.py:1114] (3/4) Epoch 5, batch 750, loss[loss=0.3003, simple_loss=0.3374, pruned_loss=0.09591, ctc_loss=0.1787, over 19497.00 frames. ], tot_loss[loss=0.2989, simple_loss=0.3313, pruned_loss=0.09676, ctc_loss=0.1824, over 3774382.21 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 32.0
+2024-08-25 08:30:02,387 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=57104.0, ans=0.1
+2024-08-25 08:30:08,932 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=57104.0, ans=0.1
+2024-08-25 08:30:26,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=57157.333333333336, ans=0.125
+2024-08-25 08:30:40,363 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.099e+02 2.472e+02 3.181e+02 5.803e+02, threshold=4.945e+02, percent-clipped=2.0
+2024-08-25 08:31:04,165 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.71 vs. limit=12.0
+2024-08-25 08:31:16,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=57264.0, ans=0.125
+2024-08-25 08:31:25,959 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.53 vs. limit=12.0
+2024-08-25 08:31:31,921 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.88 vs. limit=15.0
+2024-08-25 08:31:42,817 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.78 vs. limit=15.0
+2024-08-25 08:31:58,395 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.31 vs. limit=15.0
+2024-08-25 08:31:59,201 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.84 vs. limit=15.0
+2024-08-25 08:32:05,741 INFO [train.py:1114] (3/4) Epoch 5, batch 800, loss[loss=0.2659, simple_loss=0.3082, pruned_loss=0.08133, ctc_loss=0.1523, over 19418.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.331, pruned_loss=0.09644, ctc_loss=0.1815, over 3795967.69 frames. ], batch size: 48, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:32:36,831 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.59 vs. limit=15.0
+2024-08-25 08:32:55,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=57477.333333333336, ans=0.0
+2024-08-25 08:33:24,602 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=57584.0, ans=0.125
+2024-08-25 08:33:34,842 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=57584.0, ans=0.5
+2024-08-25 08:33:37,755 INFO [train.py:1114] (3/4) Epoch 5, batch 850, loss[loss=0.2944, simple_loss=0.333, pruned_loss=0.09327, ctc_loss=0.1733, over 19656.00 frames. ], tot_loss[loss=0.2976, simple_loss=0.3303, pruned_loss=0.0962, ctc_loss=0.181, over 3814629.94 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-25 08:34:26,552 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.963e+02 2.197e+02 2.544e+02 4.330e+02, threshold=4.395e+02, percent-clipped=0.0
+2024-08-25 08:34:28,027 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:34:33,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=57744.0, ans=0.125
+2024-08-25 08:34:36,518 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.05 vs. limit=10.0
+2024-08-25 08:34:48,167 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=57797.333333333336, ans=0.125
+2024-08-25 08:34:50,456 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=57797.333333333336, ans=0.0
+2024-08-25 08:34:56,440 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=57850.666666666664, ans=0.1
+2024-08-25 08:35:13,470 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=57850.666666666664, ans=0.125
+2024-08-25 08:35:17,361 INFO [train.py:1114] (3/4) Epoch 5, batch 900, loss[loss=0.2724, simple_loss=0.3041, pruned_loss=0.08732, ctc_loss=0.1651, over 19418.00 frames. ], tot_loss[loss=0.2989, simple_loss=0.3309, pruned_loss=0.097, ctc_loss=0.1823, over 3818402.36 frames. ], batch size: 48, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:35:28,791 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:35:39,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=57957.333333333336, ans=0.125
+2024-08-25 08:35:56,009 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=17.65 vs. limit=15.0
+2024-08-25 08:36:00,209 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.41 vs. limit=22.5
+2024-08-25 08:36:23,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=58117.333333333336, ans=0.125
+2024-08-25 08:36:41,308 INFO [train.py:1114] (3/4) Epoch 5, batch 950, loss[loss=0.2884, simple_loss=0.3172, pruned_loss=0.09439, ctc_loss=0.1769, over 19513.00 frames. ], tot_loss[loss=0.2993, simple_loss=0.3311, pruned_loss=0.09717, ctc_loss=0.1828, over 3819311.00 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-25 08:37:00,433 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=58224.0, ans=0.025
+2024-08-25 08:37:02,455 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.021e+02 2.236e+02 2.607e+02 6.234e+02, threshold=4.471e+02, percent-clipped=1.0
+2024-08-25 08:37:02,731 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=58224.0, ans=0.0
+2024-08-25 08:37:14,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=58277.333333333336, ans=0.0
+2024-08-25 08:37:19,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=58277.333333333336, ans=0.0
+2024-08-25 08:37:22,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=58277.333333333336, ans=0.2
+2024-08-25 08:37:29,309 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.96 vs. limit=15.0
+2024-08-25 08:37:37,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=58384.0, ans=0.125
+2024-08-25 08:37:49,069 INFO [train.py:1114] (3/4) Epoch 5, batch 1000, loss[loss=0.2972, simple_loss=0.3264, pruned_loss=0.09688, ctc_loss=0.1859, over 19854.00 frames. ], tot_loss[loss=0.3007, simple_loss=0.332, pruned_loss=0.09792, ctc_loss=0.184, over 3815031.46 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:39:12,428 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.09 vs. limit=15.0
+2024-08-25 08:39:14,357 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=58650.666666666664, ans=0.125
+2024-08-25 08:39:18,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=58704.0, ans=0.1
+2024-08-25 08:39:20,335 INFO [train.py:1114] (3/4) Epoch 5, batch 1050, loss[loss=0.3122, simple_loss=0.3428, pruned_loss=0.1019, ctc_loss=0.1944, over 19846.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.3315, pruned_loss=0.09768, ctc_loss=0.1838, over 3822360.93 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:39:41,251 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.929e+02 2.228e+02 2.594e+02 4.447e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 08:40:02,335 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.31 vs. limit=15.0
+2024-08-25 08:40:20,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=58864.0, ans=0.125
+2024-08-25 08:40:32,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=58917.333333333336, ans=0.025
+2024-08-25 08:40:42,208 INFO [train.py:1114] (3/4) Epoch 5, batch 1100, loss[loss=0.3192, simple_loss=0.3393, pruned_loss=0.1088, ctc_loss=0.2039, over 19583.00 frames. ], tot_loss[loss=0.2986, simple_loss=0.3305, pruned_loss=0.09682, ctc_loss=0.1826, over 3828880.11 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-25 08:41:00,927 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:41:35,709 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.90 vs. limit=22.5
+2024-08-25 08:42:06,695 INFO [train.py:1114] (3/4) Epoch 5, batch 1150, loss[loss=0.2831, simple_loss=0.3196, pruned_loss=0.09051, ctc_loss=0.1642, over 19581.00 frames. ], tot_loss[loss=0.2994, simple_loss=0.3308, pruned_loss=0.09732, ctc_loss=0.1833, over 3827292.51 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:42:17,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=59237.333333333336, ans=0.0
+2024-08-25 08:42:24,800 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=59290.666666666664, ans=0.09899494936611666
+2024-08-25 08:42:38,153 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.022e+02 2.244e+02 2.636e+02 4.087e+02, threshold=4.489e+02, percent-clipped=0.0
+2024-08-25 08:42:44,158 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=59344.0, ans=0.125
+2024-08-25 08:43:33,325 INFO [train.py:1114] (3/4) Epoch 5, batch 1200, loss[loss=0.3044, simple_loss=0.3437, pruned_loss=0.09605, ctc_loss=0.1822, over 19834.00 frames. ], tot_loss[loss=0.3, simple_loss=0.3317, pruned_loss=0.09749, ctc_loss=0.1835, over 3823627.46 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-25 08:44:02,918 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.17 vs. limit=15.0
+2024-08-25 08:44:12,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=59610.666666666664, ans=0.125
+2024-08-25 08:44:13,618 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.66 vs. limit=22.5
+2024-08-25 08:44:15,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=59610.666666666664, ans=0.0
+2024-08-25 08:44:16,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=59610.666666666664, ans=0.09899494936611666
+2024-08-25 08:44:25,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=59664.0, ans=0.125
+2024-08-25 08:44:41,802 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.83 vs. limit=15.0
+2024-08-25 08:44:53,028 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=7.68 vs. limit=12.0
+2024-08-25 08:44:55,327 INFO [train.py:1114] (3/4) Epoch 5, batch 1250, loss[loss=0.3264, simple_loss=0.3613, pruned_loss=0.1062, ctc_loss=0.1978, over 19515.00 frames. ], tot_loss[loss=0.299, simple_loss=0.3317, pruned_loss=0.09678, ctc_loss=0.182, over 3842236.16 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:44:59,089 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=59770.666666666664, ans=0.0
+2024-08-25 08:45:21,216 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 1.906e+02 2.098e+02 2.362e+02 4.005e+02, threshold=4.196e+02, percent-clipped=0.0
+2024-08-25 08:45:25,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=59877.333333333336, ans=0.0
+2024-08-25 08:45:29,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=59877.333333333336, ans=0.2
+2024-08-25 08:45:39,945 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=59930.666666666664, ans=0.125
+2024-08-25 08:46:03,578 INFO [train.py:1114] (3/4) Epoch 5, batch 1300, loss[loss=0.3446, simple_loss=0.3554, pruned_loss=0.1227, ctc_loss=0.2209, over 18794.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3305, pruned_loss=0.09607, ctc_loss=0.1807, over 3846118.66 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-25 08:46:19,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=60037.333333333336, ans=0.125
+2024-08-25 08:47:14,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=60250.666666666664, ans=0.125
+2024-08-25 08:47:27,093 INFO [train.py:1114] (3/4) Epoch 5, batch 1350, loss[loss=0.2997, simple_loss=0.3294, pruned_loss=0.09788, ctc_loss=0.1855, over 19778.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.3292, pruned_loss=0.09533, ctc_loss=0.1794, over 3857716.81 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:47:32,091 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=60304.0, ans=0.2
+2024-08-25 08:48:06,336 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.950e+02 2.204e+02 2.621e+02 4.331e+02, threshold=4.409e+02, percent-clipped=1.0
+2024-08-25 08:48:12,250 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.72 vs. limit=15.0
+2024-08-25 08:48:13,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=60410.666666666664, ans=0.125
+2024-08-25 08:48:22,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=60410.666666666664, ans=0.125
+2024-08-25 08:49:12,176 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=60517.333333333336, ans=0.125
+2024-08-25 08:49:13,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=60570.666666666664, ans=0.125
+2024-08-25 08:49:14,314 INFO [train.py:1114] (3/4) Epoch 5, batch 1400, loss[loss=0.2452, simple_loss=0.2903, pruned_loss=0.07154, ctc_loss=0.1426, over 19634.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3288, pruned_loss=0.09486, ctc_loss=0.1784, over 3864927.74 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 08:49:22,876 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.83 vs. limit=22.5
+2024-08-25 08:49:25,533 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.24 vs. limit=6.0
+2024-08-25 08:49:46,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=60677.333333333336, ans=0.025
+2024-08-25 08:49:47,550 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.73 vs. limit=15.0
+2024-08-25 08:58:06,991 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=60784.0, ans=0.0
+2024-08-25 08:58:37,060 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.82 vs. limit=15.0
+2024-08-25 09:00:00,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=60784.0, ans=0.0
+2024-08-25 09:01:57,446 INFO [train.py:1114] (3/4) Epoch 5, batch 1450, loss[loss=0.2885, simple_loss=0.3445, pruned_loss=0.08403, ctc_loss=0.1611, over 19664.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.3297, pruned_loss=0.09539, ctc_loss=0.1795, over 3862918.22 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-25 09:11:45,394 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=60890.666666666664, ans=0.125
+2024-08-25 09:14:29,261 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 1.942e+02 2.164e+02 2.480e+02 4.633e+02, threshold=4.329e+02, percent-clipped=1.0
+2024-08-25 09:18:15,274 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=60944.0, ans=0.125
+2024-08-25 09:19:20,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=60944.0, ans=0.125
+2024-08-25 09:22:22,265 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=60997.333333333336, ans=0.125
+2024-08-25 09:27:50,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=60997.333333333336, ans=0.125
+2024-08-25 09:27:51,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=60997.333333333336, ans=0.07
+2024-08-25 09:27:54,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=60997.333333333336, ans=0.09899494936611666
+2024-08-25 09:35:37,840 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=61050.666666666664, ans=0.0
+2024-08-25 09:36:13,475 INFO [train.py:1114] (3/4) Epoch 5, batch 1500, loss[loss=0.3197, simple_loss=0.3507, pruned_loss=0.1061, ctc_loss=0.191, over 19600.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3303, pruned_loss=0.09584, ctc_loss=0.1802, over 3862222.53 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-25 09:42:54,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=61104.0, ans=0.0
+2024-08-25 09:50:12,643 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.18 vs. limit=15.0
+2024-08-25 10:02:13,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=61317.333333333336, ans=0.125
+2024-08-25 10:03:52,725 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=3.858e-02
+2024-08-25 10:05:15,728 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=61317.333333333336, ans=0.2
+2024-08-25 10:06:52,313 INFO [train.py:1114] (3/4) Epoch 5, batch 1550, loss[loss=0.3324, simple_loss=0.3532, pruned_loss=0.114, ctc_loss=0.2095, over 19602.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3304, pruned_loss=0.09618, ctc_loss=0.1809, over 3846393.16 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
+2024-08-25 10:10:28,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=61370.666666666664, ans=0.0
+2024-08-25 10:12:35,281 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.49 vs. limit=15.0
+2024-08-25 10:14:17,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=61424.0, ans=0.125
+2024-08-25 10:14:47,423 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.971e+02 2.260e+02 2.611e+02 5.554e+02, threshold=4.519e+02, percent-clipped=3.0
+2024-08-25 10:18:14,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=61530.666666666664, ans=0.125
+2024-08-25 10:18:59,013 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.15 vs. limit=22.5
+2024-08-25 10:23:07,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=61530.666666666664, ans=0.125
+2024-08-25 10:26:34,893 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=61584.0, ans=0.1
+2024-08-25 10:28:13,716 INFO [train.py:1114] (3/4) Epoch 5, batch 1600, loss[loss=0.3018, simple_loss=0.3442, pruned_loss=0.09453, ctc_loss=0.1755, over 19830.00 frames. ], tot_loss[loss=0.2972, simple_loss=0.3299, pruned_loss=0.09605, ctc_loss=0.1808, over 3834995.66 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:35:03,622 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.61 vs. limit=15.0
+2024-08-25 10:35:04,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=61744.0, ans=0.1
+2024-08-25 10:35:04,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=61744.0, ans=0.0
+2024-08-25 10:40:45,725 INFO [train.py:1114] (3/4) Epoch 5, batch 1650, loss[loss=0.2742, simple_loss=0.3288, pruned_loss=0.07958, ctc_loss=0.1509, over 19679.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.3299, pruned_loss=0.09619, ctc_loss=0.1809, over 3830975.06 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:41:37,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=61904.0, ans=0.125
+2024-08-25 10:42:22,439 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=61957.333333333336, ans=0.2
+2024-08-25 10:43:04,116 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.985e+02 2.336e+02 2.616e+02 4.728e+02, threshold=4.672e+02, percent-clipped=1.0
+2024-08-25 10:43:52,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=62010.666666666664, ans=0.125
+2024-08-25 10:44:58,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=62064.0, ans=0.125
+2024-08-25 10:45:48,019 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=62117.333333333336, ans=0.07
+2024-08-25 10:46:43,739 INFO [train.py:1114] (3/4) Epoch 5, batch 1700, loss[loss=0.2579, simple_loss=0.2946, pruned_loss=0.07991, ctc_loss=0.1532, over 19650.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.3298, pruned_loss=0.09611, ctc_loss=0.1805, over 3845488.88 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-25 10:48:01,220 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=62224.0, ans=0.0
+2024-08-25 10:49:18,527 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=62330.666666666664, ans=0.05
+2024-08-25 10:49:29,213 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.46 vs. limit=6.0
+2024-08-25 10:49:57,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=62330.666666666664, ans=0.0
+2024-08-25 10:50:15,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=62384.0, ans=0.125
+2024-08-25 10:50:16,711 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.88 vs. limit=15.0
+2024-08-25 10:50:54,973 INFO [train.py:1114] (3/4) Epoch 5, batch 1750, loss[loss=0.2663, simple_loss=0.2989, pruned_loss=0.08538, ctc_loss=0.1575, over 19660.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.329, pruned_loss=0.09518, ctc_loss=0.1791, over 3850086.81 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:51:32,020 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=62437.333333333336, ans=0.025
+2024-08-25 10:53:52,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=62490.666666666664, ans=0.025
+2024-08-25 10:53:52,980 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.010e+02 2.326e+02 2.972e+02 6.446e+02, threshold=4.653e+02, percent-clipped=3.0
+2024-08-25 10:53:53,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=62544.0, ans=0.125
+2024-08-25 10:57:03,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=62650.666666666664, ans=0.125
+2024-08-25 10:57:03,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=62650.666666666664, ans=0.025
+2024-08-25 10:57:11,529 INFO [train.py:1114] (3/4) Epoch 5, batch 1800, loss[loss=0.297, simple_loss=0.3359, pruned_loss=0.09437, ctc_loss=0.1737, over 19601.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3301, pruned_loss=0.09579, ctc_loss=0.1801, over 3852345.49 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-25 10:57:58,200 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=62757.333333333336, ans=0.1
+2024-08-25 10:58:12,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=62757.333333333336, ans=0.0
+2024-08-25 10:58:26,279 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.28 vs. limit=15.0
+2024-08-25 10:58:27,249 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=62864.0, ans=0.0
+2024-08-25 10:58:40,330 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=62864.0, ans=0.125
+2024-08-25 10:58:54,211 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=62917.333333333336, ans=0.125
+2024-08-25 10:59:02,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62917.333333333336, ans=0.1
+2024-08-25 10:59:06,166 INFO [train.py:1114] (3/4) Epoch 5, batch 1850, loss[loss=0.3142, simple_loss=0.3518, pruned_loss=0.1003, ctc_loss=0.1897, over 19604.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3297, pruned_loss=0.09558, ctc_loss=0.1794, over 3855212.65 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 10:59:07,283 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=62970.666666666664, ans=0.125
+2024-08-25 10:59:12,618 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=62970.666666666664, ans=0.2
+2024-08-25 10:59:17,019 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62970.666666666664, ans=0.1
+2024-08-25 10:59:20,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=63024.0, ans=0.125
+2024-08-25 10:59:32,442 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.044e+02 2.314e+02 2.820e+02 4.474e+02, threshold=4.628e+02, percent-clipped=0.0
+2024-08-25 10:59:35,277 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=63077.333333333336, ans=0.0
+2024-08-25 10:59:49,341 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.17 vs. limit=22.5
+2024-08-25 11:00:09,714 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=63184.0, ans=0.125
+2024-08-25 11:00:20,616 INFO [train.py:1114] (3/4) Epoch 5, batch 1900, loss[loss=0.3041, simple_loss=0.3486, pruned_loss=0.09439, ctc_loss=0.1769, over 19617.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3301, pruned_loss=0.09557, ctc_loss=0.1795, over 3860616.91 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:00:28,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff2.min_abs, batch_count=63237.333333333336, ans=0.1
+2024-08-25 11:00:46,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=63290.666666666664, ans=0.07
+2024-08-25 11:00:55,216 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.09 vs. limit=15.0
+2024-08-25 11:00:55,305 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.47 vs. limit=12.0
+2024-08-25 11:01:13,317 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=63344.0, ans=0.1
+2024-08-25 11:01:32,020 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=63397.333333333336, ans=0.125
+2024-08-25 11:01:55,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=63397.333333333336, ans=0.125
+2024-08-25 11:01:57,676 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=63397.333333333336, ans=0.05
+2024-08-25 11:02:16,861 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=9.94 vs. limit=12.0
+2024-08-25 11:02:34,471 INFO [train.py:1114] (3/4) Epoch 5, batch 1950, loss[loss=0.2657, simple_loss=0.3086, pruned_loss=0.08044, ctc_loss=0.1548, over 19587.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.3307, pruned_loss=0.09535, ctc_loss=0.1792, over 3869650.84 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 32.0
+2024-08-25 11:03:16,687 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 1.932e+02 2.130e+02 2.461e+02 4.838e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 11:03:49,101 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=63664.0, ans=0.95
+2024-08-25 11:03:49,163 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=63664.0, ans=0.125
+2024-08-25 11:04:37,797 INFO [train.py:1114] (3/4) Epoch 5, batch 2000, loss[loss=0.2495, simple_loss=0.2934, pruned_loss=0.0747, ctc_loss=0.1406, over 19657.00 frames. ], tot_loss[loss=0.2979, simple_loss=0.3315, pruned_loss=0.09609, ctc_loss=0.1805, over 3856342.36 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:04:47,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=63770.666666666664, ans=0.125
+2024-08-25 11:04:49,825 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.69 vs. limit=15.0
+2024-08-25 11:04:50,703 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=12.19 vs. limit=15.0
+2024-08-25 11:05:12,738 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.65 vs. limit=12.0
+2024-08-25 11:05:38,887 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=63930.666666666664, ans=0.125
+2024-08-25 11:05:44,349 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=63930.666666666664, ans=0.0
+2024-08-25 11:06:04,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=63984.0, ans=0.025
+2024-08-25 11:06:08,383 INFO [train.py:1114] (3/4) Epoch 5, batch 2050, loss[loss=0.2388, simple_loss=0.2814, pruned_loss=0.07183, ctc_loss=0.1314, over 19708.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3304, pruned_loss=0.09561, ctc_loss=0.1797, over 3853329.76 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-25 11:06:29,162 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.037e+02 2.272e+02 2.892e+02 6.343e+02, threshold=4.544e+02, percent-clipped=1.0
+2024-08-25 11:06:50,181 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=64144.0, ans=0.125
+2024-08-25 11:07:48,384 INFO [train.py:1114] (3/4) Epoch 5, batch 2100, loss[loss=0.2929, simple_loss=0.3252, pruned_loss=0.09436, ctc_loss=0.1794, over 19774.00 frames. ], tot_loss[loss=0.2953, simple_loss=0.3293, pruned_loss=0.09496, ctc_loss=0.1784, over 3860031.85 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:08:25,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=64410.666666666664, ans=0.0
+2024-08-25 11:08:33,630 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=7.93 vs. limit=12.0
+2024-08-25 11:08:37,501 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=64464.0, ans=0.125
+2024-08-25 11:08:49,564 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=64464.0, ans=0.2
+2024-08-25 11:09:21,094 INFO [train.py:1114] (3/4) Epoch 5, batch 2150, loss[loss=0.2858, simple_loss=0.3204, pruned_loss=0.09111, ctc_loss=0.1725, over 19858.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3279, pruned_loss=0.09439, ctc_loss=0.1772, over 3871060.79 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:09:29,130 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.23 vs. limit=22.5
+2024-08-25 11:09:34,456 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64624.0, ans=0.1
+2024-08-25 11:09:37,025 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.57 vs. limit=8.0
+2024-08-25 11:09:44,518 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.041e+02 2.279e+02 2.689e+02 3.624e+02, threshold=4.557e+02, percent-clipped=0.0
+2024-08-25 11:09:44,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=64677.333333333336, ans=0.025
+2024-08-25 11:10:09,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=64730.666666666664, ans=0.0
+2024-08-25 11:10:13,785 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=64730.666666666664, ans=0.125
+2024-08-25 11:10:17,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=64730.666666666664, ans=0.07
+2024-08-25 11:10:25,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=64784.0, ans=0.125
+2024-08-25 11:10:34,041 INFO [train.py:1114] (3/4) Epoch 5, batch 2200, loss[loss=0.286, simple_loss=0.3341, pruned_loss=0.08678, ctc_loss=0.1609, over 19586.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3283, pruned_loss=0.09442, ctc_loss=0.1772, over 3868287.77 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-25 11:10:40,539 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.79 vs. limit=12.0
+2024-08-25 11:10:43,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=64837.333333333336, ans=0.025
+2024-08-25 11:11:05,477 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=64944.0, ans=0.5
+2024-08-25 11:11:29,238 INFO [train.py:1114] (3/4) Epoch 5, batch 2250, loss[loss=0.3058, simple_loss=0.3386, pruned_loss=0.1001, ctc_loss=0.1819, over 19612.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3278, pruned_loss=0.0938, ctc_loss=0.1758, over 3867721.41 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:11:51,986 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.180e+02 2.514e+02 3.003e+02 5.559e+02, threshold=5.029e+02, percent-clipped=2.0
+2024-08-25 11:11:59,844 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=65210.666666666664, ans=0.09899494936611666
+2024-08-25 11:12:34,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65317.333333333336, ans=0.1
+2024-08-25 11:12:36,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=65317.333333333336, ans=0.2
+2024-08-25 11:12:38,227 INFO [train.py:1114] (3/4) Epoch 5, batch 2300, loss[loss=0.267, simple_loss=0.3127, pruned_loss=0.08052, ctc_loss=0.1504, over 19506.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.3271, pruned_loss=0.09383, ctc_loss=0.1759, over 3862186.62 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 32.0
+2024-08-25 11:12:56,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.41 vs. limit=15.0
+2024-08-25 11:13:16,009 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.47 vs. limit=6.0
+2024-08-25 11:13:50,080 INFO [train.py:1114] (3/4) Epoch 5, batch 2350, loss[loss=0.2938, simple_loss=0.3386, pruned_loss=0.09051, ctc_loss=0.17, over 19668.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3264, pruned_loss=0.09344, ctc_loss=0.1751, over 3864782.88 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:13:53,583 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=65637.33333333333, ans=0.2
+2024-08-25 11:13:57,099 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65637.33333333333, ans=0.1
+2024-08-25 11:14:00,358 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=65637.33333333333, ans=0.125
+2024-08-25 11:14:17,615 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=65637.33333333333, ans=0.125
+2024-08-25 11:14:26,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=65690.66666666667, ans=0.125
+2024-08-25 11:14:31,469 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.936e+02 2.303e+02 2.820e+02 4.151e+02, threshold=4.606e+02, percent-clipped=0.0
+2024-08-25 11:14:55,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-25 11:14:56,621 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-25 11:14:58,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=65797.33333333333, ans=0.025
+2024-08-25 11:15:23,115 INFO [train.py:1114] (3/4) Epoch 5, batch 2400, loss[loss=0.3049, simple_loss=0.3454, pruned_loss=0.09768, ctc_loss=0.1725, over 19361.00 frames. ], tot_loss[loss=0.295, simple_loss=0.3293, pruned_loss=0.09487, ctc_loss=0.1773, over 3859883.67 frames. ], batch size: 71, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:15:29,528 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.03 vs. limit=15.0
+2024-08-25 11:15:39,196 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=65957.33333333333, ans=0.125
+2024-08-25 11:15:55,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=65957.33333333333, ans=0.09899494936611666
+2024-08-25 11:15:59,795 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=66010.66666666667, ans=0.125
+2024-08-25 11:16:31,428 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.83 vs. limit=15.0
+2024-08-25 11:16:35,636 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=66117.33333333333, ans=0.025
+2024-08-25 11:16:53,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=66170.66666666667, ans=0.0
+2024-08-25 11:16:56,811 INFO [train.py:1114] (3/4) Epoch 5, batch 2450, loss[loss=0.3492, simple_loss=0.3598, pruned_loss=0.1228, ctc_loss=0.2327, over 14190.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3344, pruned_loss=0.0994, ctc_loss=0.1861, over 3732592.76 frames. ], batch size: 140, lr: 2.63e-02, grad_scale: 32.0
+2024-08-25 11:17:39,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66224.0, ans=0.1
+2024-08-25 11:17:43,153 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.021e+02 2.221e+02 2.524e+02 3.558e+02, threshold=4.443e+02, percent-clipped=0.0
+2024-08-25 11:18:05,879 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66330.66666666667, ans=0.1
+2024-08-25 11:19:28,342 INFO [train.py:1114] (3/4) Epoch 6, batch 0, loss[loss=0.2974, simple_loss=0.3213, pruned_loss=0.1006, ctc_loss=0.1807, over 19443.00 frames. ], tot_loss[loss=0.2974, simple_loss=0.3213, pruned_loss=0.1006, ctc_loss=0.1807, over 19443.00 frames. ], batch size: 48, lr: 2.45e-02, grad_scale: 32.0
+2024-08-25 11:19:28,342 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 11:20:29,253 INFO [train.py:1146] (3/4) Epoch 6, validation: loss=0.2388, simple_loss=0.3147, pruned_loss=0.05993, ctc_loss=0.1076, over 944034.00 frames.
+2024-08-25 11:20:29,254 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 11:20:29,695 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.91 vs. limit=15.0
+2024-08-25 11:20:32,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=66378.66666666667, ans=0.05
+2024-08-25 11:20:45,793 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=66432.0, ans=0.0
+2024-08-25 11:20:55,184 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.15 vs. limit=10.0
+2024-08-25 11:21:00,604 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=66485.33333333333, ans=0.0
+2024-08-25 11:21:24,092 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=66592.0, ans=0.0
+2024-08-25 11:21:26,426 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-25 11:21:56,940 INFO [train.py:1114] (3/4) Epoch 6, batch 50, loss[loss=0.2767, simple_loss=0.307, pruned_loss=0.09066, ctc_loss=0.1629, over 19697.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.3322, pruned_loss=0.09778, ctc_loss=0.1848, over 844717.56 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:22:10,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66645.33333333333, ans=0.1
+2024-08-25 11:22:46,349 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=66752.0, ans=0.0
+2024-08-25 11:22:50,713 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 1.959e+02 2.174e+02 2.569e+02 5.460e+02, threshold=4.347e+02, percent-clipped=1.0
+2024-08-25 11:22:52,440 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=66805.33333333333, ans=0.125
+2024-08-25 11:23:08,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=66858.66666666667, ans=0.125
+2024-08-25 11:23:12,378 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=66858.66666666667, ans=0.2
+2024-08-25 11:23:18,887 INFO [train.py:1114] (3/4) Epoch 6, batch 100, loss[loss=0.2478, simple_loss=0.2958, pruned_loss=0.07283, ctc_loss=0.1353, over 19740.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.332, pruned_loss=0.09562, ctc_loss=0.1792, over 1499352.90 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:23:25,997 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.27 vs. limit=15.0
+2024-08-25 11:23:32,955 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.91 vs. limit=15.0
+2024-08-25 11:23:40,507 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=66965.33333333333, ans=0.0
+2024-08-25 11:23:45,258 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=67018.66666666667, ans=0.125
+2024-08-25 11:23:54,760 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.72 vs. limit=15.0
+2024-08-25 11:23:56,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=67018.66666666667, ans=0.05
+2024-08-25 11:24:07,219 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=67072.0, ans=0.125
+2024-08-25 11:24:21,826 INFO [train.py:1114] (3/4) Epoch 6, batch 150, loss[loss=0.2355, simple_loss=0.2801, pruned_loss=0.06991, ctc_loss=0.1277, over 19692.00 frames. ], tot_loss[loss=0.2915, simple_loss=0.3279, pruned_loss=0.09268, ctc_loss=0.1743, over 2028735.89 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-25 11:24:24,507 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67178.66666666667, ans=0.1
+2024-08-25 11:25:04,948 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.947e+02 2.172e+02 2.650e+02 4.091e+02, threshold=4.343e+02, percent-clipped=0.0
+2024-08-25 11:25:06,444 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=67338.66666666667, ans=0.125
+2024-08-25 11:25:16,046 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=67338.66666666667, ans=0.2
+2024-08-25 11:25:16,352 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.22 vs. limit=22.5
+2024-08-25 11:25:17,555 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.87 vs. limit=15.0
+2024-08-25 11:25:35,916 INFO [train.py:1114] (3/4) Epoch 6, batch 200, loss[loss=0.3299, simple_loss=0.3523, pruned_loss=0.1122, ctc_loss=0.2081, over 18149.00 frames. ], tot_loss[loss=0.2876, simple_loss=0.3251, pruned_loss=0.09083, ctc_loss=0.171, over 2435769.68 frames. ], batch size: 85, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:25:46,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=67445.33333333333, ans=0.025
+2024-08-25 11:25:51,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=67445.33333333333, ans=0.0
+2024-08-25 11:26:34,121 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.92 vs. limit=15.0
+2024-08-25 11:26:34,160 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.62 vs. limit=15.0
+2024-08-25 11:27:11,150 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.32 vs. limit=15.0
+2024-08-25 11:27:19,510 INFO [train.py:1114] (3/4) Epoch 6, batch 250, loss[loss=0.3351, simple_loss=0.351, pruned_loss=0.1159, ctc_loss=0.2184, over 19379.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.3252, pruned_loss=0.09117, ctc_loss=0.1717, over 2756426.68 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:27:52,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67765.33333333333, ans=0.1
+2024-08-25 11:27:53,278 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=67765.33333333333, ans=0.0
+2024-08-25 11:27:55,707 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=67765.33333333333, ans=0.2
+2024-08-25 11:28:18,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=67818.66666666667, ans=0.025
+2024-08-25 11:28:36,840 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 1.900e+02 2.111e+02 2.483e+02 4.707e+02, threshold=4.222e+02, percent-clipped=1.0
+2024-08-25 11:29:06,838 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=67872.0, ans=0.125
+2024-08-25 11:29:20,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=67925.33333333333, ans=0.0
+2024-08-25 11:29:38,949 INFO [train.py:1114] (3/4) Epoch 6, batch 300, loss[loss=0.3085, simple_loss=0.346, pruned_loss=0.09899, ctc_loss=0.1826, over 19516.00 frames. ], tot_loss[loss=0.2868, simple_loss=0.3239, pruned_loss=0.09071, ctc_loss=0.1708, over 3000834.48 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-25 11:30:15,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=68032.0, ans=0.025
+2024-08-25 11:30:22,607 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=68032.0, ans=0.125
+2024-08-25 11:30:23,915 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=68032.0, ans=0.0
+2024-08-25 11:31:11,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=68192.0, ans=0.07
+2024-08-25 11:31:25,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=68192.0, ans=0.2
+2024-08-25 11:31:39,920 INFO [train.py:1114] (3/4) Epoch 6, batch 350, loss[loss=0.2357, simple_loss=0.2866, pruned_loss=0.06749, ctc_loss=0.1245, over 19773.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.3239, pruned_loss=0.09067, ctc_loss=0.1704, over 3190322.04 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:31:42,621 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=68245.33333333333, ans=0.2
+2024-08-25 11:32:02,178 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=68245.33333333333, ans=0.0
+2024-08-25 11:32:25,406 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=68352.0, ans=0.1
+2024-08-25 11:32:30,239 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.67 vs. limit=15.0
+2024-08-25 11:32:35,304 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.039e+02 2.360e+02 2.872e+02 5.301e+02, threshold=4.720e+02, percent-clipped=2.0
+2024-08-25 11:32:41,712 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=68405.33333333333, ans=0.125
+2024-08-25 11:32:48,248 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:32:49,504 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=68458.66666666667, ans=0.1
+2024-08-25 11:32:52,985 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=68458.66666666667, ans=0.0
+2024-08-25 11:33:02,558 INFO [train.py:1114] (3/4) Epoch 6, batch 400, loss[loss=0.2996, simple_loss=0.3353, pruned_loss=0.09586, ctc_loss=0.1802, over 19500.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3226, pruned_loss=0.08972, ctc_loss=0.1688, over 3341848.39 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:33:20,586 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.38 vs. limit=15.0
+2024-08-25 11:33:59,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=68672.0, ans=0.0
+2024-08-25 11:34:12,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=68778.66666666667, ans=0.025
+2024-08-25 11:34:13,410 INFO [train.py:1114] (3/4) Epoch 6, batch 450, loss[loss=0.2968, simple_loss=0.3334, pruned_loss=0.09422, ctc_loss=0.1795, over 19615.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3234, pruned_loss=0.09016, ctc_loss=0.1695, over 3450445.40 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-25 11:34:17,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=68778.66666666667, ans=0.0
+2024-08-25 11:34:30,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=68832.0, ans=0.125
+2024-08-25 11:34:39,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=68885.33333333333, ans=0.0
+2024-08-25 11:34:49,666 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.969e+02 2.191e+02 2.793e+02 4.218e+02, threshold=4.382e+02, percent-clipped=0.0
+2024-08-25 11:34:49,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=68938.66666666667, ans=0.125
+2024-08-25 11:34:55,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=68938.66666666667, ans=0.125
+2024-08-25 11:34:57,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=68938.66666666667, ans=0.125
+2024-08-25 11:34:58,261 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=10.12 vs. limit=15.0
+2024-08-25 11:35:08,446 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=68992.0, ans=0.125
+2024-08-25 11:35:10,580 INFO [train.py:1114] (3/4) Epoch 6, batch 500, loss[loss=0.2753, simple_loss=0.3272, pruned_loss=0.08207, ctc_loss=0.148, over 19677.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3221, pruned_loss=0.08933, ctc_loss=0.1685, over 3545248.39 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:35:25,878 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=69098.66666666667, ans=0.125
+2024-08-25 11:35:30,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=69098.66666666667, ans=0.1
+2024-08-25 11:35:46,127 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=69205.33333333333, ans=0.0
+2024-08-25 11:35:52,139 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=69205.33333333333, ans=0.125
+2024-08-25 11:36:00,179 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=69258.66666666667, ans=0.125
+2024-08-25 11:36:10,425 INFO [train.py:1114] (3/4) Epoch 6, batch 550, loss[loss=0.2711, simple_loss=0.3221, pruned_loss=0.08077, ctc_loss=0.1465, over 19338.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3229, pruned_loss=0.08997, ctc_loss=0.1694, over 3607603.70 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:36:14,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=69312.0, ans=0.025
+2024-08-25 11:36:20,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=69312.0, ans=0.0
+2024-08-25 11:36:29,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=69365.33333333333, ans=0.125
+2024-08-25 11:36:36,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=69418.66666666667, ans=0.125
+2024-08-25 11:36:42,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=69418.66666666667, ans=0.125
+2024-08-25 11:36:46,537 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.100e+02 2.439e+02 2.966e+02 5.259e+02, threshold=4.878e+02, percent-clipped=1.0
+2024-08-25 11:37:12,002 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=69525.33333333333, ans=0.2
+2024-08-25 11:37:27,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69578.66666666667, ans=0.1
+2024-08-25 11:37:28,399 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.19 vs. limit=15.0
+2024-08-25 11:37:28,776 INFO [train.py:1114] (3/4) Epoch 6, batch 600, loss[loss=0.3162, simple_loss=0.3479, pruned_loss=0.1049, ctc_loss=0.1871, over 19379.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.323, pruned_loss=0.08991, ctc_loss=0.1691, over 3666523.33 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-25 11:37:29,003 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69578.66666666667, ans=0.1
+2024-08-25 11:37:29,092 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=69578.66666666667, ans=0.0
+2024-08-25 11:38:41,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=69792.0, ans=0.125
+2024-08-25 11:38:58,896 INFO [train.py:1114] (3/4) Epoch 6, batch 650, loss[loss=0.2815, simple_loss=0.3189, pruned_loss=0.08963, ctc_loss=0.1623, over 19764.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3225, pruned_loss=0.08979, ctc_loss=0.1689, over 3716957.91 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:39:50,473 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 1.931e+02 2.137e+02 2.425e+02 3.711e+02, threshold=4.274e+02, percent-clipped=0.0
+2024-08-25 11:39:55,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=70005.33333333333, ans=0.125
+2024-08-25 11:40:16,192 INFO [train.py:1114] (3/4) Epoch 6, batch 700, loss[loss=0.2706, simple_loss=0.3119, pruned_loss=0.08378, ctc_loss=0.1544, over 19726.00 frames. ], tot_loss[loss=0.2852, simple_loss=0.323, pruned_loss=0.08986, ctc_loss=0.1693, over 3747934.63 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:41:16,544 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.71 vs. limit=10.0
+2024-08-25 11:41:22,935 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.83 vs. limit=15.0
+2024-08-25 11:41:33,719 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=70272.0, ans=0.2
+2024-08-25 11:42:06,461 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.55 vs. limit=12.0
+2024-08-25 11:42:12,749 INFO [train.py:1114] (3/4) Epoch 6, batch 750, loss[loss=0.2881, simple_loss=0.3276, pruned_loss=0.08983, ctc_loss=0.1723, over 19486.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3223, pruned_loss=0.08957, ctc_loss=0.1684, over 3774371.07 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-25 11:42:38,712 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.23 vs. limit=15.0
+2024-08-25 11:43:09,519 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.022e+02 2.297e+02 2.693e+02 4.652e+02, threshold=4.594e+02, percent-clipped=2.0
+2024-08-25 11:43:32,909 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=70592.0, ans=0.2
+2024-08-25 11:43:34,912 INFO [train.py:1114] (3/4) Epoch 6, batch 800, loss[loss=0.2412, simple_loss=0.2863, pruned_loss=0.07236, ctc_loss=0.1285, over 19804.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3225, pruned_loss=0.08969, ctc_loss=0.1684, over 3796209.12 frames. ], batch size: 49, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:43:43,506 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=70645.33333333333, ans=0.125
+2024-08-25 11:44:09,772 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=70698.66666666667, ans=0.125
+2024-08-25 11:44:46,463 INFO [train.py:1114] (3/4) Epoch 6, batch 850, loss[loss=0.3056, simple_loss=0.3423, pruned_loss=0.09668, ctc_loss=0.189, over 19654.00 frames. ], tot_loss[loss=0.2831, simple_loss=0.3213, pruned_loss=0.08908, ctc_loss=0.167, over 3815529.01 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-25 11:44:51,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=70912.0, ans=0.125
+2024-08-25 11:45:37,822 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71018.66666666667, ans=0.1
+2024-08-25 11:45:40,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=71018.66666666667, ans=0.2
+2024-08-25 11:45:46,254 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.893e+02 2.077e+02 2.374e+02 4.075e+02, threshold=4.154e+02, percent-clipped=0.0
+2024-08-25 11:45:54,513 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:45:56,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=71125.33333333333, ans=0.0
+2024-08-25 11:46:03,648 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.33 vs. limit=15.0
+2024-08-25 11:46:07,494 INFO [train.py:1114] (3/4) Epoch 6, batch 900, loss[loss=0.2893, simple_loss=0.3124, pruned_loss=0.09728, ctc_loss=0.1793, over 19424.00 frames. ], tot_loss[loss=0.284, simple_loss=0.3218, pruned_loss=0.08953, ctc_loss=0.1678, over 3818432.12 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 16.0
+2024-08-25 11:46:18,243 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.64 vs. limit=15.0
+2024-08-25 11:46:26,795 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=71232.0, ans=0.125
+2024-08-25 11:46:35,443 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=71232.0, ans=0.2
+2024-08-25 11:46:59,457 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.05 vs. limit=15.0
+2024-08-25 11:47:14,903 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=71392.0, ans=0.07
+2024-08-25 11:47:21,584 INFO [train.py:1114] (3/4) Epoch 6, batch 950, loss[loss=0.2862, simple_loss=0.3131, pruned_loss=0.09352, ctc_loss=0.1809, over 19512.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3227, pruned_loss=0.09014, ctc_loss=0.1693, over 3820133.53 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:47:34,452 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.81 vs. limit=15.0
+2024-08-25 11:47:56,614 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.84 vs. limit=15.0
+2024-08-25 11:48:03,082 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=71552.0, ans=0.0
+2024-08-25 11:48:09,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=71552.0, ans=0.1
+2024-08-25 11:48:23,508 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.900e+02 2.167e+02 2.553e+02 4.088e+02, threshold=4.334e+02, percent-clipped=0.0
+2024-08-25 11:48:32,590 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=71605.33333333333, ans=0.0
+2024-08-25 11:48:33,104 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71605.33333333333, ans=0.1
+2024-08-25 11:48:36,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=71658.66666666667, ans=0.125
+2024-08-25 11:49:01,764 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.46 vs. limit=12.0
+2024-08-25 11:49:03,398 INFO [train.py:1114] (3/4) Epoch 6, batch 1000, loss[loss=0.3056, simple_loss=0.3383, pruned_loss=0.09923, ctc_loss=0.1864, over 19863.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3229, pruned_loss=0.0902, ctc_loss=0.1694, over 3816607.31 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-25 11:49:10,890 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71712.0, ans=0.1
+2024-08-25 11:50:03,444 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:50:04,247 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=71872.0, ans=0.0
+2024-08-25 11:50:33,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=71925.33333333333, ans=0.2
+2024-08-25 11:50:49,397 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=71925.33333333333, ans=0.0
+2024-08-25 11:50:57,799 INFO [train.py:1114] (3/4) Epoch 6, batch 1050, loss[loss=0.3167, simple_loss=0.3496, pruned_loss=0.1014, ctc_loss=0.2026, over 19840.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.322, pruned_loss=0.08988, ctc_loss=0.169, over 3823402.83 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:51:03,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=71978.66666666667, ans=0.125
+2024-08-25 11:51:57,023 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=72138.66666666667, ans=0.04949747468305833
+2024-08-25 11:51:58,523 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.58 vs. limit=15.0
+2024-08-25 11:51:58,780 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.33 vs. limit=5.0
+2024-08-25 11:52:00,143 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.944e+02 2.201e+02 2.550e+02 3.957e+02, threshold=4.403e+02, percent-clipped=0.0
+2024-08-25 11:52:48,880 INFO [train.py:1114] (3/4) Epoch 6, batch 1100, loss[loss=0.2727, simple_loss=0.3146, pruned_loss=0.08556, ctc_loss=0.1491, over 19584.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3223, pruned_loss=0.08994, ctc_loss=0.169, over 3831345.65 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:52:53,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=72245.33333333333, ans=0.125
+2024-08-25 11:52:56,276 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=72245.33333333333, ans=0.025
+2024-08-25 11:53:00,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=72245.33333333333, ans=0.2
+2024-08-25 11:53:14,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=72298.66666666667, ans=0.0
+2024-08-25 11:53:26,615 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=72352.0, ans=0.125
+2024-08-25 11:53:30,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=72405.33333333333, ans=15.0
+2024-08-25 11:53:41,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=72458.66666666667, ans=0.0
+2024-08-25 11:53:56,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=72458.66666666667, ans=0.125
+2024-08-25 11:53:58,643 INFO [train.py:1114] (3/4) Epoch 6, batch 1150, loss[loss=0.2977, simple_loss=0.3283, pruned_loss=0.09665, ctc_loss=0.1843, over 19595.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3223, pruned_loss=0.0901, ctc_loss=0.1695, over 3830517.04 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-25 11:54:04,249 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.38 vs. limit=15.0
+2024-08-25 11:54:43,449 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 1.952e+02 2.194e+02 2.505e+02 4.680e+02, threshold=4.387e+02, percent-clipped=1.0
+2024-08-25 11:54:54,236 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.14 vs. limit=15.0
+2024-08-25 11:55:02,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=72725.33333333333, ans=0.125
+2024-08-25 11:55:10,978 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=72778.66666666667, ans=0.1
+2024-08-25 11:55:11,874 INFO [train.py:1114] (3/4) Epoch 6, batch 1200, loss[loss=0.3051, simple_loss=0.3461, pruned_loss=0.09474, ctc_loss=0.1864, over 19839.00 frames. ], tot_loss[loss=0.2862, simple_loss=0.3234, pruned_loss=0.09044, ctc_loss=0.1703, over 3825177.28 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:55:27,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=72832.0, ans=0.025
+2024-08-25 11:55:48,753 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=72885.33333333333, ans=0.2
+2024-08-25 11:55:48,757 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72885.33333333333, ans=0.1
+2024-08-25 11:56:12,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=72938.66666666667, ans=0.2
+2024-08-25 11:56:15,099 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 11:56:24,110 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=72938.66666666667, ans=0.125
+2024-08-25 11:56:55,077 INFO [train.py:1114] (3/4) Epoch 6, batch 1250, loss[loss=0.2969, simple_loss=0.3433, pruned_loss=0.0909, ctc_loss=0.1718, over 19508.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3229, pruned_loss=0.08967, ctc_loss=0.1689, over 3843224.54 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:57:04,067 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.74 vs. limit=15.0
+2024-08-25 11:57:12,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=73098.66666666667, ans=0.0
+2024-08-25 11:57:17,539 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.60 vs. limit=15.0
+2024-08-25 11:58:13,322 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.073e+02 2.305e+02 2.660e+02 4.224e+02, threshold=4.609e+02, percent-clipped=0.0
+2024-08-25 11:58:15,879 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=73205.33333333333, ans=0.09899494936611666
+2024-08-25 11:58:32,417 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=73258.66666666667, ans=0.125
+2024-08-25 11:58:46,693 INFO [train.py:1114] (3/4) Epoch 6, batch 1300, loss[loss=0.3047, simple_loss=0.3318, pruned_loss=0.101, ctc_loss=0.1888, over 18852.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3221, pruned_loss=0.08916, ctc_loss=0.1679, over 3846827.06 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 11:59:01,299 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.04 vs. limit=15.0
+2024-08-25 11:59:03,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=73312.0, ans=0.125
+2024-08-25 11:59:07,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=73312.0, ans=0.0
+2024-08-25 11:59:35,552 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=73365.33333333333, ans=0.2
+2024-08-25 11:59:35,553 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=73365.33333333333, ans=0.125
+2024-08-25 11:59:35,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=73365.33333333333, ans=0.125
+2024-08-25 12:00:19,973 INFO [train.py:1114] (3/4) Epoch 6, batch 1350, loss[loss=0.2734, simple_loss=0.3202, pruned_loss=0.08184, ctc_loss=0.1573, over 19779.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.3219, pruned_loss=0.08899, ctc_loss=0.1672, over 3858002.22 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-25 12:00:27,318 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:00:34,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=73578.66666666667, ans=0.125
+2024-08-25 12:00:43,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=73632.0, ans=0.0
+2024-08-25 12:00:53,389 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=73685.33333333333, ans=0.1
+2024-08-25 12:01:04,163 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=73738.66666666667, ans=0.2
+2024-08-25 12:01:04,998 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.025e+02 2.295e+02 2.579e+02 4.133e+02, threshold=4.590e+02, percent-clipped=0.0
+2024-08-25 12:01:12,883 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.82 vs. limit=15.0
+2024-08-25 12:01:30,923 INFO [train.py:1114] (3/4) Epoch 6, batch 1400, loss[loss=0.2435, simple_loss=0.2828, pruned_loss=0.07507, ctc_loss=0.1353, over 19687.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3222, pruned_loss=0.08922, ctc_loss=0.1677, over 3864324.59 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:01:38,421 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.79 vs. limit=22.5
+2024-08-25 12:01:51,404 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.59 vs. limit=15.0
+2024-08-25 12:01:54,294 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:02:32,343 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.61 vs. limit=15.0
+2024-08-25 12:02:52,965 INFO [train.py:1114] (3/4) Epoch 6, batch 1450, loss[loss=0.2972, simple_loss=0.3448, pruned_loss=0.09154, ctc_loss=0.1661, over 19683.00 frames. ], tot_loss[loss=0.285, simple_loss=0.323, pruned_loss=0.08974, ctc_loss=0.1689, over 3862527.25 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:03:02,732 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=74112.0, ans=0.0
+2024-08-25 12:03:40,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=74218.66666666667, ans=0.125
+2024-08-25 12:03:53,270 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 1.998e+02 2.330e+02 2.811e+02 4.670e+02, threshold=4.661e+02, percent-clipped=1.0
+2024-08-25 12:03:59,125 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.23 vs. limit=15.0
+2024-08-25 12:04:24,587 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=74378.66666666667, ans=0.125
+2024-08-25 12:04:25,540 INFO [train.py:1114] (3/4) Epoch 6, batch 1500, loss[loss=0.3065, simple_loss=0.3405, pruned_loss=0.09953, ctc_loss=0.1838, over 19602.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.3233, pruned_loss=0.09014, ctc_loss=0.1696, over 3861644.56 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 32.0
+2024-08-25 12:04:35,317 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=74378.66666666667, ans=0.125
+2024-08-25 12:04:39,753 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74432.0, ans=0.1
+2024-08-25 12:04:50,656 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.62 vs. limit=10.0
+2024-08-25 12:05:37,425 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=74538.66666666667, ans=0.2
+2024-08-25 12:05:44,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=74538.66666666667, ans=0.2
+2024-08-25 12:05:59,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=74592.0, ans=0.125
+2024-08-25 12:06:00,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=74645.33333333333, ans=0.125
+2024-08-25 12:06:01,369 INFO [train.py:1114] (3/4) Epoch 6, batch 1550, loss[loss=0.2748, simple_loss=0.3192, pruned_loss=0.08372, ctc_loss=0.1576, over 19622.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3236, pruned_loss=0.09057, ctc_loss=0.1701, over 3846569.08 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:06:28,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=74752.0, ans=0.125
+2024-08-25 12:06:37,897 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.061e+02 2.512e+02 3.027e+02 4.789e+02, threshold=5.024e+02, percent-clipped=1.0
+2024-08-25 12:06:48,925 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74858.66666666667, ans=0.1
+2024-08-25 12:07:01,751 INFO [train.py:1114] (3/4) Epoch 6, batch 1600, loss[loss=0.3129, simple_loss=0.3435, pruned_loss=0.1023, ctc_loss=0.1943, over 19836.00 frames. ], tot_loss[loss=0.2862, simple_loss=0.3232, pruned_loss=0.09061, ctc_loss=0.1701, over 3836820.87 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:07:08,027 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.24 vs. limit=15.0
+2024-08-25 12:07:09,924 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=74912.0, ans=0.2
+2024-08-25 12:07:29,818 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=75018.66666666667, ans=0.0
+2024-08-25 12:07:32,673 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.26 vs. limit=15.0
+2024-08-25 12:07:32,698 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.92 vs. limit=15.0
+2024-08-25 12:08:00,978 INFO [train.py:1114] (3/4) Epoch 6, batch 1650, loss[loss=0.2827, simple_loss=0.3297, pruned_loss=0.08627, ctc_loss=0.1579, over 19675.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3228, pruned_loss=0.09016, ctc_loss=0.1694, over 3833812.59 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-25 12:08:03,731 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=75178.66666666667, ans=0.125
+2024-08-25 12:08:05,129 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=75178.66666666667, ans=0.5
+2024-08-25 12:08:21,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75232.0, ans=0.1
+2024-08-25 12:08:23,551 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.07 vs. limit=6.0
+2024-08-25 12:08:37,753 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.893e+02 2.381e+02 2.784e+02 7.281e+02, threshold=4.762e+02, percent-clipped=1.0
+2024-08-25 12:08:45,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=75338.66666666667, ans=0.09899494936611666
+2024-08-25 12:09:00,111 INFO [train.py:1114] (3/4) Epoch 6, batch 1700, loss[loss=0.251, simple_loss=0.2899, pruned_loss=0.07601, ctc_loss=0.1504, over 19682.00 frames. ], tot_loss[loss=0.2833, simple_loss=0.3213, pruned_loss=0.08919, ctc_loss=0.1674, over 3848443.30 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-25 12:09:00,348 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=75445.33333333333, ans=0.05
+2024-08-25 12:09:10,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=75498.66666666667, ans=0.04949747468305833
+2024-08-25 12:09:13,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=75498.66666666667, ans=0.125
+2024-08-25 12:09:19,754 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.62 vs. limit=15.0
+2024-08-25 12:09:22,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=75552.0, ans=0.125
+2024-08-25 12:09:28,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=75552.0, ans=0.125
+2024-08-25 12:09:44,228 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.62 vs. limit=15.0
+2024-08-25 12:09:50,569 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=75658.66666666667, ans=0.0
+2024-08-25 12:09:54,134 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=75658.66666666667, ans=0.125
+2024-08-25 12:09:55,903 INFO [train.py:1114] (3/4) Epoch 6, batch 1750, loss[loss=0.2429, simple_loss=0.2811, pruned_loss=0.07523, ctc_loss=0.1356, over 19670.00 frames. ], tot_loss[loss=0.281, simple_loss=0.3199, pruned_loss=0.08801, ctc_loss=0.1652, over 3852158.83 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 16.0
+2024-08-25 12:09:57,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=75712.0, ans=0.125
+2024-08-25 12:10:03,234 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.23 vs. limit=15.0
+2024-08-25 12:10:14,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=75765.33333333333, ans=0.2
+2024-08-25 12:10:16,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=75818.66666666667, ans=0.125
+2024-08-25 12:10:20,366 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.34 vs. limit=15.0
+2024-08-25 12:10:22,020 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=75818.66666666667, ans=0.1
+2024-08-25 12:10:24,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=75818.66666666667, ans=0.0
+2024-08-25 12:10:26,452 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=75818.66666666667, ans=0.125
+2024-08-25 12:10:29,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=75872.0, ans=22.5
+2024-08-25 12:10:32,753 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.890e+02 2.130e+02 2.587e+02 4.262e+02, threshold=4.260e+02, percent-clipped=0.0
+2024-08-25 12:10:33,318 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.59 vs. limit=15.0
+2024-08-25 12:10:46,002 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=75925.33333333333, ans=0.0
+2024-08-25 12:10:50,352 INFO [train.py:1114] (3/4) Epoch 6, batch 1800, loss[loss=0.292, simple_loss=0.3353, pruned_loss=0.08939, ctc_loss=0.1747, over 19619.00 frames. ], tot_loss[loss=0.2805, simple_loss=0.3199, pruned_loss=0.08767, ctc_loss=0.1646, over 3853493.58 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 8.0
+2024-08-25 12:10:54,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=75978.66666666667, ans=0.125
+2024-08-25 12:10:58,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=75978.66666666667, ans=0.125
+2024-08-25 12:11:22,513 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=76138.66666666667, ans=0.0
+2024-08-25 12:11:30,199 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=76138.66666666667, ans=0.0
+2024-08-25 12:11:44,385 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=14.16 vs. limit=15.0
+2024-08-25 12:11:44,877 INFO [train.py:1114] (3/4) Epoch 6, batch 1850, loss[loss=0.2916, simple_loss=0.3352, pruned_loss=0.08976, ctc_loss=0.1712, over 19592.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3193, pruned_loss=0.08701, ctc_loss=0.1634, over 3857006.66 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:11:45,503 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.76 vs. limit=15.0
+2024-08-25 12:11:48,243 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=76245.33333333333, ans=0.1
+2024-08-25 12:11:48,436 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.20 vs. limit=15.0
+2024-08-25 12:11:52,674 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=76245.33333333333, ans=0.0
+2024-08-25 12:12:01,525 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=76298.66666666667, ans=0.125
+2024-08-25 12:12:14,658 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=76352.0, ans=0.09899494936611666
+2024-08-25 12:12:20,210 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=76405.33333333333, ans=0.0
+2024-08-25 12:12:22,237 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.994e+02 2.285e+02 2.712e+02 4.413e+02, threshold=4.569e+02, percent-clipped=2.0
+2024-08-25 12:12:43,375 INFO [train.py:1114] (3/4) Epoch 6, batch 1900, loss[loss=0.2779, simple_loss=0.3267, pruned_loss=0.08245, ctc_loss=0.1606, over 19647.00 frames. ], tot_loss[loss=0.2795, simple_loss=0.3196, pruned_loss=0.08705, ctc_loss=0.1635, over 3861518.57 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:12:48,835 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=76512.0, ans=0.125
+2024-08-25 12:12:51,019 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=76512.0, ans=0.0
+2024-08-25 12:12:56,943 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.61 vs. limit=15.0
+2024-08-25 12:13:09,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=76618.66666666667, ans=0.125
+2024-08-25 12:13:15,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=76672.0, ans=0.0
+2024-08-25 12:13:17,481 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.20 vs. limit=15.0
+2024-08-25 12:13:20,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=76672.0, ans=0.2
+2024-08-25 12:13:20,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=76672.0, ans=0.125
+2024-08-25 12:13:22,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=76672.0, ans=0.2
+2024-08-25 12:13:27,355 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=76672.0, ans=0.0
+2024-08-25 12:13:32,246 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.35 vs. limit=10.0
+2024-08-25 12:13:36,853 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.63 vs. limit=22.5
+2024-08-25 12:13:40,518 INFO [train.py:1114] (3/4) Epoch 6, batch 1950, loss[loss=0.2533, simple_loss=0.3036, pruned_loss=0.07437, ctc_loss=0.1357, over 19569.00 frames. ], tot_loss[loss=0.2811, simple_loss=0.3209, pruned_loss=0.08768, ctc_loss=0.1648, over 3870912.95 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 8.0
+2024-08-25 12:13:44,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.97 vs. limit=15.0
+2024-08-25 12:13:45,669 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.43 vs. limit=15.0
+2024-08-25 12:14:11,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=76885.33333333333, ans=0.125
+2024-08-25 12:14:12,592 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.89 vs. limit=15.0
+2024-08-25 12:14:18,631 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 1.890e+02 2.137e+02 2.349e+02 3.743e+02, threshold=4.275e+02, percent-clipped=0.0
+2024-08-25 12:14:35,989 INFO [train.py:1114] (3/4) Epoch 6, batch 2000, loss[loss=0.2555, simple_loss=0.293, pruned_loss=0.07994, ctc_loss=0.1452, over 19647.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3211, pruned_loss=0.08789, ctc_loss=0.1653, over 3855858.15 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:14:56,098 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=77098.66666666667, ans=0.07
+2024-08-25 12:14:59,733 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.59 vs. limit=12.0
+2024-08-25 12:15:15,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=77205.33333333333, ans=0.125
+2024-08-25 12:15:28,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=77258.66666666667, ans=0.07
+2024-08-25 12:15:28,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=77258.66666666667, ans=0.125
+2024-08-25 12:15:30,080 INFO [train.py:1114] (3/4) Epoch 6, batch 2050, loss[loss=0.2429, simple_loss=0.2853, pruned_loss=0.07247, ctc_loss=0.1387, over 19719.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3197, pruned_loss=0.08729, ctc_loss=0.1644, over 3852479.07 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:15:59,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=77418.66666666667, ans=0.1
+2024-08-25 12:16:14,693 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.955e+02 2.380e+02 2.986e+02 1.021e+03, threshold=4.760e+02, percent-clipped=7.0
+2024-08-25 12:16:18,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=77472.0, ans=0.0
+2024-08-25 12:16:21,514 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.08 vs. limit=15.0
+2024-08-25 12:16:29,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=77525.33333333333, ans=0.0
+2024-08-25 12:16:30,095 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=77525.33333333333, ans=0.2
+2024-08-25 12:16:32,226 INFO [train.py:1114] (3/4) Epoch 6, batch 2100, loss[loss=0.2365, simple_loss=0.2993, pruned_loss=0.06184, ctc_loss=0.1251, over 19757.00 frames. ], tot_loss[loss=0.2794, simple_loss=0.3193, pruned_loss=0.087, ctc_loss=0.1638, over 3859139.25 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 16.0
+2024-08-25 12:16:35,696 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=77578.66666666667, ans=0.0
+2024-08-25 12:16:45,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=77632.0, ans=0.07
+2024-08-25 12:16:49,993 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=77632.0, ans=0.0
+2024-08-25 12:17:00,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=77685.33333333333, ans=0.0
+2024-08-25 12:17:20,749 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=77792.0, ans=0.0
+2024-08-25 12:17:28,071 INFO [train.py:1114] (3/4) Epoch 6, batch 2150, loss[loss=0.2684, simple_loss=0.3135, pruned_loss=0.08004, ctc_loss=0.1583, over 19868.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3179, pruned_loss=0.08602, ctc_loss=0.1617, over 3870860.64 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:17:30,106 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=77845.33333333333, ans=0.04949747468305833
+2024-08-25 12:17:31,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=77845.33333333333, ans=0.025
+2024-08-25 12:17:43,188 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=77898.66666666667, ans=0.07
+2024-08-25 12:18:05,593 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=77952.0, ans=0.1
+2024-08-25 12:18:12,045 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=77952.0, ans=0.2
+2024-08-25 12:18:15,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=78005.33333333333, ans=0.5
+2024-08-25 12:18:19,488 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.877e+02 2.258e+02 2.799e+02 6.726e+02, threshold=4.515e+02, percent-clipped=2.0
+2024-08-25 12:18:28,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=78058.66666666667, ans=0.0
+2024-08-25 12:19:06,982 INFO [train.py:1114] (3/4) Epoch 6, batch 2200, loss[loss=0.3126, simple_loss=0.3478, pruned_loss=0.1012, ctc_loss=0.1874, over 19579.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.318, pruned_loss=0.08612, ctc_loss=0.1617, over 3868103.31 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:19:13,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=78112.0, ans=0.1
+2024-08-25 12:19:21,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=78165.33333333333, ans=0.125
+2024-08-25 12:19:23,496 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=78165.33333333333, ans=0.2
+2024-08-25 12:19:24,519 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=78165.33333333333, ans=0.125
+2024-08-25 12:19:27,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=78218.66666666667, ans=0.125
+2024-08-25 12:20:02,381 INFO [train.py:1114] (3/4) Epoch 6, batch 2250, loss[loss=0.2691, simple_loss=0.3259, pruned_loss=0.07726, ctc_loss=0.1444, over 19627.00 frames. ], tot_loss[loss=0.2776, simple_loss=0.3182, pruned_loss=0.08617, ctc_loss=0.1618, over 3868286.74 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 16.0
+2024-08-25 12:20:16,923 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.62 vs. limit=15.0
+2024-08-25 12:20:38,624 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.005e+02 2.234e+02 2.581e+02 4.325e+02, threshold=4.468e+02, percent-clipped=0.0
+2024-08-25 12:20:47,910 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=78592.0, ans=0.125
+2024-08-25 12:20:56,306 INFO [train.py:1114] (3/4) Epoch 6, batch 2300, loss[loss=0.2494, simple_loss=0.2939, pruned_loss=0.07436, ctc_loss=0.1405, over 19502.00 frames. ], tot_loss[loss=0.2772, simple_loss=0.3174, pruned_loss=0.08615, ctc_loss=0.1617, over 3861804.80 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:21:11,059 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.76 vs. limit=22.5
+2024-08-25 12:21:17,452 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=78698.66666666667, ans=0.125
+2024-08-25 12:21:24,263 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=78752.0, ans=0.05
+2024-08-25 12:21:28,735 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=78752.0, ans=0.1
+2024-08-25 12:21:41,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=78858.66666666667, ans=0.125
+2024-08-25 12:21:49,801 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=78858.66666666667, ans=0.0
+2024-08-25 12:21:52,656 INFO [train.py:1114] (3/4) Epoch 6, batch 2350, loss[loss=0.2916, simple_loss=0.3362, pruned_loss=0.09028, ctc_loss=0.166, over 19652.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3175, pruned_loss=0.08623, ctc_loss=0.1615, over 3863942.86 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 16.0
+2024-08-25 12:21:53,868 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=78912.0, ans=0.125
+2024-08-25 12:21:54,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=78912.0, ans=0.125
+2024-08-25 12:22:17,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=79018.66666666667, ans=0.025
+2024-08-25 12:22:24,971 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=79072.0, ans=0.125
+2024-08-25 12:22:27,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=79072.0, ans=0.125
+2024-08-25 12:22:30,291 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 2.097e+02 2.553e+02 3.084e+02 6.792e+02, threshold=5.106e+02, percent-clipped=2.0
+2024-08-25 12:22:47,956 INFO [train.py:1114] (3/4) Epoch 6, batch 2400, loss[loss=0.2689, simple_loss=0.3162, pruned_loss=0.08023, ctc_loss=0.1527, over 19333.00 frames. ], tot_loss[loss=0.2794, simple_loss=0.3197, pruned_loss=0.08697, ctc_loss=0.163, over 3859078.99 frames. ], batch size: 71, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:23:01,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=79232.0, ans=0.125
+2024-08-25 12:23:16,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=79285.33333333333, ans=0.125
+2024-08-25 12:23:45,695 INFO [train.py:1114] (3/4) Epoch 6, batch 2450, loss[loss=0.3666, simple_loss=0.3637, pruned_loss=0.1345, ctc_loss=0.2517, over 13517.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3245, pruned_loss=0.09123, ctc_loss=0.171, over 3733220.80 frames. ], batch size: 141, lr: 2.29e-02, grad_scale: 32.0
+2024-08-25 12:23:48,667 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.20 vs. limit=22.5
+2024-08-25 12:24:35,623 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=79498.66666666667, ans=0.125
+2024-08-25 12:27:49,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=79605.33333333333, ans=0.025
+2024-08-25 12:28:01,642 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.056e+02 2.291e+02 2.526e+02 5.572e+02, threshold=4.582e+02, percent-clipped=1.0
+2024-08-25 12:29:27,604 INFO [train.py:1114] (3/4) Epoch 7, batch 0, loss[loss=0.2943, simple_loss=0.326, pruned_loss=0.09684, ctc_loss=0.1723, over 19820.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.326, pruned_loss=0.09684, ctc_loss=0.1723, over 19820.00 frames. ], batch size: 49, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:29:27,605 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 12:29:44,280 INFO [train.py:1146] (3/4) Epoch 7, validation: loss=0.2269, simple_loss=0.307, pruned_loss=0.05393, ctc_loss=0.0975, over 944034.00 frames.
+2024-08-25 12:29:44,281 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 12:29:47,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=79658.66666666667, ans=0.125
+2024-08-25 12:30:16,003 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.09 vs. limit=22.5
+2024-08-25 12:30:20,738 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.00 vs. limit=10.0
+2024-08-25 12:31:23,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.58 vs. limit=22.5
+2024-08-25 12:31:28,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=79818.66666666667, ans=0.07
+2024-08-25 12:31:35,576 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=79818.66666666667, ans=0.2
+2024-08-25 12:33:04,668 INFO [train.py:1114] (3/4) Epoch 7, batch 50, loss[loss=0.2438, simple_loss=0.2919, pruned_loss=0.07049, ctc_loss=0.1366, over 19712.00 frames. ], tot_loss[loss=0.2837, simple_loss=0.3225, pruned_loss=0.08895, ctc_loss=0.1675, over 843690.50 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-25 12:33:14,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=79925.33333333333, ans=0.2
+2024-08-25 12:33:38,330 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:33:57,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=80085.33333333333, ans=0.0
+2024-08-25 12:34:17,264 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.999e+02 2.246e+02 2.808e+02 5.514e+02, threshold=4.492e+02, percent-clipped=3.0
+2024-08-25 12:34:24,293 INFO [train.py:1114] (3/4) Epoch 7, batch 100, loss[loss=0.2334, simple_loss=0.2852, pruned_loss=0.06524, ctc_loss=0.1277, over 19716.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3225, pruned_loss=0.08838, ctc_loss=0.1666, over 1498479.81 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:34:40,075 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=80245.33333333333, ans=0.0
+2024-08-25 12:34:54,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=80298.66666666667, ans=0.09899494936611666
+2024-08-25 12:34:58,910 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=80352.0, ans=0.1
+2024-08-25 12:35:08,444 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=80352.0, ans=0.125
+2024-08-25 12:35:10,765 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=80405.33333333333, ans=0.025
+2024-08-25 12:35:23,289 INFO [train.py:1114] (3/4) Epoch 7, batch 150, loss[loss=0.2098, simple_loss=0.2712, pruned_loss=0.0533, ctc_loss=0.1045, over 19754.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3185, pruned_loss=0.08548, ctc_loss=0.1608, over 2028113.79 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:35:26,793 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=80458.66666666667, ans=0.09899494936611666
+2024-08-25 12:35:30,478 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.60 vs. limit=22.5
+2024-08-25 12:35:36,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=80512.0, ans=0.125
+2024-08-25 12:35:46,560 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=80512.0, ans=0.125
+2024-08-25 12:36:01,850 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.52 vs. limit=15.0
+2024-08-25 12:36:03,911 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=80618.66666666667, ans=0.125
+2024-08-25 12:36:04,132 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.94 vs. limit=12.0
+2024-08-25 12:36:09,136 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.27 vs. limit=12.0
+2024-08-25 12:36:12,078 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=80618.66666666667, ans=0.2
+2024-08-25 12:36:15,600 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=80672.0, ans=0.125
+2024-08-25 12:36:18,824 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.959e+02 2.217e+02 2.953e+02 5.735e+02, threshold=4.434e+02, percent-clipped=2.0
+2024-08-25 12:36:26,007 INFO [train.py:1114] (3/4) Epoch 7, batch 200, loss[loss=0.3082, simple_loss=0.3388, pruned_loss=0.0998, ctc_loss=0.1948, over 18310.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3161, pruned_loss=0.08428, ctc_loss=0.1591, over 2434946.64 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:36:26,208 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:36:47,476 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=80832.0, ans=0.0
+2024-08-25 12:36:49,752 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=80832.0, ans=0.2
+2024-08-25 12:36:57,800 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.08 vs. limit=15.0
+2024-08-25 12:36:59,755 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=80885.33333333333, ans=0.125
+2024-08-25 12:37:01,194 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=12.0
+2024-08-25 12:37:22,863 INFO [train.py:1114] (3/4) Epoch 7, batch 250, loss[loss=0.3218, simple_loss=0.3508, pruned_loss=0.1066, ctc_loss=0.1987, over 19305.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3168, pruned_loss=0.08493, ctc_loss=0.1604, over 2754437.13 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-25 12:37:40,298 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=81045.33333333333, ans=0.125
+2024-08-25 12:38:01,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=81152.0, ans=0.125
+2024-08-25 12:38:04,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=81152.0, ans=0.125
+2024-08-25 12:38:16,696 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.901e+02 2.294e+02 2.833e+02 4.254e+02, threshold=4.587e+02, percent-clipped=0.0
+2024-08-25 12:38:17,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=81205.33333333333, ans=0.0
+2024-08-25 12:38:23,061 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.24 vs. limit=6.0
+2024-08-25 12:38:23,347 INFO [train.py:1114] (3/4) Epoch 7, batch 300, loss[loss=0.2797, simple_loss=0.3235, pruned_loss=0.08738, ctc_loss=0.1526, over 19536.00 frames. ], tot_loss[loss=0.274, simple_loss=0.316, pruned_loss=0.08423, ctc_loss=0.1587, over 3000366.08 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:38:30,587 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=81258.66666666667, ans=0.1
+2024-08-25 12:38:34,192 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=81312.0, ans=10.0
+2024-08-25 12:38:38,075 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.98 vs. limit=15.0
+2024-08-25 12:38:39,084 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=81312.0, ans=0.0
+2024-08-25 12:38:41,626 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=81312.0, ans=0.125
+2024-08-25 12:38:56,023 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=81365.33333333333, ans=0.125
+2024-08-25 12:38:57,379 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.83 vs. limit=12.0
+2024-08-25 12:39:01,915 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.63 vs. limit=15.0
+2024-08-25 12:39:52,666 INFO [train.py:1114] (3/4) Epoch 7, batch 350, loss[loss=0.2494, simple_loss=0.2937, pruned_loss=0.07346, ctc_loss=0.1451, over 19739.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3164, pruned_loss=0.08428, ctc_loss=0.1592, over 3189719.98 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:39:52,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=81525.33333333333, ans=0.125
+2024-08-25 12:39:57,713 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=81525.33333333333, ans=0.125
+2024-08-25 12:40:05,015 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.46 vs. limit=6.0
+2024-08-25 12:40:41,309 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.35 vs. limit=15.0
+2024-08-25 12:40:43,970 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.980e+02 2.268e+02 2.810e+02 5.782e+02, threshold=4.535e+02, percent-clipped=1.0
+2024-08-25 12:40:49,288 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.62 vs. limit=15.0
+2024-08-25 12:40:50,663 INFO [train.py:1114] (3/4) Epoch 7, batch 400, loss[loss=0.2756, simple_loss=0.325, pruned_loss=0.08155, ctc_loss=0.1577, over 19482.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3158, pruned_loss=0.08369, ctc_loss=0.1582, over 3341175.01 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-25 12:40:57,899 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=81792.0, ans=0.025
+2024-08-25 12:40:59,208 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.95 vs. limit=6.0
+2024-08-25 12:41:08,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=81845.33333333333, ans=0.05
+2024-08-25 12:41:21,742 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=81898.66666666667, ans=0.125
+2024-08-25 12:41:24,114 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.38 vs. limit=15.0
+2024-08-25 12:41:30,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=81952.0, ans=0.125
+2024-08-25 12:41:41,974 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=82005.33333333333, ans=0.0
+2024-08-25 12:41:49,020 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82005.33333333333, ans=0.1
+2024-08-25 12:41:52,335 INFO [train.py:1114] (3/4) Epoch 7, batch 450, loss[loss=0.2853, simple_loss=0.3278, pruned_loss=0.08781, ctc_loss=0.1681, over 19613.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.3163, pruned_loss=0.08401, ctc_loss=0.1586, over 3449286.53 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:42:02,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=82058.66666666667, ans=0.125
+2024-08-25 12:42:22,496 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:42:37,486 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82272.0, ans=0.1
+2024-08-25 12:42:40,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=82272.0, ans=0.125
+2024-08-25 12:42:43,144 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 1.947e+02 2.448e+02 2.960e+02 4.262e+02, threshold=4.896e+02, percent-clipped=0.0
+2024-08-25 12:42:46,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82272.0, ans=0.1
+2024-08-25 12:42:52,051 INFO [train.py:1114] (3/4) Epoch 7, batch 500, loss[loss=0.2713, simple_loss=0.3231, pruned_loss=0.07985, ctc_loss=0.1497, over 19659.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3147, pruned_loss=0.0832, ctc_loss=0.1569, over 3543684.98 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:42:55,300 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=82325.33333333333, ans=0.125
+2024-08-25 12:43:00,898 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=82325.33333333333, ans=0.125
+2024-08-25 12:43:14,942 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=82378.66666666667, ans=0.125
+2024-08-25 12:43:32,564 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82485.33333333333, ans=0.1
+2024-08-25 12:43:38,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=82485.33333333333, ans=0.5
+2024-08-25 12:43:51,836 INFO [train.py:1114] (3/4) Epoch 7, batch 550, loss[loss=0.2888, simple_loss=0.3248, pruned_loss=0.09182, ctc_loss=0.1729, over 19276.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3149, pruned_loss=0.08332, ctc_loss=0.157, over 3605939.25 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 32.0
+2024-08-25 12:43:53,298 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82592.0, ans=0.1
+2024-08-25 12:44:32,326 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=82752.0, ans=0.0
+2024-08-25 12:44:33,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=82752.0, ans=0.125
+2024-08-25 12:44:43,265 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82805.33333333333, ans=0.1
+2024-08-25 12:44:44,975 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.000e+02 2.364e+02 2.910e+02 5.356e+02, threshold=4.728e+02, percent-clipped=1.0
+2024-08-25 12:44:52,598 INFO [train.py:1114] (3/4) Epoch 7, batch 600, loss[loss=0.3211, simple_loss=0.3527, pruned_loss=0.1049, ctc_loss=0.1993, over 19414.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3141, pruned_loss=0.08248, ctc_loss=0.1558, over 3664664.55 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-25 12:45:04,379 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.31 vs. limit=22.5
+2024-08-25 12:45:24,655 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=82965.33333333333, ans=0.125
+2024-08-25 12:45:36,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=83018.66666666667, ans=0.125
+2024-08-25 12:45:51,128 INFO [train.py:1114] (3/4) Epoch 7, batch 650, loss[loss=0.2733, simple_loss=0.3153, pruned_loss=0.08426, ctc_loss=0.1567, over 19781.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3133, pruned_loss=0.08212, ctc_loss=0.1552, over 3715434.76 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:46:01,989 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.50 vs. limit=15.0
+2024-08-25 12:46:12,694 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=83178.66666666667, ans=0.125
+2024-08-25 12:46:47,174 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.844e+02 2.004e+02 2.285e+02 4.065e+02, threshold=4.009e+02, percent-clipped=0.0
+2024-08-25 12:46:52,102 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.26 vs. limit=15.0
+2024-08-25 12:46:52,899 INFO [train.py:1114] (3/4) Epoch 7, batch 700, loss[loss=0.2778, simple_loss=0.322, pruned_loss=0.08479, ctc_loss=0.1598, over 19722.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3143, pruned_loss=0.08265, ctc_loss=0.1558, over 3747583.31 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:47:21,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=83498.66666666667, ans=0.0
+2024-08-25 12:47:33,196 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.81 vs. limit=10.0
+2024-08-25 12:47:49,560 INFO [train.py:1114] (3/4) Epoch 7, batch 750, loss[loss=0.2791, simple_loss=0.3164, pruned_loss=0.0877, ctc_loss=0.1661, over 19486.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3139, pruned_loss=0.08271, ctc_loss=0.1557, over 3774145.79 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-25 12:48:05,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=83712.0, ans=0.125
+2024-08-25 12:48:06,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=83712.0, ans=0.125
+2024-08-25 12:48:06,460 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=83712.0, ans=6.0
+2024-08-25 12:48:10,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=83712.0, ans=0.125
+2024-08-25 12:48:11,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=83712.0, ans=0.125
+2024-08-25 12:48:42,065 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.33 vs. limit=22.5
+2024-08-25 12:48:45,001 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 1.885e+02 2.166e+02 2.690e+02 4.534e+02, threshold=4.331e+02, percent-clipped=3.0
+2024-08-25 12:48:46,368 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:48:47,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=83872.0, ans=0.2
+2024-08-25 12:48:50,700 INFO [train.py:1114] (3/4) Epoch 7, batch 800, loss[loss=0.2643, simple_loss=0.3072, pruned_loss=0.08022, ctc_loss=0.1524, over 19402.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3137, pruned_loss=0.08243, ctc_loss=0.1551, over 3794791.59 frames. ], batch size: 48, lr: 2.10e-02, grad_scale: 32.0
+2024-08-25 12:48:51,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=83925.33333333333, ans=0.0
+2024-08-25 12:48:54,197 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 12:49:00,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=83978.66666666667, ans=0.0
+2024-08-25 12:49:03,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=83978.66666666667, ans=0.2
+2024-08-25 12:49:10,238 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=83978.66666666667, ans=0.125
+2024-08-25 12:49:11,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=83978.66666666667, ans=0.1
+2024-08-25 12:49:24,276 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=84085.33333333333, ans=0.0
+2024-08-25 12:49:25,386 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-25 12:49:27,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-25 12:49:27,609 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-25 12:49:38,968 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=84138.66666666667, ans=0.0
+2024-08-25 12:49:51,366 INFO [train.py:1114] (3/4) Epoch 7, batch 850, loss[loss=0.2915, simple_loss=0.3364, pruned_loss=0.0899, ctc_loss=0.1669, over 19645.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3139, pruned_loss=0.08268, ctc_loss=0.1557, over 3814345.83 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:49:52,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=84192.0, ans=10.0
+2024-08-25 12:49:59,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=84192.0, ans=0.125
+2024-08-25 12:50:22,209 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.25 vs. limit=15.0
+2024-08-25 12:50:25,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=84352.0, ans=0.5
+2024-08-25 12:50:27,483 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=84352.0, ans=0.125
+2024-08-25 12:50:43,488 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 1.946e+02 2.270e+02 2.825e+02 4.143e+02, threshold=4.540e+02, percent-clipped=0.0
+2024-08-25 12:50:49,141 INFO [train.py:1114] (3/4) Epoch 7, batch 900, loss[loss=0.2394, simple_loss=0.2813, pruned_loss=0.07136, ctc_loss=0.1368, over 19424.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3148, pruned_loss=0.08338, ctc_loss=0.1566, over 3817862.89 frames. ], batch size: 48, lr: 2.09e-02, grad_scale: 32.0
+2024-08-25 12:51:12,225 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.04 vs. limit=12.0
+2024-08-25 12:51:16,704 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.29 vs. limit=22.5
+2024-08-25 12:51:41,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=84618.66666666667, ans=0.1
+2024-08-25 12:51:58,842 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=84672.0, ans=0.0
+2024-08-25 12:52:05,346 INFO [train.py:1114] (3/4) Epoch 7, batch 950, loss[loss=0.2633, simple_loss=0.3023, pruned_loss=0.08158, ctc_loss=0.153, over 19497.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3151, pruned_loss=0.08366, ctc_loss=0.1571, over 3819933.60 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 16.0
+2024-08-25 12:52:11,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=84725.33333333333, ans=0.1
+2024-08-25 12:52:25,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=84778.66666666667, ans=0.125
+2024-08-25 12:52:34,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=84832.0, ans=0.125
+2024-08-25 12:52:59,184 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.065e+02 2.373e+02 2.949e+02 1.128e+03, threshold=4.746e+02, percent-clipped=6.0
+2024-08-25 12:53:05,268 INFO [train.py:1114] (3/4) Epoch 7, batch 1000, loss[loss=0.2518, simple_loss=0.2987, pruned_loss=0.07423, ctc_loss=0.1411, over 19861.00 frames. ], tot_loss[loss=0.2746, simple_loss=0.3163, pruned_loss=0.0847, ctc_loss=0.1589, over 3816057.85 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:53:18,216 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.48 vs. limit=22.5
+2024-08-25 12:53:19,674 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.58 vs. limit=6.0
+2024-08-25 12:53:20,407 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=85045.33333333333, ans=0.2
+2024-08-25 12:53:22,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=85045.33333333333, ans=0.0
+2024-08-25 12:53:27,898 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.75 vs. limit=10.0
+2024-08-25 12:53:49,603 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.85 vs. limit=15.0
+2024-08-25 12:54:05,088 INFO [train.py:1114] (3/4) Epoch 7, batch 1050, loss[loss=0.281, simple_loss=0.3251, pruned_loss=0.08592, ctc_loss=0.1628, over 19825.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3151, pruned_loss=0.08396, ctc_loss=0.1575, over 3822662.85 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:54:36,658 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=85365.33333333333, ans=0.1
+2024-08-25 12:54:36,684 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=85365.33333333333, ans=0.025
+2024-08-25 12:54:43,594 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-25 12:54:47,330 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-25 12:54:54,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=85472.0, ans=0.125
+2024-08-25 12:54:57,714 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=85472.0, ans=0.2
+2024-08-25 12:55:01,659 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 1.918e+02 2.325e+02 2.776e+02 4.591e+02, threshold=4.650e+02, percent-clipped=1.0
+2024-08-25 12:55:02,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=85472.0, ans=0.125
+2024-08-25 12:55:05,434 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=85525.33333333333, ans=0.125
+2024-08-25 12:55:06,551 INFO [train.py:1114] (3/4) Epoch 7, batch 1100, loss[loss=0.2974, simple_loss=0.3298, pruned_loss=0.09705, ctc_loss=0.1775, over 19586.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3143, pruned_loss=0.08317, ctc_loss=0.1561, over 3829769.58 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:55:08,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=85525.33333333333, ans=0.0
+2024-08-25 12:56:01,605 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=85738.66666666667, ans=0.125
+2024-08-25 12:56:05,757 INFO [train.py:1114] (3/4) Epoch 7, batch 1150, loss[loss=0.2483, simple_loss=0.2949, pruned_loss=0.07354, ctc_loss=0.1363, over 19586.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3141, pruned_loss=0.08328, ctc_loss=0.1563, over 3829012.43 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-25 12:56:08,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=85792.0, ans=0.125
+2024-08-25 12:56:11,221 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.89 vs. limit=10.0
+2024-08-25 12:56:21,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=85845.33333333333, ans=0.1
+2024-08-25 12:56:25,046 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=85845.33333333333, ans=0.125
+2024-08-25 12:56:57,151 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=86005.33333333333, ans=0.0
+2024-08-25 12:56:58,414 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=86005.33333333333, ans=0.125
+2024-08-25 12:57:02,236 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.94 vs. limit=15.0
+2024-08-25 12:57:02,979 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 1.959e+02 2.167e+02 2.666e+02 4.946e+02, threshold=4.335e+02, percent-clipped=2.0
+2024-08-25 12:57:05,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=86005.33333333333, ans=0.025
+2024-08-25 12:57:07,690 INFO [train.py:1114] (3/4) Epoch 7, batch 1200, loss[loss=0.2728, simple_loss=0.322, pruned_loss=0.08066, ctc_loss=0.1556, over 19842.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3151, pruned_loss=0.08389, ctc_loss=0.1576, over 3824988.64 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:57:17,293 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.99 vs. limit=15.0
+2024-08-25 12:57:36,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=86165.33333333333, ans=0.1
+2024-08-25 12:57:56,088 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.35 vs. limit=6.0
+2024-08-25 12:57:58,444 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.02 vs. limit=22.5
+2024-08-25 12:58:05,919 INFO [train.py:1114] (3/4) Epoch 7, batch 1250, loss[loss=0.2644, simple_loss=0.3201, pruned_loss=0.07603, ctc_loss=0.1414, over 19533.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3147, pruned_loss=0.08308, ctc_loss=0.156, over 3843369.71 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:58:06,126 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=86325.33333333333, ans=0.1
+2024-08-25 12:58:12,348 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.50 vs. limit=15.0
+2024-08-25 12:58:28,684 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.50 vs. limit=6.0
+2024-08-25 12:58:41,040 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=86432.0, ans=0.025
+2024-08-25 12:58:41,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=86432.0, ans=0.0
+2024-08-25 12:58:47,024 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=86485.33333333333, ans=0.125
+2024-08-25 12:58:48,115 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=86485.33333333333, ans=0.2
+2024-08-25 12:59:02,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=86538.66666666667, ans=0.125
+2024-08-25 12:59:02,855 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 1.964e+02 2.304e+02 2.729e+02 5.465e+02, threshold=4.608e+02, percent-clipped=2.0
+2024-08-25 12:59:07,515 INFO [train.py:1114] (3/4) Epoch 7, batch 1300, loss[loss=0.2675, simple_loss=0.3096, pruned_loss=0.08184, ctc_loss=0.1544, over 18798.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3136, pruned_loss=0.08228, ctc_loss=0.1545, over 3845458.99 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 12:59:07,593 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=86592.0, ans=0.125
+2024-08-25 12:59:19,306 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-25 12:59:20,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-25 12:59:34,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=86698.66666666667, ans=0.2
+2024-08-25 12:59:38,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=86698.66666666667, ans=0.0
+2024-08-25 12:59:45,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=86752.0, ans=0.0
+2024-08-25 12:59:51,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=86752.0, ans=0.0
+2024-08-25 13:00:04,613 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=86805.33333333333, ans=0.1
+2024-08-25 13:00:07,957 INFO [train.py:1114] (3/4) Epoch 7, batch 1350, loss[loss=0.2905, simple_loss=0.324, pruned_loss=0.09312, ctc_loss=0.1771, over 19761.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3126, pruned_loss=0.0815, ctc_loss=0.1532, over 3857306.00 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-25 13:00:22,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=86912.0, ans=0.125
+2024-08-25 13:00:22,149 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=86912.0, ans=0.0
+2024-08-25 13:00:29,375 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.64 vs. limit=6.0
+2024-08-25 13:00:35,108 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.74 vs. limit=15.0
+2024-08-25 13:00:39,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=86965.33333333333, ans=0.0
+2024-08-25 13:01:52,641 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=87072.0, ans=0.0
+2024-08-25 13:01:59,605 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.935e+02 2.309e+02 3.009e+02 4.449e+02, threshold=4.618e+02, percent-clipped=0.0
+2024-08-25 13:02:04,191 INFO [train.py:1114] (3/4) Epoch 7, batch 1400, loss[loss=0.2143, simple_loss=0.2697, pruned_loss=0.05821, ctc_loss=0.1061, over 19672.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3129, pruned_loss=0.08193, ctc_loss=0.1541, over 3863716.77 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-25 13:02:13,304 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-25 13:02:55,533 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=87338.66666666667, ans=0.0
+2024-08-25 13:02:58,673 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=87338.66666666667, ans=0.0
+2024-08-25 13:03:02,255 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:03:05,400 INFO [train.py:1114] (3/4) Epoch 7, batch 1450, loss[loss=0.288, simple_loss=0.3331, pruned_loss=0.088, ctc_loss=0.1673, over 19679.00 frames. ], tot_loss[loss=0.2697, simple_loss=0.3133, pruned_loss=0.08211, ctc_loss=0.1546, over 3861825.90 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:03:07,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=87392.0, ans=0.0
+2024-08-25 13:03:26,252 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=87445.33333333333, ans=0.125
+2024-08-25 13:03:35,889 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=87498.66666666667, ans=0.0
+2024-08-25 13:04:26,204 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=87552.0, ans=0.125
+2024-08-25 13:04:30,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=87552.0, ans=0.2
+2024-08-25 13:04:32,731 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=87552.0, ans=0.0
+2024-08-25 13:04:34,871 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=87552.0, ans=0.125
+2024-08-25 13:04:38,530 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=87605.33333333333, ans=0.0
+2024-08-25 13:04:46,559 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.015e+02 2.285e+02 2.716e+02 4.465e+02, threshold=4.569e+02, percent-clipped=0.0
+2024-08-25 13:04:50,187 INFO [train.py:1114] (3/4) Epoch 7, batch 1500, loss[loss=0.2821, simple_loss=0.3261, pruned_loss=0.0858, ctc_loss=0.1665, over 19590.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.3139, pruned_loss=0.08227, ctc_loss=0.1549, over 3860974.96 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:04:55,373 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=87658.66666666667, ans=0.0
+2024-08-25 13:05:21,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=87765.33333333333, ans=0.125
+2024-08-25 13:05:37,875 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=87818.66666666667, ans=0.125
+2024-08-25 13:05:39,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=87818.66666666667, ans=0.015
+2024-08-25 13:05:57,427 INFO [train.py:1114] (3/4) Epoch 7, batch 1550, loss[loss=0.2968, simple_loss=0.3401, pruned_loss=0.09256, ctc_loss=0.1709, over 19590.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3143, pruned_loss=0.08276, ctc_loss=0.1556, over 3846531.22 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 16.0
+2024-08-25 13:06:05,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=87925.33333333333, ans=0.2
+2024-08-25 13:06:11,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=87978.66666666667, ans=0.125
+2024-08-25 13:06:34,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=88085.33333333333, ans=0.0
+2024-08-25 13:06:55,898 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.880e+02 2.225e+02 2.757e+02 4.141e+02, threshold=4.451e+02, percent-clipped=0.0
+2024-08-25 13:07:00,952 INFO [train.py:1114] (3/4) Epoch 7, batch 1600, loss[loss=0.2773, simple_loss=0.3206, pruned_loss=0.08464, ctc_loss=0.1618, over 19851.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3145, pruned_loss=0.08307, ctc_loss=0.1561, over 3835162.06 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:07:24,576 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.37 vs. limit=15.0
+2024-08-25 13:07:36,836 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=88352.0, ans=0.125
+2024-08-25 13:07:53,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=88405.33333333333, ans=0.0
+2024-08-25 13:07:58,847 INFO [train.py:1114] (3/4) Epoch 7, batch 1650, loss[loss=0.2507, simple_loss=0.31, pruned_loss=0.0691, ctc_loss=0.133, over 19636.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3141, pruned_loss=0.083, ctc_loss=0.1559, over 3832284.41 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:08:07,619 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=88458.66666666667, ans=0.125
+2024-08-25 13:08:21,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=88512.0, ans=0.2
+2024-08-25 13:08:25,402 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=88565.33333333333, ans=0.0
+2024-08-25 13:08:47,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=88672.0, ans=0.125
+2024-08-25 13:08:54,968 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.917e+02 2.131e+02 2.729e+02 4.248e+02, threshold=4.261e+02, percent-clipped=0.0
+2024-08-25 13:08:58,381 INFO [train.py:1114] (3/4) Epoch 7, batch 1700, loss[loss=0.2143, simple_loss=0.2592, pruned_loss=0.06156, ctc_loss=0.1159, over 19685.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3133, pruned_loss=0.0822, ctc_loss=0.1546, over 3847011.65 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:09:06,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=88725.33333333333, ans=0.125
+2024-08-25 13:09:12,653 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=88778.66666666667, ans=0.2
+2024-08-25 13:09:16,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=88778.66666666667, ans=0.1
+2024-08-25 13:09:50,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=88938.66666666667, ans=0.2
+2024-08-25 13:09:53,137 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=88938.66666666667, ans=0.95
+2024-08-25 13:09:55,061 INFO [train.py:1114] (3/4) Epoch 7, batch 1750, loss[loss=0.2475, simple_loss=0.289, pruned_loss=0.07482, ctc_loss=0.141, over 19671.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3125, pruned_loss=0.08172, ctc_loss=0.1538, over 3852333.62 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-25 13:09:58,640 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=88992.0, ans=0.125
+2024-08-25 13:11:03,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=88992.0, ans=0.0
+2024-08-25 13:16:07,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=89045.33333333333, ans=0.09899494936611666
+2024-08-25 13:17:17,231 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.06 vs. limit=22.5
+2024-08-25 13:17:35,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=89152.0, ans=0.125
+2024-08-25 13:17:36,522 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:25:02,960 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=89152.0, ans=0.125
+2024-08-25 13:25:11,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=89205.33333333333, ans=0.0
+2024-08-25 13:29:44,218 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.972e+02 2.344e+02 2.828e+02 4.449e+02, threshold=4.688e+02, percent-clipped=1.0
+2024-08-25 13:29:47,700 INFO [train.py:1114] (3/4) Epoch 7, batch 1800, loss[loss=0.2623, simple_loss=0.3149, pruned_loss=0.07566, ctc_loss=0.1459, over 19612.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3126, pruned_loss=0.08192, ctc_loss=0.1542, over 3854206.50 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:31:00,535 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=89258.66666666667, ans=0.025
+2024-08-25 13:31:12,336 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.39 vs. limit=15.0
+2024-08-25 13:37:24,991 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=89365.33333333333, ans=0.125
+2024-08-25 13:38:30,519 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=89418.66666666667, ans=0.0
+2024-08-25 13:38:36,878 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.95 vs. limit=15.0
+2024-08-25 13:40:34,862 INFO [train.py:1114] (3/4) Epoch 7, batch 1850, loss[loss=0.2804, simple_loss=0.3212, pruned_loss=0.08607, ctc_loss=0.1684, over 19581.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3125, pruned_loss=0.08181, ctc_loss=0.154, over 3857402.30 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:41:30,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=89578.66666666667, ans=0.2
+2024-08-25 13:41:37,567 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=89578.66666666667, ans=0.125
+2024-08-25 13:42:31,287 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.18 vs. limit=15.0
+2024-08-25 13:42:49,602 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=89632.0, ans=0.0
+2024-08-25 13:44:01,301 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.852e+02 2.070e+02 2.397e+02 4.608e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-25 13:44:07,952 INFO [train.py:1114] (3/4) Epoch 7, batch 1900, loss[loss=0.2444, simple_loss=0.3173, pruned_loss=0.06249, ctc_loss=0.1165, over 19648.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.313, pruned_loss=0.08158, ctc_loss=0.1536, over 3861127.34 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-25 13:45:36,196 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=90005.33333333333, ans=0.0
+2024-08-25 13:45:41,198 INFO [train.py:1114] (3/4) Epoch 7, batch 1950, loss[loss=0.2734, simple_loss=0.3188, pruned_loss=0.08279, ctc_loss=0.156, over 19592.00 frames. ], tot_loss[loss=0.2695, simple_loss=0.3142, pruned_loss=0.08167, ctc_loss=0.1537, over 3870000.94 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 16.0
+2024-08-25 13:45:41,395 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=90058.66666666667, ans=0.0
+2024-08-25 13:46:06,098 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=90112.0, ans=0.025
+2024-08-25 13:46:28,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=90218.66666666667, ans=0.025
+2024-08-25 13:46:29,157 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=90218.66666666667, ans=0.1
+2024-08-25 13:46:29,806 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.20 vs. limit=6.0
+2024-08-25 13:46:34,523 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.64 vs. limit=10.0
+2024-08-25 13:46:42,769 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 1.896e+02 2.177e+02 2.703e+02 3.964e+02, threshold=4.354e+02, percent-clipped=0.0
+2024-08-25 13:46:45,061 INFO [train.py:1114] (3/4) Epoch 7, batch 2000, loss[loss=0.2254, simple_loss=0.2721, pruned_loss=0.0653, ctc_loss=0.1204, over 19647.00 frames. ], tot_loss[loss=0.2704, simple_loss=0.3146, pruned_loss=0.08216, ctc_loss=0.1545, over 3853995.48 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:46:57,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=90378.66666666667, ans=0.95
+2024-08-25 13:47:01,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90378.66666666667, ans=0.1
+2024-08-25 13:47:16,535 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=90432.0, ans=0.125
+2024-08-25 13:47:28,107 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.93 vs. limit=15.0
+2024-08-25 13:47:36,707 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=90538.66666666667, ans=0.0
+2024-08-25 13:47:41,004 INFO [train.py:1114] (3/4) Epoch 7, batch 2050, loss[loss=0.2475, simple_loss=0.2873, pruned_loss=0.07588, ctc_loss=0.1396, over 19728.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3136, pruned_loss=0.08194, ctc_loss=0.1542, over 3850036.56 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:47:45,759 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=90592.0, ans=0.1
+2024-08-25 13:47:58,702 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=90645.33333333333, ans=0.125
+2024-08-25 13:48:08,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=90698.66666666667, ans=0.125
+2024-08-25 13:48:19,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90752.0, ans=0.1
+2024-08-25 13:48:26,780 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 13:48:36,338 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.053e+02 2.413e+02 3.017e+02 5.203e+02, threshold=4.827e+02, percent-clipped=2.0
+2024-08-25 13:48:38,604 INFO [train.py:1114] (3/4) Epoch 7, batch 2100, loss[loss=0.2616, simple_loss=0.3127, pruned_loss=0.07656, ctc_loss=0.1432, over 19769.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3122, pruned_loss=0.08079, ctc_loss=0.1521, over 3858319.64 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:48:39,924 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=90858.66666666667, ans=0.0
+2024-08-25 13:48:55,312 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=90912.0, ans=0.0
+2024-08-25 13:48:57,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=90912.0, ans=0.0
+2024-08-25 13:48:59,841 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=90912.0, ans=0.07
+2024-08-25 13:49:01,897 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=90912.0, ans=0.1
+2024-08-25 13:49:23,662 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=91018.66666666667, ans=0.0
+2024-08-25 13:49:30,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=91018.66666666667, ans=0.025
+2024-08-25 13:49:33,273 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=91072.0, ans=0.0
+2024-08-25 13:49:41,265 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=91072.0, ans=0.125
+2024-08-25 13:49:43,244 INFO [train.py:1114] (3/4) Epoch 7, batch 2150, loss[loss=0.2565, simple_loss=0.3044, pruned_loss=0.07614, ctc_loss=0.1409, over 19860.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3108, pruned_loss=0.08009, ctc_loss=0.1507, over 3869465.65 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-25 13:49:43,614 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-25 13:50:07,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=91232.0, ans=0.125
+2024-08-25 13:50:23,392 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.75 vs. limit=22.5
+2024-08-25 13:50:29,944 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=91338.66666666667, ans=0.125
+2024-08-25 13:50:35,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91338.66666666667, ans=0.1
+2024-08-25 13:50:36,452 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 1.920e+02 2.200e+02 2.924e+02 5.090e+02, threshold=4.400e+02, percent-clipped=1.0
+2024-08-25 13:50:39,137 INFO [train.py:1114] (3/4) Epoch 7, batch 2200, loss[loss=0.2414, simple_loss=0.3066, pruned_loss=0.06342, ctc_loss=0.1233, over 19586.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.311, pruned_loss=0.08007, ctc_loss=0.1508, over 3868252.74 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-25 13:50:44,891 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=91392.0, ans=0.125
+2024-08-25 13:50:51,215 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.46 vs. limit=12.0
+2024-08-25 13:50:57,369 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=91445.33333333333, ans=0.015
+2024-08-25 13:51:08,470 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=91498.66666666667, ans=0.125
+2024-08-25 13:51:13,514 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.88 vs. limit=15.0
+2024-08-25 13:51:33,858 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=91658.66666666667, ans=0.125
+2024-08-25 13:51:34,993 INFO [train.py:1114] (3/4) Epoch 7, batch 2250, loss[loss=0.279, simple_loss=0.3189, pruned_loss=0.08712, ctc_loss=0.162, over 19598.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3118, pruned_loss=0.08057, ctc_loss=0.1516, over 3868037.68 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:51:53,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=91712.0, ans=0.0
+2024-08-25 13:51:54,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=91712.0, ans=0.05
+2024-08-25 13:52:10,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=91818.66666666667, ans=0.2
+2024-08-25 13:52:25,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.89 vs. limit=15.0
+2024-08-25 13:52:28,396 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.146e+02 2.677e+02 3.204e+02 4.930e+02, threshold=5.354e+02, percent-clipped=3.0
+2024-08-25 13:52:29,555 INFO [train.py:1114] (3/4) Epoch 7, batch 2300, loss[loss=0.2586, simple_loss=0.3032, pruned_loss=0.07827, ctc_loss=0.1439, over 19499.00 frames. ], tot_loss[loss=0.2669, simple_loss=0.3112, pruned_loss=0.08092, ctc_loss=0.1522, over 3860645.93 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:52:36,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=91925.33333333333, ans=0.125
+2024-08-25 13:52:43,042 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=91978.66666666667, ans=0.125
+2024-08-25 13:52:50,136 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.68 vs. limit=15.0
+2024-08-25 13:53:25,153 INFO [train.py:1114] (3/4) Epoch 7, batch 2350, loss[loss=0.2513, simple_loss=0.3063, pruned_loss=0.07176, ctc_loss=0.1321, over 19666.00 frames. ], tot_loss[loss=0.267, simple_loss=0.311, pruned_loss=0.08107, ctc_loss=0.152, over 3862855.98 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 16.0
+2024-08-25 13:53:27,575 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=92192.0, ans=0.025
+2024-08-25 13:53:28,497 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=92192.0, ans=0.07
+2024-08-25 13:53:44,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=92245.33333333333, ans=0.0
+2024-08-25 13:53:46,849 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=92298.66666666667, ans=0.2
+2024-08-25 13:53:47,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=92298.66666666667, ans=0.125
+2024-08-25 13:53:49,065 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=92298.66666666667, ans=0.125
+2024-08-25 13:53:50,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-25 13:53:56,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=92352.0, ans=0.1
+2024-08-25 13:54:18,219 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.985e+02 2.336e+02 2.802e+02 4.974e+02, threshold=4.671e+02, percent-clipped=0.0
+2024-08-25 13:54:19,278 INFO [train.py:1114] (3/4) Epoch 7, batch 2400, loss[loss=0.2554, simple_loss=0.3126, pruned_loss=0.07284, ctc_loss=0.1314, over 19292.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.3139, pruned_loss=0.08238, ctc_loss=0.1543, over 3857788.50 frames. ], batch size: 71, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:54:21,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=92458.66666666667, ans=0.1
+2024-08-25 13:54:21,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff2.min_abs, batch_count=92458.66666666667, ans=0.1
+2024-08-25 13:54:22,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=92458.66666666667, ans=0.0
+2024-08-25 13:54:22,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=92458.66666666667, ans=0.125
+2024-08-25 13:54:23,819 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=92458.66666666667, ans=0.125
+2024-08-25 13:54:31,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=92512.0, ans=0.0
+2024-08-25 13:55:53,237 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=92618.66666666667, ans=0.2
+2024-08-25 13:56:03,417 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=9.609e-01
+2024-08-25 13:56:05,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=92672.0, ans=0.07
+2024-08-25 13:56:05,884 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=92672.0, ans=0.125
+2024-08-25 13:56:13,547 INFO [train.py:1114] (3/4) Epoch 7, batch 2450, loss[loss=0.3709, simple_loss=0.3636, pruned_loss=0.1388, ctc_loss=0.2512, over 13682.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3186, pruned_loss=0.08629, ctc_loss=0.162, over 3730784.06 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 32.0
+2024-08-25 13:56:44,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=92778.66666666667, ans=0.0
+2024-08-25 13:56:57,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=92885.33333333333, ans=0.2
+2024-08-25 13:57:54,269 INFO [train.py:1114] (3/4) Epoch 8, batch 0, loss[loss=0.2478, simple_loss=0.2915, pruned_loss=0.07495, ctc_loss=0.1355, over 19816.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.2915, pruned_loss=0.07495, ctc_loss=0.1355, over 19816.00 frames. ], batch size: 49, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 13:57:54,270 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 13:59:56,296 INFO [train.py:1146] (3/4) Epoch 8, validation: loss=0.2171, simple_loss=0.2997, pruned_loss=0.04948, ctc_loss=0.08904, over 944034.00 frames.
+2024-08-25 13:59:56,297 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 13:59:56,784 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.80 vs. limit=15.0
+2024-08-25 14:01:03,644 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.158e+02 2.483e+02 2.902e+02 5.180e+02, threshold=4.965e+02, percent-clipped=2.0
+2024-08-25 14:01:10,815 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=92986.66666666667, ans=0.125
+2024-08-25 14:02:04,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=93146.66666666667, ans=0.125
+2024-08-25 14:02:06,460 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93146.66666666667, ans=0.1
+2024-08-25 14:02:13,487 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=93146.66666666667, ans=0.025
+2024-08-25 14:02:17,067 INFO [train.py:1114] (3/4) Epoch 8, batch 50, loss[loss=0.2487, simple_loss=0.2929, pruned_loss=0.07385, ctc_loss=0.1417, over 19730.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3147, pruned_loss=0.08233, ctc_loss=0.1555, over 845075.57 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:02:32,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=93253.33333333333, ans=0.0
+2024-08-25 14:02:32,347 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=93253.33333333333, ans=0.125
+2024-08-25 14:02:46,790 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=93306.66666666667, ans=0.125
+2024-08-25 14:02:49,729 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.25 vs. limit=15.0
+2024-08-25 14:05:03,226 INFO [train.py:1114] (3/4) Epoch 8, batch 100, loss[loss=0.2546, simple_loss=0.2982, pruned_loss=0.07646, ctc_loss=0.145, over 19721.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3159, pruned_loss=0.08228, ctc_loss=0.1551, over 1500583.81 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:05:14,938 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.910e+02 2.219e+02 2.660e+02 5.043e+02, threshold=4.439e+02, percent-clipped=1.0
+2024-08-25 14:05:15,243 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=93520.0, ans=0.125
+2024-08-25 14:05:25,655 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=93520.0, ans=0.125
+2024-08-25 14:05:40,803 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.55 vs. limit=15.0
+2024-08-25 14:05:43,897 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=93626.66666666667, ans=0.09899494936611666
+2024-08-25 14:05:45,245 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.74 vs. limit=22.5
+2024-08-25 14:05:53,890 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=93680.0, ans=0.0
+2024-08-25 14:07:14,435 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=93680.0, ans=0.2
+2024-08-25 14:07:16,383 INFO [train.py:1114] (3/4) Epoch 8, batch 150, loss[loss=0.2325, simple_loss=0.2818, pruned_loss=0.06641, ctc_loss=0.1262, over 19701.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3114, pruned_loss=0.07967, ctc_loss=0.1501, over 2027987.36 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-25 14:07:16,590 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93733.33333333333, ans=0.1
+2024-08-25 14:08:12,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=93786.66666666667, ans=0.125
+2024-08-25 14:09:12,954 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=93840.0, ans=0.125
+2024-08-25 14:10:15,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=94000.0, ans=0.0
+2024-08-25 14:10:16,272 INFO [train.py:1114] (3/4) Epoch 8, batch 200, loss[loss=0.3256, simple_loss=0.353, pruned_loss=0.1073, ctc_loss=0.2089, over 18240.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3097, pruned_loss=0.07904, ctc_loss=0.1488, over 2435470.75 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:10:20,173 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=94000.0, ans=0.0
+2024-08-25 14:10:29,224 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.854e+02 2.093e+02 2.544e+02 5.078e+02, threshold=4.187e+02, percent-clipped=1.0
+2024-08-25 14:10:36,956 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.28 vs. limit=6.0
+2024-08-25 14:10:38,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=94053.33333333333, ans=0.1
+2024-08-25 14:10:39,560 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=94106.66666666667, ans=0.2
+2024-08-25 14:10:43,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=94106.66666666667, ans=10.0
+2024-08-25 14:10:48,638 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=94106.66666666667, ans=0.0
+2024-08-25 14:10:49,717 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:10:55,599 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=94160.0, ans=0.125
+2024-08-25 14:11:05,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=94213.33333333333, ans=0.0
+2024-08-25 14:11:07,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=94213.33333333333, ans=0.125
+2024-08-25 14:11:17,842 INFO [train.py:1114] (3/4) Epoch 8, batch 250, loss[loss=0.2901, simple_loss=0.334, pruned_loss=0.0919, ctc_loss=0.1561, over 19306.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3094, pruned_loss=0.07866, ctc_loss=0.1481, over 2755765.24 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:11:27,510 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=94266.66666666667, ans=0.2
+2024-08-25 14:12:26,490 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=94320.0, ans=0.04949747468305833
+2024-08-25 14:13:10,538 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=94480.0, ans=0.04949747468305833
+2024-08-25 14:13:21,902 INFO [train.py:1114] (3/4) Epoch 8, batch 300, loss[loss=0.2471, simple_loss=0.3091, pruned_loss=0.06679, ctc_loss=0.1286, over 19546.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.309, pruned_loss=0.07824, ctc_loss=0.1473, over 3000913.20 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:13:28,800 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=94533.33333333333, ans=0.2
+2024-08-25 14:13:33,354 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 1.987e+02 2.340e+02 3.022e+02 6.047e+02, threshold=4.681e+02, percent-clipped=9.0
+2024-08-25 14:13:34,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=94586.66666666667, ans=0.2
+2024-08-25 14:13:44,101 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=94640.0, ans=0.0
+2024-08-25 14:13:45,334 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=94640.0, ans=0.1
+2024-08-25 14:14:22,772 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=94640.0, ans=0.2
+2024-08-25 14:14:52,110 INFO [train.py:1114] (3/4) Epoch 8, batch 350, loss[loss=0.2167, simple_loss=0.2687, pruned_loss=0.05773, ctc_loss=0.123, over 19750.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3085, pruned_loss=0.07791, ctc_loss=0.1464, over 3189723.59 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-25 14:14:55,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=94800.0, ans=0.0
+2024-08-25 14:15:43,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-25 14:15:45,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-25 14:15:52,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=94906.66666666667, ans=0.1
+2024-08-25 14:16:26,835 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.31 vs. limit=12.0
+2024-08-25 14:16:46,323 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=95013.33333333333, ans=0.1
+2024-08-25 14:16:47,732 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=95013.33333333333, ans=0.04949747468305833
+2024-08-25 14:16:50,820 INFO [train.py:1114] (3/4) Epoch 8, batch 400, loss[loss=0.2709, simple_loss=0.3178, pruned_loss=0.08067, ctc_loss=0.1568, over 19466.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.3081, pruned_loss=0.07753, ctc_loss=0.1461, over 3341207.10 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:16:58,991 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=95066.66666666667, ans=0.0
+2024-08-25 14:17:00,495 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.84 vs. limit=15.0
+2024-08-25 14:17:03,858 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.019e+02 2.528e+02 3.132e+02 5.852e+02, threshold=5.056e+02, percent-clipped=7.0
+2024-08-25 14:17:12,243 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.93 vs. limit=15.0
+2024-08-25 14:17:15,524 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.61 vs. limit=15.0
+2024-08-25 14:17:16,595 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=95173.33333333333, ans=0.2
+2024-08-25 14:17:44,855 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=95280.0, ans=0.125
+2024-08-25 14:17:50,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95280.0, ans=0.1
+2024-08-25 14:18:36,768 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.63 vs. limit=15.0
+2024-08-25 14:18:38,393 INFO [train.py:1114] (3/4) Epoch 8, batch 450, loss[loss=0.2783, simple_loss=0.3237, pruned_loss=0.08384, ctc_loss=0.163, over 19616.00 frames. ], tot_loss[loss=0.2619, simple_loss=0.3086, pruned_loss=0.0781, ctc_loss=0.1474, over 3451064.08 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:18:52,286 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.59 vs. limit=15.0
+2024-08-25 14:19:05,240 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.44 vs. limit=15.0
+2024-08-25 14:19:10,665 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=95440.0, ans=0.1
+2024-08-25 14:19:32,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=95546.66666666667, ans=0.125
+2024-08-25 14:19:39,033 INFO [train.py:1114] (3/4) Epoch 8, batch 500, loss[loss=0.2596, simple_loss=0.3172, pruned_loss=0.07321, ctc_loss=0.1388, over 19674.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3071, pruned_loss=0.0769, ctc_loss=0.145, over 3546644.13 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:21:42,076 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.925e+02 2.242e+02 2.655e+02 4.786e+02, threshold=4.483e+02, percent-clipped=0.0
+2024-08-25 14:21:43,722 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=13.99 vs. limit=15.0
+2024-08-25 14:21:45,733 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=95653.33333333333, ans=0.2
+2024-08-25 14:22:03,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=95760.0, ans=0.0
+2024-08-25 14:22:27,758 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.69 vs. limit=6.0
+2024-08-25 14:22:28,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=95813.33333333333, ans=0.025
+2024-08-25 14:22:28,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=95813.33333333333, ans=0.5
+2024-08-25 14:22:36,073 INFO [train.py:1114] (3/4) Epoch 8, batch 550, loss[loss=0.2844, simple_loss=0.3281, pruned_loss=0.0879, ctc_loss=0.162, over 19235.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3063, pruned_loss=0.07632, ctc_loss=0.1436, over 3609559.30 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:22:39,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=95866.66666666667, ans=0.125
+2024-08-25 14:23:55,191 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=95920.0, ans=0.0
+2024-08-25 14:23:59,686 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=95920.0, ans=0.125
+2024-08-25 14:25:20,972 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-25 14:25:26,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=96026.66666666667, ans=0.125
+2024-08-25 14:25:34,156 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:25:43,162 INFO [train.py:1114] (3/4) Epoch 8, batch 600, loss[loss=0.2677, simple_loss=0.319, pruned_loss=0.07937, ctc_loss=0.1442, over 19404.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3069, pruned_loss=0.07633, ctc_loss=0.1436, over 3666516.95 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-25 14:25:54,322 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 1.975e+02 2.461e+02 2.998e+02 6.685e+02, threshold=4.922e+02, percent-clipped=2.0
+2024-08-25 14:26:23,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=96293.33333333333, ans=0.125
+2024-08-25 14:29:18,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=96346.66666666667, ans=0.04949747468305833
+2024-08-25 14:29:23,574 INFO [train.py:1114] (3/4) Epoch 8, batch 650, loss[loss=0.268, simple_loss=0.3188, pruned_loss=0.07962, ctc_loss=0.1449, over 19766.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3061, pruned_loss=0.07618, ctc_loss=0.1434, over 3716400.18 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:29:30,026 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.16 vs. limit=15.0
+2024-08-25 14:29:46,694 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=96453.33333333333, ans=10.0
+2024-08-25 14:30:52,429 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=96560.0, ans=0.2
+2024-08-25 14:30:53,532 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=96560.0, ans=0.0
+2024-08-25 14:31:04,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=96613.33333333333, ans=0.025
+2024-08-25 14:31:04,613 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.90 vs. limit=15.0
+2024-08-25 14:31:23,586 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:31:24,399 INFO [train.py:1114] (3/4) Epoch 8, batch 700, loss[loss=0.2366, simple_loss=0.2908, pruned_loss=0.06588, ctc_loss=0.1263, over 19724.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3061, pruned_loss=0.076, ctc_loss=0.1429, over 3748173.97 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:31:36,075 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 1.952e+02 2.228e+02 2.907e+02 4.140e+02, threshold=4.456e+02, percent-clipped=0.0
+2024-08-25 14:31:48,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=96720.0, ans=0.125
+2024-08-25 14:32:06,951 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.81 vs. limit=6.0
+2024-08-25 14:32:10,095 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=96826.66666666667, ans=0.2
+2024-08-25 14:32:35,094 INFO [train.py:1114] (3/4) Epoch 8, batch 750, loss[loss=0.2576, simple_loss=0.3102, pruned_loss=0.07447, ctc_loss=0.1399, over 19509.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3053, pruned_loss=0.07564, ctc_loss=0.142, over 3774076.83 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:32:35,379 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=96933.33333333333, ans=0.125
+2024-08-25 14:32:38,933 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=96933.33333333333, ans=0.0
+2024-08-25 14:32:44,067 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=96933.33333333333, ans=0.0
+2024-08-25 14:33:04,676 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=97040.0, ans=0.125
+2024-08-25 14:33:24,037 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.42 vs. limit=10.0
+2024-08-25 14:33:32,416 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.525e-03
+2024-08-25 14:33:34,555 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=97146.66666666667, ans=0.125
+2024-08-25 14:33:44,732 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=97200.0, ans=0.0
+2024-08-25 14:33:45,415 INFO [train.py:1114] (3/4) Epoch 8, batch 800, loss[loss=0.2251, simple_loss=0.2763, pruned_loss=0.06359, ctc_loss=0.1167, over 19806.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3055, pruned_loss=0.07583, ctc_loss=0.1425, over 3795285.75 frames. ], batch size: 49, lr: 1.86e-02, grad_scale: 32.0
+2024-08-25 14:34:33,007 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:34:35,086 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 1.855e+02 2.176e+02 2.933e+02 4.905e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-25 14:35:04,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=97360.0, ans=10.0
+2024-08-25 14:35:10,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.94 vs. limit=15.0
+2024-08-25 14:35:22,238 INFO [train.py:1114] (3/4) Epoch 8, batch 850, loss[loss=0.2731, simple_loss=0.322, pruned_loss=0.08247, ctc_loss=0.1479, over 19658.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.306, pruned_loss=0.07638, ctc_loss=0.1436, over 3815373.82 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:35:39,694 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=97520.0, ans=0.125
+2024-08-25 14:35:47,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=97573.33333333333, ans=0.125
+2024-08-25 14:35:48,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=97573.33333333333, ans=0.125
+2024-08-25 14:36:04,690 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=97626.66666666667, ans=0.1
+2024-08-25 14:36:19,786 INFO [train.py:1114] (3/4) Epoch 8, batch 900, loss[loss=0.2519, simple_loss=0.293, pruned_loss=0.07669, ctc_loss=0.1438, over 19419.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3065, pruned_loss=0.07655, ctc_loss=0.1441, over 3817949.58 frames. ], batch size: 48, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:38:27,318 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=97733.33333333333, ans=0.025
+2024-08-25 14:38:30,484 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 1.935e+02 2.327e+02 2.780e+02 5.034e+02, threshold=4.654e+02, percent-clipped=2.0
+2024-08-25 14:38:52,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=97840.0, ans=0.125
+2024-08-25 14:38:56,596 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=2.695e-03
+2024-08-25 14:38:58,907 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=97893.33333333333, ans=0.125
+2024-08-25 14:40:01,369 INFO [train.py:1114] (3/4) Epoch 8, batch 950, loss[loss=0.2596, simple_loss=0.2963, pruned_loss=0.08246, ctc_loss=0.1449, over 19496.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3066, pruned_loss=0.0767, ctc_loss=0.1441, over 3819282.94 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:40:20,432 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.64 vs. limit=22.5
+2024-08-25 14:42:03,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=98160.0, ans=0.125
+2024-08-25 14:42:04,996 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.86 vs. limit=10.0
+2024-08-25 14:42:05,878 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=98160.0, ans=0.1
+2024-08-25 14:43:29,287 INFO [train.py:1114] (3/4) Epoch 8, batch 1000, loss[loss=0.2084, simple_loss=0.2756, pruned_loss=0.05185, ctc_loss=0.09379, over 19860.00 frames. ], tot_loss[loss=0.2598, simple_loss=0.3074, pruned_loss=0.07713, ctc_loss=0.1448, over 3815697.00 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:43:31,874 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=98266.66666666667, ans=0.125
+2024-08-25 14:43:36,643 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=98266.66666666667, ans=0.125
+2024-08-25 14:43:47,366 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.014e+02 2.465e+02 3.304e+02 4.205e+02, threshold=4.930e+02, percent-clipped=0.0
+2024-08-25 14:43:49,233 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.56 vs. limit=12.0
+2024-08-25 14:46:06,674 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=98320.0, ans=0.07
+2024-08-25 14:46:27,109 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=98426.66666666667, ans=0.0
+2024-08-25 14:46:36,727 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=98480.0, ans=0.0
+2024-08-25 14:46:39,029 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=98480.0, ans=0.025
+2024-08-25 14:46:41,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=98480.0, ans=0.0
+2024-08-25 14:46:44,408 INFO [train.py:1114] (3/4) Epoch 8, batch 1050, loss[loss=0.2376, simple_loss=0.303, pruned_loss=0.06182, ctc_loss=0.1215, over 19846.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3071, pruned_loss=0.07699, ctc_loss=0.1445, over 3822946.03 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 32.0
+2024-08-25 14:46:53,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=98533.33333333333, ans=0.125
+2024-08-25 14:47:14,835 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.13 vs. limit=15.0
+2024-08-25 14:47:26,158 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.36 vs. limit=22.5
+2024-08-25 14:47:38,147 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=98746.66666666667, ans=0.2
+2024-08-25 14:47:42,838 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten.whitening_limit, batch_count=98746.66666666667, ans=22.5
+2024-08-25 14:47:44,587 INFO [train.py:1114] (3/4) Epoch 8, batch 1100, loss[loss=0.2491, simple_loss=0.3072, pruned_loss=0.0687, ctc_loss=0.134, over 19584.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3073, pruned_loss=0.07689, ctc_loss=0.1447, over 3831513.25 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:48:13,773 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.814e+02 2.071e+02 2.620e+02 3.682e+02, threshold=4.142e+02, percent-clipped=0.0
+2024-08-25 14:49:20,533 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.56 vs. limit=12.0
+2024-08-25 14:49:50,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=99013.33333333333, ans=0.0
+2024-08-25 14:50:00,867 INFO [train.py:1114] (3/4) Epoch 8, batch 1150, loss[loss=0.2382, simple_loss=0.2962, pruned_loss=0.06697, ctc_loss=0.1158, over 19593.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.307, pruned_loss=0.07697, ctc_loss=0.1445, over 3830419.49 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:51:18,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=99120.0, ans=0.125
+2024-08-25 14:51:18,423 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.01 vs. limit=15.0
+2024-08-25 14:52:31,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=99226.66666666667, ans=0.1
+2024-08-25 14:52:33,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff2.min_abs, batch_count=99226.66666666667, ans=0.1
+2024-08-25 14:52:45,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=99280.0, ans=0.0
+2024-08-25 14:52:46,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=99280.0, ans=0.1
+2024-08-25 14:52:51,785 INFO [train.py:1114] (3/4) Epoch 8, batch 1200, loss[loss=0.2465, simple_loss=0.3019, pruned_loss=0.06897, ctc_loss=0.1329, over 19836.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3077, pruned_loss=0.07706, ctc_loss=0.1447, over 3825608.64 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:52:57,191 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.32 vs. limit=15.0
+2024-08-25 14:53:06,250 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.879e+02 2.149e+02 2.634e+02 4.011e+02, threshold=4.298e+02, percent-clipped=0.0
+2024-08-25 14:53:08,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99386.66666666667, ans=0.1
+2024-08-25 14:53:48,973 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=99546.66666666667, ans=0.0
+2024-08-25 14:53:52,344 INFO [train.py:1114] (3/4) Epoch 8, batch 1250, loss[loss=0.2561, simple_loss=0.3127, pruned_loss=0.07327, ctc_loss=0.1324, over 19545.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3075, pruned_loss=0.07654, ctc_loss=0.1436, over 3843494.08 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-25 14:54:04,881 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.64 vs. limit=15.0
+2024-08-25 14:55:27,211 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=99706.66666666667, ans=0.025
+2024-08-25 14:55:39,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=99760.0, ans=0.125
+2024-08-25 14:55:44,527 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.71 vs. limit=15.0
+2024-08-25 14:55:45,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=99760.0, ans=0.125
+2024-08-25 14:55:51,283 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.48 vs. limit=10.0
+2024-08-25 14:55:55,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=99813.33333333333, ans=0.0
+2024-08-25 14:56:05,547 INFO [train.py:1114] (3/4) Epoch 8, batch 1300, loss[loss=0.272, simple_loss=0.315, pruned_loss=0.08249, ctc_loss=0.16, over 18841.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3067, pruned_loss=0.07647, ctc_loss=0.1436, over 3846002.07 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:56:17,010 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.809e+02 2.147e+02 2.747e+02 4.726e+02, threshold=4.293e+02, percent-clipped=4.0
+2024-08-25 14:56:22,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=99920.0, ans=0.125
+2024-08-25 14:58:00,713 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.13 vs. limit=6.0
+2024-08-25 14:58:02,822 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=100026.66666666667, ans=0.0
+2024-08-25 14:58:48,378 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100080.0, ans=0.125
+2024-08-25 14:58:49,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=100080.0, ans=0.125
+2024-08-25 14:58:50,506 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=100080.0, ans=0.0
+2024-08-25 14:58:53,765 INFO [train.py:1114] (3/4) Epoch 8, batch 1350, loss[loss=0.2561, simple_loss=0.3056, pruned_loss=0.07419, ctc_loss=0.1456, over 19766.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.3062, pruned_loss=0.07604, ctc_loss=0.1428, over 3857437.49 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:59:07,847 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100186.66666666667, ans=0.125
+2024-08-25 14:59:10,171 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100186.66666666667, ans=0.125
+2024-08-25 14:59:20,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=100240.0, ans=0.2
+2024-08-25 14:59:30,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=100293.33333333333, ans=0.5
+2024-08-25 14:59:31,068 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100293.33333333333, ans=0.1
+2024-08-25 14:59:44,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=100346.66666666667, ans=0.05
+2024-08-25 14:59:46,620 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 14:59:51,904 INFO [train.py:1114] (3/4) Epoch 8, batch 1400, loss[loss=0.2425, simple_loss=0.2815, pruned_loss=0.07416, ctc_loss=0.1382, over 19649.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3066, pruned_loss=0.07665, ctc_loss=0.1438, over 3863980.64 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 14:59:53,209 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100400.0, ans=0.0
+2024-08-25 14:59:54,688 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.26 vs. limit=12.0
+2024-08-25 15:00:03,306 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.018e+02 2.600e+02 3.300e+02 7.375e+02, threshold=5.199e+02, percent-clipped=11.0
+2024-08-25 15:00:43,982 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=100613.33333333333, ans=0.125
+2024-08-25 15:00:56,110 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.16 vs. limit=15.0
+2024-08-25 15:00:56,211 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.19 vs. limit=15.0
+2024-08-25 15:00:56,739 INFO [train.py:1114] (3/4) Epoch 8, batch 1450, loss[loss=0.2314, simple_loss=0.3029, pruned_loss=0.05843, ctc_loss=0.1077, over 19702.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3076, pruned_loss=0.07733, ctc_loss=0.145, over 3861968.41 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:01:27,493 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=100773.33333333333, ans=0.5
+2024-08-25 15:01:29,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=100773.33333333333, ans=0.5
+2024-08-25 15:01:37,929 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=100826.66666666667, ans=0.125
+2024-08-25 15:01:47,215 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.34 vs. limit=22.5
+2024-08-25 15:03:17,725 INFO [train.py:1114] (3/4) Epoch 8, batch 1500, loss[loss=0.2624, simple_loss=0.3218, pruned_loss=0.07299, ctc_loss=0.1425, over 19581.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.3079, pruned_loss=0.07735, ctc_loss=0.1451, over 3861705.14 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-25 15:04:56,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=100933.33333333333, ans=0.0
+2024-08-25 15:05:23,454 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=100986.66666666667, ans=0.015
+2024-08-25 15:05:24,439 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.972e+02 2.271e+02 2.845e+02 5.404e+02, threshold=4.542e+02, percent-clipped=1.0
+2024-08-25 15:05:38,359 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=100986.66666666667, ans=0.125
+2024-08-25 15:07:43,787 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:10:00,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=101146.66666666667, ans=0.125
+2024-08-25 15:10:11,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=101146.66666666667, ans=0.125
+2024-08-25 15:10:18,892 INFO [train.py:1114] (3/4) Epoch 8, batch 1550, loss[loss=0.2931, simple_loss=0.3416, pruned_loss=0.08891, ctc_loss=0.1672, over 19613.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3079, pruned_loss=0.07752, ctc_loss=0.1456, over 3846191.26 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:12:39,139 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=101306.66666666667, ans=0.125
+2024-08-25 15:13:04,595 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101360.0, ans=0.125
+2024-08-25 15:14:02,118 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=101413.33333333333, ans=0.0
+2024-08-25 15:14:11,854 INFO [train.py:1114] (3/4) Epoch 8, batch 1600, loss[loss=0.2444, simple_loss=0.308, pruned_loss=0.06541, ctc_loss=0.1251, over 19838.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3078, pruned_loss=0.07743, ctc_loss=0.1453, over 3835823.42 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:14:22,194 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.06 vs. limit=15.0
+2024-08-25 15:14:31,979 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 1.915e+02 2.222e+02 2.696e+02 4.640e+02, threshold=4.444e+02, percent-clipped=1.0
+2024-08-25 15:14:43,483 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=101520.0, ans=0.125
+2024-08-25 15:15:16,594 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=101680.0, ans=0.0
+2024-08-25 15:15:17,788 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101680.0, ans=0.125
+2024-08-25 15:15:23,788 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=101680.0, ans=0.125
+2024-08-25 15:15:30,475 INFO [train.py:1114] (3/4) Epoch 8, batch 1650, loss[loss=0.3117, simple_loss=0.3453, pruned_loss=0.1006, ctc_loss=0.1922, over 19651.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3078, pruned_loss=0.07762, ctc_loss=0.1461, over 3833814.27 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-25 15:15:36,930 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_na.min_abs, batch_count=101733.33333333333, ans=0.02
+2024-08-25 15:15:38,272 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.99 vs. limit=15.0
+2024-08-25 15:15:40,174 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 15:15:50,867 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.60 vs. limit=22.5
+2024-08-25 15:15:51,104 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.08 vs. limit=22.5
+2024-08-25 15:16:28,205 INFO [train.py:1114] (3/4) Epoch 8, batch 1700, loss[loss=0.2196, simple_loss=0.2675, pruned_loss=0.0631, ctc_loss=0.1138, over 19700.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3074, pruned_loss=0.07724, ctc_loss=0.1453, over 3847576.27 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:16:40,736 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 1.920e+02 2.237e+02 2.711e+02 4.644e+02, threshold=4.474e+02, percent-clipped=2.0
+2024-08-25 15:17:01,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=102106.66666666667, ans=0.125
+2024-08-25 15:17:15,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=102160.0, ans=0.125
+2024-08-25 15:17:39,498 INFO [train.py:1114] (3/4) Epoch 8, batch 1750, loss[loss=0.2554, simple_loss=0.2976, pruned_loss=0.0778, ctc_loss=0.1439, over 19644.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3066, pruned_loss=0.07658, ctc_loss=0.1441, over 3852621.28 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 16.0
+2024-08-25 15:17:43,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=102266.66666666667, ans=0.0
+2024-08-25 15:17:45,677 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.13 vs. limit=12.0
+2024-08-25 15:17:45,952 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.57 vs. limit=5.0
+2024-08-25 15:18:04,460 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=102320.0, ans=0.125
+2024-08-25 15:20:04,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=102426.66666666667, ans=0.0
+2024-08-25 15:20:12,815 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=102426.66666666667, ans=0.125
+2024-08-25 15:20:17,948 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.22 vs. limit=15.0
+2024-08-25 15:20:25,848 INFO [train.py:1114] (3/4) Epoch 8, batch 1800, loss[loss=0.2683, simple_loss=0.3145, pruned_loss=0.08117, ctc_loss=0.1494, over 19618.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3064, pruned_loss=0.07664, ctc_loss=0.1439, over 3853804.88 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:20:37,815 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 1.874e+02 2.230e+02 2.859e+02 4.439e+02, threshold=4.460e+02, percent-clipped=0.0
+2024-08-25 15:20:38,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=102586.66666666667, ans=0.0
+2024-08-25 15:20:39,925 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=102586.66666666667, ans=0.125
+2024-08-25 15:23:34,822 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.615e-02
+2024-08-25 15:23:36,318 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.75 vs. limit=22.5
+2024-08-25 15:24:44,680 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=102640.0, ans=0.0
+2024-08-25 15:26:48,858 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.98 vs. limit=15.0
+2024-08-25 15:26:49,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=102693.33333333333, ans=0.0
+2024-08-25 15:26:49,894 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=19.84 vs. limit=22.5
+2024-08-25 15:26:54,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=102746.66666666667, ans=0.125
+2024-08-25 15:28:56,995 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=102746.66666666667, ans=0.025
+2024-08-25 15:28:58,632 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.35 vs. limit=22.5
+2024-08-25 15:28:59,113 INFO [train.py:1114] (3/4) Epoch 8, batch 1850, loss[loss=0.2546, simple_loss=0.311, pruned_loss=0.07073, ctc_loss=0.1421, over 19596.00 frames. ], tot_loss[loss=0.2583, simple_loss=0.3063, pruned_loss=0.07644, ctc_loss=0.1437, over 3856677.92 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:29:28,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=102906.66666666667, ans=0.125
+2024-08-25 15:29:42,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten.whitening_limit, batch_count=102960.0, ans=15.0
+2024-08-25 15:29:46,448 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=102960.0, ans=0.125
+2024-08-25 15:32:38,672 INFO [train.py:1114] (3/4) Epoch 8, batch 1900, loss[loss=0.2448, simple_loss=0.3177, pruned_loss=0.06188, ctc_loss=0.1202, over 19665.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3068, pruned_loss=0.07629, ctc_loss=0.1434, over 3860546.74 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:32:52,964 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 1.872e+02 2.139e+02 2.618e+02 5.849e+02, threshold=4.279e+02, percent-clipped=4.0
+2024-08-25 15:33:12,304 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=103173.33333333333, ans=0.0
+2024-08-25 15:33:19,941 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=103226.66666666667, ans=0.125
+2024-08-25 15:33:20,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103226.66666666667, ans=0.1
+2024-08-25 15:33:30,532 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.56 vs. limit=22.5
+2024-08-25 15:33:31,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=103280.0, ans=0.0
+2024-08-25 15:33:37,630 INFO [train.py:1114] (3/4) Epoch 8, batch 1950, loss[loss=0.2423, simple_loss=0.2944, pruned_loss=0.06967, ctc_loss=0.127, over 19582.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3077, pruned_loss=0.07654, ctc_loss=0.1436, over 3869516.51 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-25 15:33:54,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=103333.33333333333, ans=0.125
+2024-08-25 15:34:14,105 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=103440.0, ans=0.0
+2024-08-25 15:34:23,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=103493.33333333333, ans=0.125
+2024-08-25 15:34:34,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=103546.66666666667, ans=0.125
+2024-08-25 15:34:36,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.min_positive, batch_count=103546.66666666667, ans=0.05
+2024-08-25 15:34:42,882 INFO [train.py:1114] (3/4) Epoch 8, batch 2000, loss[loss=0.2327, simple_loss=0.278, pruned_loss=0.06873, ctc_loss=0.125, over 19650.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3083, pruned_loss=0.07693, ctc_loss=0.1444, over 3854954.45 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 32.0
+2024-08-25 15:34:44,172 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=103600.0, ans=0.125
+2024-08-25 15:34:46,390 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.69 vs. limit=12.0
+2024-08-25 15:34:48,564 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.82 vs. limit=22.5
+2024-08-25 15:34:55,658 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 2.022e+02 2.450e+02 4.734e+02, threshold=4.043e+02, percent-clipped=1.0
+2024-08-25 15:34:57,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=103653.33333333333, ans=0.0
+2024-08-25 15:35:17,881 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=103760.0, ans=0.1
+2024-08-25 15:35:24,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=103760.0, ans=0.125
+2024-08-25 15:35:25,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=103760.0, ans=0.125
+2024-08-25 15:35:32,565 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.90 vs. limit=15.0
+2024-08-25 15:35:36,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=103813.33333333333, ans=0.025
+2024-08-25 15:35:38,626 INFO [train.py:1114] (3/4) Epoch 8, batch 2050, loss[loss=0.2508, simple_loss=0.2872, pruned_loss=0.07842, ctc_loss=0.1439, over 19689.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.307, pruned_loss=0.07686, ctc_loss=0.1444, over 3851533.26 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:35:41,840 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=103866.66666666667, ans=0.0
+2024-08-25 15:35:48,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=103920.0, ans=0.125
+2024-08-25 15:35:58,051 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=103920.0, ans=0.0
+2024-08-25 15:35:58,188 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=103920.0, ans=0.0
+2024-08-25 15:36:11,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=104026.66666666667, ans=0.125
+2024-08-25 15:36:26,701 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104080.0, ans=0.1
+2024-08-25 15:36:30,858 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=104080.0, ans=0.0
+2024-08-25 15:36:32,748 INFO [train.py:1114] (3/4) Epoch 8, batch 2100, loss[loss=0.236, simple_loss=0.2994, pruned_loss=0.06166, ctc_loss=0.1232, over 19763.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3059, pruned_loss=0.07579, ctc_loss=0.1425, over 3858443.25 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:36:44,892 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.055e+02 2.348e+02 2.987e+02 4.948e+02, threshold=4.695e+02, percent-clipped=5.0
+2024-08-25 15:37:02,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=104240.0, ans=0.0
+2024-08-25 15:37:11,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=104293.33333333333, ans=0.07
+2024-08-25 15:37:18,104 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.94 vs. limit=22.5
+2024-08-25 15:37:26,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=104400.0, ans=0.0
+2024-08-25 15:37:27,186 INFO [train.py:1114] (3/4) Epoch 8, batch 2150, loss[loss=0.2284, simple_loss=0.2906, pruned_loss=0.05968, ctc_loss=0.1169, over 19847.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3042, pruned_loss=0.07474, ctc_loss=0.1406, over 3870537.57 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:37:52,708 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.18 vs. limit=15.0
+2024-08-25 15:38:01,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=104560.0, ans=0.125
+2024-08-25 15:38:05,328 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=104560.0, ans=0.0
+2024-08-25 15:38:10,915 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=104613.33333333333, ans=0.125
+2024-08-25 15:38:23,248 INFO [train.py:1114] (3/4) Epoch 8, batch 2200, loss[loss=0.28, simple_loss=0.3188, pruned_loss=0.0883, ctc_loss=0.1616, over 19586.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3046, pruned_loss=0.07525, ctc_loss=0.1416, over 3869428.46 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:38:28,181 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.70 vs. limit=15.0
+2024-08-25 15:38:29,082 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=104666.66666666667, ans=0.125
+2024-08-25 15:38:33,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=104720.0, ans=0.125
+2024-08-25 15:38:35,670 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.961e+02 2.280e+02 3.038e+02 5.675e+02, threshold=4.560e+02, percent-clipped=2.0
+2024-08-25 15:38:52,864 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.22 vs. limit=22.5
+2024-08-25 15:38:53,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=104773.33333333333, ans=0.125
+2024-08-25 15:38:54,643 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=104773.33333333333, ans=0.0
+2024-08-25 15:38:56,198 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.96 vs. limit=15.0
+2024-08-25 15:39:10,694 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.71 vs. limit=15.0
+2024-08-25 15:39:16,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=104880.0, ans=0.125
+2024-08-25 15:39:19,051 INFO [train.py:1114] (3/4) Epoch 8, batch 2250, loss[loss=0.2554, simple_loss=0.3087, pruned_loss=0.07228, ctc_loss=0.1437, over 19610.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3045, pruned_loss=0.07527, ctc_loss=0.1415, over 3868784.71 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 32.0
+2024-08-25 15:39:30,803 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.43 vs. limit=22.5
+2024-08-25 15:39:31,730 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.86 vs. limit=15.0
+2024-08-25 15:39:41,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=105040.0, ans=0.09899494936611666
+2024-08-25 15:39:43,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105040.0, ans=0.1
+2024-08-25 15:39:53,531 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.12 vs. limit=15.0
+2024-08-25 15:40:04,552 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=105146.66666666667, ans=0.0
+2024-08-25 15:40:07,665 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=105146.66666666667, ans=0.2
+2024-08-25 15:40:14,494 INFO [train.py:1114] (3/4) Epoch 8, batch 2300, loss[loss=0.237, simple_loss=0.2901, pruned_loss=0.0668, ctc_loss=0.1259, over 19479.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.304, pruned_loss=0.07544, ctc_loss=0.1417, over 3862172.15 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:40:15,561 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=105200.0, ans=0.125
+2024-08-25 15:40:28,020 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.907e+02 2.167e+02 2.593e+02 4.976e+02, threshold=4.335e+02, percent-clipped=1.0
+2024-08-25 15:40:28,232 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=105253.33333333333, ans=0.125
+2024-08-25 15:40:36,293 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=105253.33333333333, ans=0.125
+2024-08-25 15:40:41,009 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-25 15:41:04,034 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.34 vs. limit=6.0
+2024-08-25 15:41:06,790 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105413.33333333333, ans=0.125
+2024-08-25 15:41:11,097 INFO [train.py:1114] (3/4) Epoch 8, batch 2350, loss[loss=0.2796, simple_loss=0.3264, pruned_loss=0.08427, ctc_loss=0.1606, over 19709.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3043, pruned_loss=0.07545, ctc_loss=0.1415, over 3863657.07 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:41:24,248 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=105520.0, ans=0.0
+2024-08-25 15:41:34,536 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.28 vs. limit=6.0
+2024-08-25 15:41:40,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-25 15:41:42,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-25 15:42:06,006 INFO [train.py:1114] (3/4) Epoch 8, batch 2400, loss[loss=0.288, simple_loss=0.3362, pruned_loss=0.08826, ctc_loss=0.1583, over 19299.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3073, pruned_loss=0.07687, ctc_loss=0.1442, over 3857803.31 frames. ], batch size: 71, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:42:18,058 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.983e+02 2.255e+02 2.870e+02 5.067e+02, threshold=4.510e+02, percent-clipped=2.0
+2024-08-25 15:42:29,147 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=105840.0, ans=0.5
+2024-08-25 15:42:44,102 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.81 vs. limit=22.5
+2024-08-25 15:43:01,713 INFO [train.py:1114] (3/4) Epoch 8, batch 2450, loss[loss=0.3468, simple_loss=0.3508, pruned_loss=0.1241, ctc_loss=0.2362, over 13559.00 frames. ], tot_loss[loss=0.2669, simple_loss=0.3118, pruned_loss=0.08064, ctc_loss=0.1517, over 3729557.09 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 32.0
+2024-08-25 15:43:04,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=106000.0, ans=0.125
+2024-08-25 15:43:19,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=106053.33333333333, ans=15.0
+2024-08-25 15:43:24,733 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=106106.66666666667, ans=0.0
+2024-08-25 15:44:31,283 INFO [train.py:1114] (3/4) Epoch 9, batch 0, loss[loss=0.2321, simple_loss=0.2806, pruned_loss=0.06637, ctc_loss=0.1273, over 19434.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.2806, pruned_loss=0.06637, ctc_loss=0.1273, over 19434.00 frames. ], batch size: 48, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:44:31,283 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 15:44:49,829 INFO [train.py:1146] (3/4) Epoch 9, validation: loss=0.21, simple_loss=0.2947, pruned_loss=0.04621, ctc_loss=0.08206, over 944034.00 frames.
+2024-08-25 15:44:49,829 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 15:44:52,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=106208.0, ans=0.125
+2024-08-25 15:44:53,268 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106208.0, ans=0.125
+2024-08-25 15:44:55,838 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=106208.0, ans=0.0
+2024-08-25 15:45:15,533 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.154e+02 2.510e+02 2.953e+02 5.707e+02, threshold=5.019e+02, percent-clipped=2.0
+2024-08-25 15:45:19,083 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=106314.66666666667, ans=0.125
+2024-08-25 15:46:20,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=106368.0, ans=0.05
+2024-08-25 15:46:22,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=106368.0, ans=0.07
+2024-08-25 15:46:36,875 INFO [train.py:1114] (3/4) Epoch 9, batch 50, loss[loss=0.2374, simple_loss=0.2863, pruned_loss=0.0684, ctc_loss=0.1295, over 19699.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.3084, pruned_loss=0.07757, ctc_loss=0.1454, over 844655.46 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:46:44,768 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106474.66666666667, ans=0.125
+2024-08-25 15:46:55,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=106528.0, ans=0.05
+2024-08-25 15:47:08,607 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.52 vs. limit=15.0
+2024-08-25 15:47:17,356 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.67 vs. limit=22.5
+2024-08-25 15:47:28,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=106634.66666666667, ans=0.035
+2024-08-25 15:47:34,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=106688.0, ans=0.0
+2024-08-25 15:47:35,153 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106688.0, ans=0.125
+2024-08-25 15:47:42,082 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=106688.0, ans=0.2
+2024-08-25 15:47:44,293 INFO [train.py:1114] (3/4) Epoch 9, batch 100, loss[loss=0.2293, simple_loss=0.2863, pruned_loss=0.0625, ctc_loss=0.1183, over 19735.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3084, pruned_loss=0.07697, ctc_loss=0.1446, over 1499016.47 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:47:52,330 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106741.33333333333, ans=0.1
+2024-08-25 15:48:00,642 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=106794.66666666667, ans=0.035
+2024-08-25 15:48:08,612 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=106848.0, ans=0.2
+2024-08-25 15:48:09,483 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.842e+02 2.163e+02 2.785e+02 4.838e+02, threshold=4.326e+02, percent-clipped=0.0
+2024-08-25 15:48:09,777 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=106848.0, ans=0.0
+2024-08-25 15:48:16,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=106848.0, ans=0.125
+2024-08-25 15:48:42,140 INFO [train.py:1114] (3/4) Epoch 9, batch 150, loss[loss=0.2106, simple_loss=0.2736, pruned_loss=0.05368, ctc_loss=0.1008, over 19680.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3042, pruned_loss=0.07422, ctc_loss=0.1398, over 2026886.82 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-25 15:48:45,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=107008.0, ans=0.125
+2024-08-25 15:48:46,869 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.86 vs. limit=22.5
+2024-08-25 15:48:50,179 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107008.0, ans=0.125
+2024-08-25 15:48:54,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=107061.33333333333, ans=0.125
+2024-08-25 15:49:02,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=107061.33333333333, ans=10.0
+2024-08-25 15:49:04,583 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=107061.33333333333, ans=0.0
+2024-08-25 15:49:05,702 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107114.66666666667, ans=0.125
+2024-08-25 15:49:08,566 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.25 vs. limit=5.0
+2024-08-25 15:49:20,942 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.31 vs. limit=15.0
+2024-08-25 15:49:38,208 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.34 vs. limit=15.0
+2024-08-25 15:49:41,042 INFO [train.py:1114] (3/4) Epoch 9, batch 200, loss[loss=0.2853, simple_loss=0.3255, pruned_loss=0.08863, ctc_loss=0.1697, over 18314.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3024, pruned_loss=0.07296, ctc_loss=0.1373, over 2435917.01 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:50:01,879 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=107328.0, ans=0.0
+2024-08-25 15:50:06,171 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.799e+02 2.039e+02 2.617e+02 5.282e+02, threshold=4.078e+02, percent-clipped=1.0
+2024-08-25 15:50:43,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=107381.33333333333, ans=0.125
+2024-08-25 15:50:46,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=107381.33333333333, ans=0.0
+2024-08-25 15:50:53,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=107434.66666666667, ans=0.0
+2024-08-25 15:50:54,244 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.53 vs. limit=15.0
+2024-08-25 15:51:09,683 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.18 vs. limit=6.0
+2024-08-25 15:51:14,080 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107488.0, ans=0.1
+2024-08-25 15:51:17,116 INFO [train.py:1114] (3/4) Epoch 9, batch 250, loss[loss=0.266, simple_loss=0.3185, pruned_loss=0.07843, ctc_loss=0.1417, over 19343.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3028, pruned_loss=0.07342, ctc_loss=0.1381, over 2755622.38 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:51:21,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-08-25 15:51:36,070 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=107594.66666666667, ans=0.2
+2024-08-25 15:52:09,640 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=107754.66666666667, ans=0.125
+2024-08-25 15:52:10,796 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-25 15:52:13,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=107754.66666666667, ans=10.0
+2024-08-25 15:52:13,114 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-25 15:52:14,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=107754.66666666667, ans=0.125
+2024-08-25 15:52:15,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107754.66666666667, ans=0.125
+2024-08-25 15:52:18,772 INFO [train.py:1114] (3/4) Epoch 9, batch 300, loss[loss=0.2936, simple_loss=0.3399, pruned_loss=0.09013, ctc_loss=0.1677, over 19523.00 frames. ], tot_loss[loss=0.253, simple_loss=0.303, pruned_loss=0.07373, ctc_loss=0.1387, over 3000418.68 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:52:38,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107861.33333333333, ans=0.1
+2024-08-25 15:52:39,945 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=107861.33333333333, ans=0.2
+2024-08-25 15:52:47,050 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 1.831e+02 2.248e+02 2.885e+02 5.251e+02, threshold=4.495e+02, percent-clipped=2.0
+2024-08-25 15:52:57,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-25 15:52:58,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=107968.0, ans=0.2
+2024-08-25 15:53:06,000 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.46 vs. limit=15.0
+2024-08-25 15:53:10,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=108021.33333333333, ans=0.0
+2024-08-25 15:53:18,332 INFO [train.py:1114] (3/4) Epoch 9, batch 350, loss[loss=0.2489, simple_loss=0.2984, pruned_loss=0.07315, ctc_loss=0.133, over 19744.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3034, pruned_loss=0.07354, ctc_loss=0.1382, over 3190835.51 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-25 15:53:20,790 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=108074.66666666667, ans=0.125
+2024-08-25 15:53:39,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108128.0, ans=0.125
+2024-08-25 15:53:55,013 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=108234.66666666667, ans=0.0
+2024-08-25 15:54:05,147 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=108288.0, ans=0.0
+2024-08-25 15:54:07,501 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=108288.0, ans=0.125
+2024-08-25 15:54:14,909 INFO [train.py:1114] (3/4) Epoch 9, batch 400, loss[loss=0.2372, simple_loss=0.2956, pruned_loss=0.06489, ctc_loss=0.1226, over 19519.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3026, pruned_loss=0.07303, ctc_loss=0.1374, over 3342303.24 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 32.0
+2024-08-25 15:54:16,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=108341.33333333333, ans=0.0
+2024-08-25 15:54:16,617 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.40 vs. limit=12.0
+2024-08-25 15:54:43,458 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.039e+02 2.514e+02 3.062e+02 4.428e+02, threshold=5.028e+02, percent-clipped=0.0
+2024-08-25 15:54:50,212 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.66 vs. limit=15.0
+2024-08-25 15:55:05,836 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108554.66666666667, ans=0.1
+2024-08-25 15:55:18,512 INFO [train.py:1114] (3/4) Epoch 9, batch 450, loss[loss=0.2361, simple_loss=0.2983, pruned_loss=0.06248, ctc_loss=0.1223, over 19625.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3029, pruned_loss=0.0731, ctc_loss=0.1376, over 3451742.59 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:55:38,131 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff3.min_abs, batch_count=108661.33333333333, ans=0.2
+2024-08-25 15:55:51,278 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108768.0, ans=0.1
+2024-08-25 15:57:34,576 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.66 vs. limit=6.0
+2024-08-25 15:58:59,080 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=108821.33333333333, ans=0.125
+2024-08-25 15:59:11,160 INFO [train.py:1114] (3/4) Epoch 9, batch 500, loss[loss=0.2646, simple_loss=0.3219, pruned_loss=0.075, ctc_loss=0.1429, over 19699.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3025, pruned_loss=0.07314, ctc_loss=0.1378, over 3546623.14 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 15:59:23,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108928.0, ans=0.1
+2024-08-25 15:59:37,499 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.839e+02 2.298e+02 3.023e+02 4.931e+02, threshold=4.596e+02, percent-clipped=0.0
+2024-08-25 15:59:50,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=109034.66666666667, ans=0.125
+2024-08-25 16:00:08,624 INFO [train.py:1114] (3/4) Epoch 9, batch 550, loss[loss=0.2649, simple_loss=0.3146, pruned_loss=0.07926, ctc_loss=0.1416, over 19302.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3022, pruned_loss=0.07318, ctc_loss=0.1379, over 3608514.03 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:00:20,650 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=109194.66666666667, ans=0.09899494936611666
+2024-08-25 16:00:37,029 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=109248.0, ans=0.0
+2024-08-25 16:01:12,454 INFO [train.py:1114] (3/4) Epoch 9, batch 600, loss[loss=0.2694, simple_loss=0.3181, pruned_loss=0.08136, ctc_loss=0.1448, over 19437.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3029, pruned_loss=0.07353, ctc_loss=0.1382, over 3665876.29 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:01:39,759 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=109461.33333333333, ans=0.0
+2024-08-25 16:01:51,487 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.960e+02 2.208e+02 2.721e+02 5.490e+02, threshold=4.416e+02, percent-clipped=2.0
+2024-08-25 16:02:21,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=109514.66666666667, ans=0.2
+2024-08-25 16:02:25,893 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=109568.0, ans=0.125
+2024-08-25 16:02:27,622 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.93 vs. limit=22.5
+2024-08-25 16:02:28,444 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=109568.0, ans=0.0
+2024-08-25 16:02:33,921 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109568.0, ans=0.1
+2024-08-25 16:02:37,318 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:02:47,613 INFO [train.py:1114] (3/4) Epoch 9, batch 650, loss[loss=0.24, simple_loss=0.3068, pruned_loss=0.06316, ctc_loss=0.117, over 19767.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3013, pruned_loss=0.07242, ctc_loss=0.1361, over 3716373.21 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:02:48,979 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:03:14,244 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=109781.33333333333, ans=0.0
+2024-08-25 16:03:16,707 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:03:23,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=109834.66666666667, ans=0.125
+2024-08-25 16:03:25,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109834.66666666667, ans=0.1
+2024-08-25 16:03:30,912 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.38 vs. limit=12.0
+2024-08-25 16:03:35,531 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.85 vs. limit=22.5
+2024-08-25 16:03:47,858 INFO [train.py:1114] (3/4) Epoch 9, batch 700, loss[loss=0.2317, simple_loss=0.2903, pruned_loss=0.06276, ctc_loss=0.1191, over 19716.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3012, pruned_loss=0.07222, ctc_loss=0.1359, over 3747551.70 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 32.0
+2024-08-25 16:03:49,624 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.90 vs. limit=15.0
+2024-08-25 16:04:04,235 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:04:14,379 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 1.949e+02 2.382e+02 2.859e+02 4.618e+02, threshold=4.764e+02, percent-clipped=1.0
+2024-08-25 16:04:15,743 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.751e-01
+2024-08-25 16:04:29,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=110101.33333333333, ans=0.0
+2024-08-25 16:04:30,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=110101.33333333333, ans=0.0
+2024-08-25 16:04:36,394 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.whiten.whitening_limit, batch_count=110154.66666666667, ans=15.0
+2024-08-25 16:04:37,094 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.81 vs. limit=6.0
+2024-08-25 16:04:44,749 INFO [train.py:1114] (3/4) Epoch 9, batch 750, loss[loss=0.245, simple_loss=0.3024, pruned_loss=0.06797, ctc_loss=0.1289, over 19492.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3012, pruned_loss=0.07224, ctc_loss=0.1358, over 3774251.77 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:04:51,345 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=110208.0, ans=0.125
+2024-08-25 16:05:00,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=110261.33333333333, ans=0.0
+2024-08-25 16:05:40,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=110421.33333333333, ans=0.0
+2024-08-25 16:05:48,067 INFO [train.py:1114] (3/4) Epoch 9, batch 800, loss[loss=0.2523, simple_loss=0.2927, pruned_loss=0.07578, ctc_loss=0.1508, over 19402.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3007, pruned_loss=0.07199, ctc_loss=0.1357, over 3794706.26 frames. ], batch size: 48, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:05:56,707 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.11 vs. limit=10.0
+2024-08-25 16:06:01,191 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=110528.0, ans=0.2
+2024-08-25 16:06:14,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=110581.33333333333, ans=0.2
+2024-08-25 16:06:14,964 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.861e+02 2.104e+02 2.558e+02 4.618e+02, threshold=4.207e+02, percent-clipped=0.0
+2024-08-25 16:06:22,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=110634.66666666667, ans=0.2
+2024-08-25 16:06:25,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=110634.66666666667, ans=0.1
+2024-08-25 16:06:45,200 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=110688.0, ans=0.2
+2024-08-25 16:06:47,181 INFO [train.py:1114] (3/4) Epoch 9, batch 850, loss[loss=0.2522, simple_loss=0.3061, pruned_loss=0.07318, ctc_loss=0.1298, over 19644.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3003, pruned_loss=0.07183, ctc_loss=0.1354, over 3813664.29 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:07:09,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=110848.0, ans=0.125
+2024-08-25 16:07:28,085 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:07:34,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110954.66666666667, ans=0.125
+2024-08-25 16:08:39,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=110954.66666666667, ans=0.2
+2024-08-25 16:08:42,729 INFO [train.py:1114] (3/4) Epoch 9, batch 900, loss[loss=0.2663, simple_loss=0.3013, pruned_loss=0.08307, ctc_loss=0.1628, over 19412.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3005, pruned_loss=0.07216, ctc_loss=0.136, over 3817494.92 frames. ], batch size: 48, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:09:12,348 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.982e+02 2.328e+02 2.784e+02 5.806e+02, threshold=4.657e+02, percent-clipped=1.0
+2024-08-25 16:09:18,926 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.70 vs. limit=15.0
+2024-08-25 16:09:20,575 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=111168.0, ans=0.0
+2024-08-25 16:09:24,564 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.71 vs. limit=6.0
+2024-08-25 16:09:47,296 INFO [train.py:1114] (3/4) Epoch 9, batch 950, loss[loss=0.2272, simple_loss=0.2826, pruned_loss=0.06272, ctc_loss=0.1162, over 19494.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3012, pruned_loss=0.07267, ctc_loss=0.1371, over 3818012.97 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:09:56,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=111274.66666666667, ans=0.125
+2024-08-25 16:10:29,280 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=111434.66666666667, ans=0.0
+2024-08-25 16:10:30,461 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111434.66666666667, ans=0.125
+2024-08-25 16:10:37,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=111488.0, ans=0.125
+2024-08-25 16:10:45,138 INFO [train.py:1114] (3/4) Epoch 9, batch 1000, loss[loss=0.2237, simple_loss=0.2804, pruned_loss=0.0604, ctc_loss=0.1153, over 19873.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3017, pruned_loss=0.07305, ctc_loss=0.1379, over 3813686.65 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-25 16:11:03,877 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=111594.66666666667, ans=0.0
+2024-08-25 16:11:13,873 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.864e+02 2.156e+02 2.793e+02 4.751e+02, threshold=4.311e+02, percent-clipped=1.0
+2024-08-25 16:11:20,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=111648.0, ans=0.0
+2024-08-25 16:11:21,400 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=111701.33333333333, ans=0.025
+2024-08-25 16:11:21,438 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=111701.33333333333, ans=0.125
+2024-08-25 16:11:31,201 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.82 vs. limit=15.0
+2024-08-25 16:11:39,396 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=13.08 vs. limit=15.0
+2024-08-25 16:11:45,639 INFO [train.py:1114] (3/4) Epoch 9, batch 1050, loss[loss=0.2467, simple_loss=0.3022, pruned_loss=0.06981, ctc_loss=0.1288, over 19837.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3013, pruned_loss=0.07296, ctc_loss=0.1374, over 3819984.06 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:11:48,190 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111808.0, ans=0.1
+2024-08-25 16:12:06,658 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=111861.33333333333, ans=0.0
+2024-08-25 16:12:14,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=111914.66666666667, ans=0.0
+2024-08-25 16:12:20,949 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.49 vs. limit=15.0
+2024-08-25 16:12:32,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=111968.0, ans=0.0
+2024-08-25 16:12:41,400 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=112021.33333333333, ans=0.125
+2024-08-25 16:12:51,813 INFO [train.py:1114] (3/4) Epoch 9, batch 1100, loss[loss=0.255, simple_loss=0.2967, pruned_loss=0.07714, ctc_loss=0.1477, over 19591.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3005, pruned_loss=0.0722, ctc_loss=0.136, over 3828029.26 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:12:54,230 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112074.66666666667, ans=0.1
+2024-08-25 16:13:19,831 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 1.820e+02 2.090e+02 2.645e+02 4.523e+02, threshold=4.179e+02, percent-clipped=2.0
+2024-08-25 16:13:20,625 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.52 vs. limit=15.0
+2024-08-25 16:13:26,060 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.13 vs. limit=15.0
+2024-08-25 16:13:50,951 INFO [train.py:1114] (3/4) Epoch 9, batch 1150, loss[loss=0.2418, simple_loss=0.2902, pruned_loss=0.07009, ctc_loss=0.1331, over 19586.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3006, pruned_loss=0.07235, ctc_loss=0.1364, over 3828256.56 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:14:11,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=112394.66666666667, ans=0.025
+2024-08-25 16:14:30,266 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112501.33333333333, ans=0.1
+2024-08-25 16:14:30,284 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112501.33333333333, ans=0.125
+2024-08-25 16:14:49,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=112608.0, ans=0.125
+2024-08-25 16:14:51,118 INFO [train.py:1114] (3/4) Epoch 9, batch 1200, loss[loss=0.2562, simple_loss=0.3109, pruned_loss=0.07379, ctc_loss=0.1351, over 19818.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3014, pruned_loss=0.07255, ctc_loss=0.1367, over 3824123.47 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:14:51,562 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.05 vs. limit=6.0
+2024-08-25 16:14:59,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=112608.0, ans=0.125
+2024-08-25 16:15:10,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=112661.33333333333, ans=0.0
+2024-08-25 16:15:51,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=112661.33333333333, ans=0.125
+2024-08-25 16:15:52,800 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=112714.66666666667, ans=0.125
+2024-08-25 16:16:05,754 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.875e+02 2.166e+02 2.598e+02 4.323e+02, threshold=4.331e+02, percent-clipped=2.0
+2024-08-25 16:16:17,024 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=112768.0, ans=0.0
+2024-08-25 16:16:37,317 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112821.33333333333, ans=0.0
+2024-08-25 16:16:38,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112874.66666666667, ans=0.125
+2024-08-25 16:16:39,519 INFO [train.py:1114] (3/4) Epoch 9, batch 1250, loss[loss=0.2501, simple_loss=0.3027, pruned_loss=0.07163, ctc_loss=0.1355, over 19537.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3018, pruned_loss=0.07262, ctc_loss=0.1367, over 3842786.22 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-25 16:16:40,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=112874.66666666667, ans=0.2
+2024-08-25 16:17:38,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=113088.0, ans=0.04949747468305833
+2024-08-25 16:17:39,932 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=113141.33333333333, ans=0.95
+2024-08-25 16:17:39,964 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=113141.33333333333, ans=0.125
+2024-08-25 16:17:40,925 INFO [train.py:1114] (3/4) Epoch 9, batch 1300, loss[loss=0.2641, simple_loss=0.3093, pruned_loss=0.08024, ctc_loss=0.1461, over 18758.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3008, pruned_loss=0.07225, ctc_loss=0.1361, over 3846324.30 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:17:43,281 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=113141.33333333333, ans=0.0
+2024-08-25 16:18:08,515 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 1.959e+02 2.315e+02 2.984e+02 4.812e+02, threshold=4.630e+02, percent-clipped=1.0
+2024-08-25 16:18:23,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=113301.33333333333, ans=0.0
+2024-08-25 16:18:26,909 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=113301.33333333333, ans=0.07
+2024-08-25 16:18:30,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=113354.66666666667, ans=0.125
+2024-08-25 16:18:37,855 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.13 vs. limit=15.0
+2024-08-25 16:18:42,120 INFO [train.py:1114] (3/4) Epoch 9, batch 1350, loss[loss=0.2442, simple_loss=0.3013, pruned_loss=0.06763, ctc_loss=0.1296, over 19765.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3003, pruned_loss=0.07179, ctc_loss=0.1351, over 3857696.96 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:18:49,210 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=113408.0, ans=0.0
+2024-08-25 16:19:04,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=113514.66666666667, ans=0.0
+2024-08-25 16:19:12,395 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=113514.66666666667, ans=0.07
+2024-08-25 16:19:26,183 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.67 vs. limit=6.0
+2024-08-25 16:19:40,004 INFO [train.py:1114] (3/4) Epoch 9, batch 1400, loss[loss=0.2381, simple_loss=0.28, pruned_loss=0.07137, ctc_loss=0.1339, over 19689.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3, pruned_loss=0.0718, ctc_loss=0.1348, over 3864354.05 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:19:42,647 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=113674.66666666667, ans=0.2
+2024-08-25 16:20:07,554 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.860e+02 2.127e+02 2.545e+02 4.134e+02, threshold=4.253e+02, percent-clipped=0.0
+2024-08-25 16:20:07,727 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=113781.33333333333, ans=0.0
+2024-08-25 16:20:43,001 INFO [train.py:1114] (3/4) Epoch 9, batch 1450, loss[loss=0.2662, simple_loss=0.3124, pruned_loss=0.08079, ctc_loss=0.1459, over 19658.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3007, pruned_loss=0.07209, ctc_loss=0.1354, over 3861727.81 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:20:46,558 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=113941.33333333333, ans=0.0
+2024-08-25 16:21:00,088 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113994.66666666667, ans=0.1
+2024-08-25 16:21:11,826 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.88 vs. limit=15.0
+2024-08-25 16:21:18,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=114101.33333333333, ans=0.0
+2024-08-25 16:21:24,886 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.79 vs. limit=6.0
+2024-08-25 16:21:36,234 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=114154.66666666667, ans=0.025
+2024-08-25 16:21:45,079 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=114208.0, ans=0.1
+2024-08-25 16:21:45,893 INFO [train.py:1114] (3/4) Epoch 9, batch 1500, loss[loss=0.2273, simple_loss=0.2912, pruned_loss=0.05954, ctc_loss=0.1106, over 19577.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3009, pruned_loss=0.07205, ctc_loss=0.1352, over 3861680.15 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 32.0
+2024-08-25 16:22:01,631 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=114261.33333333333, ans=0.1
+2024-08-25 16:22:06,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=114261.33333333333, ans=0.0
+2024-08-25 16:22:08,788 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.42 vs. limit=15.0
+2024-08-25 16:22:15,431 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.928e+02 2.180e+02 2.740e+02 4.350e+02, threshold=4.360e+02, percent-clipped=2.0
+2024-08-25 16:22:24,720 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=114368.0, ans=0.0
+2024-08-25 16:22:42,560 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=114421.33333333333, ans=0.0
+2024-08-25 16:22:45,650 INFO [train.py:1114] (3/4) Epoch 9, batch 1550, loss[loss=0.2893, simple_loss=0.3304, pruned_loss=0.08958, ctc_loss=0.1726, over 19610.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3013, pruned_loss=0.07256, ctc_loss=0.1363, over 3846864.95 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-25 16:22:57,769 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.15 vs. limit=15.0
+2024-08-25 16:22:58,662 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=114528.0, ans=0.0
+2024-08-25 16:23:05,630 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.81 vs. limit=15.0
+2024-08-25 16:23:08,873 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=114581.33333333333, ans=0.125
+2024-08-25 16:23:18,008 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=114581.33333333333, ans=0.2
+2024-08-25 16:23:18,016 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=114581.33333333333, ans=0.0
+2024-08-25 16:23:42,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten.whitening_limit, batch_count=114688.0, ans=22.5
+2024-08-25 16:23:47,226 INFO [train.py:1114] (3/4) Epoch 9, batch 1600, loss[loss=0.2217, simple_loss=0.2913, pruned_loss=0.0547, ctc_loss=0.1069, over 19832.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3013, pruned_loss=0.07253, ctc_loss=0.1364, over 3836243.84 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-25 16:24:12,758 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.35 vs. limit=12.0
+2024-08-25 16:24:16,813 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.930e+02 2.504e+02 3.084e+02 5.673e+02, threshold=5.009e+02, percent-clipped=4.0
+2024-08-25 16:24:46,353 INFO [train.py:1114] (3/4) Epoch 9, batch 1650, loss[loss=0.2868, simple_loss=0.333, pruned_loss=0.08857, ctc_loss=0.1587, over 19664.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3023, pruned_loss=0.07325, ctc_loss=0.1375, over 3833329.32 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:24:55,623 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:25:13,806 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=115114.66666666667, ans=0.025
+2024-08-25 16:25:39,946 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.17 vs. limit=10.0
+2024-08-25 16:25:45,147 INFO [train.py:1114] (3/4) Epoch 9, batch 1700, loss[loss=0.2292, simple_loss=0.2727, pruned_loss=0.06789, ctc_loss=0.1248, over 19672.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3018, pruned_loss=0.07264, ctc_loss=0.1366, over 3847405.19 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:25:45,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=115274.66666666667, ans=0.125
+2024-08-25 16:26:13,046 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.773e+02 1.969e+02 2.283e+02 4.673e+02, threshold=3.938e+02, percent-clipped=0.0
+2024-08-25 16:26:15,201 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=115381.33333333333, ans=0.125
+2024-08-25 16:26:27,657 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.66 vs. limit=15.0
+2024-08-25 16:26:40,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=115541.33333333333, ans=0.0
+2024-08-25 16:26:41,720 INFO [train.py:1114] (3/4) Epoch 9, batch 1750, loss[loss=0.2218, simple_loss=0.2702, pruned_loss=0.06366, ctc_loss=0.1154, over 19620.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3011, pruned_loss=0.07202, ctc_loss=0.1352, over 3850976.40 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:27:25,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-25 16:27:26,742 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.87 vs. limit=22.5
+2024-08-25 16:28:06,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=115754.66666666667, ans=0.2
+2024-08-25 16:28:12,434 INFO [train.py:1114] (3/4) Epoch 9, batch 1800, loss[loss=0.229, simple_loss=0.2914, pruned_loss=0.05937, ctc_loss=0.1196, over 19613.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3007, pruned_loss=0.07174, ctc_loss=0.1347, over 3851723.27 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:28:30,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=115808.0, ans=0.125
+2024-08-25 16:28:45,715 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=115914.66666666667, ans=0.125
+2024-08-25 16:28:46,409 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.29 vs. limit=15.0
+2024-08-25 16:28:49,002 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 1.840e+02 2.097e+02 2.711e+02 4.220e+02, threshold=4.193e+02, percent-clipped=2.0
+2024-08-25 16:28:53,828 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=115914.66666666667, ans=0.1
+2024-08-25 16:28:57,596 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.13 vs. limit=15.0
+2024-08-25 16:29:04,213 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 16:29:05,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=115968.0, ans=0.125
+2024-08-25 16:29:25,112 INFO [train.py:1114] (3/4) Epoch 9, batch 1850, loss[loss=0.2478, simple_loss=0.3124, pruned_loss=0.06781, ctc_loss=0.1193, over 19589.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3004, pruned_loss=0.07164, ctc_loss=0.1344, over 3856604.38 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-25 16:29:43,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=116128.0, ans=0.125
+2024-08-25 16:30:24,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=116181.33333333333, ans=0.0
+2024-08-25 16:30:27,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=116181.33333333333, ans=0.2
+2024-08-25 16:30:29,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=116181.33333333333, ans=0.125
+2024-08-25 16:30:50,208 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.61 vs. limit=15.0
+2024-08-25 16:30:52,233 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=116288.0, ans=0.0
+2024-08-25 16:30:56,362 INFO [train.py:1114] (3/4) Epoch 9, batch 1900, loss[loss=0.2895, simple_loss=0.3266, pruned_loss=0.09156, ctc_loss=0.173, over 19651.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3008, pruned_loss=0.07169, ctc_loss=0.1342, over 3860965.01 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:30:58,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=116341.33333333333, ans=0.125
+2024-08-25 16:31:03,252 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=116341.33333333333, ans=0.0
+2024-08-25 16:32:03,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=116394.66666666667, ans=0.125
+2024-08-25 16:32:06,803 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.11 vs. limit=12.0
+2024-08-25 16:32:16,690 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=116448.0, ans=0.0
+2024-08-25 16:32:21,938 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.810e+02 2.075e+02 2.674e+02 4.757e+02, threshold=4.150e+02, percent-clipped=3.0
+2024-08-25 16:32:30,090 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=116501.33333333333, ans=0.2
+2024-08-25 16:32:43,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=116554.66666666667, ans=0.125
+2024-08-25 16:32:47,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=116554.66666666667, ans=0.0
+2024-08-25 16:33:06,102 INFO [train.py:1114] (3/4) Epoch 9, batch 1950, loss[loss=0.2082, simple_loss=0.2718, pruned_loss=0.05215, ctc_loss=0.1005, over 19588.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3018, pruned_loss=0.07195, ctc_loss=0.1347, over 3870204.12 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-25 16:33:10,955 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=116608.0, ans=0.2
+2024-08-25 16:33:25,851 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=116661.33333333333, ans=0.125
+2024-08-25 16:33:39,682 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.27 vs. limit=22.5
+2024-08-25 16:33:47,093 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=116768.0, ans=0.2
+2024-08-25 16:33:51,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=116821.33333333333, ans=0.125
+2024-08-25 16:34:02,729 INFO [train.py:1114] (3/4) Epoch 9, batch 2000, loss[loss=0.2175, simple_loss=0.2711, pruned_loss=0.05937, ctc_loss=0.1129, over 19670.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3023, pruned_loss=0.07234, ctc_loss=0.1353, over 3855943.67 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:34:03,298 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.48 vs. limit=15.0
+2024-08-25 16:34:11,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=116874.66666666667, ans=0.0
+2024-08-25 16:34:16,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=116928.0, ans=0.125
+2024-08-25 16:34:28,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=116981.33333333333, ans=0.05
+2024-08-25 16:34:30,973 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 1.787e+02 2.122e+02 2.673e+02 5.196e+02, threshold=4.245e+02, percent-clipped=10.0
+2024-08-25 16:34:38,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=117034.66666666667, ans=0.0
+2024-08-25 16:34:59,603 INFO [train.py:1114] (3/4) Epoch 9, batch 2050, loss[loss=0.2349, simple_loss=0.275, pruned_loss=0.06998, ctc_loss=0.1372, over 19728.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3006, pruned_loss=0.07194, ctc_loss=0.1346, over 3851663.93 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:35:13,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=117194.66666666667, ans=0.025
+2024-08-25 16:35:25,367 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.65 vs. limit=15.0
+2024-08-25 16:36:57,338 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.40 vs. limit=15.0
+2024-08-25 16:36:57,814 INFO [train.py:1114] (3/4) Epoch 9, batch 2100, loss[loss=0.2505, simple_loss=0.3086, pruned_loss=0.06987, ctc_loss=0.1314, over 19760.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.2999, pruned_loss=0.07142, ctc_loss=0.1338, over 3857941.15 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:37:33,874 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=117514.66666666667, ans=0.0
+2024-08-25 16:37:38,955 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 1.824e+02 2.012e+02 2.446e+02 4.504e+02, threshold=4.025e+02, percent-clipped=2.0
+2024-08-25 16:37:41,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=117514.66666666667, ans=0.0
+2024-08-25 16:37:57,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=117621.33333333333, ans=0.125
+2024-08-25 16:37:59,237 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=117621.33333333333, ans=0.0
+2024-08-25 16:37:59,644 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.93 vs. limit=15.0
+2024-08-25 16:38:05,873 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=117674.66666666667, ans=0.2
+2024-08-25 16:38:06,799 INFO [train.py:1114] (3/4) Epoch 9, batch 2150, loss[loss=0.222, simple_loss=0.2829, pruned_loss=0.05861, ctc_loss=0.1097, over 19849.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.2995, pruned_loss=0.07099, ctc_loss=0.133, over 3869521.36 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 32.0
+2024-08-25 16:38:19,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=117728.0, ans=0.125
+2024-08-25 16:38:22,701 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=117728.0, ans=0.125
+2024-08-25 16:38:24,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=117728.0, ans=0.125
+2024-08-25 16:38:27,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=117728.0, ans=0.125
+2024-08-25 16:38:35,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=117781.33333333333, ans=0.0
+2024-08-25 16:38:37,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=117781.33333333333, ans=0.125
+2024-08-25 16:38:38,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=117781.33333333333, ans=0.1
+2024-08-25 16:38:45,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=117834.66666666667, ans=0.0
+2024-08-25 16:38:49,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=117834.66666666667, ans=0.09899494936611666
+2024-08-25 16:38:53,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=117888.0, ans=0.2
+2024-08-25 16:39:02,679 INFO [train.py:1114] (3/4) Epoch 9, batch 2200, loss[loss=0.2637, simple_loss=0.3162, pruned_loss=0.07629, ctc_loss=0.1464, over 19598.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.2994, pruned_loss=0.07105, ctc_loss=0.133, over 3867350.51 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:39:12,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=117941.33333333333, ans=0.07
+2024-08-25 16:39:14,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=117994.66666666667, ans=0.0
+2024-08-25 16:39:15,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=117994.66666666667, ans=0.125
+2024-08-25 16:39:15,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=117994.66666666667, ans=0.0
+2024-08-25 16:39:30,919 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.840e+02 2.263e+02 2.882e+02 6.553e+02, threshold=4.526e+02, percent-clipped=9.0
+2024-08-25 16:39:59,974 INFO [train.py:1114] (3/4) Epoch 9, batch 2250, loss[loss=0.2371, simple_loss=0.3053, pruned_loss=0.05965, ctc_loss=0.1238, over 19612.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3002, pruned_loss=0.0714, ctc_loss=0.1338, over 3867293.27 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:40:03,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff2.min_abs, batch_count=118208.0, ans=0.1
+2024-08-25 16:40:39,424 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=118368.0, ans=0.04949747468305833
+2024-08-25 16:40:50,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=118421.33333333333, ans=15.0
+2024-08-25 16:40:54,820 INFO [train.py:1114] (3/4) Epoch 9, batch 2300, loss[loss=0.242, simple_loss=0.2916, pruned_loss=0.06953, ctc_loss=0.1337, over 19512.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.2996, pruned_loss=0.07161, ctc_loss=0.134, over 3860876.33 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:41:09,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=118528.0, ans=0.125
+2024-08-25 16:41:23,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=118581.33333333333, ans=0.0
+2024-08-25 16:41:24,912 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.864e+02 2.265e+02 3.023e+02 5.230e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 16:41:26,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=118581.33333333333, ans=0.125
+2024-08-25 16:41:26,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=118581.33333333333, ans=0.1
+2024-08-25 16:41:26,553 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.46 vs. limit=10.0
+2024-08-25 16:41:48,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=118688.0, ans=0.025
+2024-08-25 16:41:51,086 INFO [train.py:1114] (3/4) Epoch 9, batch 2350, loss[loss=0.2787, simple_loss=0.3218, pruned_loss=0.08684, ctc_loss=0.1548, over 19655.00 frames. ], tot_loss[loss=0.249, simple_loss=0.2999, pruned_loss=0.07209, ctc_loss=0.1349, over 3863477.13 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 16.0
+2024-08-25 16:42:10,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=118794.66666666667, ans=0.95
+2024-08-25 16:42:12,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=118848.0, ans=0.035
+2024-08-25 16:42:39,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=118901.33333333333, ans=0.125
+2024-08-25 16:42:46,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=118901.33333333333, ans=0.125
+2024-08-25 16:42:47,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=118901.33333333333, ans=0.2
+2024-08-25 16:42:51,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=118954.66666666667, ans=0.0
+2024-08-25 16:42:52,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=118954.66666666667, ans=0.0
+2024-08-25 16:43:02,772 INFO [train.py:1114] (3/4) Epoch 9, batch 2400, loss[loss=0.2874, simple_loss=0.3348, pruned_loss=0.08829, ctc_loss=0.1586, over 19275.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3025, pruned_loss=0.07327, ctc_loss=0.1369, over 3857594.78 frames. ], batch size: 71, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:43:08,275 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=119008.0, ans=0.0
+2024-08-25 16:43:09,550 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=119008.0, ans=0.0
+2024-08-25 16:43:09,626 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=119008.0, ans=0.125
+2024-08-25 16:43:31,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=119114.66666666667, ans=0.2
+2024-08-25 16:43:32,520 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 1.930e+02 2.301e+02 2.799e+02 4.768e+02, threshold=4.601e+02, percent-clipped=1.0
+2024-08-25 16:43:39,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=119168.0, ans=0.0
+2024-08-25 16:43:46,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=119168.0, ans=0.125
+2024-08-25 16:43:57,327 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=119221.33333333333, ans=0.0
+2024-08-25 16:43:59,354 INFO [train.py:1114] (3/4) Epoch 9, batch 2450, loss[loss=0.3338, simple_loss=0.3434, pruned_loss=0.1158, ctc_loss=0.2314, over 13405.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3067, pruned_loss=0.07675, ctc_loss=0.1438, over 3729870.34 frames. ], batch size: 141, lr: 1.61e-02, grad_scale: 32.0
+2024-08-25 16:44:21,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=119381.33333333333, ans=0.0
+2024-08-25 16:44:29,639 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=119381.33333333333, ans=0.125
+2024-08-25 16:44:31,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=119434.66666666667, ans=0.05
+2024-08-25 16:44:33,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=119434.66666666667, ans=0.0
+2024-08-25 16:44:34,109 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.52 vs. limit=22.5
+2024-08-25 16:44:34,870 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.09 vs. limit=15.0
+2024-08-25 16:45:25,708 INFO [train.py:1114] (3/4) Epoch 10, batch 0, loss[loss=0.2212, simple_loss=0.2731, pruned_loss=0.06173, ctc_loss=0.1146, over 19409.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2731, pruned_loss=0.06173, ctc_loss=0.1146, over 19409.00 frames. ], batch size: 48, lr: 1.53e-02, grad_scale: 32.0
+2024-08-25 16:45:25,708 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 16:46:37,107 INFO [train.py:1146] (3/4) Epoch 10, validation: loss=0.2041, simple_loss=0.2903, pruned_loss=0.04356, ctc_loss=0.07708, over 944034.00 frames.
+2024-08-25 16:46:37,107 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 16:47:46,601 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 1.955e+02 2.116e+02 2.362e+02 4.652e+02, threshold=4.231e+02, percent-clipped=1.0
+2024-08-25 16:47:50,312 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=119696.0, ans=0.2
+2024-08-25 16:48:28,309 INFO [train.py:1114] (3/4) Epoch 10, batch 50, loss[loss=0.2172, simple_loss=0.2685, pruned_loss=0.06094, ctc_loss=0.1104, over 19730.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.301, pruned_loss=0.07159, ctc_loss=0.1354, over 846230.90 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:48:57,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=119749.33333333333, ans=0.2
+2024-08-25 16:48:58,840 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.73 vs. limit=6.0
+2024-08-25 16:49:17,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=119749.33333333333, ans=0.1
+2024-08-25 16:49:17,487 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=119749.33333333333, ans=0.1
+2024-08-25 16:50:08,519 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=119856.0, ans=0.1
+2024-08-25 16:50:44,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=119909.33333333333, ans=0.07
+2024-08-25 16:50:45,035 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.17 vs. limit=15.0
+2024-08-25 16:50:46,027 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.05 vs. limit=22.5
+2024-08-25 16:52:34,135 INFO [train.py:1114] (3/4) Epoch 10, batch 100, loss[loss=0.2373, simple_loss=0.2927, pruned_loss=0.0651, ctc_loss=0.129, over 19727.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3017, pruned_loss=0.07102, ctc_loss=0.1345, over 1499330.95 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:52:54,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=120069.33333333333, ans=0.1
+2024-08-25 16:53:28,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=120122.66666666667, ans=0.125
+2024-08-25 16:53:30,686 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.89 vs. limit=15.0
+2024-08-25 16:53:35,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=120176.0, ans=0.0
+2024-08-25 16:53:39,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=120176.0, ans=0.07
+2024-08-25 16:53:47,835 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 1.798e+02 2.253e+02 2.860e+02 4.134e+02, threshold=4.507e+02, percent-clipped=0.0
+2024-08-25 16:54:42,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=120229.33333333333, ans=0.1
+2024-08-25 16:54:44,416 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.81 vs. limit=15.0
+2024-08-25 16:54:47,480 INFO [train.py:1114] (3/4) Epoch 10, batch 150, loss[loss=0.2073, simple_loss=0.2657, pruned_loss=0.05417, ctc_loss=0.1013, over 19722.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.2992, pruned_loss=0.0695, ctc_loss=0.1319, over 2027878.69 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:55:20,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=120389.33333333333, ans=0.125
+2024-08-25 16:55:25,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120389.33333333333, ans=0.125
+2024-08-25 16:55:38,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=120442.66666666667, ans=0.125
+2024-08-25 16:56:01,812 INFO [train.py:1114] (3/4) Epoch 10, batch 200, loss[loss=0.2778, simple_loss=0.3198, pruned_loss=0.08562, ctc_loss=0.1616, over 18295.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.297, pruned_loss=0.06858, ctc_loss=0.13, over 2435690.75 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:56:11,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=120549.33333333333, ans=0.025
+2024-08-25 16:56:13,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=120602.66666666667, ans=0.125
+2024-08-25 16:57:32,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=120656.0, ans=0.125
+2024-08-25 16:57:51,837 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=120656.0, ans=0.0
+2024-08-25 16:57:56,619 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=120656.0, ans=0.0
+2024-08-25 16:58:07,756 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.824e+02 2.064e+02 2.548e+02 6.143e+02, threshold=4.128e+02, percent-clipped=2.0
+2024-08-25 16:58:11,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=120709.33333333333, ans=0.125
+2024-08-25 16:58:32,999 INFO [train.py:1114] (3/4) Epoch 10, batch 250, loss[loss=0.2732, simple_loss=0.3196, pruned_loss=0.0825, ctc_loss=0.1546, over 19432.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2975, pruned_loss=0.06891, ctc_loss=0.1304, over 2755567.99 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 16:58:37,966 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=120816.0, ans=0.125
+2024-08-25 16:59:00,809 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-25 16:59:04,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-25 16:59:08,315 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.92 vs. limit=6.0
+2024-08-25 16:59:53,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=120976.0, ans=0.0
+2024-08-25 17:00:00,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=121029.33333333333, ans=0.09899494936611666
+2024-08-25 17:00:08,736 INFO [train.py:1114] (3/4) Epoch 10, batch 300, loss[loss=0.2579, simple_loss=0.3074, pruned_loss=0.07685, ctc_loss=0.1371, over 19521.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.2975, pruned_loss=0.06933, ctc_loss=0.1309, over 3001630.94 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:00:23,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=121082.66666666667, ans=0.1
+2024-08-25 17:00:45,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=121189.33333333333, ans=0.125
+2024-08-25 17:01:00,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=121242.66666666667, ans=0.125
+2024-08-25 17:01:01,165 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.908e+02 2.186e+02 2.769e+02 4.118e+02, threshold=4.372e+02, percent-clipped=0.0
+2024-08-25 17:02:40,288 INFO [train.py:1114] (3/4) Epoch 10, batch 350, loss[loss=0.2584, simple_loss=0.2941, pruned_loss=0.08119, ctc_loss=0.1511, over 19727.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.2983, pruned_loss=0.06984, ctc_loss=0.1321, over 3191366.70 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 32.0
+2024-08-25 17:02:51,751 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=121402.66666666667, ans=0.0
+2024-08-25 17:02:58,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=121402.66666666667, ans=0.2
+2024-08-25 17:03:28,655 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=121509.33333333333, ans=0.125
+2024-08-25 17:03:42,416 INFO [train.py:1114] (3/4) Epoch 10, batch 400, loss[loss=0.2481, simple_loss=0.3002, pruned_loss=0.07166, ctc_loss=0.1316, over 19501.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.2975, pruned_loss=0.06938, ctc_loss=0.131, over 3343145.60 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:03:48,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=121616.0, ans=0.125
+2024-08-25 17:04:04,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=121669.33333333333, ans=0.025
+2024-08-25 17:04:30,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=121776.0, ans=0.1
+2024-08-25 17:04:33,756 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 1.874e+02 2.151e+02 2.761e+02 4.102e+02, threshold=4.302e+02, percent-clipped=0.0
+2024-08-25 17:04:41,403 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.63 vs. limit=15.0
+2024-08-25 17:04:42,433 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.23 vs. limit=22.5
+2024-08-25 17:04:50,467 INFO [train.py:1114] (3/4) Epoch 10, batch 450, loss[loss=0.2898, simple_loss=0.3291, pruned_loss=0.09264, ctc_loss=0.1631, over 19624.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2972, pruned_loss=0.06923, ctc_loss=0.1306, over 3451118.13 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:07:40,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=121989.33333333333, ans=0.2
+2024-08-25 17:07:47,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=121989.33333333333, ans=0.035
+2024-08-25 17:07:47,201 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=121989.33333333333, ans=0.125
+2024-08-25 17:08:13,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=122042.66666666667, ans=0.0
+2024-08-25 17:08:38,543 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.86 vs. limit=6.0
+2024-08-25 17:09:01,411 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.84 vs. limit=15.0
+2024-08-25 17:09:04,036 INFO [train.py:1114] (3/4) Epoch 10, batch 500, loss[loss=0.2659, simple_loss=0.3148, pruned_loss=0.07869, ctc_loss=0.1487, over 19624.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.2966, pruned_loss=0.06933, ctc_loss=0.1306, over 3546995.54 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:09:09,036 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=122149.33333333333, ans=0.2
+2024-08-25 17:09:22,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=122149.33333333333, ans=0.1
+2024-08-25 17:09:26,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff3.min_abs, batch_count=122202.66666666667, ans=0.2
+2024-08-25 17:09:31,439 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=122202.66666666667, ans=0.025
+2024-08-25 17:09:57,290 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=122256.0, ans=0.2
+2024-08-25 17:10:26,446 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=122309.33333333333, ans=0.2
+2024-08-25 17:10:36,238 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.797e+02 2.290e+02 2.870e+02 3.920e+02, threshold=4.579e+02, percent-clipped=0.0
+2024-08-25 17:10:40,994 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=122362.66666666667, ans=0.0
+2024-08-25 17:10:44,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=122362.66666666667, ans=0.0
+2024-08-25 17:10:47,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=122362.66666666667, ans=0.0
+2024-08-25 17:10:51,445 INFO [train.py:1114] (3/4) Epoch 10, batch 550, loss[loss=0.2765, simple_loss=0.3226, pruned_loss=0.08432, ctc_loss=0.1543, over 19411.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2967, pruned_loss=0.06941, ctc_loss=0.1307, over 3607964.60 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:11:08,124 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=122416.0, ans=0.0
+2024-08-25 17:11:10,881 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.39 vs. limit=15.0
+2024-08-25 17:11:50,960 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=122416.0, ans=0.05
+2024-08-25 17:11:51,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=122469.33333333333, ans=0.125
+2024-08-25 17:13:37,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=122522.66666666667, ans=0.125
+2024-08-25 17:14:20,766 INFO [train.py:1114] (3/4) Epoch 10, batch 600, loss[loss=0.2505, simple_loss=0.3065, pruned_loss=0.07072, ctc_loss=0.1328, over 19394.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2966, pruned_loss=0.06912, ctc_loss=0.1302, over 3665406.62 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:14:26,627 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=122682.66666666667, ans=0.0
+2024-08-25 17:14:48,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=122789.33333333333, ans=0.0
+2024-08-25 17:15:08,631 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 2.061e+02 2.496e+02 4.365e+02, threshold=4.122e+02, percent-clipped=0.0
+2024-08-25 17:15:08,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=122842.66666666667, ans=0.025
+2024-08-25 17:15:13,217 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:15:19,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=122896.0, ans=0.0
+2024-08-25 17:15:24,797 INFO [train.py:1114] (3/4) Epoch 10, batch 650, loss[loss=0.2515, simple_loss=0.3052, pruned_loss=0.07193, ctc_loss=0.135, over 19772.00 frames. ], tot_loss[loss=0.243, simple_loss=0.2959, pruned_loss=0.06906, ctc_loss=0.1299, over 3716276.14 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:15:30,816 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=122949.33333333333, ans=0.125
+2024-08-25 17:15:38,254 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.53 vs. limit=15.0
+2024-08-25 17:15:42,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=123002.66666666667, ans=15.0
+2024-08-25 17:15:51,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=123056.0, ans=0.0
+2024-08-25 17:16:04,631 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=123109.33333333333, ans=0.125
+2024-08-25 17:16:34,661 INFO [train.py:1114] (3/4) Epoch 10, batch 700, loss[loss=0.2169, simple_loss=0.2816, pruned_loss=0.05533, ctc_loss=0.1037, over 19732.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2962, pruned_loss=0.06887, ctc_loss=0.1294, over 3748551.67 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 32.0
+2024-08-25 17:17:38,934 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.99 vs. limit=15.0
+2024-08-25 17:17:55,880 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.90 vs. limit=15.0
+2024-08-25 17:18:12,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=123376.0, ans=0.05
+2024-08-25 17:18:13,482 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 1.934e+02 2.276e+02 3.026e+02 5.626e+02, threshold=4.552e+02, percent-clipped=3.0
+2024-08-25 17:18:25,167 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=123429.33333333333, ans=0.125
+2024-08-25 17:18:28,243 INFO [train.py:1114] (3/4) Epoch 10, batch 750, loss[loss=0.2293, simple_loss=0.2945, pruned_loss=0.06032, ctc_loss=0.1087, over 19492.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2957, pruned_loss=0.06861, ctc_loss=0.1288, over 3775306.47 frames. ], batch size: 54, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:18:28,477 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=123482.66666666667, ans=0.0
+2024-08-25 17:18:47,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=123536.0, ans=0.125
+2024-08-25 17:19:30,793 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.16 vs. limit=22.5
+2024-08-25 17:19:32,721 INFO [train.py:1114] (3/4) Epoch 10, batch 800, loss[loss=0.2283, simple_loss=0.2781, pruned_loss=0.06354, ctc_loss=0.1285, over 19415.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2958, pruned_loss=0.06865, ctc_loss=0.129, over 3796476.02 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:27,600 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.19 vs. limit=22.5
+2024-08-25 17:20:33,019 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.887e+02 2.136e+02 2.736e+02 3.984e+02, threshold=4.273e+02, percent-clipped=0.0
+2024-08-25 17:20:34,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=123909.33333333333, ans=0.0
+2024-08-25 17:20:47,942 INFO [train.py:1114] (3/4) Epoch 10, batch 850, loss[loss=0.2407, simple_loss=0.3087, pruned_loss=0.06273, ctc_loss=0.1178, over 19669.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.2958, pruned_loss=0.0685, ctc_loss=0.1286, over 3814970.14 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:20:54,486 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.79 vs. limit=10.0
+2024-08-25 17:21:05,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=124069.33333333333, ans=0.125
+2024-08-25 17:21:08,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=124069.33333333333, ans=0.1
+2024-08-25 17:21:11,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=124122.66666666667, ans=0.2
+2024-08-25 17:21:27,961 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=124176.0, ans=0.05
+2024-08-25 17:21:30,610 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=124176.0, ans=0.04949747468305833
+2024-08-25 17:22:19,906 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=6.63 vs. limit=12.0
+2024-08-25 17:22:25,611 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.09 vs. limit=22.5
+2024-08-25 17:22:28,568 INFO [train.py:1114] (3/4) Epoch 10, batch 900, loss[loss=0.2038, simple_loss=0.2624, pruned_loss=0.05234, ctc_loss=0.1014, over 19809.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2964, pruned_loss=0.06903, ctc_loss=0.1294, over 3819622.48 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:22:48,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=124336.0, ans=0.125
+2024-08-25 17:23:13,942 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.854e+02 2.167e+02 2.763e+02 5.395e+02, threshold=4.333e+02, percent-clipped=2.0
+2024-08-25 17:23:25,960 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:23:30,294 INFO [train.py:1114] (3/4) Epoch 10, batch 950, loss[loss=0.2356, simple_loss=0.2884, pruned_loss=0.06731, ctc_loss=0.1206, over 19502.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2967, pruned_loss=0.06914, ctc_loss=0.1299, over 3821051.06 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:23:36,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=124549.33333333333, ans=0.0
+2024-08-25 17:23:36,456 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.61 vs. limit=10.0
+2024-08-25 17:23:40,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=124602.66666666667, ans=0.125
+2024-08-25 17:24:06,615 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.25 vs. limit=15.0
+2024-08-25 17:24:09,958 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.23 vs. limit=22.5
+2024-08-25 17:24:17,395 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=124762.66666666667, ans=0.035
+2024-08-25 17:24:17,552 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=124762.66666666667, ans=0.0
+2024-08-25 17:24:24,731 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.95 vs. limit=6.0
+2024-08-25 17:24:33,751 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.89 vs. limit=22.5
+2024-08-25 17:24:34,422 INFO [train.py:1114] (3/4) Epoch 10, batch 1000, loss[loss=0.2035, simple_loss=0.2712, pruned_loss=0.04919, ctc_loss=0.09347, over 19847.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.297, pruned_loss=0.06888, ctc_loss=0.1297, over 3815840.92 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:24:41,945 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=124816.0, ans=0.09899494936611666
+2024-08-25 17:25:03,372 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=124922.66666666667, ans=0.0
+2024-08-25 17:25:18,037 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 1.797e+02 2.069e+02 2.553e+02 4.130e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-25 17:25:27,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=125029.33333333333, ans=0.125
+2024-08-25 17:25:32,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=125082.66666666667, ans=0.1
+2024-08-25 17:25:33,291 INFO [train.py:1114] (3/4) Epoch 10, batch 1050, loss[loss=0.2351, simple_loss=0.2967, pruned_loss=0.06251, ctc_loss=0.121, over 19850.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.296, pruned_loss=0.06868, ctc_loss=0.1294, over 3822334.63 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 32.0
+2024-08-25 17:25:33,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=125082.66666666667, ans=0.5
+2024-08-25 17:25:42,353 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.08 vs. limit=15.0
+2024-08-25 17:25:47,734 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=125136.0, ans=0.025
+2024-08-25 17:26:25,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=125296.0, ans=0.025
+2024-08-25 17:26:30,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=125296.0, ans=0.125
+2024-08-25 17:26:32,057 INFO [train.py:1114] (3/4) Epoch 10, batch 1100, loss[loss=0.2082, simple_loss=0.271, pruned_loss=0.05248, ctc_loss=0.101, over 19583.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2959, pruned_loss=0.06842, ctc_loss=0.129, over 3829859.54 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:26:32,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=125349.33333333333, ans=0.025
+2024-08-25 17:26:33,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=125349.33333333333, ans=0.125
+2024-08-25 17:26:38,702 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=125349.33333333333, ans=0.2
+2024-08-25 17:27:18,169 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.787e+02 2.060e+02 2.560e+02 4.808e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 17:27:22,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=125562.66666666667, ans=0.125
+2024-08-25 17:27:33,335 INFO [train.py:1114] (3/4) Epoch 10, batch 1150, loss[loss=0.2369, simple_loss=0.2929, pruned_loss=0.06559, ctc_loss=0.1242, over 19598.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2961, pruned_loss=0.0684, ctc_loss=0.1289, over 3829334.24 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:28:06,509 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.58 vs. limit=15.0
+2024-08-25 17:28:09,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=125722.66666666667, ans=0.0
+2024-08-25 17:28:15,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=125776.0, ans=0.1
+2024-08-25 17:28:26,106 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.08 vs. limit=15.0
+2024-08-25 17:28:44,411 INFO [train.py:1114] (3/4) Epoch 10, batch 1200, loss[loss=0.2582, simple_loss=0.3151, pruned_loss=0.07161, ctc_loss=0.1449, over 19833.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.297, pruned_loss=0.06896, ctc_loss=0.1301, over 3823765.45 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:28:44,566 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=125882.66666666667, ans=0.125
+2024-08-25 17:28:45,796 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=125882.66666666667, ans=0.1
+2024-08-25 17:28:50,581 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.04 vs. limit=15.0
+2024-08-25 17:29:00,194 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=125936.0, ans=0.05
+2024-08-25 17:29:09,696 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=125936.0, ans=0.025
+2024-08-25 17:29:30,099 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.522e+02 1.823e+02 2.047e+02 2.358e+02 4.051e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 17:29:30,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=126042.66666666667, ans=0.125
+2024-08-25 17:29:35,002 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=126096.0, ans=0.125
+2024-08-25 17:29:45,836 INFO [train.py:1114] (3/4) Epoch 10, batch 1250, loss[loss=0.2569, simple_loss=0.3067, pruned_loss=0.07535, ctc_loss=0.1409, over 19520.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2969, pruned_loss=0.06848, ctc_loss=0.1289, over 3842282.76 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:29:54,359 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.26 vs. limit=15.0
+2024-08-25 17:29:57,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=126202.66666666667, ans=0.125
+2024-08-25 17:30:45,786 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=126309.33333333333, ans=0.125
+2024-08-25 17:30:59,769 INFO [train.py:1114] (3/4) Epoch 10, batch 1300, loss[loss=0.2912, simple_loss=0.3271, pruned_loss=0.09214, ctc_loss=0.1775, over 18801.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2957, pruned_loss=0.06782, ctc_loss=0.1276, over 3845848.70 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:31:23,826 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=126416.0, ans=0.2
+2024-08-25 17:31:52,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=126522.66666666667, ans=0.025
+2024-08-25 17:32:01,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=126522.66666666667, ans=0.025
+2024-08-25 17:32:13,112 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.900e+02 2.303e+02 2.970e+02 5.096e+02, threshold=4.606e+02, percent-clipped=7.0
+2024-08-25 17:32:24,438 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.76 vs. limit=15.0
+2024-08-25 17:32:28,187 INFO [train.py:1114] (3/4) Epoch 10, batch 1350, loss[loss=0.2459, simple_loss=0.3005, pruned_loss=0.06791, ctc_loss=0.1388, over 19779.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2949, pruned_loss=0.06735, ctc_loss=0.1268, over 3856321.71 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:32:36,834 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.48 vs. limit=15.0
+2024-08-25 17:32:37,790 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.44 vs. limit=22.5
+2024-08-25 17:32:44,939 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.46 vs. limit=10.0
+2024-08-25 17:33:11,235 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=126842.66666666667, ans=0.5
+2024-08-25 17:33:23,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=126896.0, ans=0.125
+2024-08-25 17:33:30,485 INFO [train.py:1114] (3/4) Epoch 10, batch 1400, loss[loss=0.2006, simple_loss=0.2565, pruned_loss=0.05253, ctc_loss=0.09926, over 19703.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.2946, pruned_loss=0.06718, ctc_loss=0.1265, over 3863368.53 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 32.0
+2024-08-25 17:33:31,194 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.10 vs. limit=15.0
+2024-08-25 17:33:46,950 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=127002.66666666667, ans=0.0
+2024-08-25 17:33:57,882 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.63 vs. limit=15.0
+2024-08-25 17:34:02,710 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.71 vs. limit=12.0
+2024-08-25 17:34:42,450 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.856e+02 2.167e+02 2.631e+02 4.500e+02, threshold=4.335e+02, percent-clipped=0.0
+2024-08-25 17:34:56,685 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=127162.66666666667, ans=0.125
+2024-08-25 17:34:59,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=127162.66666666667, ans=0.0
+2024-08-25 17:35:02,135 INFO [train.py:1114] (3/4) Epoch 10, batch 1450, loss[loss=0.2693, simple_loss=0.3248, pruned_loss=0.07849, ctc_loss=0.1421, over 19678.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.296, pruned_loss=0.06796, ctc_loss=0.128, over 3862457.50 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:35:09,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=127216.0, ans=0.125
+2024-08-25 17:35:15,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=127269.33333333333, ans=0.125
+2024-08-25 17:35:35,940 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=127322.66666666667, ans=10.0
+2024-08-25 17:35:41,512 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127376.0, ans=0.1
+2024-08-25 17:35:50,300 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=127429.33333333333, ans=0.125
+2024-08-25 17:36:02,113 INFO [train.py:1114] (3/4) Epoch 10, batch 1500, loss[loss=0.2462, simple_loss=0.2978, pruned_loss=0.07008, ctc_loss=0.136, over 19590.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.2962, pruned_loss=0.06797, ctc_loss=0.1282, over 3861690.34 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:36:08,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=127482.66666666667, ans=0.2
+2024-08-25 17:36:15,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=127536.0, ans=0.125
+2024-08-25 17:36:18,592 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=127536.0, ans=0.125
+2024-08-25 17:36:42,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127589.33333333333, ans=0.1
+2024-08-25 17:36:44,669 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=127589.33333333333, ans=0.0
+2024-08-25 17:36:55,679 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 1.877e+02 2.186e+02 2.626e+02 4.478e+02, threshold=4.372e+02, percent-clipped=1.0
+2024-08-25 17:36:58,295 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 17:37:00,804 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=127696.0, ans=0.0
+2024-08-25 17:37:04,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127696.0, ans=0.1
+2024-08-25 17:37:06,625 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=127696.0, ans=0.0
+2024-08-25 17:37:24,242 INFO [train.py:1114] (3/4) Epoch 10, batch 1550, loss[loss=0.2469, simple_loss=0.3051, pruned_loss=0.06812, ctc_loss=0.131, over 19597.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.2958, pruned_loss=0.06793, ctc_loss=0.1282, over 3846689.75 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-25 17:37:42,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=127802.66666666667, ans=0.125
+2024-08-25 17:37:47,410 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=127856.0, ans=0.125
+2024-08-25 17:37:53,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=127856.0, ans=0.125
+2024-08-25 17:37:55,218 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127856.0, ans=0.1
+2024-08-25 17:38:50,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=127909.33333333333, ans=0.0
+2024-08-25 17:38:54,676 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=127909.33333333333, ans=0.2
+2024-08-25 17:39:41,581 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.16 vs. limit=10.0
+2024-08-25 17:39:42,326 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=127909.33333333333, ans=0.05
+2024-08-25 17:39:52,320 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=127962.66666666667, ans=0.125
+2024-08-25 17:41:06,796 INFO [train.py:1114] (3/4) Epoch 10, batch 1600, loss[loss=0.2428, simple_loss=0.3047, pruned_loss=0.06512, ctc_loss=0.1267, over 19835.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2956, pruned_loss=0.06791, ctc_loss=0.1282, over 3835877.42 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:41:07,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=128016.0, ans=0.0
+2024-08-25 17:42:01,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=128016.0, ans=0.2
+2024-08-25 17:42:41,254 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.68 vs. limit=15.0
+2024-08-25 17:42:52,715 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.97 vs. limit=22.5
+2024-08-25 17:43:05,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=128176.0, ans=0.125
+2024-08-25 17:43:24,235 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 1.849e+02 2.080e+02 2.733e+02 5.175e+02, threshold=4.161e+02, percent-clipped=4.0
+2024-08-25 17:44:00,803 INFO [train.py:1114] (3/4) Epoch 10, batch 1650, loss[loss=0.2488, simple_loss=0.3076, pruned_loss=0.06891, ctc_loss=0.1305, over 19638.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2962, pruned_loss=0.06841, ctc_loss=0.129, over 3832221.01 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:44:33,329 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=128336.0, ans=0.125
+2024-08-25 17:45:04,619 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=128442.66666666667, ans=0.09899494936611666
+2024-08-25 17:45:46,331 INFO [train.py:1114] (3/4) Epoch 10, batch 1700, loss[loss=0.2161, simple_loss=0.2682, pruned_loss=0.06013, ctc_loss=0.1096, over 19679.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.296, pruned_loss=0.06804, ctc_loss=0.1282, over 3846404.40 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:46:47,594 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.90 vs. limit=15.0
+2024-08-25 17:46:48,361 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=128656.0, ans=0.125
+2024-08-25 17:47:00,548 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=128656.0, ans=0.2
+2024-08-25 17:47:01,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=128709.33333333333, ans=0.125
+2024-08-25 17:47:11,338 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 1.773e+02 2.059e+02 2.527e+02 4.467e+02, threshold=4.119e+02, percent-clipped=1.0
+2024-08-25 17:47:11,996 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.27 vs. limit=12.0
+2024-08-25 17:48:02,879 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=128762.66666666667, ans=0.125
+2024-08-25 17:48:11,671 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.10 vs. limit=10.0
+2024-08-25 17:48:12,452 INFO [train.py:1114] (3/4) Epoch 10, batch 1750, loss[loss=0.2251, simple_loss=0.2738, pruned_loss=0.06378, ctc_loss=0.1223, over 19678.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.2955, pruned_loss=0.06756, ctc_loss=0.1274, over 3851655.37 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-25 17:48:45,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=128922.66666666667, ans=0.125
+2024-08-25 17:49:11,936 INFO [train.py:1114] (3/4) Epoch 10, batch 1800, loss[loss=0.2313, simple_loss=0.2978, pruned_loss=0.06002, ctc_loss=0.1118, over 19609.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.2955, pruned_loss=0.0674, ctc_loss=0.127, over 3853702.55 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:06:40,190 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=129242.66666666667, ans=0.125
+2024-08-25 18:06:56,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=129242.66666666667, ans=0.125
+2024-08-25 18:11:17,736 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.930e+02 2.270e+02 3.115e+02 5.695e+02, threshold=4.540e+02, percent-clipped=10.0
+2024-08-25 18:11:53,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=129296.0, ans=6.0
+2024-08-25 18:18:09,217 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=129296.0, ans=0.0
+2024-08-25 18:19:59,304 INFO [train.py:1114] (3/4) Epoch 10, batch 1850, loss[loss=0.252, simple_loss=0.3123, pruned_loss=0.07035, ctc_loss=0.1272, over 19591.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2953, pruned_loss=0.06728, ctc_loss=0.1266, over 3856696.98 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:21:03,468 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.30 vs. limit=15.0
+2024-08-25 18:24:29,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=129402.66666666667, ans=0.0
+2024-08-25 18:26:29,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=129456.0, ans=0.05
+2024-08-25 18:29:14,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129509.33333333333, ans=0.1
+2024-08-25 18:29:52,931 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=129509.33333333333, ans=0.125
+2024-08-25 18:29:55,395 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.18 vs. limit=22.5
+2024-08-25 18:32:37,434 INFO [train.py:1114] (3/4) Epoch 10, batch 1900, loss[loss=0.2485, simple_loss=0.3102, pruned_loss=0.06805, ctc_loss=0.1269, over 19625.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2963, pruned_loss=0.06761, ctc_loss=0.1271, over 3861802.46 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:34:01,819 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.78 vs. limit=15.0
+2024-08-25 18:36:07,092 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=129722.66666666667, ans=0.0
+2024-08-25 18:36:20,197 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.50 vs. limit=22.5
+2024-08-25 18:37:35,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=129776.0, ans=0.0
+2024-08-25 18:37:43,365 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.882e+02 2.156e+02 2.772e+02 4.689e+02, threshold=4.313e+02, percent-clipped=1.0
+2024-08-25 18:38:51,207 INFO [train.py:1114] (3/4) Epoch 10, batch 1950, loss[loss=0.2294, simple_loss=0.2827, pruned_loss=0.06295, ctc_loss=0.1255, over 19584.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2973, pruned_loss=0.06781, ctc_loss=0.1275, over 3870507.56 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:39:43,702 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=129882.66666666667, ans=0.125
+2024-08-25 18:39:46,077 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.04 vs. limit=6.0
+2024-08-25 18:39:59,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=129936.0, ans=0.125
+2024-08-25 18:40:13,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=129936.0, ans=0.2
+2024-08-25 18:41:08,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129989.33333333333, ans=0.1
+2024-08-25 18:42:32,422 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=130042.66666666667, ans=0.025
+2024-08-25 18:44:04,306 INFO [train.py:1114] (3/4) Epoch 10, batch 2000, loss[loss=0.2056, simple_loss=0.2607, pruned_loss=0.05463, ctc_loss=0.1028, over 19650.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2982, pruned_loss=0.06858, ctc_loss=0.1291, over 3854930.75 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:44:08,989 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=130149.33333333333, ans=0.2
+2024-08-25 18:44:14,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=130149.33333333333, ans=0.125
+2024-08-25 18:44:14,724 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.45 vs. limit=15.0
+2024-08-25 18:44:41,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=130202.66666666667, ans=0.2
+2024-08-25 18:47:32,410 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.882e+02 2.262e+02 2.707e+02 4.864e+02, threshold=4.523e+02, percent-clipped=1.0
+2024-08-25 18:48:05,138 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.05 vs. limit=22.5
+2024-08-25 18:48:30,485 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.70 vs. limit=15.0
+2024-08-25 18:48:39,775 INFO [train.py:1114] (3/4) Epoch 10, batch 2050, loss[loss=0.2463, simple_loss=0.2872, pruned_loss=0.07489, ctc_loss=0.1392, over 19715.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2974, pruned_loss=0.06862, ctc_loss=0.1292, over 3852752.19 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-25 18:50:05,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=130469.33333333333, ans=0.2
+2024-08-25 18:51:34,050 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=130629.33333333333, ans=0.0
+2024-08-25 18:52:20,494 INFO [train.py:1114] (3/4) Epoch 10, batch 2100, loss[loss=0.2506, simple_loss=0.3066, pruned_loss=0.07029, ctc_loss=0.135, over 19771.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2964, pruned_loss=0.06805, ctc_loss=0.128, over 3859455.41 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 16.0
+2024-08-25 18:52:45,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=130736.0, ans=0.125
+2024-08-25 18:53:32,468 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=130789.33333333333, ans=0.125
+2024-08-25 18:53:46,792 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.57 vs. limit=15.0
+2024-08-25 18:53:58,208 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.839e+02 2.296e+02 2.721e+02 6.154e+02, threshold=4.593e+02, percent-clipped=3.0
+2024-08-25 18:54:33,454 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.50 vs. limit=22.5
+2024-08-25 18:54:37,548 INFO [train.py:1114] (3/4) Epoch 10, batch 2150, loss[loss=0.2265, simple_loss=0.2833, pruned_loss=0.06206, ctc_loss=0.114, over 19864.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.296, pruned_loss=0.06824, ctc_loss=0.1282, over 3870385.41 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:54:44,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=130949.33333333333, ans=0.2
+2024-08-25 18:54:56,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=131002.66666666667, ans=0.0
+2024-08-25 18:54:57,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=131002.66666666667, ans=0.0
+2024-08-25 18:55:38,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=131056.0, ans=0.0
+2024-08-25 18:56:02,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=131162.66666666666, ans=0.125
+2024-08-25 18:56:19,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=131162.66666666666, ans=0.025
+2024-08-25 18:56:32,989 INFO [train.py:1114] (3/4) Epoch 10, batch 2200, loss[loss=0.2643, simple_loss=0.3134, pruned_loss=0.07904, ctc_loss=0.143, over 19586.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2959, pruned_loss=0.06788, ctc_loss=0.1278, over 3868666.47 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:56:43,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=131216.0, ans=0.5
+2024-08-25 18:57:00,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131216.0, ans=0.1
+2024-08-25 18:57:26,596 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.33 vs. limit=12.0
+2024-08-25 18:57:38,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=131376.0, ans=0.2
+2024-08-25 18:57:51,460 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.773e+02 2.006e+02 2.540e+02 3.937e+02, threshold=4.013e+02, percent-clipped=0.0
+2024-08-25 18:57:51,538 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=131429.33333333334, ans=0.125
+2024-08-25 18:58:07,783 INFO [train.py:1114] (3/4) Epoch 10, batch 2250, loss[loss=0.2689, simple_loss=0.3221, pruned_loss=0.07805, ctc_loss=0.1488, over 19619.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.296, pruned_loss=0.06787, ctc_loss=0.128, over 3868653.23 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:58:30,555 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=131589.33333333334, ans=0.1
+2024-08-25 18:58:39,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=131589.33333333334, ans=0.2
+2024-08-25 18:58:50,867 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.83 vs. limit=22.5
+2024-08-25 18:59:03,258 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=131696.0, ans=0.125
+2024-08-25 18:59:05,056 INFO [train.py:1114] (3/4) Epoch 10, batch 2300, loss[loss=0.2424, simple_loss=0.2941, pruned_loss=0.06929, ctc_loss=0.1302, over 19503.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2952, pruned_loss=0.06773, ctc_loss=0.1278, over 3862127.26 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 18:59:14,942 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=131749.33333333334, ans=0.125
+2024-08-25 19:00:00,734 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.815e+02 2.310e+02 2.961e+02 4.661e+02, threshold=4.621e+02, percent-clipped=5.0
+2024-08-25 19:00:14,654 INFO [train.py:1114] (3/4) Epoch 10, batch 2350, loss[loss=0.2849, simple_loss=0.3248, pruned_loss=0.09101, ctc_loss=0.1573, over 19679.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.2948, pruned_loss=0.06752, ctc_loss=0.1272, over 3864104.51 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:00:26,546 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=132069.33333333334, ans=0.0
+2024-08-25 19:00:37,471 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=132122.66666666666, ans=0.125
+2024-08-25 19:00:38,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=132122.66666666666, ans=0.0
+2024-08-25 19:00:41,918 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=132122.66666666666, ans=0.125
+2024-08-25 19:00:49,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132176.0, ans=0.1
+2024-08-25 19:00:56,921 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.96 vs. limit=15.0
+2024-08-25 19:01:13,181 INFO [train.py:1114] (3/4) Epoch 10, batch 2400, loss[loss=0.255, simple_loss=0.3159, pruned_loss=0.07177, ctc_loss=0.1263, over 19248.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2969, pruned_loss=0.06832, ctc_loss=0.1282, over 3859369.59 frames. ], batch size: 71, lr: 1.46e-02, grad_scale: 32.0
+2024-08-25 19:01:13,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=132282.66666666666, ans=0.125
+2024-08-25 19:01:37,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=132389.33333333334, ans=0.2
+2024-08-25 19:01:39,020 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.42 vs. limit=6.0
+2024-08-25 19:01:43,851 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.70 vs. limit=15.0
+2024-08-25 19:01:44,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=132389.33333333334, ans=0.5
+2024-08-25 19:01:59,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.51 vs. limit=15.0
+2024-08-25 19:02:10,724 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.986e+02 2.279e+02 2.618e+02 8.799e+02, threshold=4.558e+02, percent-clipped=0.0
+2024-08-25 19:02:22,037 INFO [train.py:1114] (3/4) Epoch 10, batch 2450, loss[loss=0.3245, simple_loss=0.3373, pruned_loss=0.1128, ctc_loss=0.2156, over 13096.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3014, pruned_loss=0.07219, ctc_loss=0.1358, over 3729036.20 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 16.0
+2024-08-25 19:02:38,028 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.53 vs. limit=15.0
+2024-08-25 19:02:42,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=132602.66666666666, ans=0.1
+2024-08-25 19:02:52,715 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.13 vs. limit=15.0
+2024-08-25 19:02:53,475 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=132656.0, ans=0.09899494936611666
+2024-08-25 19:02:55,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=132656.0, ans=0.025
+2024-08-25 19:04:28,597 INFO [train.py:1114] (3/4) Epoch 11, batch 0, loss[loss=0.2237, simple_loss=0.2734, pruned_loss=0.06367, ctc_loss=0.1168, over 19802.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2734, pruned_loss=0.06367, ctc_loss=0.1168, over 19802.00 frames. ], batch size: 49, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:04:28,597 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 19:04:55,855 INFO [train.py:1146] (3/4) Epoch 11, validation: loss=0.2031, simple_loss=0.2887, pruned_loss=0.04339, ctc_loss=0.0768, over 944034.00 frames.
+2024-08-25 19:04:55,856 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-25 19:05:00,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=132757.33333333334, ans=0.1
+2024-08-25 19:05:11,727 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.01 vs. limit=15.0
+2024-08-25 19:05:18,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=132864.0, ans=0.125
+2024-08-25 19:05:34,339 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132864.0, ans=0.1
+2024-08-25 19:05:34,540 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=132864.0, ans=0.2
+2024-08-25 19:05:38,279 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.39 vs. limit=5.0
+2024-08-25 19:05:40,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=132917.33333333334, ans=0.0
+2024-08-25 19:05:48,668 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.11 vs. limit=22.5
+2024-08-25 19:06:02,242 INFO [train.py:1114] (3/4) Epoch 11, batch 50, loss[loss=0.2059, simple_loss=0.2664, pruned_loss=0.05372, ctc_loss=0.0949, over 19733.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2969, pruned_loss=0.06729, ctc_loss=0.1275, over 844726.23 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:06:02,567 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=133024.0, ans=0.0
+2024-08-25 19:06:03,364 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.050e+02 2.234e+02 2.552e+02 4.359e+02, threshold=4.468e+02, percent-clipped=1.0
+2024-08-25 19:06:08,054 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:06:08,356 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.42 vs. limit=15.0
+2024-08-25 19:06:09,587 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.36 vs. limit=10.0
+2024-08-25 19:06:22,498 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:06:32,737 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.74 vs. limit=15.0
+2024-08-25 19:06:51,901 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=133184.0, ans=0.1
+2024-08-25 19:07:42,555 INFO [train.py:1114] (3/4) Epoch 11, batch 100, loss[loss=0.2213, simple_loss=0.2768, pruned_loss=0.06084, ctc_loss=0.1103, over 19711.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.296, pruned_loss=0.06663, ctc_loss=0.1259, over 1499036.81 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:07:57,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=133344.0, ans=0.125
+2024-08-25 19:08:08,539 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.17 vs. limit=15.0
+2024-08-25 19:08:24,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=133450.66666666666, ans=0.125
+2024-08-25 19:08:58,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=133504.0, ans=0.1
+2024-08-25 19:09:10,084 INFO [train.py:1114] (3/4) Epoch 11, batch 150, loss[loss=0.2098, simple_loss=0.2604, pruned_loss=0.0578, ctc_loss=0.1091, over 19699.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.2935, pruned_loss=0.06573, ctc_loss=0.1241, over 2027778.22 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-25 19:09:12,928 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.747e+02 2.015e+02 2.344e+02 3.708e+02, threshold=4.031e+02, percent-clipped=0.0
+2024-08-25 19:09:24,715 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=133610.66666666666, ans=0.2
+2024-08-25 19:09:46,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=133717.33333333334, ans=0.0
+2024-08-25 19:10:05,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=133770.66666666666, ans=0.5
+2024-08-25 19:10:18,158 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.41 vs. limit=15.0
+2024-08-25 19:10:23,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.98 vs. limit=22.5
+2024-08-25 19:10:34,566 INFO [train.py:1114] (3/4) Epoch 11, batch 200, loss[loss=0.2589, simple_loss=0.3131, pruned_loss=0.07482, ctc_loss=0.1377, over 18237.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.2916, pruned_loss=0.06486, ctc_loss=0.1223, over 2435452.68 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:10:49,158 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.73 vs. limit=12.0
+2024-08-25 19:11:34,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=133930.66666666666, ans=0.125
+2024-08-25 19:11:40,944 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.00 vs. limit=15.0
+2024-08-25 19:11:50,037 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.63 vs. limit=22.5
+2024-08-25 19:12:01,079 INFO [train.py:1114] (3/4) Epoch 11, batch 250, loss[loss=0.2585, simple_loss=0.3109, pruned_loss=0.07641, ctc_loss=0.1331, over 19402.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2922, pruned_loss=0.0655, ctc_loss=0.1231, over 2756090.28 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:12:02,123 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.716e+02 2.023e+02 2.469e+02 5.021e+02, threshold=4.046e+02, percent-clipped=3.0
+2024-08-25 19:12:03,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=134090.66666666666, ans=0.125
+2024-08-25 19:12:09,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=134090.66666666666, ans=0.125
+2024-08-25 19:12:18,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=134144.0, ans=0.1
+2024-08-25 19:12:20,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=134144.0, ans=0.125
+2024-08-25 19:12:21,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=134144.0, ans=0.125
+2024-08-25 19:12:22,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=134144.0, ans=0.0
+2024-08-25 19:12:24,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=134144.0, ans=0.125
+2024-08-25 19:12:32,830 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.61 vs. limit=15.0
+2024-08-25 19:12:32,889 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.30 vs. limit=10.0
+2024-08-25 19:12:52,076 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.02 vs. limit=12.0
+2024-08-25 19:12:52,966 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=134304.0, ans=0.025
+2024-08-25 19:13:03,619 INFO [train.py:1114] (3/4) Epoch 11, batch 300, loss[loss=0.2627, simple_loss=0.3095, pruned_loss=0.07903, ctc_loss=0.1448, over 19547.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2918, pruned_loss=0.06523, ctc_loss=0.1224, over 3001824.53 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:13:20,702 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=15.0
+2024-08-25 19:13:49,570 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.40 vs. limit=15.0
+2024-08-25 19:13:50,412 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=134517.33333333334, ans=0.0
+2024-08-25 19:14:07,024 INFO [train.py:1114] (3/4) Epoch 11, batch 350, loss[loss=0.2076, simple_loss=0.2697, pruned_loss=0.05264, ctc_loss=0.1006, over 19756.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2921, pruned_loss=0.06523, ctc_loss=0.1223, over 3191252.45 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:14:08,113 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.838e+02 2.258e+02 2.898e+02 4.827e+02, threshold=4.516e+02, percent-clipped=2.0
+2024-08-25 19:14:14,450 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=134624.0, ans=0.125
+2024-08-25 19:14:36,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=134624.0, ans=0.0
+2024-08-25 19:14:54,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=134730.66666666666, ans=0.0
+2024-08-25 19:15:02,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=134730.66666666666, ans=0.0
+2024-08-25 19:15:10,735 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=134784.0, ans=0.07
+2024-08-25 19:15:16,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=134784.0, ans=0.025
+2024-08-25 19:15:47,949 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=134837.33333333334, ans=0.1
+2024-08-25 19:15:55,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=134837.33333333334, ans=0.0
+2024-08-25 19:15:57,892 INFO [train.py:1114] (3/4) Epoch 11, batch 400, loss[loss=0.2236, simple_loss=0.2879, pruned_loss=0.05721, ctc_loss=0.112, over 19498.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.2926, pruned_loss=0.06572, ctc_loss=0.1233, over 3343130.63 frames. ], batch size: 54, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:16:02,777 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=134890.66666666666, ans=0.0
+2024-08-25 19:16:41,248 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=134997.33333333334, ans=0.1
+2024-08-25 19:17:21,518 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.57 vs. limit=15.0
+2024-08-25 19:17:22,192 INFO [train.py:1114] (3/4) Epoch 11, batch 450, loss[loss=0.2219, simple_loss=0.2925, pruned_loss=0.0544, ctc_loss=0.1062, over 19600.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2925, pruned_loss=0.06567, ctc_loss=0.1232, over 3451343.18 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:17:31,719 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.841e+02 2.102e+02 2.681e+02 4.407e+02, threshold=4.204e+02, percent-clipped=0.0
+2024-08-25 19:17:44,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=135210.66666666666, ans=0.125
+2024-08-25 19:17:49,326 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=135210.66666666666, ans=0.025
+2024-08-25 19:18:00,786 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=135264.0, ans=0.0
+2024-08-25 19:18:22,637 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=135370.66666666666, ans=0.1
+2024-08-25 19:18:34,757 INFO [train.py:1114] (3/4) Epoch 11, batch 500, loss[loss=0.2374, simple_loss=0.3018, pruned_loss=0.06346, ctc_loss=0.1152, over 19630.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2916, pruned_loss=0.06506, ctc_loss=0.1223, over 3546681.93 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:18:36,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=135424.0, ans=0.125
+2024-08-25 19:19:50,547 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=135637.33333333334, ans=0.1
+2024-08-25 19:19:50,720 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:19:58,218 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=135637.33333333334, ans=0.125
+2024-08-25 19:20:17,214 INFO [train.py:1114] (3/4) Epoch 11, batch 550, loss[loss=0.2386, simple_loss=0.3058, pruned_loss=0.06259, ctc_loss=0.1157, over 19359.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.2916, pruned_loss=0.06495, ctc_loss=0.1222, over 3609335.63 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-25 19:20:18,397 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.822e+02 2.069e+02 2.386e+02 4.149e+02, threshold=4.137e+02, percent-clipped=0.0
+2024-08-25 19:20:33,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=135744.0, ans=0.125
+2024-08-25 19:20:44,045 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=135744.0, ans=0.0
+2024-08-25 19:20:49,213 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.04 vs. limit=10.0
+2024-08-25 19:20:50,233 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=135797.33333333334, ans=0.125
+2024-08-25 19:21:01,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=135797.33333333334, ans=0.0
+2024-08-25 19:21:15,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=135904.0, ans=0.125
+2024-08-25 19:21:18,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=135904.0, ans=0.125
+2024-08-25 19:21:30,819 INFO [train.py:1114] (3/4) Epoch 11, batch 600, loss[loss=0.2645, simple_loss=0.3119, pruned_loss=0.0785, ctc_loss=0.15, over 19401.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2919, pruned_loss=0.06513, ctc_loss=0.1226, over 3666839.14 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:21:44,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=135957.33333333334, ans=0.125
+2024-08-25 19:21:47,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=135957.33333333334, ans=0.2
+2024-08-25 19:21:50,743 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=136010.66666666666, ans=0.05
+2024-08-25 19:22:32,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=136010.66666666666, ans=0.0
+2024-08-25 19:22:50,544 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=5.926e-03
+2024-08-25 19:23:02,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=136117.33333333334, ans=0.1
+2024-08-25 19:23:10,175 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.43 vs. limit=15.0
+2024-08-25 19:23:42,569 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.08 vs. limit=15.0
+2024-08-25 19:23:54,564 INFO [train.py:1114] (3/4) Epoch 11, batch 650, loss[loss=0.2162, simple_loss=0.2877, pruned_loss=0.05195, ctc_loss=0.1022, over 19771.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2919, pruned_loss=0.06513, ctc_loss=0.1225, over 3716528.57 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:23:55,640 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 1.913e+02 2.094e+02 2.738e+02 4.984e+02, threshold=4.187e+02, percent-clipped=5.0
+2024-08-25 19:23:57,124 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=136224.0, ans=0.0
+2024-08-25 19:23:58,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=136224.0, ans=10.0
+2024-08-25 19:23:58,181 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=136224.0, ans=0.1
+2024-08-25 19:24:39,552 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=136330.66666666666, ans=0.125
+2024-08-25 19:24:47,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=136330.66666666666, ans=0.0
+2024-08-25 19:25:34,154 INFO [train.py:1114] (3/4) Epoch 11, batch 700, loss[loss=0.2386, simple_loss=0.2886, pruned_loss=0.06881, ctc_loss=0.1277, over 19728.00 frames. ], tot_loss[loss=0.237, simple_loss=0.2929, pruned_loss=0.06582, ctc_loss=0.1236, over 3747550.11 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:28:09,977 INFO [train.py:1114] (3/4) Epoch 11, batch 750, loss[loss=0.2355, simple_loss=0.2939, pruned_loss=0.06396, ctc_loss=0.1227, over 19492.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.2921, pruned_loss=0.06535, ctc_loss=0.1228, over 3772940.14 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:28:25,942 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.821e+02 2.028e+02 2.720e+02 4.524e+02, threshold=4.057e+02, percent-clipped=2.0
+2024-08-25 19:28:38,787 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.93 vs. limit=15.0
+2024-08-25 19:28:49,877 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=136810.66666666666, ans=0.125
+2024-08-25 19:28:50,151 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.54 vs. limit=10.0
+2024-08-25 19:28:51,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=136810.66666666666, ans=0.0
+2024-08-25 19:32:08,677 INFO [train.py:1114] (3/4) Epoch 11, batch 800, loss[loss=0.2083, simple_loss=0.2713, pruned_loss=0.05363, ctc_loss=0.09542, over 19814.00 frames. ], tot_loss[loss=0.236, simple_loss=0.2923, pruned_loss=0.06527, ctc_loss=0.1227, over 3794555.52 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:32:18,202 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.24 vs. limit=6.0
+2024-08-25 19:32:23,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=137024.0, ans=0.025
+2024-08-25 19:32:53,323 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=137077.33333333334, ans=0.2
+2024-08-25 19:32:54,764 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.39 vs. limit=15.0
+2024-08-25 19:33:06,228 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=137130.66666666666, ans=0.1
+2024-08-25 19:33:15,891 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=137130.66666666666, ans=0.0
+2024-08-25 19:33:19,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=137130.66666666666, ans=0.1
+2024-08-25 19:33:49,181 INFO [train.py:1114] (3/4) Epoch 11, batch 850, loss[loss=0.23, simple_loss=0.3012, pruned_loss=0.05746, ctc_loss=0.1095, over 19679.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.2916, pruned_loss=0.06499, ctc_loss=0.1222, over 3813600.62 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:33:50,252 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.863e+02 2.065e+02 2.415e+02 4.305e+02, threshold=4.130e+02, percent-clipped=1.0
+2024-08-25 19:34:09,079 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=137344.0, ans=0.0
+2024-08-25 19:34:12,508 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=137344.0, ans=0.035
+2024-08-25 19:34:18,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=137397.33333333334, ans=0.125
+2024-08-25 19:34:40,260 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=137450.66666666666, ans=0.125
+2024-08-25 19:34:47,542 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=137450.66666666666, ans=0.0
+2024-08-25 19:34:53,858 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=137504.0, ans=0.2
+2024-08-25 19:34:57,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=137504.0, ans=0.0
+2024-08-25 19:35:00,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=137504.0, ans=0.2
+2024-08-25 19:35:00,786 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=137504.0, ans=0.07
+2024-08-25 19:35:01,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=137504.0, ans=0.125
+2024-08-25 19:35:05,246 INFO [train.py:1114] (3/4) Epoch 11, batch 900, loss[loss=0.232, simple_loss=0.2804, pruned_loss=0.06606, ctc_loss=0.1285, over 19418.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.2919, pruned_loss=0.06515, ctc_loss=0.1225, over 3817804.25 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:35:07,922 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.70 vs. limit=15.0
+2024-08-25 19:35:35,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=137664.0, ans=0.125
+2024-08-25 19:35:48,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=137664.0, ans=0.125
+2024-08-25 19:35:52,991 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=137664.0, ans=0.5
+2024-08-25 19:36:04,406 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=137717.33333333334, ans=0.0
+2024-08-25 19:36:06,110 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.15 vs. limit=15.0
+2024-08-25 19:36:06,762 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=137717.33333333334, ans=10.0
+2024-08-25 19:36:49,861 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:37:18,564 INFO [train.py:1114] (3/4) Epoch 11, batch 950, loss[loss=0.2342, simple_loss=0.2882, pruned_loss=0.06574, ctc_loss=0.122, over 19512.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.2924, pruned_loss=0.06539, ctc_loss=0.1228, over 3819341.80 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-25 19:37:19,700 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.805e+02 2.081e+02 2.536e+02 4.211e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-25 19:37:25,956 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.77 vs. limit=6.0
+2024-08-25 19:37:44,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=137930.66666666666, ans=0.0
+2024-08-25 19:37:48,998 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.75 vs. limit=22.5
+2024-08-25 19:37:53,895 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.95 vs. limit=12.0
+2024-08-25 19:37:59,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=137930.66666666666, ans=0.2
+2024-08-25 19:37:59,826 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=137930.66666666666, ans=0.0
+2024-08-25 19:38:03,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=137984.0, ans=0.2
+2024-08-25 19:38:31,400 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=138037.33333333334, ans=0.05
+2024-08-25 19:38:48,985 INFO [train.py:1114] (3/4) Epoch 11, batch 1000, loss[loss=0.2479, simple_loss=0.2997, pruned_loss=0.07132, ctc_loss=0.1335, over 19850.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.2936, pruned_loss=0.06619, ctc_loss=0.1243, over 3815711.39 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:39:27,610 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.50 vs. limit=15.0
+2024-08-25 19:39:36,984 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.99 vs. limit=6.0
+2024-08-25 19:39:40,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=138250.66666666666, ans=0.125
+2024-08-25 19:40:11,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=138304.0, ans=0.0
+2024-08-25 19:40:14,838 INFO [train.py:1114] (3/4) Epoch 11, batch 1050, loss[loss=0.2412, simple_loss=0.2964, pruned_loss=0.06707, ctc_loss=0.13, over 19839.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.2928, pruned_loss=0.06591, ctc_loss=0.1239, over 3822587.29 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:40:16,356 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.04 vs. limit=22.5
+2024-08-25 19:40:16,851 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.874e+02 2.329e+02 2.645e+02 4.211e+02, threshold=4.658e+02, percent-clipped=2.0
+2024-08-25 19:40:19,279 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=138357.33333333334, ans=0.125
+2024-08-25 19:40:22,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=138357.33333333334, ans=0.125
+2024-08-25 19:40:23,877 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.54 vs. limit=22.5
+2024-08-25 19:40:24,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=138357.33333333334, ans=0.125
+2024-08-25 19:40:24,723 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=138357.33333333334, ans=0.1
+2024-08-25 19:40:28,075 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=138410.66666666666, ans=0.125
+2024-08-25 19:40:55,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=138517.33333333334, ans=0.1
+2024-08-25 19:41:24,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=138570.66666666666, ans=0.0
+2024-08-25 19:41:26,113 INFO [train.py:1114] (3/4) Epoch 11, batch 1100, loss[loss=0.253, simple_loss=0.2994, pruned_loss=0.0757, ctc_loss=0.1381, over 19576.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.2919, pruned_loss=0.06529, ctc_loss=0.123, over 3830551.23 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:42:17,835 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=138677.33333333334, ans=0.125
+2024-08-25 19:42:51,271 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 19:43:02,304 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=138730.66666666666, ans=0.0
+2024-08-25 19:43:08,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=138784.0, ans=0.1
+2024-08-25 19:43:35,986 INFO [train.py:1114] (3/4) Epoch 11, batch 1150, loss[loss=0.2022, simple_loss=0.2668, pruned_loss=0.05075, ctc_loss=0.09036, over 19582.00 frames. ], tot_loss[loss=0.236, simple_loss=0.2919, pruned_loss=0.06545, ctc_loss=0.1231, over 3828472.64 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:43:37,194 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.797e+02 2.039e+02 2.453e+02 4.580e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-25 19:43:53,788 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=138944.0, ans=0.0
+2024-08-25 19:44:06,611 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=138997.33333333334, ans=0.1
+2024-08-25 19:44:09,424 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.71 vs. limit=15.0
+2024-08-25 19:44:23,828 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.67 vs. limit=6.0
+2024-08-25 19:44:29,000 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=139104.0, ans=0.125
+2024-08-25 19:44:41,838 INFO [train.py:1114] (3/4) Epoch 11, batch 1200, loss[loss=0.2355, simple_loss=0.2937, pruned_loss=0.06542, ctc_loss=0.1162, over 19827.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.2936, pruned_loss=0.06633, ctc_loss=0.1249, over 3824357.90 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:44:46,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=139157.33333333334, ans=0.125
+2024-08-25 19:44:48,371 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.76 vs. limit=22.5
+2024-08-25 19:44:53,470 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=139210.66666666666, ans=0.125
+2024-08-25 19:45:00,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=139210.66666666666, ans=0.2
+2024-08-25 19:45:55,000 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=139317.33333333334, ans=10.0
+2024-08-25 19:46:02,884 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=139370.66666666666, ans=0.125
+2024-08-25 19:46:06,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=139370.66666666666, ans=0.125
+2024-08-25 19:46:15,637 INFO [train.py:1114] (3/4) Epoch 11, batch 1250, loss[loss=0.2728, simple_loss=0.3198, pruned_loss=0.08152, ctc_loss=0.157, over 19501.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.2937, pruned_loss=0.06585, ctc_loss=0.1238, over 3842991.96 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-25 19:46:16,712 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.769e+02 1.992e+02 2.545e+02 3.633e+02, threshold=3.984e+02, percent-clipped=0.0
+2024-08-25 19:46:24,139 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.85 vs. limit=15.0
+2024-08-25 19:46:39,607 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=139424.0, ans=0.125
+2024-08-25 19:46:45,543 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff2.min_abs, batch_count=139477.33333333334, ans=0.1
+2024-08-25 19:47:07,447 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=139530.66666666666, ans=0.0
+2024-08-25 19:47:18,086 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=139584.0, ans=0.125
+2024-08-25 19:47:23,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=139584.0, ans=0.2
+2024-08-25 19:47:40,574 INFO [train.py:1114] (3/4) Epoch 11, batch 1300, loss[loss=0.2374, simple_loss=0.3067, pruned_loss=0.0601, ctc_loss=0.1197, over 18834.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2925, pruned_loss=0.06503, ctc_loss=0.1223, over 3847251.73 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:48:01,916 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=139744.0, ans=0.125
+2024-08-25 19:48:04,235 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=139797.33333333334, ans=0.1
+2024-08-25 19:48:10,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=139797.33333333334, ans=0.2
+2024-08-25 19:48:22,889 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.48 vs. limit=10.0
+2024-08-25 19:48:23,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=139850.66666666666, ans=0.2
+2024-08-25 19:48:29,281 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=139850.66666666666, ans=0.125
+2024-08-25 19:48:31,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=139850.66666666666, ans=0.125
+2024-08-25 19:48:58,254 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=139957.33333333334, ans=0.1
+2024-08-25 19:48:58,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=139957.33333333334, ans=0.125
+2024-08-25 19:48:59,248 INFO [train.py:1114] (3/4) Epoch 11, batch 1350, loss[loss=0.2487, simple_loss=0.3036, pruned_loss=0.07042, ctc_loss=0.1323, over 19752.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.2918, pruned_loss=0.0646, ctc_loss=0.1214, over 3858638.85 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 16.0
+2024-08-25 19:49:00,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=139957.33333333334, ans=0.125
+2024-08-25 19:49:01,642 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.851e+02 2.124e+02 2.742e+02 4.665e+02, threshold=4.248e+02, percent-clipped=3.0
+2024-08-25 19:49:10,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff3.min_abs, batch_count=140010.66666666666, ans=0.2
+2024-08-25 19:49:27,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=140064.0, ans=0.5
+2024-08-25 19:49:43,875 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=140117.33333333334, ans=0.0
+2024-08-25 19:49:49,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=140117.33333333334, ans=0.125
+2024-08-25 19:49:49,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=140117.33333333334, ans=0.025
+2024-08-25 19:49:51,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=140170.66666666666, ans=0.1
+2024-08-25 19:49:53,871 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=140170.66666666666, ans=0.125
+2024-08-25 19:50:07,175 INFO [train.py:1114] (3/4) Epoch 11, batch 1400, loss[loss=0.1862, simple_loss=0.2479, pruned_loss=0.04541, ctc_loss=0.08398, over 19677.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2913, pruned_loss=0.06439, ctc_loss=0.1208, over 3865655.93 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:50:09,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=140224.0, ans=0.0
+2024-08-25 19:50:41,705 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.06 vs. limit=6.0
+2024-08-25 19:51:37,463 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.38 vs. limit=10.0
+2024-08-25 19:51:42,655 INFO [train.py:1114] (3/4) Epoch 11, batch 1450, loss[loss=0.2659, simple_loss=0.3195, pruned_loss=0.07782, ctc_loss=0.1416, over 19655.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2919, pruned_loss=0.06448, ctc_loss=0.1208, over 3863849.63 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:51:45,009 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.813e+02 2.052e+02 2.523e+02 4.896e+02, threshold=4.103e+02, percent-clipped=2.0
+2024-08-25 19:52:10,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=140544.0, ans=0.0
+2024-08-25 19:52:20,699 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=140597.33333333334, ans=0.125
+2024-08-25 19:52:36,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=140597.33333333334, ans=0.125
+2024-08-25 19:52:41,176 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=140650.66666666666, ans=0.125
+2024-08-25 19:52:45,546 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=140650.66666666666, ans=0.1
+2024-08-25 19:52:47,962 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=140650.66666666666, ans=0.125
+2024-08-25 19:52:58,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=140704.0, ans=0.125
+2024-08-25 19:53:19,918 INFO [train.py:1114] (3/4) Epoch 11, batch 1500, loss[loss=0.2658, simple_loss=0.3155, pruned_loss=0.07867, ctc_loss=0.1467, over 19575.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2922, pruned_loss=0.06493, ctc_loss=0.1218, over 3863318.59 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:53:49,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=140810.66666666666, ans=0.125
+2024-08-25 19:54:14,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=140864.0, ans=0.025
+2024-08-25 19:55:07,083 INFO [train.py:1114] (3/4) Epoch 11, batch 1550, loss[loss=0.279, simple_loss=0.3229, pruned_loss=0.08576, ctc_loss=0.1592, over 19600.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.2918, pruned_loss=0.06463, ctc_loss=0.1216, over 3848889.14 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 19:55:08,786 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=141024.0, ans=0.2
+2024-08-25 19:55:10,753 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 1.804e+02 2.014e+02 2.422e+02 4.168e+02, threshold=4.028e+02, percent-clipped=1.0
+2024-08-25 19:55:38,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=141024.0, ans=0.0
+2024-08-25 19:56:14,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=141130.66666666666, ans=0.0
+2024-08-25 19:57:19,245 INFO [train.py:1114] (3/4) Epoch 11, batch 1600, loss[loss=0.2431, simple_loss=0.3059, pruned_loss=0.06614, ctc_loss=0.12, over 19856.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.292, pruned_loss=0.065, ctc_loss=0.1225, over 3836420.81 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:58:15,636 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=141397.33333333334, ans=0.2
+2024-08-25 19:58:18,380 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.58 vs. limit=12.0
+2024-08-25 19:58:27,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=141450.66666666666, ans=0.025
+2024-08-25 19:59:27,408 INFO [train.py:1114] (3/4) Epoch 11, batch 1650, loss[loss=0.2424, simple_loss=0.2986, pruned_loss=0.06728, ctc_loss=0.129, over 19653.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.292, pruned_loss=0.06508, ctc_loss=0.1229, over 3833698.53 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-25 19:59:29,880 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.768e+02 1.990e+02 2.303e+02 4.438e+02, threshold=3.979e+02, percent-clipped=2.0
+2024-08-25 19:59:42,734 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.55 vs. limit=6.0
+2024-08-25 19:59:48,244 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=141557.33333333334, ans=0.125
+2024-08-25 19:59:53,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=141610.66666666666, ans=0.1
+2024-08-25 20:00:10,204 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=141664.0, ans=0.125
+2024-08-25 20:00:14,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=141664.0, ans=0.0
+2024-08-25 20:01:05,209 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=141770.66666666666, ans=0.125
+2024-08-25 20:01:05,276 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=141770.66666666666, ans=0.2
+2024-08-25 20:01:17,965 INFO [train.py:1114] (3/4) Epoch 11, batch 1700, loss[loss=0.2206, simple_loss=0.2713, pruned_loss=0.06185, ctc_loss=0.1154, over 19690.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2912, pruned_loss=0.06431, ctc_loss=0.1216, over 3847872.20 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:01:39,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=141930.66666666666, ans=0.1
+2024-08-25 20:01:43,005 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=141930.66666666666, ans=0.125
+2024-08-25 20:01:55,239 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=141984.0, ans=0.0
+2024-08-25 20:02:12,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=142037.33333333334, ans=0.125
+2024-08-25 20:02:16,946 INFO [train.py:1114] (3/4) Epoch 11, batch 1750, loss[loss=0.2299, simple_loss=0.2776, pruned_loss=0.06704, ctc_loss=0.1202, over 19668.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2905, pruned_loss=0.06399, ctc_loss=0.1208, over 3852452.67 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:02:20,530 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.814e+02 2.107e+02 2.366e+02 3.890e+02, threshold=4.214e+02, percent-clipped=0.0
+2024-08-25 20:02:42,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=142090.66666666666, ans=0.125
+2024-08-25 20:03:14,530 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 20:03:20,741 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.87 vs. limit=15.0
+2024-08-25 20:04:10,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=142304.0, ans=0.125
+2024-08-25 20:04:27,016 INFO [train.py:1114] (3/4) Epoch 11, batch 1800, loss[loss=0.2145, simple_loss=0.2903, pruned_loss=0.05096, ctc_loss=0.09218, over 19622.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.2913, pruned_loss=0.06443, ctc_loss=0.1212, over 3853593.72 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 16.0
+2024-08-25 20:04:30,803 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.84 vs. limit=15.0
+2024-08-25 20:04:48,974 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.66 vs. limit=15.0
+2024-08-25 20:04:51,924 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=142410.66666666666, ans=0.2
+2024-08-25 20:05:21,023 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=142464.0, ans=0.125
+2024-08-25 20:05:29,895 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.63 vs. limit=15.0
+2024-08-25 20:05:50,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=142570.66666666666, ans=0.0
+2024-08-25 20:06:13,793 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.27 vs. limit=22.5
+2024-08-25 20:06:15,202 INFO [train.py:1114] (3/4) Epoch 11, batch 1850, loss[loss=0.2266, simple_loss=0.2888, pruned_loss=0.05982, ctc_loss=0.1122, over 19599.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2912, pruned_loss=0.06457, ctc_loss=0.1217, over 3856713.00 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:06:18,509 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.849e+02 2.256e+02 2.966e+02 5.642e+02, threshold=4.511e+02, percent-clipped=6.0
+2024-08-25 20:06:26,814 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.95 vs. limit=15.0
+2024-08-25 20:06:59,221 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=142730.66666666666, ans=0.025
+2024-08-25 20:07:12,324 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=142784.0, ans=0.125
+2024-08-25 20:07:23,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=142784.0, ans=0.125
+2024-08-25 20:07:51,842 INFO [train.py:1114] (3/4) Epoch 11, batch 1900, loss[loss=0.2322, simple_loss=0.2971, pruned_loss=0.06045, ctc_loss=0.1161, over 19657.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2919, pruned_loss=0.065, ctc_loss=0.1224, over 3860517.49 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 20:08:15,489 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 20:08:20,989 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=142944.0, ans=0.0
+2024-08-25 20:32:27,854 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=143050.66666666666, ans=0.2
+2024-08-25 20:51:09,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=143104.0, ans=0.125
+2024-08-25 20:55:30,008 INFO [train.py:1114] (3/4) Epoch 11, batch 1950, loss[loss=0.2227, simple_loss=0.2861, pruned_loss=0.05711, ctc_loss=0.1129, over 19600.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.2928, pruned_loss=0.06487, ctc_loss=0.122, over 3869579.71 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-25 21:03:39,809 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.850e+02 2.123e+02 2.695e+02 5.282e+02, threshold=4.246e+02, percent-clipped=2.0
+2024-08-25 21:09:35,050 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=143157.33333333334, ans=0.125
+2024-08-25 21:29:54,582 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=143264.0, ans=0.2
+2024-08-25 21:35:18,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=143317.33333333334, ans=0.125
+2024-08-25 21:45:05,548 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.47 vs. limit=12.0
+2024-08-25 21:46:16,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.77 vs. limit=22.5
+2024-08-25 21:46:38,198 INFO [train.py:1114] (3/4) Epoch 11, batch 2000, loss[loss=0.1827, simple_loss=0.248, pruned_loss=0.04247, ctc_loss=0.08091, over 19639.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.293, pruned_loss=0.06496, ctc_loss=0.1222, over 3854321.89 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 21:57:33,450 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.30 vs. limit=15.0
+2024-08-25 21:58:41,296 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=143477.33333333334, ans=0.015
+2024-08-25 22:03:56,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=143477.33333333334, ans=0.125
+2024-08-25 22:05:28,151 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=143530.66666666666, ans=0.025
+2024-08-25 22:08:39,899 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=143584.0, ans=0.025
+2024-08-25 22:19:42,816 INFO [train.py:1114] (3/4) Epoch 11, batch 2050, loss[loss=0.2247, simple_loss=0.2685, pruned_loss=0.06558, ctc_loss=0.1242, over 19703.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2922, pruned_loss=0.06488, ctc_loss=0.1221, over 3851667.99 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:20:13,479 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.838e+02 2.216e+02 2.724e+02 4.008e+02, threshold=4.432e+02, percent-clipped=0.0
+2024-08-25 22:20:41,925 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=143690.66666666666, ans=0.125
+2024-08-25 22:27:32,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=143850.66666666666, ans=0.0
+2024-08-25 22:27:58,765 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=143850.66666666666, ans=0.1
+2024-08-25 22:27:59,972 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=143850.66666666666, ans=0.1
+2024-08-25 22:28:12,007 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.91 vs. limit=22.5
+2024-08-25 22:31:28,411 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=143904.0, ans=0.025
+2024-08-25 22:32:23,591 INFO [train.py:1114] (3/4) Epoch 11, batch 2100, loss[loss=0.2381, simple_loss=0.3032, pruned_loss=0.0624, ctc_loss=0.1203, over 19761.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.2919, pruned_loss=0.0645, ctc_loss=0.1215, over 3858196.46 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:33:21,279 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-25 22:34:33,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-25 22:34:44,212 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=144010.66666666666, ans=0.125
+2024-08-25 22:34:44,243 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=144010.66666666666, ans=0.1
+2024-08-25 22:34:54,277 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=144010.66666666666, ans=0.0
+2024-08-25 22:35:12,149 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=144010.66666666666, ans=0.05
+2024-08-25 22:36:29,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=144064.0, ans=0.125
+2024-08-25 22:36:29,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=144064.0, ans=0.125
+2024-08-25 22:37:30,816 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=144117.33333333334, ans=0.09899494936611666
+2024-08-25 22:39:07,559 INFO [train.py:1114] (3/4) Epoch 11, batch 2150, loss[loss=0.2432, simple_loss=0.2901, pruned_loss=0.07063, ctc_loss=0.1374, over 19854.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.2914, pruned_loss=0.06448, ctc_loss=0.1213, over 3869561.22 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:39:51,928 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 1.804e+02 2.068e+02 2.942e+02 5.639e+02, threshold=4.136e+02, percent-clipped=4.0
+2024-08-25 22:41:20,394 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=144277.33333333334, ans=0.2
+2024-08-25 22:42:45,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=144330.66666666666, ans=0.0
+2024-08-25 22:43:18,843 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.64 vs. limit=5.0
+2024-08-25 22:43:19,251 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=144384.0, ans=0.0
+2024-08-25 22:44:02,557 INFO [train.py:1114] (3/4) Epoch 11, batch 2200, loss[loss=0.2409, simple_loss=0.3024, pruned_loss=0.06485, ctc_loss=0.1242, over 19598.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2907, pruned_loss=0.06412, ctc_loss=0.1205, over 3868060.34 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 32.0
+2024-08-25 22:46:31,104 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=144544.0, ans=0.125
+2024-08-25 22:47:00,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=144597.33333333334, ans=0.2
+2024-08-25 22:47:17,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=144597.33333333334, ans=0.0
+2024-08-25 22:49:03,016 INFO [train.py:1114] (3/4) Epoch 11, batch 2250, loss[loss=0.2473, simple_loss=0.3063, pruned_loss=0.06805, ctc_loss=0.1303, over 19619.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2912, pruned_loss=0.06441, ctc_loss=0.1209, over 3867730.12 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:49:09,610 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.818e+02 2.110e+02 2.782e+02 6.628e+02, threshold=4.220e+02, percent-clipped=3.0
+2024-08-25 22:49:55,011 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.16 vs. limit=6.0
+2024-08-25 22:50:09,651 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=144917.33333333334, ans=0.07
+2024-08-25 22:50:37,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=145024.0, ans=0.125
+2024-08-25 22:50:46,952 INFO [train.py:1114] (3/4) Epoch 11, batch 2300, loss[loss=0.2298, simple_loss=0.2884, pruned_loss=0.06295, ctc_loss=0.1131, over 19514.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2896, pruned_loss=0.064, ctc_loss=0.1202, over 3860895.39 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:50:53,183 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.58 vs. limit=6.0
+2024-08-25 22:51:17,157 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=145024.0, ans=0.125
+2024-08-25 22:51:32,263 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=145077.33333333334, ans=0.2
+2024-08-25 22:51:36,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145130.66666666666, ans=0.1
+2024-08-25 22:52:12,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145184.0, ans=0.1
+2024-08-25 22:52:37,397 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=9.72 vs. limit=12.0
+2024-08-25 22:52:55,277 INFO [train.py:1114] (3/4) Epoch 11, batch 2350, loss[loss=0.2518, simple_loss=0.3071, pruned_loss=0.07109, ctc_loss=0.136, over 19652.00 frames. ], tot_loss[loss=0.233, simple_loss=0.2896, pruned_loss=0.06416, ctc_loss=0.1204, over 3863648.94 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:53:00,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=145290.66666666666, ans=0.125
+2024-08-25 22:53:01,239 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.317e+02 1.788e+02 2.141e+02 2.380e+02 3.835e+02, threshold=4.282e+02, percent-clipped=0.0
+2024-08-25 22:53:06,385 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.30 vs. limit=15.0
+2024-08-25 22:53:11,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=145290.66666666666, ans=0.025
+2024-08-25 22:53:26,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145344.0, ans=0.1
+2024-08-25 22:53:34,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.51 vs. limit=6.0
+2024-08-25 22:53:44,240 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=145397.33333333334, ans=0.2
+2024-08-25 22:53:46,771 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.46 vs. limit=6.0
+2024-08-25 22:54:15,134 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=145504.0, ans=0.125
+2024-08-25 22:54:25,998 INFO [train.py:1114] (3/4) Epoch 11, batch 2400, loss[loss=0.2372, simple_loss=0.2985, pruned_loss=0.06285, ctc_loss=0.1256, over 19293.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.292, pruned_loss=0.06508, ctc_loss=0.122, over 3857595.91 frames. ], batch size: 71, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:54:46,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145610.66666666666, ans=0.1
+2024-08-25 22:55:10,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=145717.33333333334, ans=0.125
+2024-08-25 22:55:23,707 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.57 vs. limit=15.0
+2024-08-25 22:55:36,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=145770.66666666666, ans=0.025
+2024-08-25 22:55:44,071 INFO [train.py:1114] (3/4) Epoch 11, batch 2450, loss[loss=0.3527, simple_loss=0.3499, pruned_loss=0.13, ctc_loss=0.2383, over 13244.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.2963, pruned_loss=0.06863, ctc_loss=0.1288, over 3729666.82 frames. ], batch size: 140, lr: 1.33e-02, grad_scale: 32.0
+2024-08-25 22:56:00,766 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.910e+02 2.208e+02 2.594e+02 5.356e+02, threshold=4.415e+02, percent-clipped=1.0
+2024-08-25 22:58:44,112 INFO [train.py:1114] (3/4) Epoch 12, batch 0, loss[loss=0.2733, simple_loss=0.3043, pruned_loss=0.08727, ctc_loss=0.1694, over 19401.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3043, pruned_loss=0.08727, ctc_loss=0.1694, over 19401.00 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 22:58:44,113 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 22:59:53,981 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.0477, 2.7592, 3.2232, 2.5012], device='cuda:3')
+2024-08-25 23:00:02,931 INFO [train.py:1146] (3/4) Epoch 12, validation: loss=0.1972, simple_loss=0.2841, pruned_loss=0.04086, ctc_loss=0.07109, over 944034.00 frames.
+2024-08-25 23:00:02,931 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
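
> Note: each training and validation line reports its losses "over N frames", and the fractional frame counts in `tot_loss` (e.g. 3868060.34) suggest frame-weighted running averages rather than simple per-batch means. A sketch of that bookkeeping, under the assumption that logged losses are per-frame values; the recipe's own tracker may differ:

```python
class FrameWeightedAverage:
    """Running frame-weighted average of a per-frame loss."""

    def __init__(self) -> None:
        self.weighted_sum = 0.0
        self.num_frames = 0.0

    def update(self, loss_per_frame: float, num_frames: float) -> None:
        self.weighted_sum += loss_per_frame * num_frames
        self.num_frames += num_frames

    @property
    def value(self) -> float:
        return self.weighted_sum / max(self.num_frames, 1.0)

tracker = FrameWeightedAverage()
tracker.update(0.2733, 19401.0)  # batch 0 of epoch 12, from the log above
print(tracker.value)             # -> 0.2733, matching tot_loss at batch 0
```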
+2024-08-25 23:00:15,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=146032.0, ans=0.2
+2024-08-25 23:00:58,595 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.30 vs. limit=15.0
+2024-08-25 23:01:02,854 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.36 vs. limit=10.0
+2024-08-25 23:01:08,422 INFO [train.py:1114] (3/4) Epoch 12, batch 50, loss[loss=0.2109, simple_loss=0.2621, pruned_loss=0.05843, ctc_loss=0.1068, over 19703.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.2919, pruned_loss=0.06546, ctc_loss=0.1236, over 845316.12 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:01:17,163 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=146298.66666666666, ans=0.125
+2024-08-25 23:01:20,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=146298.66666666666, ans=0.125
+2024-08-25 23:01:23,963 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=146352.0, ans=0.125
+2024-08-25 23:01:26,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=146352.0, ans=0.125
+2024-08-25 23:01:27,723 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.810e+02 2.073e+02 2.436e+02 4.057e+02, threshold=4.147e+02, percent-clipped=0.0
+2024-08-25 23:01:42,348 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=146405.33333333334, ans=15.0
+2024-08-25 23:01:45,732 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=146458.66666666666, ans=0.1
+2024-08-25 23:01:51,839 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.35 vs. limit=15.0
+2024-08-25 23:02:20,271 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.98 vs. limit=22.5
+2024-08-25 23:02:23,000 INFO [train.py:1114] (3/4) Epoch 12, batch 100, loss[loss=0.2313, simple_loss=0.2898, pruned_loss=0.06322, ctc_loss=0.1158, over 19712.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.2943, pruned_loss=0.06564, ctc_loss=0.1242, over 1500217.78 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:02:33,927 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=146565.33333333334, ans=0.2
+2024-08-25 23:02:54,616 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=146618.66666666666, ans=0.1
+2024-08-25 23:02:57,102 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.36 vs. limit=15.0
+2024-08-25 23:03:39,245 INFO [train.py:1114] (3/4) Epoch 12, batch 150, loss[loss=0.1931, simple_loss=0.2523, pruned_loss=0.04974, ctc_loss=0.08593, over 19714.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2912, pruned_loss=0.06367, ctc_loss=0.1204, over 2028992.09 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:03:51,624 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.13 vs. limit=15.0
+2024-08-25 23:04:05,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=146885.33333333334, ans=0.125
+2024-08-25 23:04:09,868 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.659e+02 1.880e+02 2.314e+02 3.650e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-25 23:04:23,314 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
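
> Note: the `WithLoss` lines attach a small auxiliary loss to the attention weights and log its recent sum (usually 0.000e+00 here). One way such a penalty can be wired in without changing the forward value is a custom autograd function that injects the penalty's gradient on the backward pass; the sketch below assumes a simple "penalize absolute values above a limit" objective, which is an assumption, not the repository's definition:

```python
import torch

class WithAbsPenalty(torch.autograd.Function):
    """Identity in forward; adds grad of sum(relu(|x| - limit)) in backward."""

    @staticmethod
    def forward(ctx, x: torch.Tensor, limit: float) -> torch.Tensor:
        ctx.save_for_backward(x)
        ctx.limit = limit
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        (x,) = ctx.saved_tensors
        # d/dx of sum(relu(|x| - limit)) is sign(x) wherever |x| > limit.
        penalty_grad = torch.sign(x) * (x.abs() > ctx.limit).to(x.dtype)
        return grad_out + penalty_grad, None

x = torch.randn(4, 8, requires_grad=True)
y = WithAbsPenalty.apply(x, 5.0)
y.sum().backward()                                 # penalty is 0: |x| < 5
print(torch.allclose(x.grad, torch.ones_like(x)))  # True
```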
+2024-08-25 23:04:39,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=146938.66666666666, ans=0.07
+2024-08-25 23:04:55,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=147045.33333333334, ans=0.0
+2024-08-25 23:05:07,043 INFO [train.py:1114] (3/4) Epoch 12, batch 200, loss[loss=0.2466, simple_loss=0.3007, pruned_loss=0.07015, ctc_loss=0.1306, over 18303.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2904, pruned_loss=0.06318, ctc_loss=0.1193, over 2435906.35 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:05:13,093 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=147098.66666666666, ans=0.0
+2024-08-25 23:05:34,755 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=147152.0, ans=0.5
+2024-08-25 23:06:02,909 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.45 vs. limit=6.0
+2024-08-25 23:06:21,790 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:06:55,334 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=147312.0, ans=0.0
+2024-08-25 23:07:01,816 INFO [train.py:1114] (3/4) Epoch 12, batch 250, loss[loss=0.2457, simple_loss=0.304, pruned_loss=0.06866, ctc_loss=0.1249, over 19363.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2896, pruned_loss=0.06282, ctc_loss=0.1188, over 2755304.30 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:07:04,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=147365.33333333334, ans=0.0
+2024-08-25 23:07:22,624 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 1.825e+02 2.154e+02 2.499e+02 3.884e+02, threshold=4.307e+02, percent-clipped=2.0
+2024-08-25 23:07:26,498 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=147418.66666666666, ans=0.0
+2024-08-25 23:07:45,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=147525.33333333334, ans=0.0
+2024-08-25 23:07:52,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=147525.33333333334, ans=0.0
+2024-08-25 23:08:07,245 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=147578.66666666666, ans=0.2
+2024-08-25 23:08:13,950 INFO [train.py:1114] (3/4) Epoch 12, batch 300, loss[loss=0.2383, simple_loss=0.2958, pruned_loss=0.06578, ctc_loss=0.1232, over 19550.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.2888, pruned_loss=0.06256, ctc_loss=0.118, over 2999102.31 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:08:29,398 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.67 vs. limit=15.0
+2024-08-25 23:08:47,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=147738.66666666666, ans=10.0
+2024-08-25 23:09:03,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=147845.33333333334, ans=0.0
+2024-08-25 23:09:03,444 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.19 vs. limit=22.5
+2024-08-25 23:09:12,399 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.37 vs. limit=6.0
+2024-08-25 23:09:17,476 INFO [train.py:1114] (3/4) Epoch 12, batch 350, loss[loss=0.2275, simple_loss=0.2797, pruned_loss=0.06359, ctc_loss=0.1205, over 19741.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.2895, pruned_loss=0.0626, ctc_loss=0.118, over 3190049.49 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:09:36,453 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.749e+02 2.047e+02 2.740e+02 4.170e+02, threshold=4.094e+02, percent-clipped=0.0
+2024-08-25 23:10:00,536 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.37 vs. limit=15.0
+2024-08-25 23:10:10,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=148058.66666666666, ans=0.2
+2024-08-25 23:10:12,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148058.66666666666, ans=0.1
+2024-08-25 23:10:12,488 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.29 vs. limit=12.0
+2024-08-25 23:10:25,929 INFO [train.py:1114] (3/4) Epoch 12, batch 400, loss[loss=0.2268, simple_loss=0.2922, pruned_loss=0.0592, ctc_loss=0.1075, over 19493.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2885, pruned_loss=0.06222, ctc_loss=0.1171, over 3341862.98 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-25 23:10:56,367 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=148218.66666666666, ans=0.0
+2024-08-25 23:11:13,060 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=148325.33333333334, ans=0.02
+2024-08-25 23:11:23,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=148325.33333333334, ans=0.125
+2024-08-25 23:11:27,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=148325.33333333334, ans=0.0
+2024-08-25 23:11:33,443 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.56 vs. limit=5.0
+2024-08-25 23:11:51,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=148378.66666666666, ans=0.1
+2024-08-25 23:11:54,375 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.94 vs. limit=15.0
+2024-08-25 23:12:05,083 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.64 vs. limit=15.0
+2024-08-25 23:12:05,893 INFO [train.py:1114] (3/4) Epoch 12, batch 450, loss[loss=0.2278, simple_loss=0.2911, pruned_loss=0.05858, ctc_loss=0.1184, over 19604.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2887, pruned_loss=0.06214, ctc_loss=0.117, over 3449575.95 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:12:07,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=148432.0, ans=0.125
+2024-08-25 23:12:28,369 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.830e+02 2.201e+02 2.765e+02 4.484e+02, threshold=4.403e+02, percent-clipped=1.0
+2024-08-25 23:12:35,808 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.53 vs. limit=6.0
+2024-08-25 23:12:38,894 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:13:21,163 INFO [train.py:1114] (3/4) Epoch 12, batch 500, loss[loss=0.2495, simple_loss=0.3105, pruned_loss=0.06948, ctc_loss=0.1239, over 19711.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2881, pruned_loss=0.06225, ctc_loss=0.1171, over 3546020.89 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:13:23,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=148698.66666666666, ans=0.2
+2024-08-25 23:13:51,619 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=148752.0, ans=0.1
+2024-08-25 23:13:55,854 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=148805.33333333334, ans=0.125
+2024-08-25 23:14:15,406 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:14:40,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148858.66666666666, ans=0.1
+2024-08-25 23:14:52,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=148912.0, ans=6.0
+2024-08-25 23:14:52,728 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=148912.0, ans=22.5
+2024-08-25 23:14:54,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=148912.0, ans=10.0
+2024-08-25 23:14:59,259 INFO [train.py:1114] (3/4) Epoch 12, batch 550, loss[loss=0.2404, simple_loss=0.3026, pruned_loss=0.0646, ctc_loss=0.1224, over 19283.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2884, pruned_loss=0.06254, ctc_loss=0.1177, over 3607276.06 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:15:03,056 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=148965.33333333334, ans=0.125
+2024-08-25 23:15:13,753 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=148965.33333333334, ans=0.125
+2024-08-25 23:15:42,261 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.692e+02 2.049e+02 2.499e+02 4.022e+02, threshold=4.098e+02, percent-clipped=0.0
+2024-08-25 23:15:58,290 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=149072.0, ans=0.125
+2024-08-25 23:16:00,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=149072.0, ans=0.125
+2024-08-25 23:16:06,873 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.36 vs. limit=15.0
+2024-08-25 23:16:26,614 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=149125.33333333334, ans=0.125
+2024-08-25 23:16:38,580 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:16:54,713 INFO [train.py:1114] (3/4) Epoch 12, batch 600, loss[loss=0.266, simple_loss=0.3128, pruned_loss=0.08027, ctc_loss=0.1465, over 19326.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.2885, pruned_loss=0.06273, ctc_loss=0.1178, over 3665697.83 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:17:21,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=149285.33333333334, ans=0.1
+2024-08-25 23:18:47,511 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=3.930e-01
+2024-08-25 23:19:03,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=149392.0, ans=0.0
+2024-08-25 23:19:09,712 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=149445.33333333334, ans=0.125
+2024-08-25 23:19:16,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=149445.33333333334, ans=0.1
+2024-08-25 23:19:22,622 INFO [train.py:1114] (3/4) Epoch 12, batch 650, loss[loss=0.2331, simple_loss=0.2925, pruned_loss=0.06241, ctc_loss=0.1223, over 19780.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2875, pruned_loss=0.06211, ctc_loss=0.1168, over 3716004.42 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:19:33,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149498.66666666666, ans=0.1
+2024-08-25 23:19:43,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=149552.0, ans=0.125
+2024-08-25 23:19:47,560 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=149552.0, ans=0.125
+2024-08-25 23:19:48,488 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.911e+02 2.346e+02 2.911e+02 5.072e+02, threshold=4.691e+02, percent-clipped=6.0
+2024-08-25 23:19:54,548 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:20:41,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=149712.0, ans=0.07
+2024-08-25 23:20:49,402 INFO [train.py:1114] (3/4) Epoch 12, batch 700, loss[loss=0.2191, simple_loss=0.277, pruned_loss=0.05893, ctc_loss=0.1085, over 19722.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.2876, pruned_loss=0.06196, ctc_loss=0.1165, over 3748205.91 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:20:59,949 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=149765.33333333334, ans=0.1
+2024-08-25 23:21:09,278 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=149818.66666666666, ans=0.1
+2024-08-25 23:21:43,662 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=149978.66666666666, ans=0.125
+2024-08-25 23:21:47,357 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.56 vs. limit=15.0
+2024-08-25 23:21:49,543 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.55 vs. limit=6.0
+2024-08-25 23:21:51,356 INFO [train.py:1114] (3/4) Epoch 12, batch 750, loss[loss=0.2367, simple_loss=0.2936, pruned_loss=0.06499, ctc_loss=0.1246, over 19494.00 frames. ], tot_loss[loss=0.23, simple_loss=0.288, pruned_loss=0.0625, ctc_loss=0.1176, over 3772565.94 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:21:51,660 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=150032.0, ans=0.04949747468305833
+2024-08-25 23:22:09,733 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=150032.0, ans=0.0
+2024-08-25 23:22:14,643 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:22:17,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=150085.33333333334, ans=0.1
+2024-08-25 23:22:20,744 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.992e+02 2.563e+02 3.460e+02 5.252e+02, threshold=5.125e+02, percent-clipped=3.0
+2024-08-25 23:22:38,245 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=150138.66666666666, ans=0.125
+2024-08-25 23:22:40,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=150138.66666666666, ans=0.2
+2024-08-25 23:23:10,621 INFO [train.py:1114] (3/4) Epoch 12, batch 800, loss[loss=0.1996, simple_loss=0.2636, pruned_loss=0.05021, ctc_loss=0.08812, over 19402.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2882, pruned_loss=0.06254, ctc_loss=0.1174, over 3793435.95 frames. ], batch size: 48, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:23:58,465 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.16 vs. limit=15.0
+2024-08-25 23:24:07,591 INFO [train.py:1114] (3/4) Epoch 12, batch 850, loss[loss=0.2332, simple_loss=0.2956, pruned_loss=0.06074, ctc_loss=0.1231, over 19661.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2878, pruned_loss=0.06231, ctc_loss=0.1171, over 3813485.28 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 32.0
+2024-08-25 23:24:07,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=150565.33333333334, ans=0.0
+2024-08-25 23:24:07,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=150565.33333333334, ans=0.125
+2024-08-25 23:24:22,515 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=150618.66666666666, ans=0.0
+2024-08-25 23:24:30,647 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.376e+02 1.732e+02 2.149e+02 2.756e+02 4.869e+02, threshold=4.297e+02, percent-clipped=0.0
+2024-08-25 23:24:31,415 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.29 vs. limit=22.5
+2024-08-25 23:24:32,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=150618.66666666666, ans=0.2
+2024-08-25 23:24:50,077 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=150672.0, ans=0.0
+2024-08-25 23:24:51,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=150672.0, ans=0.125
+2024-08-25 23:25:06,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=150725.33333333334, ans=0.125
+2024-08-25 23:25:23,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=150778.66666666666, ans=0.1
+2024-08-25 23:25:39,233 INFO [train.py:1114] (3/4) Epoch 12, batch 900, loss[loss=0.2076, simple_loss=0.2677, pruned_loss=0.05272, ctc_loss=0.1054, over 19783.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.2883, pruned_loss=0.06269, ctc_loss=0.1177, over 3817612.49 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:25:43,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=150832.0, ans=0.0
+2024-08-25 23:25:50,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=150832.0, ans=0.0
+2024-08-25 23:26:04,277 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.24 vs. limit=22.5
+2024-08-25 23:26:18,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=150938.66666666666, ans=0.125
+2024-08-25 23:26:35,600 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=150992.0, ans=0.0
+2024-08-25 23:26:55,986 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=151045.33333333334, ans=0.125
+2024-08-25 23:27:21,992 INFO [train.py:1114] (3/4) Epoch 12, batch 950, loss[loss=0.2108, simple_loss=0.2654, pruned_loss=0.05731, ctc_loss=0.1039, over 19499.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2891, pruned_loss=0.06309, ctc_loss=0.1186, over 3819388.05 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:27:45,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=151152.0, ans=0.0
+2024-08-25 23:27:47,801 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 1.727e+02 2.047e+02 2.468e+02 3.873e+02, threshold=4.093e+02, percent-clipped=0.0
+2024-08-25 23:28:21,832 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=151205.33333333334, ans=0.0
+2024-08-25 23:28:55,953 INFO [train.py:1114] (3/4) Epoch 12, batch 1000, loss[loss=0.2264, simple_loss=0.2855, pruned_loss=0.061, ctc_loss=0.1134, over 19860.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.2901, pruned_loss=0.06385, ctc_loss=0.1201, over 3815895.59 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:30:02,187 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=151365.33333333334, ans=0.125
+2024-08-25 23:30:07,744 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.83 vs. limit=15.0
+2024-08-25 23:30:12,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=151418.66666666666, ans=0.1
+2024-08-25 23:30:43,329 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.24 vs. limit=15.0
+2024-08-25 23:30:46,363 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.22 vs. limit=22.5
+2024-08-25 23:30:49,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=151578.66666666666, ans=0.025
+2024-08-25 23:30:55,445 INFO [train.py:1114] (3/4) Epoch 12, batch 1050, loss[loss=0.2292, simple_loss=0.2869, pruned_loss=0.06279, ctc_loss=0.115, over 19840.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.2889, pruned_loss=0.06312, ctc_loss=0.1187, over 3822208.51 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:31:14,267 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.748e+02 2.222e+02 2.883e+02 4.562e+02, threshold=4.445e+02, percent-clipped=3.0
+2024-08-25 23:31:15,849 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=151685.33333333334, ans=0.025
+2024-08-25 23:31:18,291 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.15 vs. limit=22.5
+2024-08-25 23:31:24,962 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=151738.66666666666, ans=0.0
+2024-08-25 23:31:39,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=151792.0, ans=0.125
+2024-08-25 23:31:46,089 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.87 vs. limit=15.0
+2024-08-25 23:32:14,302 INFO [train.py:1114] (3/4) Epoch 12, batch 1100, loss[loss=0.2477, simple_loss=0.3005, pruned_loss=0.07169, ctc_loss=0.129, over 19596.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.2883, pruned_loss=0.06281, ctc_loss=0.1183, over 3831298.08 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:32:27,347 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=151898.66666666666, ans=0.0
+2024-08-25 23:32:37,909 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.88 vs. limit=15.0
+2024-08-25 23:32:48,529 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=152005.33333333334, ans=0.125
+2024-08-25 23:32:49,552 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=152005.33333333334, ans=0.125
+2024-08-25 23:32:54,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=152058.66666666666, ans=0.0
+2024-08-25 23:32:58,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=152058.66666666666, ans=0.125
+2024-08-25 23:33:01,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=152058.66666666666, ans=0.025
+2024-08-25 23:33:05,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=152058.66666666666, ans=0.125
+2024-08-25 23:33:21,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=152112.0, ans=0.125
+2024-08-25 23:33:32,438 INFO [train.py:1114] (3/4) Epoch 12, batch 1150, loss[loss=0.2419, simple_loss=0.2967, pruned_loss=0.06791, ctc_loss=0.1281, over 19574.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.2885, pruned_loss=0.06296, ctc_loss=0.1185, over 3830196.69 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:33:32,572 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=152165.33333333334, ans=0.0
+2024-08-25 23:34:03,974 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=152218.66666666666, ans=0.125
+2024-08-25 23:34:07,239 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.763e+02 2.002e+02 2.335e+02 5.298e+02, threshold=4.005e+02, percent-clipped=1.0
+2024-08-25 23:34:33,739 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=152325.33333333334, ans=0.2
+2024-08-25 23:34:56,955 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=152378.66666666666, ans=0.1
+2024-08-25 23:34:59,036 INFO [train.py:1114] (3/4) Epoch 12, batch 1200, loss[loss=0.253, simple_loss=0.3011, pruned_loss=0.07422, ctc_loss=0.1412, over 19844.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2895, pruned_loss=0.0633, ctc_loss=0.1195, over 3825521.01 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:35:25,611 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.89 vs. limit=15.0
+2024-08-25 23:35:44,336 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.87 vs. limit=15.0
+2024-08-25 23:35:45,449 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.80 vs. limit=15.0
+2024-08-25 23:36:08,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=152698.66666666666, ans=0.1
+2024-08-25 23:36:09,955 INFO [train.py:1114] (3/4) Epoch 12, batch 1250, loss[loss=0.2408, simple_loss=0.2922, pruned_loss=0.06905, ctc_loss=0.1281, over 19521.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.2892, pruned_loss=0.0629, ctc_loss=0.1185, over 3843627.66 frames. ], batch size: 61, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:36:10,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=152698.66666666666, ans=0.125
+2024-08-25 23:36:10,523 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.99 vs. limit=15.0
+2024-08-25 23:36:11,629 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.74 vs. limit=15.0
+2024-08-25 23:36:34,026 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.907e+02 2.265e+02 2.785e+02 4.753e+02, threshold=4.530e+02, percent-clipped=2.0
+2024-08-25 23:36:51,583 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=152805.33333333334, ans=0.125
+2024-08-25 23:36:57,458 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.03 vs. limit=15.0
+2024-08-25 23:37:18,927 INFO [train.py:1114] (3/4) Epoch 12, batch 1300, loss[loss=0.2246, simple_loss=0.2921, pruned_loss=0.05682, ctc_loss=0.1089, over 18845.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2878, pruned_loss=0.06198, ctc_loss=0.1167, over 3848769.64 frames. ], batch size: 76, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:37:30,004 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.18 vs. limit=12.0
+2024-08-25 23:37:48,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=153072.0, ans=0.04949747468305833
+2024-08-25 23:37:51,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=153072.0, ans=0.125
+2024-08-25 23:38:28,987 INFO [train.py:1114] (3/4) Epoch 12, batch 1350, loss[loss=0.2477, simple_loss=0.2993, pruned_loss=0.07157, ctc_loss=0.1326, over 19765.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.2878, pruned_loss=0.06206, ctc_loss=0.1167, over 3858914.98 frames. ], batch size: 54, lr: 1.25e-02, grad_scale: 32.0
+2024-08-25 23:38:38,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=153232.0, ans=0.0
+2024-08-25 23:38:46,285 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.707e+02 2.039e+02 2.408e+02 4.402e+02, threshold=4.078e+02, percent-clipped=0.0
+2024-08-25 23:38:46,478 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=153285.33333333334, ans=0.125
+2024-08-25 23:39:18,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=153392.0, ans=0.0
+2024-08-25 23:39:20,199 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.01 vs. limit=15.0
+2024-08-25 23:39:32,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=153445.33333333334, ans=0.125
+2024-08-25 23:39:43,073 INFO [train.py:1114] (3/4) Epoch 12, batch 1400, loss[loss=0.1911, simple_loss=0.2496, pruned_loss=0.04732, ctc_loss=0.09509, over 19697.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2878, pruned_loss=0.0622, ctc_loss=0.1171, over 3865222.20 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:39:45,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=153498.66666666666, ans=0.125
+2024-08-25 23:40:18,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=153605.33333333334, ans=0.0
+2024-08-25 23:40:28,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=153658.66666666666, ans=0.2
+2024-08-25 23:41:07,357 INFO [train.py:1114] (3/4) Epoch 12, batch 1450, loss[loss=0.2603, simple_loss=0.3218, pruned_loss=0.07253, ctc_loss=0.1341, over 19683.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2885, pruned_loss=0.06231, ctc_loss=0.1175, over 3862585.95 frames. ], batch size: 63, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:41:15,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=153765.33333333334, ans=0.125
+2024-08-25 23:41:15,643 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.34 vs. limit=15.0
+2024-08-25 23:41:26,391 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.30 vs. limit=10.0
+2024-08-25 23:41:27,990 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 1.773e+02 2.135e+02 2.639e+02 4.435e+02, threshold=4.270e+02, percent-clipped=2.0
+2024-08-25 23:41:57,552 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=153872.0, ans=0.0
+2024-08-25 23:42:42,070 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=154032.0, ans=0.2
+2024-08-25 23:42:43,053 INFO [train.py:1114] (3/4) Epoch 12, batch 1500, loss[loss=0.25, simple_loss=0.3048, pruned_loss=0.07104, ctc_loss=0.1326, over 19599.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2884, pruned_loss=0.06206, ctc_loss=0.117, over 3863490.21 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:42:50,285 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=154032.0, ans=0.125
+2024-08-25 23:42:50,854 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=154032.0, ans=22.5
+2024-08-25 23:43:21,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=154138.66666666666, ans=0.125
+2024-08-25 23:43:23,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=154138.66666666666, ans=0.125
+2024-08-25 23:43:50,031 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.12 vs. limit=6.0
+2024-08-25 23:44:04,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=154245.33333333334, ans=0.0
+2024-08-25 23:44:06,093 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.17 vs. limit=22.5
+2024-08-25 23:44:09,914 INFO [train.py:1114] (3/4) Epoch 12, batch 1550, loss[loss=0.2431, simple_loss=0.2959, pruned_loss=0.06895, ctc_loss=0.131, over 19622.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.289, pruned_loss=0.06276, ctc_loss=0.1182, over 3847926.80 frames. ], batch size: 60, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:44:16,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=154298.66666666666, ans=0.125
+2024-08-25 23:44:21,652 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=154352.0, ans=0.0
+2024-08-25 23:44:43,862 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.860e+02 2.194e+02 2.828e+02 4.590e+02, threshold=4.388e+02, percent-clipped=1.0
+2024-08-25 23:45:04,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=154405.33333333334, ans=0.1
+2024-08-25 23:46:25,364 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.18 vs. limit=12.0
+2024-08-25 23:46:27,421 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=154512.0, ans=0.125
+2024-08-25 23:46:32,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=154512.0, ans=0.125
+2024-08-25 23:46:36,443 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=154565.33333333334, ans=0.125
+2024-08-25 23:46:37,539 INFO [train.py:1114] (3/4) Epoch 12, batch 1600, loss[loss=0.2247, simple_loss=0.2882, pruned_loss=0.05838, ctc_loss=0.1108, over 19843.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.289, pruned_loss=0.06288, ctc_loss=0.1184, over 3836593.08 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:46:58,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=154565.33333333334, ans=0.125
+2024-08-25 23:47:14,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=154565.33333333334, ans=0.05
+2024-08-25 23:47:15,058 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.74 vs. limit=15.0
+2024-08-25 23:47:45,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=154672.0, ans=0.125
+2024-08-25 23:48:12,969 INFO [train.py:1114] (3/4) Epoch 12, batch 1650, loss[loss=0.2274, simple_loss=0.2961, pruned_loss=0.05808, ctc_loss=0.1062, over 19664.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2888, pruned_loss=0.06303, ctc_loss=0.1186, over 3832756.55 frames. ], batch size: 59, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:48:17,981 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=154832.0, ans=0.2
+2024-08-25 23:48:20,245 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 23:48:22,527 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=154832.0, ans=0.1
+2024-08-25 23:48:32,978 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.751e+02 2.060e+02 2.481e+02 4.497e+02, threshold=4.120e+02, percent-clipped=1.0
+2024-08-25 23:48:35,721 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=8.901e-02
+2024-08-25 23:48:42,836 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=154938.66666666666, ans=0.0
+2024-08-25 23:48:52,815 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=154938.66666666666, ans=0.125
+2024-08-25 23:48:52,896 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=154938.66666666666, ans=0.0
+2024-08-25 23:49:09,643 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.07 vs. limit=15.0
+2024-08-25 23:49:11,080 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.67 vs. limit=15.0
+2024-08-25 23:49:19,229 INFO [train.py:1114] (3/4) Epoch 12, batch 1700, loss[loss=0.2117, simple_loss=0.2582, pruned_loss=0.06007, ctc_loss=0.1125, over 19666.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.2887, pruned_loss=0.06278, ctc_loss=0.1181, over 3847160.88 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:49:46,226 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=155205.33333333334, ans=0.0
+2024-08-25 23:50:23,076 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=155258.66666666666, ans=0.2
+2024-08-25 23:50:24,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=155312.0, ans=0.125
+2024-08-25 23:50:36,445 INFO [train.py:1114] (3/4) Epoch 12, batch 1750, loss[loss=0.218, simple_loss=0.2649, pruned_loss=0.0628, ctc_loss=0.1139, over 19645.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2882, pruned_loss=0.0624, ctc_loss=0.1174, over 3851893.16 frames. ], batch size: 45, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:50:59,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=155365.33333333334, ans=0.0
+2024-08-25 23:51:08,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=155418.66666666666, ans=0.0
+2024-08-25 23:51:12,443 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.691e+02 1.944e+02 2.310e+02 4.068e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-25 23:51:19,490 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=155472.0, ans=0.2
+2024-08-25 23:51:29,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=155472.0, ans=0.125
+2024-08-25 23:51:45,961 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=155525.33333333334, ans=0.125
+2024-08-25 23:51:56,045 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.68 vs. limit=15.0
+2024-08-25 23:52:03,817 INFO [train.py:1114] (3/4) Epoch 12, batch 1800, loss[loss=0.2132, simple_loss=0.2808, pruned_loss=0.05324, ctc_loss=0.09782, over 19599.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.2882, pruned_loss=0.06253, ctc_loss=0.1176, over 3853702.72 frames. ], batch size: 55, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:52:08,714 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=155632.0, ans=0.125
+2024-08-25 23:52:42,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=155685.33333333334, ans=0.0
+2024-08-25 23:52:43,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=155685.33333333334, ans=0.0
+2024-08-25 23:53:22,070 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.16 vs. limit=15.0
+2024-08-25 23:54:05,050 INFO [train.py:1114] (3/4) Epoch 12, batch 1850, loss[loss=0.2333, simple_loss=0.2973, pruned_loss=0.06151, ctc_loss=0.1155, over 19581.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.2879, pruned_loss=0.06205, ctc_loss=0.1169, over 3855951.22 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-25 23:54:39,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=155952.0, ans=0.125
+2024-08-25 23:54:44,939 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 1.785e+02 2.050e+02 2.712e+02 4.249e+02, threshold=4.100e+02, percent-clipped=1.0
+2024-08-25 23:55:03,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=156005.33333333334, ans=0.5
+2024-08-25 23:55:16,178 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.00 vs. limit=12.0
+2024-08-25 23:55:26,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=156058.66666666666, ans=0.5
+2024-08-25 23:55:38,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=156112.0, ans=0.5
+2024-08-25 23:56:02,279 INFO [train.py:1114] (3/4) Epoch 12, batch 1900, loss[loss=0.2259, simple_loss=0.298, pruned_loss=0.05654, ctc_loss=0.1015, over 19656.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.2878, pruned_loss=0.06178, ctc_loss=0.1162, over 3860707.06 frames. ], batch size: 59, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:56:02,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=156165.33333333334, ans=0.1
+2024-08-25 23:56:30,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=156218.66666666666, ans=0.0
+2024-08-25 23:56:52,095 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.17 vs. limit=22.5
+2024-08-25 23:57:05,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=156325.33333333334, ans=0.125
+2024-08-25 23:57:30,410 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=156325.33333333334, ans=0.125
+2024-08-25 23:57:35,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=156378.66666666666, ans=0.125
+2024-08-25 23:58:03,005 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=15.0
+2024-08-25 23:58:05,824 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=156378.66666666666, ans=0.0
+2024-08-25 23:58:28,733 INFO [train.py:1114] (3/4) Epoch 12, batch 1950, loss[loss=0.1953, simple_loss=0.2725, pruned_loss=0.04309, ctc_loss=0.08007, over 19587.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.2881, pruned_loss=0.06119, ctc_loss=0.115, over 3869202.79 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-25 23:58:54,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=156432.0, ans=0.04949747468305833
+2024-08-25 23:59:03,809 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 1.700e+02 2.031e+02 2.417e+02 3.778e+02, threshold=4.063e+02, percent-clipped=0.0
+2024-08-25 23:59:37,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=156645.33333333334, ans=0.1
+2024-08-25 23:59:51,574 INFO [train.py:1114] (3/4) Epoch 12, batch 2000, loss[loss=0.2117, simple_loss=0.259, pruned_loss=0.06005, ctc_loss=0.1107, over 19645.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.2884, pruned_loss=0.06117, ctc_loss=0.1151, over 3855475.62 frames. ], batch size: 45, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:00:07,387 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.42 vs. limit=15.0
+2024-08-26 00:00:30,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=156805.33333333334, ans=0.125
+2024-08-26 00:00:37,444 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=156858.66666666666, ans=0.125
+2024-08-26 00:01:02,651 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.80 vs. limit=22.5
+2024-08-26 00:01:08,903 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=156912.0, ans=0.2
+2024-08-26 00:01:22,788 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.41 vs. limit=12.0
+2024-08-26 00:01:26,012 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.06 vs. limit=15.0
+2024-08-26 00:01:27,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=156965.33333333334, ans=0.125
+2024-08-26 00:01:28,934 INFO [train.py:1114] (3/4) Epoch 12, batch 2050, loss[loss=0.2013, simple_loss=0.2637, pruned_loss=0.0497, ctc_loss=0.0987, over 19724.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.2881, pruned_loss=0.06144, ctc_loss=0.1155, over 3851501.54 frames. ], batch size: 47, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:01:39,000 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=157018.66666666666, ans=0.0
+2024-08-26 00:01:46,854 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.778e+02 1.977e+02 2.412e+02 4.440e+02, threshold=3.953e+02, percent-clipped=1.0
+2024-08-26 00:01:51,653 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=157072.0, ans=0.0
+2024-08-26 00:02:19,105 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=157072.0, ans=0.125
+2024-08-26 00:03:00,089 INFO [train.py:1114] (3/4) Epoch 12, batch 2100, loss[loss=0.2328, simple_loss=0.2913, pruned_loss=0.06313, ctc_loss=0.1201, over 19761.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2876, pruned_loss=0.06125, ctc_loss=0.1153, over 3858296.91 frames. ], batch size: 54, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 00:36:32,849 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=157392.0, ans=0.125
+2024-08-26 00:40:03,633 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=2.649e-03
+2024-08-26 00:49:44,343 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.64 vs. limit=15.0
+2024-08-26 00:52:11,175 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=157445.33333333334, ans=0.125
+2024-08-26 00:56:02,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=157498.66666666666, ans=0.125
+2024-08-26 00:56:07,938 INFO [train.py:1114] (3/4) Epoch 12, batch 2150, loss[loss=0.2237, simple_loss=0.2765, pruned_loss=0.06315, ctc_loss=0.1115, over 19848.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.287, pruned_loss=0.06116, ctc_loss=0.1151, over 3869541.67 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 32.0
+2024-08-26 01:08:00,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=157552.0, ans=0.125
+2024-08-26 01:09:53,315 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.788e+02 2.174e+02 2.705e+02 6.148e+02, threshold=4.348e+02, percent-clipped=11.0
+2024-08-26 01:34:52,243 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=157712.0, ans=0.125
+2024-08-26 01:37:28,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=157765.33333333334, ans=0.0
+2024-08-26 01:37:35,690 INFO [train.py:1114] (3/4) Epoch 12, batch 2200, loss[loss=0.2226, simple_loss=0.2926, pruned_loss=0.05495, ctc_loss=0.1067, over 19575.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.2869, pruned_loss=0.06104, ctc_loss=0.115, over 3867635.59 frames. ], batch size: 57, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 01:39:25,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=157765.33333333334, ans=0.125
+2024-08-26 01:39:25,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=157765.33333333334, ans=0.0
+2024-08-26 01:39:40,911 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=157765.33333333334, ans=0.0
+2024-08-26 01:43:04,815 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=157818.66666666666, ans=0.1
+2024-08-26 01:43:19,487 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157818.66666666666, ans=0.1
+2024-08-26 01:44:08,917 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=157818.66666666666, ans=0.0
+2024-08-26 01:49:04,964 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=157872.0, ans=0.04949747468305833
+2024-08-26 01:50:17,654 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=157925.33333333334, ans=0.1
+2024-08-26 01:53:53,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=157925.33333333334, ans=0.125
+2024-08-26 01:57:29,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=158032.0, ans=0.125
+2024-08-26 01:57:30,320 INFO [train.py:1114] (3/4) Epoch 12, batch 2250, loss[loss=0.2146, simple_loss=0.283, pruned_loss=0.05217, ctc_loss=0.1045, over 19635.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.287, pruned_loss=0.06104, ctc_loss=0.1149, over 3868137.32 frames. ], batch size: 55, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:02:32,847 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.78 vs. limit=15.0
+2024-08-26 02:03:20,022 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.61 vs. limit=22.5
+2024-08-26 02:03:57,656 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:04:28,479 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.839e+02 2.199e+02 2.577e+02 6.358e+02, threshold=4.399e+02, percent-clipped=1.0
+2024-08-26 02:05:36,604 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158138.66666666666, ans=0.1
+2024-08-26 02:07:31,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=158138.66666666666, ans=0.0
+2024-08-26 02:10:34,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=158245.33333333334, ans=0.125
+2024-08-26 02:10:34,625 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten.whitening_limit, batch_count=158245.33333333334, ans=15.0
+2024-08-26 02:13:21,298 INFO [train.py:1114] (3/4) Epoch 12, batch 2300, loss[loss=0.2083, simple_loss=0.2694, pruned_loss=0.05339, ctc_loss=0.1009, over 19507.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2865, pruned_loss=0.06136, ctc_loss=0.1155, over 3861735.12 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:14:01,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=158298.66666666666, ans=0.0
+2024-08-26 02:14:01,848 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=158298.66666666666, ans=0.2
+2024-08-26 02:14:59,461 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=158352.0, ans=0.125
+2024-08-26 02:15:00,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=158352.0, ans=0.2
+2024-08-26 02:16:17,994 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=158405.33333333334, ans=0.0
+2024-08-26 02:20:01,813 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=158458.66666666666, ans=0.025
+2024-08-26 02:22:39,623 INFO [train.py:1114] (3/4) Epoch 12, batch 2350, loss[loss=0.2548, simple_loss=0.3095, pruned_loss=0.07378, ctc_loss=0.1313, over 19675.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.2867, pruned_loss=0.06175, ctc_loss=0.1161, over 3864136.16 frames. ], batch size: 63, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 02:23:23,533 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=158565.33333333334, ans=0.0
+2024-08-26 02:23:52,665 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.74 vs. limit=15.0
+2024-08-26 02:25:18,441 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.991e+02 2.536e+02 3.183e+02 5.552e+02, threshold=5.072e+02, percent-clipped=5.0
+2024-08-26 02:25:33,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=158672.0, ans=0.2
+2024-08-26 02:25:39,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=158672.0, ans=0.2
+2024-08-26 02:27:03,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=158725.33333333334, ans=0.0
+2024-08-26 02:28:23,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=158778.66666666666, ans=0.125
+2024-08-26 02:30:54,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=158778.66666666666, ans=0.0
+2024-08-26 02:30:58,339 INFO [train.py:1114] (3/4) Epoch 12, batch 2400, loss[loss=0.2266, simple_loss=0.2923, pruned_loss=0.05776, ctc_loss=0.1136, over 19333.00 frames. ], tot_loss[loss=0.231, simple_loss=0.2894, pruned_loss=0.06276, ctc_loss=0.1177, over 3857334.42 frames. ], batch size: 71, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:31:10,564 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=158832.0, ans=0.125
+2024-08-26 02:31:12,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=158832.0, ans=0.0
+2024-08-26 02:31:28,962 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:35:11,956 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.23 vs. limit=15.0
+2024-08-26 02:37:05,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=158992.0, ans=0.07
+2024-08-26 02:38:19,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=159045.33333333334, ans=0.125
+2024-08-26 02:38:22,380 INFO [train.py:1114] (3/4) Epoch 12, batch 2450, loss[loss=0.3018, simple_loss=0.3308, pruned_loss=0.09814, ctc_loss=0.1913, over 13367.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.2936, pruned_loss=0.06576, ctc_loss=0.1237, over 3729493.54 frames. ], batch size: 140, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 02:38:43,298 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=159098.66666666666, ans=0.1
+2024-08-26 02:39:05,207 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=159098.66666666666, ans=0.125
+2024-08-26 02:39:12,476 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=159152.0, ans=0.125
+2024-08-26 02:39:42,305 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.859e+02 2.162e+02 2.447e+02 4.124e+02, threshold=4.324e+02, percent-clipped=0.0
+2024-08-26 02:40:40,799 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.08 vs. limit=15.0
+2024-08-26 02:40:53,155 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.35 vs. limit=15.0
+2024-08-26 02:43:45,616 INFO [train.py:1114] (3/4) Epoch 13, batch 0, loss[loss=0.2296, simple_loss=0.2769, pruned_loss=0.06711, ctc_loss=0.1201, over 19823.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2769, pruned_loss=0.06711, ctc_loss=0.1201, over 19823.00 frames. ], batch size: 49, lr: 1.18e-02, grad_scale: 32.0
+2024-08-26 02:43:45,617 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-26 02:45:27,903 INFO [train.py:1146] (3/4) Epoch 13, validation: loss=0.1972, simple_loss=0.2835, pruned_loss=0.04113, ctc_loss=0.07151, over 944034.00 frames.
+2024-08-26 02:45:27,904 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-26 02:45:31,381 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=159306.66666666666, ans=0.0
+2024-08-26 02:45:54,855 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.77 vs. limit=22.5
+2024-08-26 02:46:14,736 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=159413.33333333334, ans=0.2
+2024-08-26 02:46:22,480 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=159413.33333333334, ans=0.04949747468305833
+2024-08-26 02:48:06,363 INFO [train.py:1114] (3/4) Epoch 13, batch 50, loss[loss=0.2081, simple_loss=0.2665, pruned_loss=0.05372, ctc_loss=0.1059, over 19731.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2899, pruned_loss=0.06301, ctc_loss=0.1185, over 844332.57 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:48:06,614 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=159573.33333333334, ans=0.2
+2024-08-26 02:48:55,465 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.822e+02 2.122e+02 2.766e+02 5.339e+02, threshold=4.244e+02, percent-clipped=3.0
+2024-08-26 02:48:59,636 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.92 vs. limit=15.0
+2024-08-26 02:49:20,891 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.10 vs. limit=22.5
+2024-08-26 02:49:26,907 INFO [train.py:1114] (3/4) Epoch 13, batch 100, loss[loss=0.24, simple_loss=0.2925, pruned_loss=0.06859, ctc_loss=0.126, over 19736.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2908, pruned_loss=0.06268, ctc_loss=0.118, over 1499336.38 frames. ], batch size: 51, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:49:29,984 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=159840.0, ans=0.125
+2024-08-26 02:49:31,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=159840.0, ans=0.125
+2024-08-26 02:49:51,628 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.21 vs. limit=15.0
+2024-08-26 02:50:20,151 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 02:50:22,395 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=159946.66666666666, ans=0.125
+2024-08-26 02:50:40,661 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=160000.0, ans=0.025
+2024-08-26 02:50:46,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160000.0, ans=0.1
+2024-08-26 02:51:02,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 02:51:27,229 INFO [train.py:1114] (3/4) Epoch 13, batch 150, loss[loss=0.2312, simple_loss=0.2744, pruned_loss=0.06927, ctc_loss=0.1234, over 19721.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.2871, pruned_loss=0.06053, ctc_loss=0.1137, over 2028110.84 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:51:45,289 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=160106.66666666666, ans=0.07
+2024-08-26 02:52:36,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=160213.33333333334, ans=0.125
+2024-08-26 02:52:48,526 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.693e+02 1.889e+02 2.276e+02 3.515e+02, threshold=3.778e+02, percent-clipped=0.0
+2024-08-26 02:53:36,263 INFO [train.py:1114] (3/4) Epoch 13, batch 200, loss[loss=0.2952, simple_loss=0.3317, pruned_loss=0.09343, ctc_loss=0.1798, over 18464.00 frames. ], tot_loss[loss=0.227, simple_loss=0.2865, pruned_loss=0.06088, ctc_loss=0.1146, over 2435881.61 frames. ], batch size: 85, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:53:38,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=160373.33333333334, ans=0.125
+2024-08-26 02:53:46,158 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=160373.33333333334, ans=0.125
+2024-08-26 02:54:11,454 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=160480.0, ans=0.035
+2024-08-26 02:54:27,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=160480.0, ans=0.0
+2024-08-26 02:54:51,649 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160533.33333333334, ans=0.1
+2024-08-26 02:55:15,708 INFO [train.py:1114] (3/4) Epoch 13, batch 250, loss[loss=0.2501, simple_loss=0.311, pruned_loss=0.06899, ctc_loss=0.128, over 19430.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.2873, pruned_loss=0.06137, ctc_loss=0.1156, over 2756746.88 frames. ], batch size: 67, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:55:24,254 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.98 vs. limit=6.0
+2024-08-26 02:55:34,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=160693.33333333334, ans=0.125
+2024-08-26 02:55:40,635 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.44 vs. limit=15.0
+2024-08-26 02:55:44,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=160746.66666666666, ans=0.125
+2024-08-26 02:55:45,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=160746.66666666666, ans=0.125
+2024-08-26 02:55:47,660 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.754e+02 2.188e+02 2.577e+02 4.403e+02, threshold=4.375e+02, percent-clipped=2.0
+2024-08-26 02:55:48,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=160800.0, ans=0.0
+2024-08-26 02:56:09,487 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.49 vs. limit=10.0
+2024-08-26 02:56:43,559 INFO [train.py:1114] (3/4) Epoch 13, batch 300, loss[loss=0.2306, simple_loss=0.2969, pruned_loss=0.0601, ctc_loss=0.1106, over 19508.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.2866, pruned_loss=0.0609, ctc_loss=0.1148, over 3001398.51 frames. ], batch size: 61, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:57:37,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=161120.0, ans=0.0
+2024-08-26 02:57:50,480 INFO [train.py:1114] (3/4) Epoch 13, batch 350, loss[loss=0.213, simple_loss=0.2684, pruned_loss=0.05757, ctc_loss=0.1063, over 19777.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2871, pruned_loss=0.06111, ctc_loss=0.1151, over 3190647.47 frames. ], batch size: 48, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 02:58:08,302 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.59 vs. limit=15.0
+2024-08-26 02:58:25,614 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.772e+02 2.039e+02 2.354e+02 3.759e+02, threshold=4.079e+02, percent-clipped=0.0
+2024-08-26 02:58:37,417 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=161333.33333333334, ans=0.125
+2024-08-26 02:59:24,148 INFO [train.py:1114] (3/4) Epoch 13, batch 400, loss[loss=0.2504, simple_loss=0.3053, pruned_loss=0.07128, ctc_loss=0.1325, over 19494.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.2871, pruned_loss=0.06131, ctc_loss=0.1152, over 3343038.56 frames. ], batch size: 54, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 02:59:29,051 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=161440.0, ans=0.125
+2024-08-26 02:59:52,485 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.58 vs. limit=10.0
+2024-08-26 02:59:54,634 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=161440.0, ans=0.0
+2024-08-26 03:00:56,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=161653.33333333334, ans=0.2
+2024-08-26 03:00:58,028 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=161653.33333333334, ans=0.1
+2024-08-26 03:01:25,139 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=161653.33333333334, ans=0.0
+2024-08-26 03:01:42,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=161706.66666666666, ans=0.1
+2024-08-26 03:01:53,833 INFO [train.py:1114] (3/4) Epoch 13, batch 450, loss[loss=0.242, simple_loss=0.3026, pruned_loss=0.06418, ctc_loss=0.1324, over 19622.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2872, pruned_loss=0.06115, ctc_loss=0.1148, over 3451375.65 frames. ], batch size: 55, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:02:13,996 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=161760.0, ans=0.125
+2024-08-26 03:03:10,112 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.726e+02 2.085e+02 2.754e+02 4.301e+02, threshold=4.170e+02, percent-clipped=3.0
+2024-08-26 03:03:51,746 INFO [train.py:1114] (3/4) Epoch 13, batch 500, loss[loss=0.2415, simple_loss=0.3005, pruned_loss=0.06648, ctc_loss=0.1237, over 19673.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.2861, pruned_loss=0.06085, ctc_loss=0.1143, over 3546446.50 frames. ], batch size: 63, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:04:15,958 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.20 vs. limit=15.0
+2024-08-26 03:04:38,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=162026.66666666666, ans=0.125
+2024-08-26 03:05:03,841 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=162080.0, ans=0.0
+2024-08-26 03:05:11,602 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff2.min_abs, batch_count=162080.0, ans=0.1
+2024-08-26 03:05:17,514 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.16 vs. limit=22.5
+2024-08-26 03:05:24,196 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=162133.33333333334, ans=0.0
+2024-08-26 03:05:47,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=162186.66666666666, ans=0.125
+2024-08-26 03:06:03,079 INFO [train.py:1114] (3/4) Epoch 13, batch 550, loss[loss=0.2227, simple_loss=0.2862, pruned_loss=0.0576, ctc_loss=0.1102, over 19375.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2861, pruned_loss=0.0608, ctc_loss=0.1142, over 3609598.00 frames. ], batch size: 71, lr: 1.17e-02, grad_scale: 32.0
+2024-08-26 03:06:12,547 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=162240.0, ans=0.125
+2024-08-26 03:06:14,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.96 vs. limit=15.0
+2024-08-26 03:06:19,086 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=162293.33333333334, ans=0.025
+2024-08-26 03:06:21,501 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=162293.33333333334, ans=0.125
+2024-08-26 03:06:37,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=162346.66666666666, ans=0.0
+2024-08-26 03:06:44,318 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=162346.66666666666, ans=0.0
+2024-08-26 03:06:47,056 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 1.758e+02 1.954e+02 2.485e+02 4.688e+02, threshold=3.908e+02, percent-clipped=2.0
+2024-08-26 03:07:11,086 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:07:24,266 INFO [train.py:1114] (3/4) Epoch 13, batch 600, loss[loss=0.2241, simple_loss=0.2972, pruned_loss=0.05455, ctc_loss=0.105, over 19364.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2855, pruned_loss=0.06005, ctc_loss=0.113, over 3666015.15 frames. ], batch size: 67, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:07:41,977 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.00 vs. limit=22.5
+2024-08-26 03:07:46,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=162560.0, ans=0.125
+2024-08-26 03:07:47,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=162560.0, ans=0.125
+2024-08-26 03:08:27,201 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=162666.66666666666, ans=0.0
+2024-08-26 03:09:12,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=162720.0, ans=0.125
+2024-08-26 03:09:12,707 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=162720.0, ans=0.0
+2024-08-26 03:09:14,966 INFO [train.py:1114] (3/4) Epoch 13, batch 650, loss[loss=0.2214, simple_loss=0.2806, pruned_loss=0.05899, ctc_loss=0.1104, over 19780.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2846, pruned_loss=0.05953, ctc_loss=0.112, over 3716724.57 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:09:29,729 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=162773.33333333334, ans=0.125
+2024-08-26 03:09:30,889 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=162826.66666666666, ans=0.125
+2024-08-26 03:09:41,619 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.91 vs. limit=8.0
+2024-08-26 03:09:44,285 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=162826.66666666666, ans=0.0
+2024-08-26 03:09:53,299 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.02 vs. limit=15.0
+2024-08-26 03:09:54,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162880.0, ans=0.1
+2024-08-26 03:09:58,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=162880.0, ans=0.125
+2024-08-26 03:10:08,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=162880.0, ans=0.1
+2024-08-26 03:10:09,864 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.755e+02 2.119e+02 2.960e+02 5.119e+02, threshold=4.237e+02, percent-clipped=6.0
+2024-08-26 03:10:24,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=162933.33333333334, ans=0.125
+2024-08-26 03:10:27,175 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=162986.66666666666, ans=0.125
+2024-08-26 03:10:30,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=162986.66666666666, ans=0.07
+2024-08-26 03:10:32,917 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=162986.66666666666, ans=0.0
+2024-08-26 03:10:39,733 INFO [train.py:1114] (3/4) Epoch 13, batch 700, loss[loss=0.2075, simple_loss=0.2691, pruned_loss=0.05257, ctc_loss=0.1019, over 19732.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.285, pruned_loss=0.05988, ctc_loss=0.1126, over 3748157.86 frames. ], batch size: 51, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:10:40,249 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.80 vs. limit=22.5
+2024-08-26 03:10:40,400 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.79 vs. limit=15.0
+2024-08-26 03:10:52,324 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=163093.33333333334, ans=0.125
+2024-08-26 03:10:56,583 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=163093.33333333334, ans=0.125
+2024-08-26 03:11:07,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=163146.66666666666, ans=0.125
+2024-08-26 03:11:27,431 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=163200.0, ans=0.2
+2024-08-26 03:11:50,236 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=163253.33333333334, ans=0.125
+2024-08-26 03:11:52,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=163253.33333333334, ans=0.125
+2024-08-26 03:12:00,798 INFO [train.py:1114] (3/4) Epoch 13, batch 750, loss[loss=0.2547, simple_loss=0.3048, pruned_loss=0.07325, ctc_loss=0.1451, over 19486.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2841, pruned_loss=0.05948, ctc_loss=0.1119, over 3774054.91 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:12:08,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=163306.66666666666, ans=0.1
+2024-08-26 03:12:16,020 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=163360.0, ans=0.2
+2024-08-26 03:12:41,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=163413.33333333334, ans=0.125
+2024-08-26 03:12:43,023 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.800e+02 2.310e+02 2.882e+02 4.749e+02, threshold=4.619e+02, percent-clipped=2.0
+2024-08-26 03:12:43,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=163413.33333333334, ans=0.125
+2024-08-26 03:12:44,549 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=163466.66666666666, ans=0.025
+2024-08-26 03:12:51,884 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.19 vs. limit=15.0
+2024-08-26 03:13:43,528 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=163520.0, ans=0.0
+2024-08-26 03:13:56,358 INFO [train.py:1114] (3/4) Epoch 13, batch 800, loss[loss=0.212, simple_loss=0.2647, pruned_loss=0.05762, ctc_loss=0.1103, over 19823.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2839, pruned_loss=0.05942, ctc_loss=0.1117, over 3795086.49 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:14:26,170 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.31 vs. limit=15.0
+2024-08-26 03:14:28,536 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.67 vs. limit=22.5
+2024-08-26 03:14:29,113 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=163680.0, ans=0.1
+2024-08-26 03:14:35,228 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.68 vs. limit=6.0
+2024-08-26 03:14:47,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=163733.33333333334, ans=0.125
+2024-08-26 03:15:00,339 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=163733.33333333334, ans=0.2
+2024-08-26 03:15:09,208 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=163786.66666666666, ans=0.2
+2024-08-26 03:15:13,888 INFO [train.py:1114] (3/4) Epoch 13, batch 850, loss[loss=0.2436, simple_loss=0.3068, pruned_loss=0.06527, ctc_loss=0.1246, over 19677.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2838, pruned_loss=0.05932, ctc_loss=0.1116, over 3813675.41 frames. ], batch size: 59, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:15:55,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=163893.33333333334, ans=0.0
+2024-08-26 03:15:58,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=163946.66666666666, ans=0.025
+2024-08-26 03:16:04,442 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.85 vs. limit=15.0
+2024-08-26 03:16:11,621 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.727e+02 1.948e+02 2.271e+02 3.773e+02, threshold=3.897e+02, percent-clipped=0.0
+2024-08-26 03:16:35,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=164053.33333333334, ans=0.125
+2024-08-26 03:16:39,635 INFO [train.py:1114] (3/4) Epoch 13, batch 900, loss[loss=0.2259, simple_loss=0.2823, pruned_loss=0.06184, ctc_loss=0.1144, over 19407.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.2845, pruned_loss=0.05959, ctc_loss=0.1122, over 3816310.23 frames. ], batch size: 48, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:16:42,520 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.39 vs. limit=15.0
+2024-08-26 03:16:52,662 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.39 vs. limit=15.0
+2024-08-26 03:17:39,435 INFO [train.py:1114] (3/4) Epoch 13, batch 950, loss[loss=0.2307, simple_loss=0.2829, pruned_loss=0.0648, ctc_loss=0.1222, over 19485.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.285, pruned_loss=0.05992, ctc_loss=0.1126, over 3817781.80 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:17:53,172 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:18:24,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=164426.66666666666, ans=0.0
+2024-08-26 03:18:38,813 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=164480.0, ans=0.125
+2024-08-26 03:18:52,283 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.763e+02 2.081e+02 2.549e+02 5.575e+02, threshold=4.162e+02, percent-clipped=2.0
+2024-08-26 03:18:55,383 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=164533.33333333334, ans=0.2
+2024-08-26 03:19:15,949 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=164586.66666666666, ans=0.0
+2024-08-26 03:19:29,889 INFO [train.py:1114] (3/4) Epoch 13, batch 1000, loss[loss=0.219, simple_loss=0.2812, pruned_loss=0.05664, ctc_loss=0.1089, over 19858.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2859, pruned_loss=0.06079, ctc_loss=0.1143, over 3813887.63 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:19:31,456 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=164640.0, ans=0.125
+2024-08-26 03:19:55,741 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.64 vs. limit=15.0
+2024-08-26 03:20:16,725 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=164800.0, ans=0.07
+2024-08-26 03:20:21,140 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.00 vs. limit=15.0
+2024-08-26 03:20:35,692 INFO [train.py:1114] (3/4) Epoch 13, batch 1050, loss[loss=0.2481, simple_loss=0.3028, pruned_loss=0.07082, ctc_loss=0.1292, over 19851.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.285, pruned_loss=0.0604, ctc_loss=0.1137, over 3821461.55 frames. ], batch size: 57, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:20:41,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=164906.66666666666, ans=0.125
+2024-08-26 03:20:41,065 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=164906.66666666666, ans=0.1
+2024-08-26 03:21:05,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.27 vs. limit=15.0
+2024-08-26 03:21:08,101 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.698e+02 1.997e+02 2.318e+02 3.616e+02, threshold=3.994e+02, percent-clipped=0.0
+2024-08-26 03:21:14,253 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 03:21:18,239 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=165066.66666666666, ans=0.0
+2024-08-26 03:21:32,855 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.37 vs. limit=10.0
+2024-08-26 03:21:37,051 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.15 vs. limit=10.0
+2024-08-26 03:21:40,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=165120.0, ans=0.125
+2024-08-26 03:21:42,318 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=165120.0, ans=0.125
+2024-08-26 03:21:44,513 INFO [train.py:1114] (3/4) Epoch 13, batch 1100, loss[loss=0.2617, simple_loss=0.3039, pruned_loss=0.07937, ctc_loss=0.1518, over 19582.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2855, pruned_loss=0.06058, ctc_loss=0.1142, over 3828495.58 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 03:21:54,291 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.04 vs. limit=15.0
+2024-08-26 03:21:57,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=165226.66666666666, ans=0.125
+2024-08-26 03:22:08,023 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=165226.66666666666, ans=0.1
+2024-08-26 03:22:12,549 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=165280.0, ans=0.0
+2024-08-26 03:22:21,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=165333.33333333334, ans=0.0
+2024-08-26 03:22:46,513 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=165386.66666666666, ans=0.025
+2024-08-26 03:22:54,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=165386.66666666666, ans=0.125
+2024-08-26 03:22:57,608 INFO [train.py:1114] (3/4) Epoch 13, batch 1150, loss[loss=0.238, simple_loss=0.3059, pruned_loss=0.06172, ctc_loss=0.1166, over 19573.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2858, pruned_loss=0.06088, ctc_loss=0.1146, over 3826488.39 frames. ], batch size: 52, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:23:10,127 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=165493.33333333334, ans=0.125
+2024-08-26 03:23:15,948 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=165493.33333333334, ans=0.125
+2024-08-26 03:23:38,672 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.729e+02 2.006e+02 2.456e+02 7.202e+02, threshold=4.012e+02, percent-clipped=3.0
+2024-08-26 03:23:42,122 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.31 vs. limit=22.5
+2024-08-26 03:23:44,666 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.80 vs. limit=15.0
+2024-08-26 03:23:45,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=165600.0, ans=0.025
+2024-08-26 03:23:47,691 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=165600.0, ans=0.0
+2024-08-26 03:23:51,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=165600.0, ans=0.125
+2024-08-26 03:24:03,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=165653.33333333334, ans=0.025
+2024-08-26 03:24:09,372 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.54 vs. limit=6.0
+2024-08-26 03:24:11,586 INFO [train.py:1114] (3/4) Epoch 13, batch 1200, loss[loss=0.2527, simple_loss=0.3101, pruned_loss=0.07018, ctc_loss=0.1372, over 19836.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.2871, pruned_loss=0.06122, ctc_loss=0.1152, over 3822233.55 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:24:45,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=165760.0, ans=0.0
+2024-08-26 03:24:53,219 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.73 vs. limit=15.0
+2024-08-26 03:24:53,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=165813.33333333334, ans=0.125
+2024-08-26 03:25:23,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=165866.66666666666, ans=0.125
+2024-08-26 03:25:25,918 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=165866.66666666666, ans=0.0
+2024-08-26 03:26:20,505 INFO [train.py:1114] (3/4) Epoch 13, batch 1250, loss[loss=0.2245, simple_loss=0.2958, pruned_loss=0.05548, ctc_loss=0.1055, over 19559.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.2872, pruned_loss=0.06082, ctc_loss=0.1145, over 3841072.53 frames. ], batch size: 61, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:26:28,727 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=165973.33333333334, ans=0.025
+2024-08-26 03:26:33,646 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.48 vs. limit=22.5
+2024-08-26 03:26:50,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=166026.66666666666, ans=0.0
+2024-08-26 03:26:58,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=166080.0, ans=0.0
+2024-08-26 03:27:23,462 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.311e+02 1.715e+02 1.869e+02 2.285e+02 3.930e+02, threshold=3.738e+02, percent-clipped=0.0
+2024-08-26 03:28:00,524 INFO [train.py:1114] (3/4) Epoch 13, batch 1300, loss[loss=0.2574, simple_loss=0.307, pruned_loss=0.07534, ctc_loss=0.1427, over 18839.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.2866, pruned_loss=0.06072, ctc_loss=0.1143, over 3844356.87 frames. ], batch size: 76, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:28:04,051 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=166240.0, ans=0.1
+2024-08-26 03:28:38,591 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=166293.33333333334, ans=0.0
+2024-08-26 03:29:19,962 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=166346.66666666666, ans=0.0
+2024-08-26 03:30:19,039 INFO [train.py:1114] (3/4) Epoch 13, batch 1350, loss[loss=0.215, simple_loss=0.2825, pruned_loss=0.05329, ctc_loss=0.1023, over 19755.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2865, pruned_loss=0.06053, ctc_loss=0.1139, over 3856048.39 frames. ], batch size: 54, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:30:29,038 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=7.185e-02
+2024-08-26 03:30:55,256 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.86 vs. limit=15.0
+2024-08-26 03:31:08,779 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.736e+02 2.053e+02 2.622e+02 5.263e+02, threshold=4.106e+02, percent-clipped=6.0
+2024-08-26 03:31:09,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=166613.33333333334, ans=0.125
+2024-08-26 03:31:20,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=166666.66666666666, ans=0.0
+2024-08-26 03:31:40,383 INFO [train.py:1114] (3/4) Epoch 13, batch 1400, loss[loss=0.1921, simple_loss=0.2527, pruned_loss=0.04755, ctc_loss=0.09107, over 19652.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2852, pruned_loss=0.06002, ctc_loss=0.1133, over 3862951.88 frames. ], batch size: 46, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:31:49,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=166773.33333333334, ans=0.125
+2024-08-26 03:31:49,701 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=166773.33333333334, ans=0.125
+2024-08-26 03:31:55,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=166773.33333333334, ans=0.125
+2024-08-26 03:32:14,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=166773.33333333334, ans=0.0
+2024-08-26 03:32:39,718 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=166880.0, ans=0.025
+2024-08-26 03:32:59,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=166933.33333333334, ans=0.0
+2024-08-26 03:33:20,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=167040.0, ans=0.2
+2024-08-26 03:33:21,481 INFO [train.py:1114] (3/4) Epoch 13, batch 1450, loss[loss=0.2239, simple_loss=0.2965, pruned_loss=0.05474, ctc_loss=0.1046, over 19659.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2861, pruned_loss=0.06038, ctc_loss=0.1139, over 3861419.28 frames. ], batch size: 63, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:33:59,234 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.756e+02 1.937e+02 2.380e+02 3.895e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-26 03:34:09,506 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=167200.0, ans=0.05
+2024-08-26 03:34:22,222 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=167253.33333333334, ans=0.2
+2024-08-26 03:34:23,194 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=167253.33333333334, ans=0.125
+2024-08-26 03:34:32,315 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.13 vs. limit=15.0
+2024-08-26 03:34:33,948 INFO [train.py:1114] (3/4) Epoch 13, batch 1500, loss[loss=0.2601, simple_loss=0.3139, pruned_loss=0.0758, ctc_loss=0.1368, over 19575.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2859, pruned_loss=0.05986, ctc_loss=0.1128, over 3861637.72 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:35:12,760 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.21 vs. limit=15.0
+2024-08-26 03:35:16,816 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=167466.66666666666, ans=0.2
+2024-08-26 03:35:43,253 INFO [train.py:1114] (3/4) Epoch 13, batch 1550, loss[loss=0.2167, simple_loss=0.2894, pruned_loss=0.0533, ctc_loss=0.09328, over 19596.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2858, pruned_loss=0.06002, ctc_loss=0.113, over 3845387.19 frames. ], batch size: 60, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:35:47,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=167573.33333333334, ans=0.025
+2024-08-26 03:35:49,387 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=167573.33333333334, ans=0.0
+2024-08-26 03:36:35,361 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.778e+02 2.054e+02 2.767e+02 5.252e+02, threshold=4.108e+02, percent-clipped=7.0
+2024-08-26 03:36:47,881 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=167733.33333333334, ans=0.125
+2024-08-26 03:36:56,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.21 vs. limit=22.5
+2024-08-26 03:37:03,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=167786.66666666666, ans=0.0
+2024-08-26 03:37:05,305 INFO [train.py:1114] (3/4) Epoch 13, batch 1600, loss[loss=0.2097, simple_loss=0.284, pruned_loss=0.04839, ctc_loss=0.09669, over 19833.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2855, pruned_loss=0.05998, ctc_loss=0.1131, over 3834864.77 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:37:05,584 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=167840.0, ans=0.2
+2024-08-26 03:37:10,631 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.90 vs. limit=15.0
+2024-08-26 03:38:12,048 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=168000.0, ans=0.125
+2024-08-26 03:38:12,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=168000.0, ans=0.1
+2024-08-26 03:38:33,000 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=168053.33333333334, ans=0.04949747468305833
+2024-08-26 03:38:33,024 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=168053.33333333334, ans=0.0
+2024-08-26 03:38:35,988 INFO [train.py:1114] (3/4) Epoch 13, batch 1650, loss[loss=0.2346, simple_loss=0.2956, pruned_loss=0.063, ctc_loss=0.1192, over 19674.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2861, pruned_loss=0.06045, ctc_loss=0.1143, over 3831702.51 frames. ], batch size: 59, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 03:39:20,049 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 1.825e+02 2.209e+02 2.614e+02 4.167e+02, threshold=4.418e+02, percent-clipped=2.0
+2024-08-26 03:40:00,075 INFO [train.py:1114] (3/4) Epoch 13, batch 1700, loss[loss=0.221, simple_loss=0.264, pruned_loss=0.06478, ctc_loss=0.1214, over 19690.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.2861, pruned_loss=0.06055, ctc_loss=0.1142, over 3846185.05 frames. ], batch size: 46, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:40:12,931 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.64 vs. limit=15.0
+2024-08-26 03:40:13,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=168373.33333333334, ans=0.2
+2024-08-26 03:40:20,495 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=168426.66666666666, ans=0.125
+2024-08-26 03:40:33,940 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=168426.66666666666, ans=0.0
+2024-08-26 03:41:17,849 INFO [train.py:1114] (3/4) Epoch 13, batch 1750, loss[loss=0.1875, simple_loss=0.2506, pruned_loss=0.04469, ctc_loss=0.08762, over 19656.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.2854, pruned_loss=0.05998, ctc_loss=0.113, over 3851308.13 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:41:18,083 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=168640.0, ans=0.125
+2024-08-26 03:41:25,345 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=168640.0, ans=0.1
+2024-08-26 03:41:28,567 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=168640.0, ans=0.125
+2024-08-26 03:41:58,093 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=168746.66666666666, ans=0.0
+2024-08-26 03:41:59,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=168746.66666666666, ans=0.125
+2024-08-26 03:42:01,092 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.702e+02 2.065e+02 2.813e+02 5.109e+02, threshold=4.129e+02, percent-clipped=2.0
+2024-08-26 03:42:12,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=168800.0, ans=0.1
+2024-08-26 03:42:21,277 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=168800.0, ans=0.025
+2024-08-26 03:42:33,914 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.70 vs. limit=12.0
+2024-08-26 03:42:45,936 INFO [train.py:1114] (3/4) Epoch 13, batch 1800, loss[loss=0.2382, simple_loss=0.2973, pruned_loss=0.06532, ctc_loss=0.121, over 19609.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2854, pruned_loss=0.05991, ctc_loss=0.1125, over 3853420.17 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:42:50,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=168906.66666666666, ans=0.2
+2024-08-26 03:43:21,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=169013.33333333334, ans=0.0
+2024-08-26 03:43:24,869 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=169066.66666666666, ans=0.125
+2024-08-26 03:43:44,566 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=169120.0, ans=0.05
+2024-08-26 03:43:53,525 INFO [train.py:1114] (3/4) Epoch 13, batch 1850, loss[loss=0.2534, simple_loss=0.3093, pruned_loss=0.0713, ctc_loss=0.1372, over 19604.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2847, pruned_loss=0.05944, ctc_loss=0.1116, over 3856241.64 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:44:29,680 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 1.936e+02 2.666e+02 3.402e+02 5.252e+02, threshold=5.332e+02, percent-clipped=13.0
+2024-08-26 03:45:00,066 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.53 vs. limit=22.5
+2024-08-26 03:45:04,199 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.43 vs. limit=10.0
+2024-08-26 03:45:05,840 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=169386.66666666666, ans=0.125
+2024-08-26 03:45:07,842 INFO [train.py:1114] (3/4) Epoch 13, batch 1900, loss[loss=0.2494, simple_loss=0.3115, pruned_loss=0.06866, ctc_loss=0.1249, over 19681.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2857, pruned_loss=0.05991, ctc_loss=0.1123, over 3860124.61 frames. ], batch size: 59, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:45:28,412 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=169493.33333333334, ans=0.125
+2024-08-26 03:45:31,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=169546.66666666666, ans=0.0
+2024-08-26 03:46:03,550 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.31 vs. limit=12.0
+2024-08-26 03:46:05,197 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=169653.33333333334, ans=0.1
+2024-08-26 03:46:12,274 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.95 vs. limit=12.0
+2024-08-26 03:46:29,183 INFO [train.py:1114] (3/4) Epoch 13, batch 1950, loss[loss=0.1922, simple_loss=0.2602, pruned_loss=0.04518, ctc_loss=0.08431, over 19598.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.2868, pruned_loss=0.06038, ctc_loss=0.1133, over 3869711.64 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 03:46:47,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=169760.0, ans=0.125
+2024-08-26 03:46:58,260 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=169813.33333333334, ans=0.125
+2024-08-26 03:47:25,830 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.63 vs. limit=15.0
+2024-08-26 03:49:49,666 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=169813.33333333334, ans=0.2
+2024-08-26 03:50:26,630 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.795e+02 2.018e+02 2.323e+02 3.502e+02, threshold=4.036e+02, percent-clipped=0.0
+2024-08-26 04:04:15,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=169920.0, ans=0.025
+2024-08-26 04:22:39,285 INFO [train.py:1114] (3/4) Epoch 13, batch 2000, loss[loss=0.2111, simple_loss=0.2634, pruned_loss=0.05766, ctc_loss=0.1085, over 19651.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.287, pruned_loss=0.06042, ctc_loss=0.1134, over 3853944.93 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 04:25:47,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=169973.33333333334, ans=0.125
+2024-08-26 04:34:06,143 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.53 vs. limit=10.0
+2024-08-26 05:06:38,773 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 05:09:29,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=170186.66666666666, ans=0.2
+2024-08-26 05:15:36,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_ff3.min_abs, batch_count=170186.66666666666, ans=0.2
+2024-08-26 05:17:15,369 INFO [train.py:1114] (3/4) Epoch 13, batch 2050, loss[loss=0.2044, simple_loss=0.2605, pruned_loss=0.05413, ctc_loss=0.1, over 19702.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2856, pruned_loss=0.05984, ctc_loss=0.1124, over 3850415.50 frames. ], batch size: 47, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:17:51,993 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=170240.0, ans=0.0
+2024-08-26 05:34:32,866 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.739e+02 2.095e+02 2.592e+02 3.598e+02, threshold=4.189e+02, percent-clipped=0.0
+2024-08-26 05:36:11,440 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=170400.0, ans=0.2
+2024-08-26 05:44:04,158 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=170506.66666666666, ans=0.0
+2024-08-26 05:45:21,777 INFO [train.py:1114] (3/4) Epoch 13, batch 2100, loss[loss=0.2355, simple_loss=0.2904, pruned_loss=0.06602, ctc_loss=0.1215, over 19765.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2843, pruned_loss=0.05896, ctc_loss=0.1108, over 3857634.22 frames. ], batch size: 54, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 05:45:23,129 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=170506.66666666666, ans=0.0
+2024-08-26 05:45:23,217 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=170506.66666666666, ans=0.0
+2024-08-26 05:51:34,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=170560.0, ans=0.125
+2024-08-26 05:55:01,998 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=170666.66666666666, ans=0.0
+2024-08-26 05:56:34,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=170720.0, ans=0.2
+2024-08-26 05:57:05,749 INFO [train.py:1114] (3/4) Epoch 13, batch 2150, loss[loss=0.2127, simple_loss=0.2696, pruned_loss=0.0568, ctc_loss=0.1053, over 19866.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2829, pruned_loss=0.0581, ctc_loss=0.1095, over 3868862.44 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 06:00:46,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=170826.66666666666, ans=0.2
+2024-08-26 06:00:47,617 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=170826.66666666666, ans=0.0
+2024-08-26 06:02:02,677 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=170880.0, ans=15.0
+2024-08-26 06:02:10,730 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.801e+02 2.071e+02 2.646e+02 5.963e+02, threshold=4.141e+02, percent-clipped=6.0
+2024-08-26 06:03:09,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=170986.66666666666, ans=0.2
+2024-08-26 06:03:12,976 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:03:16,480 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=170986.66666666666, ans=0.125
+2024-08-26 06:03:39,302 INFO [train.py:1114] (3/4) Epoch 13, batch 2200, loss[loss=0.2274, simple_loss=0.2927, pruned_loss=0.05837, ctc_loss=0.1135, over 19591.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.2837, pruned_loss=0.05864, ctc_loss=0.1105, over 3867637.94 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 64.0
+2024-08-26 06:03:49,913 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=171040.0, ans=0.125
+2024-08-26 06:04:06,104 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=171093.33333333334, ans=0.0
+2024-08-26 06:04:24,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=171093.33333333334, ans=0.2
+2024-08-26 06:04:34,072 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=171093.33333333334, ans=0.125
+2024-08-26 06:05:06,984 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.59 vs. limit=15.0
+2024-08-26 06:05:31,078 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.05 vs. limit=22.5
+2024-08-26 06:06:26,730 INFO [train.py:1114] (3/4) Epoch 13, batch 2250, loss[loss=0.2335, simple_loss=0.2999, pruned_loss=0.0602, ctc_loss=0.1165, over 19632.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2838, pruned_loss=0.05868, ctc_loss=0.1104, over 3867400.59 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 06:06:27,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=171306.66666666666, ans=0.035
+2024-08-26 06:06:36,891 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.56 vs. limit=15.0
+2024-08-26 06:07:27,968 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=171360.0, ans=0.1
+2024-08-26 06:08:30,393 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.765e+02 2.070e+02 2.599e+02 3.761e+02, threshold=4.140e+02, percent-clipped=0.0
+2024-08-26 06:09:34,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=171466.66666666666, ans=0.125
+2024-08-26 06:09:39,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=171520.0, ans=0.1
+2024-08-26 06:10:19,744 INFO [train.py:1114] (3/4) Epoch 13, batch 2300, loss[loss=0.2423, simple_loss=0.2926, pruned_loss=0.07072, ctc_loss=0.1264, over 19500.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2839, pruned_loss=0.05923, ctc_loss=0.1114, over 3861275.80 frames. ], batch size: 49, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:10:25,753 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.30 vs. limit=15.0
+2024-08-26 06:10:45,484 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=171626.66666666666, ans=0.1
+2024-08-26 06:11:23,829 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=171733.33333333334, ans=0.2
+2024-08-26 06:11:43,310 INFO [train.py:1114] (3/4) Epoch 13, batch 2350, loss[loss=0.2372, simple_loss=0.3042, pruned_loss=0.06223, ctc_loss=0.1141, over 19703.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2831, pruned_loss=0.05872, ctc_loss=0.1104, over 3863262.31 frames. ], batch size: 63, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:12:01,236 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=171893.33333333334, ans=0.07
+2024-08-26 06:12:05,582 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=171946.66666666666, ans=0.0
+2024-08-26 06:12:06,768 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.52 vs. limit=22.5
+2024-08-26 06:12:07,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=171946.66666666666, ans=0.125
+2024-08-26 06:12:16,624 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.773e+02 2.247e+02 3.255e+02 4.983e+02, threshold=4.494e+02, percent-clipped=2.0
+2024-08-26 06:12:21,845 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=172000.0, ans=0.125
+2024-08-26 06:12:27,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=172000.0, ans=0.0
+2024-08-26 06:12:30,878 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.52 vs. limit=15.0
+2024-08-26 06:12:31,640 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:12:41,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=172053.33333333334, ans=0.125
+2024-08-26 06:12:46,283 INFO [train.py:1114] (3/4) Epoch 13, batch 2400, loss[loss=0.2132, simple_loss=0.2838, pruned_loss=0.05156, ctc_loss=0.09849, over 19317.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2861, pruned_loss=0.05999, ctc_loss=0.1127, over 3857386.77 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:13:22,369 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.74 vs. limit=22.5
+2024-08-26 06:13:29,980 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.46 vs. limit=12.0
+2024-08-26 06:13:41,761 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=172266.66666666666, ans=0.125
+2024-08-26 06:13:51,879 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=172266.66666666666, ans=0.2
+2024-08-26 06:13:52,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=172266.66666666666, ans=0.04949747468305833
+2024-08-26 06:14:08,369 INFO [train.py:1114] (3/4) Epoch 13, batch 2450, loss[loss=0.2929, simple_loss=0.3216, pruned_loss=0.09474, ctc_loss=0.1868, over 13453.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2903, pruned_loss=0.0633, ctc_loss=0.1195, over 3730288.78 frames. ], batch size: 140, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 06:14:23,117 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.56 vs. limit=15.0
+2024-08-26 06:14:29,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=172426.66666666666, ans=0.2
+2024-08-26 06:14:30,488 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.47 vs. limit=22.5
+2024-08-26 06:14:32,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=172480.0, ans=0.125
+2024-08-26 06:14:43,290 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.935e+02 2.072e+02 2.350e+02 4.711e+02, threshold=4.143e+02, percent-clipped=2.0
+2024-08-26 06:14:43,422 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172533.33333333334, ans=0.1
+2024-08-26 06:14:50,635 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=172533.33333333334, ans=0.09899494936611666
+2024-08-26 06:15:44,124 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=172581.33333333334, ans=0.04949747468305833
+2024-08-26 06:16:27,497 INFO [train.py:1114] (3/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:16:27,497 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-26 06:17:54,189 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.2040, 3.1870, 3.7603, 2.9080], device='cuda:3')
+2024-08-26 06:17:58,795 INFO [train.py:1146] (3/4) Epoch 14, validation: loss=0.1898, simple_loss=0.2778, pruned_loss=0.03769, ctc_loss=0.06578, over 944034.00 frames.
+2024-08-26 06:18:12,591 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 14072MB
+2024-08-26 06:18:29,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172634.66666666666, ans=0.1
+2024-08-26 06:18:33,898 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=172634.66666666666, ans=0.0
+2024-08-26 06:18:57,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=172688.0, ans=0.0
+2024-08-26 06:19:48,329 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=172794.66666666666, ans=0.125
+2024-08-26 06:19:53,753 INFO [train.py:1114] (3/4) Epoch 14, batch 50, loss[loss=0.1952, simple_loss=0.2579, pruned_loss=0.04869, ctc_loss=0.08786, over 19744.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2845, pruned_loss=0.05889, ctc_loss=0.1117, over 844617.17 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:20:17,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=172848.0, ans=0.0
+2024-08-26 06:20:41,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-26 06:21:15,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=173061.33333333334, ans=0.2
+2024-08-26 06:21:17,214 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.738e+02 2.047e+02 2.487e+02 4.948e+02, threshold=4.095e+02, percent-clipped=4.0
+2024-08-26 06:21:48,389 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=173061.33333333334, ans=0.0
+2024-08-26 06:21:51,844 INFO [train.py:1114] (3/4) Epoch 14, batch 100, loss[loss=0.2423, simple_loss=0.2947, pruned_loss=0.06908, ctc_loss=0.1296, over 19736.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.2867, pruned_loss=0.06008, ctc_loss=0.1132, over 1499497.26 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:22:05,285 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.10 vs. limit=22.5
+2024-08-26 06:22:20,490 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:22:26,303 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=173221.33333333334, ans=0.125
+2024-08-26 06:22:52,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=173274.66666666666, ans=0.1
+2024-08-26 06:23:28,714 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=173328.0, ans=0.125
+2024-08-26 06:23:33,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=173381.33333333334, ans=0.125
+2024-08-26 06:23:38,126 INFO [train.py:1114] (3/4) Epoch 14, batch 150, loss[loss=0.2089, simple_loss=0.2613, pruned_loss=0.05643, ctc_loss=0.1092, over 19682.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2845, pruned_loss=0.05887, ctc_loss=0.1107, over 2027234.41 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:23:51,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=173381.33333333334, ans=0.0
+2024-08-26 06:24:27,299 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.96 vs. limit=15.0
+2024-08-26 06:24:28,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=173488.0, ans=0.0
+2024-08-26 06:24:49,742 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.676e+02 1.898e+02 2.213e+02 4.155e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 06:25:00,462 INFO [train.py:1114] (3/4) Epoch 14, batch 200, loss[loss=0.2636, simple_loss=0.3077, pruned_loss=0.0796, ctc_loss=0.1506, over 18104.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2829, pruned_loss=0.05843, ctc_loss=0.11, over 2434650.11 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:25:04,164 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.68 vs. limit=15.0
+2024-08-26 06:25:37,094 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.72 vs. limit=12.0
+2024-08-26 06:25:40,315 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=173754.66666666666, ans=0.0
+2024-08-26 06:25:49,108 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.51 vs. limit=15.0
+2024-08-26 06:25:49,884 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:26:05,273 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=173861.33333333334, ans=0.2
+2024-08-26 06:26:10,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=173861.33333333334, ans=0.0
+2024-08-26 06:26:16,071 INFO [train.py:1114] (3/4) Epoch 14, batch 250, loss[loss=0.234, simple_loss=0.2987, pruned_loss=0.06192, ctc_loss=0.1139, over 19411.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2826, pruned_loss=0.05807, ctc_loss=0.1092, over 2753935.14 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:26:17,596 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=173914.66666666666, ans=0.125
+2024-08-26 06:26:39,409 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=173968.0, ans=0.125
+2024-08-26 06:26:42,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=173968.0, ans=0.125
+2024-08-26 06:26:46,586 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.41 vs. limit=10.0
+2024-08-26 06:26:49,893 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.83 vs. limit=15.0
+2024-08-26 06:27:09,288 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=174074.66666666666, ans=0.025
+2024-08-26 06:27:18,000 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.683e+02 2.061e+02 2.648e+02 4.927e+02, threshold=4.123e+02, percent-clipped=4.0
+2024-08-26 06:27:25,215 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=174128.0, ans=0.125
+2024-08-26 06:27:28,134 INFO [train.py:1114] (3/4) Epoch 14, batch 300, loss[loss=0.2284, simple_loss=0.2867, pruned_loss=0.06308, ctc_loss=0.1099, over 19518.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2821, pruned_loss=0.05807, ctc_loss=0.1091, over 2999168.00 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 06:28:16,042 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174341.33333333334, ans=0.1
+2024-08-26 06:28:17,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=174341.33333333334, ans=0.2
+2024-08-26 06:28:34,438 INFO [train.py:1114] (3/4) Epoch 14, batch 350, loss[loss=0.1806, simple_loss=0.2486, pruned_loss=0.0412, ctc_loss=0.07537, over 19738.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2827, pruned_loss=0.05819, ctc_loss=0.1092, over 3190097.11 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:29:12,878 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174554.66666666666, ans=0.1
+2024-08-26 06:29:15,400 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-26 06:29:16,477 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-26 06:29:19,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=174608.0, ans=0.2
+2024-08-26 06:29:32,485 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.657e+02 1.894e+02 2.440e+02 4.007e+02, threshold=3.787e+02, percent-clipped=0.0
+2024-08-26 06:29:42,961 INFO [train.py:1114] (3/4) Epoch 14, batch 400, loss[loss=0.2178, simple_loss=0.2853, pruned_loss=0.05595, ctc_loss=0.09592, over 19499.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2825, pruned_loss=0.05817, ctc_loss=0.1092, over 3342697.91 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:30:09,896 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.42 vs. limit=15.0
+2024-08-26 06:30:16,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=174768.0, ans=0.1
+2024-08-26 06:30:58,872 INFO [train.py:1114] (3/4) Epoch 14, batch 450, loss[loss=0.2287, simple_loss=0.2884, pruned_loss=0.06125, ctc_loss=0.116, over 19617.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2824, pruned_loss=0.05814, ctc_loss=0.1092, over 3451257.46 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:31:58,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=175088.0, ans=0.1
+2024-08-26 06:32:32,609 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.702e+02 1.875e+02 2.205e+02 3.904e+02, threshold=3.749e+02, percent-clipped=2.0
+2024-08-26 06:32:59,821 INFO [train.py:1114] (3/4) Epoch 14, batch 500, loss[loss=0.221, simple_loss=0.2893, pruned_loss=0.05606, ctc_loss=0.1015, over 19678.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2818, pruned_loss=0.05789, ctc_loss=0.1091, over 3547006.36 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:33:04,596 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=175248.0, ans=0.0
+2024-08-26 06:33:50,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=175354.66666666666, ans=0.2
+2024-08-26 06:34:13,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=175408.0, ans=0.0
+2024-08-26 06:34:14,979 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=175408.0, ans=0.125
+2024-08-26 06:34:23,145 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-26 06:34:32,917 INFO [train.py:1114] (3/4) Epoch 14, batch 550, loss[loss=0.2591, simple_loss=0.3066, pruned_loss=0.07753, ctc_loss=0.1413, over 19229.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2821, pruned_loss=0.05815, ctc_loss=0.1094, over 3608381.59 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:34:42,565 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.34 vs. limit=12.0
+2024-08-26 06:34:44,363 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=175514.66666666666, ans=0.125
+2024-08-26 06:34:55,845 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=175568.0, ans=0.0
+2024-08-26 06:35:05,620 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.84 vs. limit=15.0
+2024-08-26 06:35:14,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=175674.66666666666, ans=0.04949747468305833
+2024-08-26 06:35:23,157 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=175674.66666666666, ans=0.125
+2024-08-26 06:35:25,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=175674.66666666666, ans=0.125
+2024-08-26 06:35:36,414 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.729e+02 1.957e+02 2.291e+02 4.042e+02, threshold=3.913e+02, percent-clipped=2.0
+2024-08-26 06:36:00,300 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=175728.0, ans=0.025
+2024-08-26 06:36:17,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=175781.33333333334, ans=0.125
+2024-08-26 06:36:18,839 INFO [train.py:1114] (3/4) Epoch 14, batch 600, loss[loss=0.2306, simple_loss=0.2943, pruned_loss=0.0619, ctc_loss=0.1075, over 19367.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2821, pruned_loss=0.05791, ctc_loss=0.1089, over 3664729.30 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:38:33,376 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=175834.66666666666, ans=0.025
+2024-08-26 06:38:56,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=175941.33333333334, ans=0.125
+2024-08-26 06:39:00,572 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.10 vs. limit=22.5
+2024-08-26 06:39:25,941 INFO [train.py:1114] (3/4) Epoch 14, batch 650, loss[loss=0.2255, simple_loss=0.2864, pruned_loss=0.06071, ctc_loss=0.1078, over 19768.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2809, pruned_loss=0.05711, ctc_loss=0.1075, over 3715303.48 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:39:28,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=176048.0, ans=0.125
+2024-08-26 06:40:32,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=176101.33333333334, ans=0.0
+2024-08-26 06:40:51,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=176154.66666666666, ans=0.0
+2024-08-26 06:41:22,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=176208.0, ans=0.125
+2024-08-26 06:41:27,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=176261.33333333334, ans=0.2
+2024-08-26 06:41:31,385 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.772e+02 2.123e+02 2.635e+02 4.354e+02, threshold=4.247e+02, percent-clipped=3.0
+2024-08-26 06:41:34,672 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=176261.33333333334, ans=0.5
+2024-08-26 06:41:45,010 INFO [train.py:1114] (3/4) Epoch 14, batch 700, loss[loss=0.2053, simple_loss=0.2698, pruned_loss=0.05052, ctc_loss=0.0992, over 19748.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2812, pruned_loss=0.05697, ctc_loss=0.1074, over 3747071.82 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:41:58,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=176368.0, ans=0.2
+2024-08-26 06:42:02,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=176368.0, ans=0.0
+2024-08-26 06:42:51,119 INFO [train.py:1114] (3/4) Epoch 14, batch 750, loss[loss=0.2154, simple_loss=0.2782, pruned_loss=0.05522, ctc_loss=0.1055, over 19497.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2811, pruned_loss=0.05705, ctc_loss=0.1074, over 3774604.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:43:05,196 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.15 vs. limit=15.0
+2024-08-26 06:43:09,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176634.66666666666, ans=0.1
+2024-08-26 06:43:13,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=176634.66666666666, ans=0.125
+2024-08-26 06:44:26,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=176794.66666666666, ans=0.025
+2024-08-26 06:44:27,358 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.803e+02 2.358e+02 3.080e+02 4.835e+02, threshold=4.715e+02, percent-clipped=7.0
+2024-08-26 06:44:27,722 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=176794.66666666666, ans=0.125
+2024-08-26 06:44:39,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=176794.66666666666, ans=0.125
+2024-08-26 06:44:41,967 INFO [train.py:1114] (3/4) Epoch 14, batch 800, loss[loss=0.1913, simple_loss=0.2512, pruned_loss=0.04898, ctc_loss=0.08347, over 19821.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2813, pruned_loss=0.05723, ctc_loss=0.1077, over 3796395.50 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:44:47,349 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.81 vs. limit=15.0
+2024-08-26 06:44:50,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=176848.0, ans=0.125
+2024-08-26 06:45:02,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=176901.33333333334, ans=0.125
+2024-08-26 06:45:39,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=177061.33333333334, ans=0.125
+2024-08-26 06:45:52,039 INFO [train.py:1114] (3/4) Epoch 14, batch 850, loss[loss=0.2188, simple_loss=0.2985, pruned_loss=0.05004, ctc_loss=0.09747, over 19649.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2813, pruned_loss=0.05737, ctc_loss=0.108, over 3815210.30 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:46:11,703 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=177114.66666666666, ans=0.125
+2024-08-26 06:46:16,653 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.41 vs. limit=6.0
+2024-08-26 06:46:37,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=177221.33333333334, ans=0.2
+2024-08-26 06:47:11,725 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.690e+02 1.974e+02 2.351e+02 3.908e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 06:47:21,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=177328.0, ans=0.125
+2024-08-26 06:47:24,578 INFO [train.py:1114] (3/4) Epoch 14, batch 900, loss[loss=0.194, simple_loss=0.2578, pruned_loss=0.04778, ctc_loss=0.08644, over 19814.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2821, pruned_loss=0.05787, ctc_loss=0.1087, over 3820149.52 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:47:30,089 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=177381.33333333334, ans=0.125
+2024-08-26 06:47:35,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177434.66666666666, ans=0.1
+2024-08-26 06:47:40,031 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=16.41 vs. limit=22.5
+2024-08-26 06:47:41,824 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=177434.66666666666, ans=0.125
+2024-08-26 06:47:58,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=177488.0, ans=0.0
+2024-08-26 06:48:01,203 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.77 vs. limit=22.5
+2024-08-26 06:48:19,953 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=177594.66666666666, ans=0.125
+2024-08-26 06:48:31,289 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=177594.66666666666, ans=0.0
+2024-08-26 06:48:35,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177594.66666666666, ans=0.1
+2024-08-26 06:48:38,039 INFO [train.py:1114] (3/4) Epoch 14, batch 950, loss[loss=0.2294, simple_loss=0.2811, pruned_loss=0.06521, ctc_loss=0.1183, over 19503.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2827, pruned_loss=0.05819, ctc_loss=0.1092, over 3821034.01 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 06:48:39,726 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.47 vs. limit=15.0
+2024-08-26 06:49:00,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-26 06:49:08,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=177754.66666666666, ans=0.125
+2024-08-26 06:49:18,532 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=177754.66666666666, ans=0.0
+2024-08-26 06:49:26,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=177808.0, ans=0.2
+2024-08-26 06:49:36,177 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 1.810e+02 2.092e+02 2.519e+02 4.035e+02, threshold=4.185e+02, percent-clipped=1.0
+2024-08-26 06:50:05,496 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177914.66666666666, ans=0.1
+2024-08-26 06:50:06,711 INFO [train.py:1114] (3/4) Epoch 14, batch 1000, loss[loss=0.2184, simple_loss=0.2805, pruned_loss=0.05672, ctc_loss=0.1071, over 19846.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2839, pruned_loss=0.0587, ctc_loss=0.1102, over 3817118.64 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:50:17,843 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=7.734e-03
+2024-08-26 06:50:26,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177968.0, ans=0.1
+2024-08-26 06:50:31,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=177968.0, ans=0.125
+2024-08-26 06:50:45,500 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.46 vs. limit=15.0
+2024-08-26 06:50:50,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178074.66666666666, ans=0.1
+2024-08-26 06:51:03,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=178128.0, ans=0.125
+2024-08-26 06:51:05,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=178128.0, ans=0.015
+2024-08-26 06:51:21,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=178128.0, ans=0.1
+2024-08-26 06:51:23,212 INFO [train.py:1114] (3/4) Epoch 14, batch 1050, loss[loss=0.2164, simple_loss=0.2826, pruned_loss=0.05522, ctc_loss=0.09966, over 19823.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2832, pruned_loss=0.05855, ctc_loss=0.1098, over 3824209.19 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:51:58,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=178288.0, ans=0.125
+2024-08-26 06:52:01,752 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178341.33333333334, ans=0.1
+2024-08-26 06:52:09,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=178341.33333333334, ans=0.0
+2024-08-26 06:52:17,049 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 1.767e+02 2.034e+02 2.568e+02 4.426e+02, threshold=4.067e+02, percent-clipped=2.0
+2024-08-26 06:52:39,166 INFO [train.py:1114] (3/4) Epoch 14, batch 1100, loss[loss=0.2066, simple_loss=0.2783, pruned_loss=0.0489, ctc_loss=0.09278, over 19572.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2828, pruned_loss=0.05822, ctc_loss=0.1094, over 3832090.41 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:52:41,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=178448.0, ans=0.0
+2024-08-26 06:53:06,430 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=178554.66666666666, ans=0.0
+2024-08-26 06:53:37,312 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_na.min_abs, batch_count=178661.33333333334, ans=0.02
+2024-08-26 06:53:49,720 INFO [train.py:1114] (3/4) Epoch 14, batch 1150, loss[loss=0.2277, simple_loss=0.2861, pruned_loss=0.0617, ctc_loss=0.1149, over 19592.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2825, pruned_loss=0.0582, ctc_loss=0.1094, over 3830063.13 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:54:05,664 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=178714.66666666666, ans=0.125
+2024-08-26 06:54:09,193 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 06:54:09,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=178768.0, ans=0.125
+2024-08-26 06:54:13,944 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=178768.0, ans=0.125
+2024-08-26 06:54:16,665 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.70 vs. limit=6.0
+2024-08-26 06:54:32,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=178821.33333333334, ans=0.125
+2024-08-26 06:54:40,077 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=178874.66666666666, ans=0.025
+2024-08-26 06:54:47,789 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.255e+02 1.672e+02 1.916e+02 2.259e+02 4.129e+02, threshold=3.832e+02, percent-clipped=1.0
+2024-08-26 06:54:58,206 INFO [train.py:1114] (3/4) Epoch 14, batch 1200, loss[loss=0.2114, simple_loss=0.2822, pruned_loss=0.05054, ctc_loss=0.09882, over 19835.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2838, pruned_loss=0.05893, ctc_loss=0.1109, over 3826323.75 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:55:12,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 06:55:20,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=179034.66666666666, ans=0.0
+2024-08-26 06:55:20,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 06:55:36,109 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=179141.33333333334, ans=0.1
+2024-08-26 06:55:39,868 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=179141.33333333334, ans=0.2
+2024-08-26 06:56:27,981 INFO [train.py:1114] (3/4) Epoch 14, batch 1250, loss[loss=0.2594, simple_loss=0.311, pruned_loss=0.07541, ctc_loss=0.1421, over 19526.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2839, pruned_loss=0.05867, ctc_loss=0.1101, over 3844490.95 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:56:45,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=179301.33333333334, ans=0.2
+2024-08-26 06:56:46,444 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.20 vs. limit=6.0
+2024-08-26 06:57:07,347 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=179301.33333333334, ans=0.0
+2024-08-26 06:57:11,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=179354.66666666666, ans=0.0
+2024-08-26 06:58:13,352 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.864e+02 2.134e+02 2.537e+02 3.723e+02, threshold=4.267e+02, percent-clipped=0.0
+2024-08-26 06:58:31,270 INFO [train.py:1114] (3/4) Epoch 14, batch 1300, loss[loss=0.2556, simple_loss=0.3057, pruned_loss=0.07511, ctc_loss=0.1384, over 18977.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2827, pruned_loss=0.05795, ctc_loss=0.109, over 3849016.59 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 06:58:34,553 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.87 vs. limit=15.0
+2024-08-26 06:58:58,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-26 06:58:58,638 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179514.66666666666, ans=0.1
+2024-08-26 06:58:59,970 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.31 vs. limit=15.0
+2024-08-26 06:59:18,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=179621.33333333334, ans=0.2
+2024-08-26 06:59:38,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=179621.33333333334, ans=0.125
+2024-08-26 07:00:15,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179728.0, ans=0.1
+2024-08-26 07:00:19,489 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=179728.0, ans=0.125
+2024-08-26 07:00:23,927 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.66 vs. limit=15.0
+2024-08-26 07:00:31,186 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=179728.0, ans=0.125
+2024-08-26 07:00:35,359 INFO [train.py:1114] (3/4) Epoch 14, batch 1350, loss[loss=0.2029, simple_loss=0.2719, pruned_loss=0.04847, ctc_loss=0.0927, over 19782.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2823, pruned_loss=0.05769, ctc_loss=0.1086, over 3860373.05 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:01:10,593 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.15 vs. limit=10.0
+2024-08-26 07:01:26,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179834.66666666666, ans=0.125
+2024-08-26 07:01:37,052 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=179888.0, ans=0.125
+2024-08-26 07:01:39,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=179888.0, ans=0.125
+2024-08-26 07:01:42,722 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.80 vs. limit=12.0
+2024-08-26 07:02:09,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=179941.33333333334, ans=0.125
+2024-08-26 07:02:11,204 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.07 vs. limit=15.0
+2024-08-26 07:02:26,031 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 1.690e+02 1.870e+02 2.214e+02 3.706e+02, threshold=3.740e+02, percent-clipped=0.0
+2024-08-26 07:02:46,434 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=180048.0, ans=0.2
+2024-08-26 07:02:47,353 INFO [train.py:1114] (3/4) Epoch 14, batch 1400, loss[loss=0.1921, simple_loss=0.2463, pruned_loss=0.04974, ctc_loss=0.09597, over 19689.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2819, pruned_loss=0.0576, ctc_loss=0.1084, over 3866720.71 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:03:00,264 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=8.149e-02
+2024-08-26 07:03:00,575 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.00 vs. limit=15.0
+2024-08-26 07:03:15,178 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=180101.33333333334, ans=0.125
+2024-08-26 07:03:35,097 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=180154.66666666666, ans=0.025
+2024-08-26 07:04:17,102 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.07 vs. limit=15.0
+2024-08-26 07:04:25,275 INFO [train.py:1114] (3/4) Epoch 14, batch 1450, loss[loss=0.2236, simple_loss=0.2877, pruned_loss=0.05868, ctc_loss=0.1053, over 19664.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2829, pruned_loss=0.05806, ctc_loss=0.1093, over 3864940.50 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:04:30,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=180314.66666666666, ans=0.125
+2024-08-26 07:04:38,713 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=180314.66666666666, ans=0.2
+2024-08-26 07:04:44,217 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=180314.66666666666, ans=0.0
+2024-08-26 07:05:12,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=180421.33333333334, ans=0.125
+2024-08-26 07:05:28,573 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-26 07:05:38,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-26 07:05:38,521 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.97 vs. limit=15.0
+2024-08-26 07:05:41,194 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.716e+02 1.963e+02 2.339e+02 6.137e+02, threshold=3.925e+02, percent-clipped=1.0
+2024-08-26 07:05:57,992 INFO [train.py:1114] (3/4) Epoch 14, batch 1500, loss[loss=0.2479, simple_loss=0.3095, pruned_loss=0.06755, ctc_loss=0.128, over 19584.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2825, pruned_loss=0.05763, ctc_loss=0.1083, over 3864728.47 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:06:05,702 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=180581.33333333334, ans=0.125
+2024-08-26 07:06:08,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=180634.66666666666, ans=0.2
+2024-08-26 07:06:47,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=180688.0, ans=0.125
+2024-08-26 07:07:06,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=180741.33333333334, ans=0.1
+2024-08-26 07:07:26,416 INFO [train.py:1114] (3/4) Epoch 14, batch 1550, loss[loss=0.2353, simple_loss=0.2955, pruned_loss=0.06458, ctc_loss=0.1149, over 19609.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2824, pruned_loss=0.0576, ctc_loss=0.1084, over 3848764.53 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:07:26,710 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=180848.0, ans=0.125
+2024-08-26 07:07:47,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=180901.33333333334, ans=0.2
+2024-08-26 07:07:56,198 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=180954.66666666666, ans=0.125
+2024-08-26 07:08:02,303 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=18.04 vs. limit=22.5
+2024-08-26 07:08:09,621 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.13 vs. limit=15.0
+2024-08-26 07:08:20,840 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.735e+02 1.996e+02 2.323e+02 4.332e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-26 07:08:29,936 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=15.0
+2024-08-26 07:08:46,918 INFO [train.py:1114] (3/4) Epoch 14, batch 1600, loss[loss=0.2182, simple_loss=0.2815, pruned_loss=0.05496, ctc_loss=0.1122, over 19841.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.282, pruned_loss=0.05757, ctc_loss=0.1083, over 3837124.52 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 07:08:50,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=181114.66666666666, ans=0.0
+2024-08-26 07:08:50,829 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.90 vs. limit=15.0
+2024-08-26 07:09:18,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=181221.33333333334, ans=0.0
+2024-08-26 07:09:24,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=181221.33333333334, ans=0.0
+2024-08-26 07:10:18,901 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=181328.0, ans=0.125
+2024-08-26 07:10:22,352 INFO [train.py:1114] (3/4) Epoch 14, batch 1650, loss[loss=0.2378, simple_loss=0.2936, pruned_loss=0.06451, ctc_loss=0.1323, over 19635.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2823, pruned_loss=0.05795, ctc_loss=0.1092, over 3834583.61 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:11:06,564 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.61 vs. limit=15.0
+2024-08-26 07:11:07,500 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=181541.33333333334, ans=0.125
+2024-08-26 07:11:10,758 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.857e+02 2.243e+02 2.957e+02 5.258e+02, threshold=4.486e+02, percent-clipped=5.0
+2024-08-26 07:11:23,916 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=181594.66666666666, ans=0.125
+2024-08-26 07:11:27,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=181648.0, ans=0.1
+2024-08-26 07:11:28,239 INFO [train.py:1114] (3/4) Epoch 14, batch 1700, loss[loss=0.1764, simple_loss=0.241, pruned_loss=0.03982, ctc_loss=0.08018, over 19673.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2817, pruned_loss=0.05728, ctc_loss=0.1079, over 3848471.89 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:11:44,226 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=181701.33333333334, ans=0.5
+2024-08-26 07:11:44,236 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:12:04,378 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=181808.0, ans=0.125
+2024-08-26 07:12:05,636 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.14 vs. limit=15.0
+2024-08-26 07:12:24,387 INFO [train.py:1114] (3/4) Epoch 14, batch 1750, loss[loss=0.1921, simple_loss=0.253, pruned_loss=0.04799, ctc_loss=0.08786, over 19696.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2817, pruned_loss=0.05745, ctc_loss=0.1082, over 3854184.62 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:12:42,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=181968.0, ans=0.0
+2024-08-26 07:12:58,809 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=182021.33333333334, ans=0.015
+2024-08-26 07:13:14,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=182021.33333333334, ans=0.1
+2024-08-26 07:13:27,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=182074.66666666666, ans=0.125
+2024-08-26 07:13:35,919 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.769e+02 2.123e+02 2.747e+02 4.234e+02, threshold=4.245e+02, percent-clipped=0.0
+2024-08-26 07:13:38,266 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=182128.0, ans=0.125
+2024-08-26 07:13:46,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182128.0, ans=0.1
+2024-08-26 07:13:51,688 INFO [train.py:1114] (3/4) Epoch 14, batch 1800, loss[loss=0.2355, simple_loss=0.2956, pruned_loss=0.06293, ctc_loss=0.1242, over 19601.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.282, pruned_loss=0.05769, ctc_loss=0.1086, over 3855590.03 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:14:04,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=182234.66666666666, ans=0.0
+2024-08-26 07:14:10,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=182234.66666666666, ans=0.125
+2024-08-26 07:14:49,559 INFO [train.py:1114] (3/4) Epoch 14, batch 1850, loss[loss=0.204, simple_loss=0.2762, pruned_loss=0.04671, ctc_loss=0.09587, over 19599.00 frames. ], tot_loss[loss=0.22, simple_loss=0.282, pruned_loss=0.05742, ctc_loss=0.108, over 3858221.10 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:15:07,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=182501.33333333334, ans=0.2
+2024-08-26 07:15:35,880 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.755e+02 2.000e+02 2.500e+02 5.147e+02, threshold=4.001e+02, percent-clipped=3.0
+2024-08-26 07:15:46,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182661.33333333334, ans=0.1
+2024-08-26 07:15:52,243 INFO [train.py:1114] (3/4) Epoch 14, batch 1900, loss[loss=0.237, simple_loss=0.2997, pruned_loss=0.06393, ctc_loss=0.1163, over 19678.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2827, pruned_loss=0.05757, ctc_loss=0.1084, over 3862304.89 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:16:01,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=182714.66666666666, ans=0.125
+2024-08-26 07:16:06,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=182768.0, ans=0.0
+2024-08-26 07:16:11,280 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=182768.0, ans=0.025
+2024-08-26 07:16:22,236 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=182821.33333333334, ans=0.125
+2024-08-26 07:16:56,692 INFO [train.py:1114] (3/4) Epoch 14, batch 1950, loss[loss=0.191, simple_loss=0.2604, pruned_loss=0.04392, ctc_loss=0.08439, over 19580.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2838, pruned_loss=0.0578, ctc_loss=0.1087, over 3871186.13 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:16:58,153 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=182981.33333333334, ans=0.125
+2024-08-26 07:17:31,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=183088.0, ans=0.125
+2024-08-26 07:17:33,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=183088.0, ans=0.0
+2024-08-26 07:17:51,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=183141.33333333334, ans=0.1
+2024-08-26 07:17:55,528 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.666e+02 1.941e+02 2.281e+02 4.229e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-26 07:18:05,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=183194.66666666666, ans=0.0
+2024-08-26 07:18:14,089 INFO [train.py:1114] (3/4) Epoch 14, batch 2000, loss[loss=0.2122, simple_loss=0.2676, pruned_loss=0.05706, ctc_loss=0.1068, over 19655.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2842, pruned_loss=0.0581, ctc_loss=0.1095, over 3856216.08 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 64.0
+2024-08-26 07:18:28,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=183301.33333333334, ans=0.125
+2024-08-26 07:18:30,978 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=183301.33333333334, ans=0.04949747468305833
+2024-08-26 07:18:46,577 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:19:11,483 INFO [train.py:1114] (3/4) Epoch 14, batch 2050, loss[loss=0.1956, simple_loss=0.2574, pruned_loss=0.04928, ctc_loss=0.08794, over 19725.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2835, pruned_loss=0.05821, ctc_loss=0.1097, over 3853148.84 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:19:39,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=183568.0, ans=0.2
+2024-08-26 07:19:45,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-26 07:19:46,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=183621.33333333334, ans=0.0
+2024-08-26 07:19:52,588 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-26 07:19:53,822 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183674.66666666666, ans=0.1
+2024-08-26 07:19:54,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183674.66666666666, ans=0.1
+2024-08-26 07:19:58,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=183674.66666666666, ans=0.2
+2024-08-26 07:20:49,711 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 07:20:51,591 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 1.705e+02 1.994e+02 2.461e+02 3.917e+02, threshold=3.988e+02, percent-clipped=1.0
+2024-08-26 07:24:48,260 INFO [train.py:1114] (3/4) Epoch 14, batch 2100, loss[loss=0.2434, simple_loss=0.2954, pruned_loss=0.06934, ctc_loss=0.1319, over 19763.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2823, pruned_loss=0.05752, ctc_loss=0.1086, over 3859523.11 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 07:35:53,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183834.66666666666, ans=0.1
+2024-08-26 07:43:34,451 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.97 vs. limit=22.5
+2024-08-26 07:52:29,409 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=183888.0, ans=0.0
+2024-08-26 08:02:07,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=183994.66666666666, ans=0.125
+2024-08-26 08:07:07,602 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=183994.66666666666, ans=0.0
+2024-08-26 08:09:58,326 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=183994.66666666666, ans=0.2
+2024-08-26 08:13:15,354 INFO [train.py:1114] (3/4) Epoch 14, batch 2150, loss[loss=0.1871, simple_loss=0.2652, pruned_loss=0.03962, ctc_loss=0.07465, over 19831.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2818, pruned_loss=0.05736, ctc_loss=0.1081, over 3870758.51 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 08:16:18,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=184048.0, ans=0.125
+2024-08-26 08:27:10,855 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=184101.33333333334, ans=0.0
+2024-08-26 08:42:47,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=184154.66666666666, ans=0.125
+2024-08-26 08:51:19,682 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=184208.0, ans=0.5
+2024-08-26 08:59:37,607 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.765e+02 2.052e+02 2.784e+02 6.261e+02, threshold=4.104e+02, percent-clipped=7.0
+2024-08-26 08:59:59,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=184261.33333333334, ans=0.1
+2024-08-26 09:03:09,764 INFO [train.py:1114] (3/4) Epoch 14, batch 2200, loss[loss=0.236, simple_loss=0.3022, pruned_loss=0.06183, ctc_loss=0.1152, over 19593.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.282, pruned_loss=0.05747, ctc_loss=0.1082, over 3869156.33 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 09:18:44,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=184474.66666666666, ans=0.125
+2024-08-26 09:18:48,244 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.08 vs. limit=15.0
+2024-08-26 09:20:23,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=184474.66666666666, ans=0.125
+2024-08-26 09:20:25,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=184528.0, ans=0.0
+2024-08-26 09:20:37,577 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=184528.0, ans=0.0
+2024-08-26 09:20:37,787 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.96 vs. limit=15.0
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-0
new file mode 100644
index 0000000000000000000000000000000000000000..d425c44f15a08b7d0a771d8d8e0155f402ffbeb0
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-0
@@ -0,0 +1,41 @@
+2024-08-29 02:20:12,557 INFO [train.py:1182] (0/4) Training started
+2024-08-29 02:20:16,779 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-29 02:20:24,018 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2655.int.cedar.computecanada.ca', 'IP address': '172.16.146.92'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 02:20:24,018 INFO [train.py:1212] (0/4) About to create model
+2024-08-29 02:20:26,032 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-29 02:20:26,744 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 02:21:07,937 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-29 02:21:11,401 INFO [train.py:1231] (0/4) Using DDP
+2024-08-29 02:22:07,014 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-29 02:22:07,205 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-29 02:22:07,205 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-29 02:22:08,990 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-29 02:22:08,995 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-29 02:22:09,604 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-29 02:22:09,940 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-29 02:22:10,267 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-29 02:22:10,267 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:28:00,136 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-29 02:28:01,730 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-29 02:30:56,404 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-29 02:30:57,389 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=256, metric=7.97 vs. limit=7.5
+2024-08-29 02:31:02,586 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 02:32:39,929 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 02:32:41,489 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 02:32:41,507 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-29 02:39:21,843 INFO [train.py:1114] (0/4) Epoch 14, batch 0, loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 02:39:21,844 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-29 02:41:04,595 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.0927, 2.8748, 3.3499, 2.5743], device='cuda:0')
+2024-08-29 02:43:58,039 INFO [train.py:1146] (0/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 02:43:58,039 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 03:22:08,041 INFO [train.py:1050] (0/4) Caught exception: [Rank 0] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=245, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600003 milliseconds before timing out..
+2024-08-29 03:22:08,192 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-0.pt
+2024-08-29 03:22:23,650 INFO [train.py:1413] (0/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 03:22:48,082 INFO [train.py:1419] (0/4) features shape: torch.Size([48, 1633, 80])
+2024-08-29 03:22:48,091 INFO [train.py:1423] (0/4) num tokens: 3861
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-1
new file mode 100644
index 0000000000000000000000000000000000000000..9ec2a62c51e91c18116602e9c7ec298810612c14
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-1
@@ -0,0 +1,40 @@
+2024-08-29 02:20:12,771 INFO [train.py:1182] (1/4) Training started
+2024-08-29 02:20:12,772 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-29 02:20:24,017 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2655.int.cedar.computecanada.ca', 'IP address': '172.16.146.92'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 02:20:42,972 INFO [train.py:1212] (1/4) About to create model
+2024-08-29 02:20:43,668 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-29 02:20:43,668 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 02:21:11,391 INFO [train.py:1231] (1/4) Using DDP
+2024-08-29 02:22:07,014 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-29 02:22:07,213 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-29 02:22:07,213 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-29 02:22:08,990 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-29 02:22:08,995 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-29 02:22:09,604 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-29 02:22:09,940 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-29 02:22:10,261 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-29 02:22:10,261 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:27:59,124 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.58 vs. limit=3.0
+2024-08-29 02:28:00,137 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-29 02:28:01,726 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 02:30:56,400 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 02:31:02,582 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 02:32:39,930 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 02:32:40,850 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=4.55 vs. limit=3.0
+2024-08-29 02:32:41,485 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 02:32:41,508 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-29 02:39:21,860 INFO [train.py:1114] (1/4) Epoch 14, batch 0, loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 02:39:21,861 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-29 02:43:58,038 INFO [train.py:1146] (1/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 02:43:58,039 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 03:22:08,039 INFO [train.py:1050] (1/4) Caught exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=245, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600003 milliseconds before timing out..
+2024-08-29 03:22:08,041 INFO [checkpoint.py:75] (1/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-1.pt
+2024-08-29 03:24:03,407 INFO [train.py:1413] (1/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 03:24:03,503 INFO [train.py:1419] (1/4) features shape: torch.Size([49, 1632, 80])
+2024-08-29 03:24:03,505 INFO [train.py:1423] (1/4) num tokens: 3786
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-2
new file mode 100644
index 0000000000000000000000000000000000000000..92cac4aa36ba1d0c647bbc9b552ca5ab8f0ac007
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-2
@@ -0,0 +1,39 @@
+2024-08-29 02:20:12,763 INFO [train.py:1182] (2/4) Training started
+2024-08-29 02:20:12,764 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-29 02:20:24,017 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2655.int.cedar.computecanada.ca', 'IP address': '172.16.146.92'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 02:20:24,017 INFO [train.py:1212] (2/4) About to create model
+2024-08-29 02:20:26,034 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-29 02:20:26,744 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 02:21:11,391 INFO [train.py:1231] (2/4) Using DDP
+2024-08-29 02:22:07,024 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-29 02:22:07,222 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-29 02:22:07,222 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-29 02:22:07,424 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-29 02:22:07,424 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-29 02:22:07,424 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-29 02:22:09,032 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-29 02:22:09,034 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-29 02:22:09,604 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-29 02:22:09,940 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-29 02:22:10,266 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-29 02:22:10,267 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:28:00,138 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-29 02:28:01,723 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-29 02:30:56,401 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 02:30:57,521 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=12.51 vs. limit=7.5
+2024-08-29 02:31:02,578 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 02:32:39,932 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 02:32:41,480 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 02:32:41,498 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-29 02:39:21,843 INFO [train.py:1114] (2/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 02:39:21,843 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-29 02:43:58,044 INFO [train.py:1146] (2/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 02:43:58,045 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 03:12:13,916 INFO [train.py:1050] (2/4) Caught exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=233, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600008 milliseconds before timing out..
+2024-08-29 03:12:13,918 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-2.pt
+2024-08-29 03:12:24,827 INFO [train.py:1413] (2/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 03:12:26,813 INFO [train.py:1419] (2/4) features shape: torch.Size([76, 1045, 80])
+2024-08-29 03:12:26,816 INFO [train.py:1423] (2/4) num tokens: 4179
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-3
new file mode 100644
index 0000000000000000000000000000000000000000..d78c66fdd209e36e8d5df1e62d9e239c64f0007c
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-02-20-12-3
@@ -0,0 +1,41 @@
+2024-08-29 02:20:12,771 INFO [train.py:1182] (3/4) Training started
+2024-08-29 02:20:12,772 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-29 02:20:24,017 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2655.int.cedar.computecanada.ca', 'IP address': '172.16.146.92'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 02:20:52,701 INFO [train.py:1212] (3/4) About to create model
+2024-08-29 02:20:53,396 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-29 02:20:53,396 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 02:21:11,368 INFO [train.py:1231] (3/4) Using DDP
+2024-08-29 02:22:07,023 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-29 02:22:07,184 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-29 02:22:07,184 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-29 02:22:07,423 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-29 02:22:07,424 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-29 02:22:09,006 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-29 02:22:09,008 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-29 02:22:09,604 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-29 02:22:09,941 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-29 02:22:10,264 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-29 02:22:10,264 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 02:28:00,136 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-29 02:28:01,730 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-29 02:30:56,394 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-29 02:30:57,360 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=256, metric=7.92 vs. limit=7.5
+2024-08-29 02:31:02,579 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 02:32:39,932 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 02:32:41,479 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 02:32:41,499 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-29 02:39:00,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=172581.33333333334, ans=0.04949747468305833
+2024-08-29 02:39:21,844 INFO [train.py:1114] (3/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 02:39:21,844 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-29 02:41:44,001 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.1110, 2.9698, 3.4716, 2.6793], device='cuda:3')
+2024-08-29 02:43:58,036 INFO [train.py:1146] (3/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 02:43:58,037 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 03:22:08,040 INFO [train.py:1050] (3/4) Caught exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=245, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600001 milliseconds before timing out..
+2024-08-29 03:22:08,042 INFO [checkpoint.py:75] (3/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-3.pt
+2024-08-29 03:22:09,462 INFO [train.py:1413] (3/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 03:22:38,838 INFO [train.py:1419] (3/4) features shape: torch.Size([49, 1632, 80])
+2024-08-29 03:22:38,841 INFO [train.py:1423] (3/4) num tokens: 3961
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-34-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-34-0
new file mode 100644
index 0000000000000000000000000000000000000000..abc413646faf51152965d222afc16ce84568e2b5
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-34-0
@@ -0,0 +1,41 @@
+2024-08-29 10:29:34,831 INFO [train.py:1182] (0/4) Training started
+2024-08-29 10:29:43,675 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-29 10:29:48,609 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 10:29:48,610 INFO [train.py:1212] (0/4) About to create model
+2024-08-29 10:29:49,338 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-29 10:29:49,916 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 10:30:17,107 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-29 10:30:25,093 INFO [train.py:1231] (0/4) Using DDP
+2024-08-29 10:30:35,005 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-29 10:30:56,630 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-29 10:30:56,631 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 10:30:56,633 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-29 10:30:56,633 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-29 10:30:56,633 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-29 10:30:56,633 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-29 10:30:56,633 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-29 10:30:56,633 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-29 10:30:58,222 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-29 10:30:58,222 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-29 10:30:58,224 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-29 10:30:58,225 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-29 10:30:58,550 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-29 10:30:58,550 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 10:42:07,077 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-29 10:42:08,615 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-29 10:47:00,571 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-29 10:47:01,535 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=256, metric=7.97 vs. limit=7.5
+2024-08-29 10:47:01,970 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 10:54:13,225 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 10:54:14,754 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 10:54:14,770 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-29 10:56:29,891 INFO [train.py:1114] (0/4) Epoch 14, batch 0, loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 10:56:29,892 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-29 11:02:14,718 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.0927, 2.8748, 3.3499, 2.5743], device='cuda:0')
+2024-08-29 11:03:12,764 INFO [train.py:1146] (0/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 11:03:12,765 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 11:27:38,684 INFO [train.py:1050] (0/4) Caught exception: [Rank 0] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=209, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600004 milliseconds before timing out..
+2024-08-29 11:27:38,688 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-0.pt
+2024-08-29 11:27:43,350 INFO [train.py:1413] (0/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 11:37:59,919 INFO [train.py:1419] (0/4) features shape: torch.Size([50, 1582, 80])
+2024-08-29 11:38:00,088 INFO [train.py:1423] (0/4) num tokens: 4102
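The `loss[...]` entries above decompose exactly as the scales in the config dump suggest: with `simple_loss_scale=0.5` and `ctc_loss_scale=0.2`, the logged total is `0.5 * simple_loss + pruned_loss + 0.2 * ctc_loss` to logging precision. A minimal check of that identity, using only values printed in the rank-0 batch-0 line:

```python
# Cross-check the composition of the logged total loss (rank 0, epoch 14, batch 0).
# The two scales come from the config dump at the top of this log.
simple_loss_scale = 0.5  # 'simple_loss_scale' in the logged params
ctc_loss_scale = 0.2     # 'ctc_loss_scale' in the logged params

simple_loss, pruned_loss, ctc_loss = 0.2753, 0.05867, 0.107
total = simple_loss_scale * simple_loss + pruned_loss + ctc_loss_scale * ctc_loss
print(round(total, 4))   # -> 0.2177, matching the logged loss=0.2177
```

The same identity holds on the other ranks, e.g. 0.5 * 0.2686 + 0.05116 + 0.2 * 0.1002 = 0.2055 for the rank-1 batch-0 line.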
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-1
new file mode 100644
index 0000000000000000000000000000000000000000..995dd307ac763724ffc6f14b48564ff8d5c46283
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-1
@@ -0,0 +1,35 @@
+2024-08-29 10:29:35,106 INFO [train.py:1182] (1/4) Training started
+2024-08-29 10:29:35,107 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-29 10:29:35,925 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 10:29:35,925 INFO [train.py:1212] (1/4) About to create model
+2024-08-29 10:29:37,620 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-29 10:29:37,755 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 10:30:25,092 INFO [train.py:1231] (1/4) Using DDP
+2024-08-29 10:30:35,008 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-29 10:30:35,206 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-29 10:30:35,206 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-29 10:30:40,956 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-29 10:30:40,964 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-29 10:30:41,292 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-29 10:30:41,498 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-29 10:30:41,818 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-29 10:30:41,818 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 10:41:41,672 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.58 vs. limit=3.0
+2024-08-29 10:42:07,083 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-29 10:42:08,626 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 10:47:00,568 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 10:47:01,963 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 10:54:13,225 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 10:54:14,126 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=4.55 vs. limit=3.0
+2024-08-29 10:54:14,749 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 10:54:14,767 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-29 10:56:29,892 INFO [train.py:1114] (1/4) Epoch 14, batch 0, loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 10:56:29,892 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-29 11:03:12,766 INFO [train.py:1146] (1/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 11:03:12,766 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13467MB
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-2
new file mode 100644
index 0000000000000000000000000000000000000000..0b153bb65e1e91d3feb5f5572e08633cd6f48a38
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-2
@@ -0,0 +1,39 @@
+2024-08-29 10:29:35,200 INFO [train.py:1182] (2/4) Training started
+2024-08-29 10:29:35,201 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-29 10:29:35,925 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 10:29:35,925 INFO [train.py:1212] (2/4) About to create model
+2024-08-29 10:29:37,659 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-29 10:29:37,755 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 10:30:25,067 INFO [train.py:1231] (2/4) Using DDP
+2024-08-29 10:30:35,008 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-29 10:30:35,161 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-29 10:30:35,161 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-29 10:30:39,346 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-29 10:30:39,346 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-29 10:30:40,957 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-29 10:30:40,964 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-29 10:30:41,292 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-29 10:30:41,498 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-29 10:30:41,823 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-29 10:30:41,823 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 10:42:07,078 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-29 10:42:08,620 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-29 10:47:00,570 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 10:47:01,745 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=12.51 vs. limit=7.5
+2024-08-29 10:47:01,974 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 10:54:13,229 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 10:54:14,759 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 10:54:14,778 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-29 10:56:29,907 INFO [train.py:1114] (2/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 10:56:29,908 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-29 11:03:12,766 INFO [train.py:1146] (2/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 11:03:12,767 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 11:27:38,684 INFO [train.py:1050] (2/4) Caught exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=209, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600000 milliseconds before timing out..
+2024-08-29 11:27:38,686 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-2.pt
+2024-08-29 11:28:00,514 INFO [train.py:1413] (2/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 11:37:54,610 INFO [train.py:1419] (2/4) features shape: torch.Size([50, 1582, 80])
+2024-08-29 11:37:54,614 INFO [train.py:1423] (2/4) num tokens: 4031
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-3
new file mode 100644
index 0000000000000000000000000000000000000000..5315595b745a3c3f497ffa8b54c5331706077b0d
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-10-29-35-3
@@ -0,0 +1,41 @@
+2024-08-29 10:29:35,106 INFO [train.py:1182] (3/4) Training started
+2024-08-29 10:29:35,107 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-29 10:29:35,925 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2558.int.cedar.computecanada.ca', 'IP address': '172.16.145.251'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 10:29:35,925 INFO [train.py:1212] (3/4) About to create model
+2024-08-29 10:29:37,639 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-29 10:29:37,755 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 10:30:25,092 INFO [train.py:1231] (3/4) Using DDP
+2024-08-29 10:30:35,013 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-29 10:30:35,218 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-29 10:30:35,218 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-29 10:30:39,345 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-29 10:30:40,960 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-29 10:30:40,966 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-29 10:30:41,293 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-29 10:30:41,498 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-29 10:30:41,824 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-29 10:30:41,824 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 10:42:07,076 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-29 10:42:08,617 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-29 10:47:00,566 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-29 10:47:01,543 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=256, metric=7.92 vs. limit=7.5
+2024-08-29 10:47:01,971 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 10:54:13,227 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 10:54:14,746 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 10:54:14,766 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-29 10:55:56,210 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=172581.33333333334, ans=0.04949747468305833
+2024-08-29 10:56:29,894 INFO [train.py:1114] (3/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 10:56:29,894 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-29 11:02:14,721 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.1110, 2.9698, 3.4716, 2.6793], device='cuda:3')
+2024-08-29 11:03:12,756 INFO [train.py:1146] (3/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 11:03:12,757 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 11:27:38,684 INFO [train.py:1050] (3/4) Caught exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=209, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600000 milliseconds before timing out..
+2024-08-29 11:27:38,687 INFO [checkpoint.py:75] (3/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-3.pt
+2024-08-29 11:27:40,372 INFO [train.py:1413] (3/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-323d3ab0-f35c-b8a9-fce5-e9d717208331.pt
+2024-08-29 11:37:47,768 INFO [train.py:1419] (3/4) features shape: torch.Size([50, 1582, 80])
+2024-08-29 11:37:51,441 INFO [train.py:1423] (3/4) num tokens: 4029
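All four ranks abort at 11:27:38 with the same `ALLREDUCE` watchdog timeout after about 600000 ms, save a `bad-model-*.pt` checkpoint and the offending batch, and the run is then relaunched from `epoch-13.pt` in the 13:08 log that follows. The 600000 ms figure matches the default 10-minute NCCL process-group timeout in the logged torch 2.2.1; a minimal single-process sketch (not from this recipe) of raising it through `torch.distributed`:

```python
# Hypothetical sketch of raising the NCCL watchdog timeout behind the
# "Timeout(ms)=600000" aborts above. init_process_group accepts a `timeout`
# argument; 600000 ms (10 minutes) is the NCCL default in torch 2.2.
import datetime
import os

import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "localhost")  # assumed; a launcher normally sets these
os.environ.setdefault("MASTER_PORT", "12354")

dist.init_process_group(
    backend="nccl",
    rank=0,
    world_size=1,
    timeout=datetime.timedelta(minutes=30),  # e.g. 30 minutes instead of the default 10
)
dist.destroy_process_group()
```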
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-37-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-37-0
new file mode 100644
index 0000000000000000000000000000000000000000..f16196b1a56d01bd40c7c4c89a9a61876bc8a41e
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-37-0
@@ -0,0 +1,1138 @@
+2024-08-29 13:08:37,942 INFO [train.py:1182] (0/4) Training started
+2024-08-29 13:08:37,946 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-29 13:08:38,138 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 13:08:38,138 INFO [train.py:1212] (0/4) About to create model
+2024-08-29 13:08:39,851 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-29 13:08:40,405 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 13:08:58,973 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-29 13:08:59,464 INFO [train.py:1231] (0/4) Using DDP
+2024-08-29 13:09:40,408 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-29 13:09:40,607 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-29 13:09:40,607 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-29 13:09:42,269 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-29 13:09:42,276 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-29 13:09:42,374 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-29 13:09:42,446 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-29 13:09:42,767 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-29 13:09:42,767 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 13:14:18,567 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-29 13:14:21,284 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-29 13:14:38,626 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-29 13:14:39,612 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=256, metric=7.97 vs. limit=7.5
+2024-08-29 13:14:45,671 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 13:15:10,817 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 13:15:12,349 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 13:15:12,368 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-29 13:16:15,202 INFO [train.py:1114] (0/4) Epoch 14, batch 0, loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2753, pruned_loss=0.05867, ctc_loss=0.107, over 19413.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:16:15,203 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-29 13:16:26,166 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.0927, 2.8748, 3.3499, 2.5743], device='cuda:0')
+2024-08-29 13:16:31,408 INFO [train.py:1146] (0/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 13:16:31,409 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13085MB
+2024-08-29 13:24:30,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=172688.0, ans=0.025
+2024-08-29 13:25:31,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=172741.33333333334, ans=0.125
+2024-08-29 13:26:36,724 INFO [train.py:1114] (0/4) Epoch 14, batch 50, loss[loss=0.2105, simple_loss=0.2695, pruned_loss=0.05525, ctc_loss=0.1026, over 19713.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2858, pruned_loss=0.05843, ctc_loss=0.1106, over 844800.35 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:26:37,527 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.08 vs. limit=22.5
+2024-08-29 13:27:00,165 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.69 vs. limit=15.0
+2024-08-29 13:27:22,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-29 13:30:17,778 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-29 13:30:45,602 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.72 vs. limit=15.0
+2024-08-29 13:30:55,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=172954.66666666666, ans=0.2
+2024-08-29 13:31:58,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=173008.0, ans=0.5
+2024-08-29 13:32:29,770 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.749e+02 1.974e+02 2.504e+02 4.970e+02, threshold=3.948e+02, percent-clipped=4.0
+2024-08-29 13:32:38,242 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.28 vs. limit=10.0
+2024-08-29 13:32:58,198 INFO [train.py:1114] (0/4) Epoch 14, batch 100, loss[loss=0.2323, simple_loss=0.2886, pruned_loss=0.06317, ctc_loss=0.1243, over 19723.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2882, pruned_loss=0.05994, ctc_loss=0.1132, over 1499143.56 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:33:05,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.38 vs. limit=15.0
+2024-08-29 13:34:27,512 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.24 vs. limit=15.0
+2024-08-29 13:34:34,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=173274.66666666666, ans=0.04949747468305833
+2024-08-29 13:34:42,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=173274.66666666666, ans=0.125
+2024-08-29 13:35:53,136 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.85 vs. limit=10.0
+2024-08-29 13:36:03,007 INFO [train.py:1114] (0/4) Epoch 14, batch 150, loss[loss=0.1995, simple_loss=0.2572, pruned_loss=0.05198, ctc_loss=0.09456, over 19719.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2857, pruned_loss=0.05877, ctc_loss=0.1108, over 2027452.09 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:36:03,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=173381.33333333334, ans=0.125
+2024-08-29 13:36:15,037 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.92 vs. limit=15.0
+2024-08-29 13:36:55,773 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.86 vs. limit=10.0
+2024-08-29 13:37:13,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=173541.33333333334, ans=0.125
+2024-08-29 13:37:17,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=173594.66666666666, ans=0.125
+2024-08-29 13:37:19,615 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.730e+02 2.035e+02 2.422e+02 3.683e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-29 13:37:30,504 INFO [train.py:1114] (0/4) Epoch 14, batch 200, loss[loss=0.2796, simple_loss=0.3228, pruned_loss=0.08462, ctc_loss=0.1678, over 18353.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2831, pruned_loss=0.05821, ctc_loss=0.1097, over 2435099.22 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:37:35,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=173648.0, ans=0.1
+2024-08-29 13:37:41,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=173648.0, ans=0.125
+2024-08-29 13:37:47,047 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.60 vs. limit=8.0
+2024-08-29 13:38:46,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=173701.33333333334, ans=0.07
+2024-08-29 13:42:15,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=173861.33333333334, ans=0.125
+2024-08-29 13:42:18,835 INFO [train.py:1114] (0/4) Epoch 14, batch 250, loss[loss=0.2507, simple_loss=0.2993, pruned_loss=0.07243, ctc_loss=0.1431, over 19394.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2833, pruned_loss=0.0584, ctc_loss=0.1102, over 2755806.45 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:43:11,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=173914.66666666666, ans=0.125
+2024-08-29 13:43:43,081 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=10.54 vs. limit=15.0
+2024-08-29 13:44:08,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=174074.66666666666, ans=0.0
+2024-08-29 13:44:13,474 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.787e+02 2.022e+02 2.717e+02 4.953e+02, threshold=4.043e+02, percent-clipped=2.0
+2024-08-29 13:44:43,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=174128.0, ans=0.125
+2024-08-29 13:44:46,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=174128.0, ans=0.125
+2024-08-29 13:44:52,079 INFO [train.py:1114] (0/4) Epoch 14, batch 300, loss[loss=0.2361, simple_loss=0.2883, pruned_loss=0.06748, ctc_loss=0.1226, over 19521.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2827, pruned_loss=0.05827, ctc_loss=0.1099, over 3001481.83 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:45:17,316 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=174288.0, ans=0.125
+2024-08-29 13:45:41,952 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.34 vs. limit=10.0
+2024-08-29 13:45:58,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=174394.66666666666, ans=0.125
+2024-08-29 13:46:13,841 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.30 vs. limit=22.5
+2024-08-29 13:46:17,854 INFO [train.py:1114] (0/4) Epoch 14, batch 350, loss[loss=0.1933, simple_loss=0.2531, pruned_loss=0.04839, ctc_loss=0.0919, over 19759.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2823, pruned_loss=0.05781, ctc_loss=0.1089, over 3191850.72 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 16.0
+2024-08-29 13:46:18,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=174448.0, ans=0.125
+2024-08-29 13:46:25,797 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.96 vs. limit=10.0
+2024-08-29 13:46:28,127 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.78 vs. limit=15.0
+2024-08-29 13:47:29,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=174608.0, ans=0.125
+2024-08-29 13:47:39,426 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.793e+02 2.058e+02 2.658e+02 4.429e+02, threshold=4.116e+02, percent-clipped=3.0
+2024-08-29 13:48:31,273 INFO [train.py:1114] (0/4) Epoch 14, batch 400, loss[loss=0.2258, simple_loss=0.2894, pruned_loss=0.05975, ctc_loss=0.1065, over 19496.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2821, pruned_loss=0.05787, ctc_loss=0.1089, over 3342545.93 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:49:13,711 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.876e-03
+2024-08-29 13:50:10,952 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=174821.33333333334, ans=0.2
+2024-08-29 13:50:12,227 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.60 vs. limit=15.0
+2024-08-29 13:50:57,536 INFO [train.py:1114] (0/4) Epoch 14, batch 450, loss[loss=0.2367, simple_loss=0.2999, pruned_loss=0.06201, ctc_loss=0.1237, over 19609.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2829, pruned_loss=0.05829, ctc_loss=0.1099, over 3450915.96 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:51:00,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=174981.33333333334, ans=0.125
+2024-08-29 13:51:11,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=175034.66666666666, ans=0.1
+2024-08-29 13:51:26,784 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 13:51:46,130 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.41 vs. limit=15.0
+2024-08-29 13:51:50,571 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.713e+02 1.900e+02 2.415e+02 4.159e+02, threshold=3.800e+02, percent-clipped=2.0
+2024-08-29 13:52:14,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=175194.66666666666, ans=0.5
+2024-08-29 13:52:16,290 INFO [train.py:1114] (0/4) Epoch 14, batch 500, loss[loss=0.2462, simple_loss=0.3152, pruned_loss=0.065, ctc_loss=0.118, over 19681.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2823, pruned_loss=0.05801, ctc_loss=0.1094, over 3546652.26 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:52:18,069 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.57 vs. limit=15.0
+2024-08-29 13:52:26,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=175248.0, ans=0.125
+2024-08-29 13:52:47,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=175354.66666666666, ans=0.125
+2024-08-29 13:52:47,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=175354.66666666666, ans=0.125
+2024-08-29 13:52:49,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=175354.66666666666, ans=0.0
+2024-08-29 13:52:54,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=175354.66666666666, ans=0.2
+2024-08-29 13:53:07,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=175408.0, ans=0.125
+2024-08-29 13:53:22,230 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.15 vs. limit=22.5
+2024-08-29 13:53:23,936 INFO [train.py:1114] (0/4) Epoch 14, batch 550, loss[loss=0.2481, simple_loss=0.3075, pruned_loss=0.0687, ctc_loss=0.1283, over 19273.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2826, pruned_loss=0.058, ctc_loss=0.1095, over 3608532.80 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:53:24,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=175514.66666666666, ans=0.025
+2024-08-29 13:53:36,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=175568.0, ans=0.125
+2024-08-29 13:53:36,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=175568.0, ans=0.0
+2024-08-29 13:54:06,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=175674.66666666666, ans=0.0
+2024-08-29 13:54:10,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=175674.66666666666, ans=0.2
+2024-08-29 13:54:18,067 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.725e+02 1.963e+02 2.348e+02 4.063e+02, threshold=3.927e+02, percent-clipped=2.0
+2024-08-29 13:54:20,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=175728.0, ans=0.025
+2024-08-29 13:54:24,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=175728.0, ans=0.125
+2024-08-29 13:54:27,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=175781.33333333334, ans=0.2
+2024-08-29 13:54:28,215 INFO [train.py:1114] (0/4) Epoch 14, batch 600, loss[loss=0.2468, simple_loss=0.3053, pruned_loss=0.06894, ctc_loss=0.126, over 19370.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2832, pruned_loss=0.05814, ctc_loss=0.1097, over 3666425.54 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:54:42,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=175834.66666666666, ans=0.1
+2024-08-29 13:55:03,507 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.51 vs. limit=15.0
+2024-08-29 13:55:30,840 INFO [train.py:1114] (0/4) Epoch 14, batch 650, loss[loss=0.2249, simple_loss=0.2942, pruned_loss=0.05575, ctc_loss=0.1105, over 19762.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2827, pruned_loss=0.05783, ctc_loss=0.1091, over 3716649.43 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:55:33,385 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 13:55:40,669 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 13:55:46,123 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.65 vs. limit=15.0
+2024-08-29 13:55:49,594 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.33 vs. limit=15.0
+2024-08-29 13:55:59,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=176154.66666666666, ans=0.125
+2024-08-29 13:56:08,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=176154.66666666666, ans=0.0
+2024-08-29 13:56:13,720 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.98 vs. limit=15.0
+2024-08-29 13:56:14,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=176208.0, ans=0.125
+2024-08-29 13:56:23,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=176261.33333333334, ans=0.125
+2024-08-29 13:56:24,629 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.743e+02 2.058e+02 2.560e+02 4.338e+02, threshold=4.116e+02, percent-clipped=4.0
+2024-08-29 13:56:34,647 INFO [train.py:1114] (0/4) Epoch 14, batch 700, loss[loss=0.1954, simple_loss=0.2666, pruned_loss=0.04456, ctc_loss=0.08781, over 19729.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2832, pruned_loss=0.05802, ctc_loss=0.1096, over 3748596.56 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:56:34,955 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=176314.66666666666, ans=0.125
+2024-08-29 13:56:36,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=176314.66666666666, ans=0.2
+2024-08-29 13:56:51,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=176368.0, ans=0.5
+2024-08-29 13:57:42,081 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.20 vs. limit=12.0
+2024-08-29 13:58:07,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=176528.0, ans=0.125
+2024-08-29 13:58:12,866 INFO [train.py:1114] (0/4) Epoch 14, batch 750, loss[loss=0.2129, simple_loss=0.2794, pruned_loss=0.05363, ctc_loss=0.09797, over 19505.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2824, pruned_loss=0.0577, ctc_loss=0.1088, over 3775051.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:58:18,264 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.28 vs. limit=15.0
+2024-08-29 13:58:19,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176581.33333333334, ans=0.1
+2024-08-29 13:58:21,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=176581.33333333334, ans=0.025
+2024-08-29 13:58:27,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=176634.66666666666, ans=0.0
+2024-08-29 13:58:41,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176688.0, ans=0.1
+2024-08-29 13:58:44,087 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.78 vs. limit=15.0
+2024-08-29 13:58:58,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=176741.33333333334, ans=0.125
+2024-08-29 13:59:04,792 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.96 vs. limit=15.0
+2024-08-29 13:59:06,508 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.910e+02 2.277e+02 2.884e+02 4.780e+02, threshold=4.554e+02, percent-clipped=3.0
+2024-08-29 13:59:06,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=176794.66666666666, ans=0.0
+2024-08-29 13:59:26,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=176794.66666666666, ans=0.025
+2024-08-29 13:59:28,747 INFO [train.py:1114] (0/4) Epoch 14, batch 800, loss[loss=0.1867, simple_loss=0.2534, pruned_loss=0.04332, ctc_loss=0.08361, over 19409.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2819, pruned_loss=0.05738, ctc_loss=0.1082, over 3795596.12 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:59:49,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=176901.33333333334, ans=0.05
+2024-08-29 14:01:16,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=177008.0, ans=0.025
+2024-08-29 14:01:17,618 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.59 vs. limit=10.0
+2024-08-29 14:01:18,487 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.66 vs. limit=15.0
+2024-08-29 14:02:31,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=177008.0, ans=0.125
+2024-08-29 14:02:39,433 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.22 vs. limit=15.0
+2024-08-29 14:02:49,613 INFO [train.py:1114] (0/4) Epoch 14, batch 850, loss[loss=0.2317, simple_loss=0.2998, pruned_loss=0.05868, ctc_loss=0.1153, over 19642.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2817, pruned_loss=0.0575, ctc_loss=0.1084, over 3814865.22 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:03:03,641 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.30 vs. limit=15.0
+2024-08-29 14:03:09,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=177168.0, ans=0.0
+2024-08-29 14:03:10,997 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.12 vs. limit=12.0
+2024-08-29 14:03:13,239 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.21 vs. limit=6.0
+2024-08-29 14:03:15,986 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=16.96 vs. limit=22.5
+2024-08-29 14:03:40,321 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.703e+02 1.970e+02 2.385e+02 3.831e+02, threshold=3.939e+02, percent-clipped=0.0
+2024-08-29 14:03:49,897 INFO [train.py:1114] (0/4) Epoch 14, batch 900, loss[loss=0.2209, simple_loss=0.2741, pruned_loss=0.06039, ctc_loss=0.1171, over 19784.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.282, pruned_loss=0.05769, ctc_loss=0.1086, over 3818440.25 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:03:50,635 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.26 vs. limit=22.5
+2024-08-29 14:04:04,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=177434.66666666666, ans=0.125
+2024-08-29 14:04:17,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=177488.0, ans=0.125
+2024-08-29 14:04:22,840 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.08 vs. limit=12.0
+2024-08-29 14:04:26,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=177541.33333333334, ans=0.1
+2024-08-29 14:04:28,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-29 14:04:40,424 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=177594.66666666666, ans=0.0
+2024-08-29 14:04:52,321 INFO [train.py:1114] (0/4) Epoch 14, batch 950, loss[loss=0.2047, simple_loss=0.2673, pruned_loss=0.05199, ctc_loss=0.09517, over 19503.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2823, pruned_loss=0.05788, ctc_loss=0.1089, over 3820636.09 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:05:06,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-29 14:05:09,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-29 14:05:14,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-29 14:05:30,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=177754.66666666666, ans=0.0
+2024-08-29 14:06:00,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=177808.0, ans=0.0
+2024-08-29 14:06:03,821 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.36 vs. limit=15.0
+2024-08-29 14:06:19,921 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.335e+02 1.740e+02 1.996e+02 2.581e+02 3.979e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-29 14:06:28,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=177861.33333333334, ans=0.125
+2024-08-29 14:07:04,890 INFO [train.py:1114] (0/4) Epoch 14, batch 1000, loss[loss=0.1979, simple_loss=0.2589, pruned_loss=0.05, ctc_loss=0.09211, over 19854.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2829, pruned_loss=0.05839, ctc_loss=0.1098, over 3816509.39 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:07:06,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=177914.66666666666, ans=0.09899494936611666
+2024-08-29 14:08:06,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177968.0, ans=0.1
+2024-08-29 14:08:08,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177968.0, ans=0.1
+2024-08-29 14:08:26,883 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=178021.33333333334, ans=0.2
+2024-08-29 14:08:48,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=178128.0, ans=0.0
+2024-08-29 14:08:56,377 INFO [train.py:1114] (0/4) Epoch 14, batch 1050, loss[loss=0.2091, simple_loss=0.2817, pruned_loss=0.04913, ctc_loss=0.09538, over 19845.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2819, pruned_loss=0.05771, ctc_loss=0.1086, over 3823872.80 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:09:05,096 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=178181.33333333334, ans=0.0
+2024-08-29 14:09:46,665 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.811e+02 2.215e+02 2.668e+02 4.320e+02, threshold=4.429e+02, percent-clipped=1.0
+2024-08-29 14:09:51,798 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=178394.66666666666, ans=0.125
+2024-08-29 14:10:24,276 INFO [train.py:1114] (0/4) Epoch 14, batch 1100, loss[loss=0.1921, simple_loss=0.2624, pruned_loss=0.04454, ctc_loss=0.08194, over 19612.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2812, pruned_loss=0.05724, ctc_loss=0.1079, over 3831458.89 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:14:46,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=178608.0, ans=0.0
+2024-08-29 14:14:54,175 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.19 vs. limit=12.0
+2024-08-29 14:16:18,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff2.min_abs, batch_count=178608.0, ans=0.1
+2024-08-29 14:17:48,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178661.33333333334, ans=0.1
+2024-08-29 14:19:15,474 INFO [train.py:1114] (0/4) Epoch 14, batch 1150, loss[loss=0.2197, simple_loss=0.2785, pruned_loss=0.05788, ctc_loss=0.1127, over 19593.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2812, pruned_loss=0.05741, ctc_loss=0.1082, over 3828830.16 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:19:20,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=178714.66666666666, ans=0.0
+2024-08-29 14:19:22,864 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.39 vs. limit=12.0
+2024-08-29 14:19:29,855 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=178714.66666666666, ans=0.125
+2024-08-29 14:19:45,162 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=178768.0, ans=0.0
+2024-08-29 14:20:36,919 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:22:13,338 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.701e+02 1.876e+02 2.352e+02 3.362e+02, threshold=3.753e+02, percent-clipped=0.0
+2024-08-29 14:22:33,798 INFO [train.py:1114] (0/4) Epoch 14, batch 1200, loss[loss=0.2318, simple_loss=0.2973, pruned_loss=0.06034, ctc_loss=0.114, over 19839.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2828, pruned_loss=0.05804, ctc_loss=0.1095, over 3825820.21 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:22:47,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178981.33333333334, ans=0.1
+2024-08-29 14:23:17,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=178981.33333333334, ans=0.07
+2024-08-29 14:23:48,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-29 14:23:48,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-29 14:23:51,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.04 vs. limit=15.0
+2024-08-29 14:24:24,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=179088.0, ans=0.125
+2024-08-29 14:24:28,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179088.0, ans=0.125
+2024-08-29 14:24:29,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=179088.0, ans=0.0
+2024-08-29 14:25:06,596 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.51 vs. limit=10.0
+2024-08-29 14:26:20,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=179194.66666666666, ans=0.0
+2024-08-29 14:29:54,001 INFO [train.py:1114] (0/4) Epoch 14, batch 1250, loss[loss=0.2112, simple_loss=0.2843, pruned_loss=0.0502, ctc_loss=0.09421, over 19531.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2829, pruned_loss=0.05771, ctc_loss=0.1086, over 3843334.00 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:30:08,625 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.33 vs. limit=15.0
+2024-08-29 14:31:51,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=179248.0, ans=0.125
+2024-08-29 14:31:55,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179301.33333333334, ans=0.125
+2024-08-29 14:32:24,792 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=179408.0, ans=0.125
+2024-08-29 14:32:24,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=179408.0, ans=0.04949747468305833
+2024-08-29 14:32:41,060 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.718e+02 2.120e+02 2.679e+02 4.271e+02, threshold=4.240e+02, percent-clipped=3.0
+2024-08-29 14:32:44,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=179461.33333333334, ans=0.125
+2024-08-29 14:33:09,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-29 14:33:10,231 INFO [train.py:1114] (0/4) Epoch 14, batch 1300, loss[loss=0.2381, simple_loss=0.2987, pruned_loss=0.06345, ctc_loss=0.1266, over 18833.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2819, pruned_loss=0.05703, ctc_loss=0.1074, over 3847330.15 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:34:11,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=179568.0, ans=0.025
+2024-08-29 14:34:12,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179568.0, ans=0.125
+2024-08-29 14:35:12,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=179728.0, ans=0.0
+2024-08-29 14:35:41,876 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.33 vs. limit=22.5
+2024-08-29 14:35:42,417 INFO [train.py:1114] (0/4) Epoch 14, batch 1350, loss[loss=0.2201, simple_loss=0.278, pruned_loss=0.05916, ctc_loss=0.1096, over 19770.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2818, pruned_loss=0.05714, ctc_loss=0.1075, over 3858687.41 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:35:44,309 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.95 vs. limit=15.0
+2024-08-29 14:36:07,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=179834.66666666666, ans=0.1
+2024-08-29 14:36:11,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=179834.66666666666, ans=0.2
+2024-08-29 14:36:20,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=179888.0, ans=0.0
+2024-08-29 14:39:13,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=179994.66666666666, ans=0.0
+2024-08-29 14:40:29,505 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.654e+02 1.881e+02 2.431e+02 4.376e+02, threshold=3.761e+02, percent-clipped=1.0
+2024-08-29 14:41:36,157 INFO [train.py:1114] (0/4) Epoch 14, batch 1400, loss[loss=0.1883, simple_loss=0.2586, pruned_loss=0.0425, ctc_loss=0.08245, over 19674.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.282, pruned_loss=0.05728, ctc_loss=0.1076, over 3864940.55 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:41:52,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=180101.33333333334, ans=0.0
+2024-08-29 14:41:54,674 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.82 vs. limit=15.0
+2024-08-29 14:42:09,954 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.26 vs. limit=6.0
+2024-08-29 14:42:19,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=180208.0, ans=0.125
+2024-08-29 14:42:39,846 INFO [train.py:1114] (0/4) Epoch 14, batch 1450, loss[loss=0.2351, simple_loss=0.2945, pruned_loss=0.06375, ctc_loss=0.1203, over 19668.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2823, pruned_loss=0.05716, ctc_loss=0.1075, over 3862868.32 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:43:57,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=180421.33333333334, ans=0.125
+2024-08-29 14:44:04,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=180421.33333333334, ans=0.0
+2024-08-29 14:44:05,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-29 14:44:05,347 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=180474.66666666666, ans=10.0
+2024-08-29 14:44:19,804 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.699e+02 1.929e+02 2.254e+02 4.469e+02, threshold=3.859e+02, percent-clipped=1.0
+2024-08-29 14:44:20,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=180528.0, ans=0.0
+2024-08-29 14:45:06,833 INFO [train.py:1114] (0/4) Epoch 14, batch 1500, loss[loss=0.2157, simple_loss=0.2827, pruned_loss=0.05425, ctc_loss=0.1006, over 19588.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2825, pruned_loss=0.0573, ctc_loss=0.1078, over 3861664.46 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:45:10,399 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.62 vs. limit=12.0
+2024-08-29 14:45:23,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=180634.66666666666, ans=0.0
+2024-08-29 14:45:48,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=180634.66666666666, ans=0.0
+2024-08-29 14:45:57,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=180688.0, ans=0.1
+2024-08-29 14:46:11,294 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180741.33333333334, ans=0.1
+2024-08-29 14:46:12,855 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.03 vs. limit=22.5
+2024-08-29 14:46:16,444 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.44 vs. limit=15.0
+2024-08-29 14:46:17,698 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.44 vs. limit=15.0
+2024-08-29 14:46:25,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180794.66666666666, ans=0.1
+2024-08-29 14:46:27,478 INFO [train.py:1114] (0/4) Epoch 14, batch 1550, loss[loss=0.2477, simple_loss=0.3021, pruned_loss=0.06973, ctc_loss=0.1347, over 19602.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2824, pruned_loss=0.05759, ctc_loss=0.1086, over 3847541.67 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:46:42,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=180901.33333333334, ans=0.1
+2024-08-29 14:48:37,430 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.702e+02 2.011e+02 2.397e+02 3.479e+02, threshold=4.023e+02, percent-clipped=0.0
+2024-08-29 14:48:41,325 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=181061.33333333334, ans=0.0
+2024-08-29 14:48:45,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=181061.33333333334, ans=0.125
+2024-08-29 14:48:45,430 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.20 vs. limit=15.0
+2024-08-29 14:48:47,150 INFO [train.py:1114] (0/4) Epoch 14, batch 1600, loss[loss=0.2003, simple_loss=0.2737, pruned_loss=0.04675, ctc_loss=0.0835, over 19828.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2816, pruned_loss=0.05708, ctc_loss=0.1076, over 3837473.80 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:50:21,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=181274.66666666666, ans=0.025
+2024-08-29 14:51:10,753 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=181328.0, ans=0.5
+2024-08-29 14:51:29,835 INFO [train.py:1114] (0/4) Epoch 14, batch 1650, loss[loss=0.2255, simple_loss=0.2958, pruned_loss=0.0557, ctc_loss=0.1098, over 19654.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2817, pruned_loss=0.05731, ctc_loss=0.108, over 3833134.13 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:51:32,743 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=181381.33333333334, ans=0.0
+2024-08-29 14:51:43,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=181381.33333333334, ans=0.0
+2024-08-29 14:51:44,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=181434.66666666666, ans=0.0
+2024-08-29 14:51:45,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=181434.66666666666, ans=0.2
+2024-08-29 14:52:11,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=181488.0, ans=0.125
+2024-08-29 14:52:18,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=181541.33333333334, ans=0.125
+2024-08-29 14:52:24,814 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.61 vs. limit=15.0
+2024-08-29 14:52:28,557 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.808e+02 2.247e+02 2.720e+02 5.029e+02, threshold=4.494e+02, percent-clipped=3.0
+2024-08-29 14:52:32,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=181594.66666666666, ans=0.0
+2024-08-29 14:52:38,136 INFO [train.py:1114] (0/4) Epoch 14, batch 1700, loss[loss=0.1751, simple_loss=0.2403, pruned_loss=0.04029, ctc_loss=0.07342, over 19703.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2815, pruned_loss=0.05704, ctc_loss=0.1074, over 3847514.77 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:52:48,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=181701.33333333334, ans=10.0
+2024-08-29 14:52:48,649 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:52:53,899 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.91 vs. limit=15.0
+2024-08-29 14:53:46,198 INFO [train.py:1114] (0/4) Epoch 14, batch 1750, loss[loss=0.1844, simple_loss=0.2383, pruned_loss=0.04689, ctc_loss=0.09163, over 19617.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2808, pruned_loss=0.05664, ctc_loss=0.1068, over 3852353.23 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:53:46,490 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=181914.66666666666, ans=0.125
+2024-08-29 14:53:48,031 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.72 vs. limit=22.5
+2024-08-29 14:54:11,905 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=182021.33333333334, ans=0.95
+2024-08-29 14:54:36,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=182021.33333333334, ans=0.125
+2024-08-29 14:56:19,214 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.53 vs. limit=15.0
+2024-08-29 14:56:23,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=182128.0, ans=0.0
+2024-08-29 14:56:25,492 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.791e+02 2.085e+02 2.712e+02 5.021e+02, threshold=4.170e+02, percent-clipped=2.0
+2024-08-29 14:56:29,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=182128.0, ans=0.0
+2024-08-29 14:56:29,736 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.51 vs. limit=22.5
+2024-08-29 14:56:34,698 INFO [train.py:1114] (0/4) Epoch 14, batch 1800, loss[loss=0.2055, simple_loss=0.2682, pruned_loss=0.05217, ctc_loss=0.09629, over 19592.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2813, pruned_loss=0.05681, ctc_loss=0.1071, over 3852353.41 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:57:15,550 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.36 vs. limit=15.0
+2024-08-29 14:57:16,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=182181.33333333334, ans=0.2
+2024-08-29 14:57:17,700 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.09 vs. limit=15.0
+2024-08-29 14:58:02,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=182394.66666666666, ans=0.0
+2024-08-29 14:58:07,414 INFO [train.py:1114] (0/4) Epoch 14, batch 1850, loss[loss=0.2178, simple_loss=0.2869, pruned_loss=0.05453, ctc_loss=0.09922, over 19596.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2812, pruned_loss=0.05664, ctc_loss=0.1066, over 3854748.92 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:59:37,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=182501.33333333334, ans=0.125
+2024-08-29 14:59:39,953 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.95 vs. limit=22.5
+2024-08-29 15:00:37,432 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.91 vs. limit=22.5
+2024-08-29 15:03:29,637 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.679e+02 1.934e+02 2.278e+02 6.084e+02, threshold=3.868e+02, percent-clipped=1.0
+2024-08-29 15:03:40,818 INFO [train.py:1114] (0/4) Epoch 14, batch 1900, loss[loss=0.2196, simple_loss=0.2991, pruned_loss=0.05018, ctc_loss=0.0994, over 19628.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2817, pruned_loss=0.05675, ctc_loss=0.1067, over 3861097.39 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:03:53,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=182768.0, ans=0.1
+2024-08-29 15:03:54,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=182768.0, ans=0.04949747468305833
+2024-08-29 15:04:23,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=182768.0, ans=0.0
+2024-08-29 15:04:23,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.09 vs. limit=22.5
+2024-08-29 15:04:43,481 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=182874.66666666666, ans=15.0
+2024-08-29 15:05:00,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=182928.0, ans=0.125
+2024-08-29 15:05:18,969 INFO [train.py:1114] (0/4) Epoch 14, batch 1950, loss[loss=0.1912, simple_loss=0.2613, pruned_loss=0.04436, ctc_loss=0.08118, over 19583.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2828, pruned_loss=0.05689, ctc_loss=0.107, over 3870300.04 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:05:42,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=183088.0, ans=0.1
+2024-08-29 15:05:45,607 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=183088.0, ans=0.0
+2024-08-29 15:05:45,844 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=8.27 vs. limit=12.0
+2024-08-29 15:05:50,112 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.50 vs. limit=15.0
+2024-08-29 15:05:53,334 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:06:03,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=183194.66666666666, ans=0.05
+2024-08-29 15:06:06,639 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.683e+02 1.939e+02 2.319e+02 3.642e+02, threshold=3.877e+02, percent-clipped=0.0
+2024-08-29 15:06:41,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=183194.66666666666, ans=0.04949747468305833
+2024-08-29 15:06:48,421 INFO [train.py:1114] (0/4) Epoch 14, batch 2000, loss[loss=0.1879, simple_loss=0.2501, pruned_loss=0.04524, ctc_loss=0.08805, over 19643.00 frames. ], tot_loss[loss=0.22, simple_loss=0.283, pruned_loss=0.05703, ctc_loss=0.1074, over 3854479.09 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:07:01,879 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.17 vs. limit=22.5
+2024-08-29 15:07:02,056 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.82 vs. limit=10.0
+2024-08-29 15:07:21,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=183408.0, ans=0.1
+2024-08-29 15:07:45,830 INFO [train.py:1114] (0/4) Epoch 14, batch 2050, loss[loss=0.1983, simple_loss=0.2545, pruned_loss=0.05182, ctc_loss=0.09622, over 19725.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2823, pruned_loss=0.05726, ctc_loss=0.1077, over 3850346.95 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:07:49,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=183514.66666666666, ans=0.125
+2024-08-29 15:08:41,811 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.94 vs. limit=15.0
+2024-08-29 15:09:31,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=183674.66666666666, ans=0.125
+2024-08-29 15:09:39,971 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.749e+02 1.987e+02 2.455e+02 3.413e+02, threshold=3.973e+02, percent-clipped=0.0
+2024-08-29 15:09:48,906 INFO [train.py:1114] (0/4) Epoch 14, batch 2100, loss[loss=0.2037, simple_loss=0.2715, pruned_loss=0.04929, ctc_loss=0.09321, over 19758.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2815, pruned_loss=0.05701, ctc_loss=0.1074, over 3857367.71 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:09:51,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=183781.33333333334, ans=0.125
+2024-08-29 15:09:58,489 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.02 vs. limit=6.0
+2024-08-29 15:10:25,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.27 vs. limit=15.0
+2024-08-29 15:10:49,071 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:10:57,777 INFO [train.py:1114] (0/4) Epoch 14, batch 2150, loss[loss=0.2131, simple_loss=0.2802, pruned_loss=0.05318, ctc_loss=0.09924, over 19570.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2814, pruned_loss=0.05717, ctc_loss=0.1076, over 3868244.08 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:10:58,227 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.85 vs. limit=15.0
+2024-08-29 15:11:05,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff3.min_abs, batch_count=184048.0, ans=0.2
+2024-08-29 15:11:24,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184154.66666666666, ans=0.1
+2024-08-29 15:11:33,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=184208.0, ans=0.0
+2024-08-29 15:11:35,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184208.0, ans=0.1
+2024-08-29 15:11:42,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=184261.33333333334, ans=0.125
+2024-08-29 15:11:44,635 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 1.765e+02 2.209e+02 2.742e+02 6.061e+02, threshold=4.418e+02, percent-clipped=6.0
+2024-08-29 15:12:09,370 INFO [train.py:1114] (0/4) Epoch 14, batch 2200, loss[loss=0.2306, simple_loss=0.2943, pruned_loss=0.06077, ctc_loss=0.1134, over 19567.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2811, pruned_loss=0.05689, ctc_loss=0.107, over 3866837.54 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:12:10,723 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:12:17,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=184314.66666666666, ans=0.0
+2024-08-29 15:12:22,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184368.0, ans=0.1
+2024-08-29 15:13:08,099 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.06 vs. limit=22.5
+2024-08-29 15:13:47,040 INFO [train.py:1114] (0/4) Epoch 14, batch 2250, loss[loss=0.2197, simple_loss=0.2901, pruned_loss=0.05351, ctc_loss=0.1059, over 19595.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2814, pruned_loss=0.0569, ctc_loss=0.1069, over 3866495.61 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:14:04,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=184634.66666666666, ans=0.0
+2024-08-29 15:14:09,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=184688.0, ans=0.125
+2024-08-29 15:14:13,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=184688.0, ans=0.125
+2024-08-29 15:14:21,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=184741.33333333334, ans=0.125
+2024-08-29 15:14:28,877 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=184741.33333333334, ans=0.05
+2024-08-29 15:14:30,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=184741.33333333334, ans=0.125
+2024-08-29 15:14:34,163 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.796e+02 2.116e+02 2.512e+02 3.767e+02, threshold=4.231e+02, percent-clipped=0.0
+2024-08-29 15:14:43,286 INFO [train.py:1114] (0/4) Epoch 14, batch 2300, loss[loss=0.2066, simple_loss=0.2635, pruned_loss=0.05529, ctc_loss=0.09767, over 19508.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2809, pruned_loss=0.05729, ctc_loss=0.1076, over 3860452.97 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:14:43,490 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=184848.0, ans=0.125
+2024-08-29 15:14:46,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=184848.0, ans=0.125
+2024-08-29 15:15:18,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=185008.0, ans=0.0
+2024-08-29 15:15:40,918 INFO [train.py:1114] (0/4) Epoch 14, batch 2350, loss[loss=0.2539, simple_loss=0.3058, pruned_loss=0.07385, ctc_loss=0.1359, over 19697.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2806, pruned_loss=0.05712, ctc_loss=0.1071, over 3864029.08 frames. ], batch size: 63, lr: 1.05e-02, grad_scale: 64.0
+2024-08-29 15:15:49,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=185114.66666666666, ans=0.1
+2024-08-29 15:15:55,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=185168.0, ans=0.125
+2024-08-29 15:16:00,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=185168.0, ans=0.125
+2024-08-29 15:16:07,104 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=185221.33333333334, ans=0.0
+2024-08-29 15:16:07,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=185221.33333333334, ans=0.1
+2024-08-29 15:16:13,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=185274.66666666666, ans=0.125
+2024-08-29 15:16:28,749 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.724e+02 2.017e+02 2.647e+02 4.792e+02, threshold=4.034e+02, percent-clipped=3.0
+2024-08-29 15:16:30,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=185328.0, ans=0.2
+2024-08-29 15:16:31,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=185328.0, ans=0.2
+2024-08-29 15:16:36,506 INFO [train.py:1114] (0/4) Epoch 14, batch 2400, loss[loss=0.2144, simple_loss=0.2835, pruned_loss=0.05247, ctc_loss=0.1011, over 19439.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2825, pruned_loss=0.05767, ctc_loss=0.1081, over 3858659.76 frames. ], batch size: 67, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:16:50,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=185381.33333333334, ans=0.125
+2024-08-29 15:17:13,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=185541.33333333334, ans=0.125
+2024-08-29 15:17:38,563 INFO [train.py:1114] (0/4) Epoch 14, batch 2450, loss[loss=0.2832, simple_loss=0.3165, pruned_loss=0.0906, ctc_loss=0.1717, over 13414.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2865, pruned_loss=0.06066, ctc_loss=0.1142, over 3733724.63 frames. ], batch size: 143, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:18:01,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=185754.66666666666, ans=0.0
+2024-08-29 15:18:05,308 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.65 vs. limit=15.0
+2024-08-29 15:18:22,141 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-14.pt
+2024-08-29 15:19:09,367 INFO [train.py:1114] (0/4) Epoch 15, batch 0, loss[loss=0.2255, simple_loss=0.2792, pruned_loss=0.06351, ctc_loss=0.1121, over 19411.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2792, pruned_loss=0.06351, ctc_loss=0.1121, over 19411.00 frames. ], batch size: 48, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:19:09,369 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-29 15:19:20,877 INFO [train.py:1146] (0/4) Epoch 15, validation: loss=0.1908, simple_loss=0.2785, pruned_loss=0.03825, ctc_loss=0.06651, over 944034.00 frames.
+2024-08-29 15:19:20,878 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13465MB
+2024-08-29 15:19:22,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=185856.0, ans=0.125
+2024-08-29 15:19:25,781 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.942e+02 2.136e+02 2.424e+02 3.799e+02, threshold=4.272e+02, percent-clipped=0.0
+2024-08-29 15:19:30,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=185856.0, ans=0.025
+2024-08-29 15:19:36,102 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:19:37,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=185909.33333333334, ans=0.125
+2024-08-29 15:19:38,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=185909.33333333334, ans=0.0
+2024-08-29 15:19:51,760 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.55 vs. limit=15.0
+2024-08-29 15:20:13,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=186069.33333333334, ans=0.125
+2024-08-29 15:20:21,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=186069.33333333334, ans=22.5
+2024-08-29 15:20:25,223 INFO [train.py:1114] (0/4) Epoch 15, batch 50, loss[loss=0.1992, simple_loss=0.2638, pruned_loss=0.0491, ctc_loss=0.09092, over 19710.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2847, pruned_loss=0.05854, ctc_loss=0.1104, over 843490.78 frames. ], batch size: 47, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:20:25,477 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=186122.66666666666, ans=0.07
+2024-08-29 15:20:34,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=186122.66666666666, ans=0.125
+2024-08-29 15:20:59,199 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=186229.33333333334, ans=0.0
+2024-08-29 15:21:11,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=186282.66666666666, ans=0.125
+2024-08-29 15:21:25,440 INFO [train.py:1114] (0/4) Epoch 15, batch 100, loss[loss=0.2085, simple_loss=0.267, pruned_loss=0.05381, ctc_loss=0.1061, over 19716.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2852, pruned_loss=0.05789, ctc_loss=0.1099, over 1497089.71 frames. ], batch size: 51, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:21:25,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=186389.33333333334, ans=0.05
+2024-08-29 15:21:30,090 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.739e+02 1.952e+02 2.450e+02 4.288e+02, threshold=3.904e+02, percent-clipped=1.0
+2024-08-29 15:21:30,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=186389.33333333334, ans=0.125
+2024-08-29 15:21:43,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=186442.66666666666, ans=0.2
+2024-08-29 15:22:00,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=186496.0, ans=0.125
+2024-08-29 15:22:04,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=186549.33333333334, ans=0.0
+2024-08-29 15:22:10,736 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.96 vs. limit=15.0
+2024-08-29 15:22:29,380 INFO [train.py:1114] (0/4) Epoch 15, batch 150, loss[loss=0.2449, simple_loss=0.2822, pruned_loss=0.07664, ctc_loss=0.1355, over 19727.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2829, pruned_loss=0.0573, ctc_loss=0.108, over 2025845.62 frames. ], batch size: 47, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:22:33,935 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=7.09 vs. limit=15.0
+2024-08-29 15:23:02,411 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.07 vs. limit=15.0
+2024-08-29 15:23:12,302 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=186816.0, ans=0.125
+2024-08-29 15:23:22,358 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.19 vs. limit=15.0
+2024-08-29 15:23:28,621 INFO [train.py:1114] (0/4) Epoch 15, batch 200, loss[loss=0.2158, simple_loss=0.2874, pruned_loss=0.05199, ctc_loss=0.1005, over 18056.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2807, pruned_loss=0.05648, ctc_loss=0.1064, over 2434205.28 frames. ], batch size: 85, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:23:44,488 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.690e+02 2.002e+02 2.433e+02 3.884e+02, threshold=4.003e+02, percent-clipped=0.0
+2024-08-29 15:24:05,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=187029.33333333334, ans=0.125
+2024-08-29 15:24:10,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=187029.33333333334, ans=0.1
+2024-08-29 15:24:18,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=187082.66666666666, ans=0.0
+2024-08-29 15:24:59,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=187189.33333333334, ans=0.125
+2024-08-29 15:25:01,156 INFO [train.py:1114] (0/4) Epoch 15, batch 250, loss[loss=0.2387, simple_loss=0.3013, pruned_loss=0.06471, ctc_loss=0.1166, over 19445.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2813, pruned_loss=0.05662, ctc_loss=0.1067, over 2754227.35 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:25:06,746 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.14 vs. limit=6.0
+2024-08-29 15:25:16,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=187242.66666666666, ans=0.125
+2024-08-29 15:25:16,422 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.79 vs. limit=15.0
+2024-08-29 15:26:33,415 INFO [train.py:1114] (0/4) Epoch 15, batch 300, loss[loss=0.2452, simple_loss=0.3037, pruned_loss=0.06892, ctc_loss=0.1224, over 19512.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2803, pruned_loss=0.05621, ctc_loss=0.1058, over 2999797.41 frames. ], batch size: 61, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:26:37,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=187456.0, ans=0.125
+2024-08-29 15:26:38,059 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.706e+02 2.088e+02 2.592e+02 3.748e+02, threshold=4.177e+02, percent-clipped=0.0
+2024-08-29 15:26:39,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=187456.0, ans=0.0
+2024-08-29 15:26:43,155 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.83 vs. limit=15.0
+2024-08-29 15:27:34,910 INFO [train.py:1114] (0/4) Epoch 15, batch 350, loss[loss=0.2171, simple_loss=0.2686, pruned_loss=0.06009, ctc_loss=0.1134, over 19746.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2805, pruned_loss=0.05604, ctc_loss=0.1052, over 3189316.06 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 16.0
+2024-08-29 15:27:37,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=187722.66666666666, ans=0.09899494936611666
+2024-08-29 15:27:39,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=187722.66666666666, ans=0.125
+2024-08-29 15:28:12,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=187829.33333333334, ans=0.125
+2024-08-29 15:28:27,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=187936.0, ans=0.125
+2024-08-29 15:28:38,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=187989.33333333334, ans=0.025
+2024-08-29 15:28:38,929 INFO [train.py:1114] (0/4) Epoch 15, batch 400, loss[loss=0.2058, simple_loss=0.2807, pruned_loss=0.04741, ctc_loss=0.09023, over 19497.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.28, pruned_loss=0.05589, ctc_loss=0.105, over 3342205.60 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:28:44,504 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.706e+02 2.043e+02 2.587e+02 5.210e+02, threshold=4.085e+02, percent-clipped=2.0
+2024-08-29 15:29:23,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=188042.66666666666, ans=0.02
+2024-08-29 15:29:39,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.07 vs. limit=15.0
+2024-08-29 15:29:46,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=188149.33333333334, ans=0.125
+2024-08-29 15:30:00,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=188202.66666666666, ans=0.0
+2024-08-29 15:30:07,909 INFO [train.py:1114] (0/4) Epoch 15, batch 450, loss[loss=0.197, simple_loss=0.2794, pruned_loss=0.04149, ctc_loss=0.07894, over 19609.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2804, pruned_loss=0.05593, ctc_loss=0.1055, over 3449783.93 frames. ], batch size: 55, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:30:15,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=188256.0, ans=0.125
+2024-08-29 15:30:19,018 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=7.85 vs. limit=15.0
+2024-08-29 15:30:24,838 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.15 vs. limit=15.0
+2024-08-29 15:30:39,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=188362.66666666666, ans=0.2
+2024-08-29 15:30:41,231 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:31:09,327 INFO [train.py:1114] (0/4) Epoch 15, batch 500, loss[loss=0.2263, simple_loss=0.2972, pruned_loss=0.05772, ctc_loss=0.0998, over 19712.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2798, pruned_loss=0.05572, ctc_loss=0.1049, over 3544980.18 frames. ], batch size: 63, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:31:14,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=188522.66666666666, ans=0.5
+2024-08-29 15:31:15,126 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.681e+02 1.897e+02 2.177e+02 4.545e+02, threshold=3.794e+02, percent-clipped=1.0
+2024-08-29 15:31:15,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=188522.66666666666, ans=0.2
+2024-08-29 15:31:16,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=188522.66666666666, ans=0.125
+2024-08-29 15:31:20,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=188576.0, ans=0.125
+2024-08-29 15:31:26,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=188576.0, ans=0.0
+2024-08-29 15:31:32,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=188629.33333333334, ans=0.0
+2024-08-29 15:31:32,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=188629.33333333334, ans=0.0
+2024-08-29 15:32:34,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=188682.66666666666, ans=0.125
+2024-08-29 15:32:42,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=188682.66666666666, ans=15.0
+2024-08-29 15:32:46,282 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=188736.0, ans=0.125
+2024-08-29 15:32:47,702 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.58 vs. limit=15.0
+2024-08-29 15:32:47,944 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.66 vs. limit=22.5
+2024-08-29 15:32:59,021 INFO [train.py:1114] (0/4) Epoch 15, batch 550, loss[loss=0.24, simple_loss=0.2972, pruned_loss=0.06659, ctc_loss=0.1241, over 19249.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.28, pruned_loss=0.05585, ctc_loss=0.1053, over 3605052.13 frames. ], batch size: 71, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:33:03,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=188789.33333333334, ans=0.125
+2024-08-29 15:33:43,865 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.35 vs. limit=10.0
+2024-08-29 15:33:50,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=188842.66666666666, ans=0.125
+2024-08-29 15:33:51,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=188842.66666666666, ans=0.02
+2024-08-29 15:33:51,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=188842.66666666666, ans=0.125
+2024-08-29 15:33:54,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=188896.0, ans=0.125
+2024-08-29 15:33:59,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=188896.0, ans=0.125
+2024-08-29 15:34:05,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=188949.33333333334, ans=0.1
+2024-08-29 15:34:09,808 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.76 vs. limit=6.0
+2024-08-29 15:34:12,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=188949.33333333334, ans=0.125
+2024-08-29 15:34:28,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189002.66666666666, ans=0.1
+2024-08-29 15:34:30,453 INFO [train.py:1114] (0/4) Epoch 15, batch 600, loss[loss=0.2549, simple_loss=0.3119, pruned_loss=0.07278, ctc_loss=0.1307, over 19440.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2801, pruned_loss=0.0557, ctc_loss=0.1049, over 3663301.89 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:34:34,762 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.75 vs. limit=15.0
+2024-08-29 15:34:36,393 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.830e+02 2.111e+02 2.732e+02 4.380e+02, threshold=4.223e+02, percent-clipped=4.0
+2024-08-29 15:34:48,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=189109.33333333334, ans=0.2
+2024-08-29 15:34:49,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=189109.33333333334, ans=0.0
+2024-08-29 15:34:50,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=189109.33333333334, ans=0.125
+2024-08-29 15:34:56,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189162.66666666666, ans=0.1
+2024-08-29 15:34:57,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=189162.66666666666, ans=10.0
+2024-08-29 15:35:03,601 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:35:12,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189216.0, ans=0.1
+2024-08-29 15:35:19,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=189269.33333333334, ans=0.125
+2024-08-29 15:35:23,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189269.33333333334, ans=0.1
+2024-08-29 15:35:31,335 INFO [train.py:1114] (0/4) Epoch 15, batch 650, loss[loss=0.2139, simple_loss=0.2788, pruned_loss=0.05401, ctc_loss=0.1023, over 19768.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2792, pruned_loss=0.05516, ctc_loss=0.1038, over 3714569.97 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:35:36,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=189322.66666666666, ans=0.2
+2024-08-29 15:35:45,965 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=189376.0, ans=0.2
+2024-08-29 15:35:53,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=189376.0, ans=0.1
+2024-08-29 15:37:52,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=189429.33333333334, ans=0.5
+2024-08-29 15:38:05,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:05,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:20,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=189536.0, ans=0.07
+2024-08-29 15:38:22,782 INFO [train.py:1114] (0/4) Epoch 15, batch 700, loss[loss=0.1795, simple_loss=0.2488, pruned_loss=0.03991, ctc_loss=0.07592, over 19719.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2798, pruned_loss=0.05541, ctc_loss=0.1042, over 3746563.06 frames. ], batch size: 51, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:38:28,535 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.846e+02 2.430e+02 3.057e+02 4.272e+02, threshold=4.860e+02, percent-clipped=1.0
+2024-08-29 15:38:29,203 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.24 vs. limit=10.0
+2024-08-29 15:38:33,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=189642.66666666666, ans=0.125
+2024-08-29 15:38:59,245 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.63 vs. limit=15.0
+2024-08-29 15:39:07,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=189749.33333333334, ans=0.125
+2024-08-29 15:39:21,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=189802.66666666666, ans=0.0
+2024-08-29 15:39:25,973 INFO [train.py:1114] (0/4) Epoch 15, batch 750, loss[loss=0.267, simple_loss=0.3161, pruned_loss=0.07776, ctc_loss=0.1557, over 19502.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2793, pruned_loss=0.05529, ctc_loss=0.1037, over 3773103.81 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:39:38,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=189909.33333333334, ans=0.125
+2024-08-29 15:39:40,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=189909.33333333334, ans=0.025
+2024-08-29 15:39:54,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=189962.66666666666, ans=0.025
+2024-08-29 15:39:55,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=189962.66666666666, ans=0.125
+2024-08-29 15:39:56,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=189962.66666666666, ans=0.0
+2024-08-29 15:40:01,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=190016.0, ans=0.125
+2024-08-29 15:40:22,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=190069.33333333334, ans=0.0
+2024-08-29 15:40:28,210 INFO [train.py:1114] (0/4) Epoch 15, batch 800, loss[loss=0.1895, simple_loss=0.2521, pruned_loss=0.04608, ctc_loss=0.0867, over 19806.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2786, pruned_loss=0.0549, ctc_loss=0.103, over 3796038.15 frames. ], batch size: 49, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:40:34,419 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.728e+02 2.068e+02 2.494e+02 4.984e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-29 15:40:34,672 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=190122.66666666666, ans=0.1
+2024-08-29 15:40:44,189 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:40:51,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=190229.33333333334, ans=0.0
+2024-08-29 15:41:00,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=190229.33333333334, ans=0.125
+2024-08-29 15:41:04,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=190282.66666666666, ans=0.125
+2024-08-29 15:41:26,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=190336.0, ans=0.0
+2024-08-29 15:41:30,882 INFO [train.py:1114] (0/4) Epoch 15, batch 850, loss[loss=0.2374, simple_loss=0.3029, pruned_loss=0.0632, ctc_loss=0.1138, over 19670.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2786, pruned_loss=0.05522, ctc_loss=0.1036, over 3816028.85 frames. ], batch size: 59, lr: 1.00e-02, grad_scale: 32.0
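+
+The lr field decays smoothly within the epoch (1.01e-02 in the batches above, 1.00e-02 here, 9.84e-03 by batch 2450) and steps down further at the epoch boundary (9.52e-03 at epoch 16, batch 0), consistent with icefall's Eden scheduler, which discounts the base rate by both batch index and epoch. A sketch of that form; the lr_batches/lr_epochs constants are typical defaults, not values read from this run:
+
+def eden_lr(base_lr, batch, epoch, lr_batches=5000.0, lr_epochs=3.5):
+    # Eden-style decay: smooth in batch count, with an additional
+    # epoch-dependent factor that produces the step at epoch boundaries.
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+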
+2024-08-29 15:42:03,267 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.89 vs. limit=15.0
+2024-08-29 15:42:32,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=190602.66666666666, ans=0.1
+2024-08-29 15:42:34,722 INFO [train.py:1114] (0/4) Epoch 15, batch 900, loss[loss=0.2189, simple_loss=0.2731, pruned_loss=0.06054, ctc_loss=0.109, over 19402.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2791, pruned_loss=0.0556, ctc_loss=0.1042, over 3818918.45 frames. ], batch size: 48, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:42:40,575 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 1.760e+02 2.061e+02 2.441e+02 4.748e+02, threshold=4.121e+02, percent-clipped=4.0
+2024-08-29 15:43:05,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=190709.33333333334, ans=0.125
+2024-08-29 15:43:12,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=190762.66666666666, ans=0.1
+2024-08-29 15:43:22,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=190816.0, ans=0.0
+2024-08-29 15:43:47,861 INFO [train.py:1114] (0/4) Epoch 15, batch 950, loss[loss=0.216, simple_loss=0.2788, pruned_loss=0.05603, ctc_loss=0.1031, over 19500.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2796, pruned_loss=0.05601, ctc_loss=0.1049, over 3820506.49 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:44:06,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=190976.0, ans=0.2
+2024-08-29 15:44:09,815 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=190976.0, ans=0.025
+2024-08-29 15:44:12,764 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.84 vs. limit=6.0
+2024-08-29 15:44:22,842 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.14 vs. limit=15.0
+2024-08-29 15:44:37,952 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=191136.0, ans=0.125
+2024-08-29 15:44:38,657 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.48 vs. limit=15.0
+2024-08-29 15:44:44,421 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.34 vs. limit=15.0
+2024-08-29 15:44:48,254 INFO [train.py:1114] (0/4) Epoch 15, batch 1000, loss[loss=0.223, simple_loss=0.2758, pruned_loss=0.06184, ctc_loss=0.1166, over 19842.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.28, pruned_loss=0.05624, ctc_loss=0.1054, over 3817654.75 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:44:49,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=191189.33333333334, ans=0.0
+2024-08-29 15:44:56,618 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.63 vs. limit=15.0
+2024-08-29 15:44:56,846 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 1.691e+02 1.934e+02 2.300e+02 3.610e+02, threshold=3.869e+02, percent-clipped=0.0
+2024-08-29 15:45:16,666 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=191296.0, ans=0.125
+2024-08-29 15:45:38,169 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.90 vs. limit=15.0
+2024-08-29 15:45:42,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=191402.66666666666, ans=0.125
+2024-08-29 15:45:53,390 INFO [train.py:1114] (0/4) Epoch 15, batch 1050, loss[loss=0.2172, simple_loss=0.2795, pruned_loss=0.05651, ctc_loss=0.1046, over 19840.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2793, pruned_loss=0.05615, ctc_loss=0.1053, over 3823923.35 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:46:15,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=191509.33333333334, ans=0.125
+2024-08-29 15:46:16,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=191509.33333333334, ans=0.125
+2024-08-29 15:46:21,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=191562.66666666666, ans=0.2
+2024-08-29 15:46:54,818 INFO [train.py:1114] (0/4) Epoch 15, batch 1100, loss[loss=0.1898, simple_loss=0.2642, pruned_loss=0.04222, ctc_loss=0.07748, over 19589.00 frames. ], tot_loss[loss=0.216, simple_loss=0.279, pruned_loss=0.0556, ctc_loss=0.1044, over 3831082.54 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:46:59,200 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.69 vs. limit=15.0
+2024-08-29 15:47:17,592 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.746e+02 1.965e+02 2.496e+02 3.903e+02, threshold=3.929e+02, percent-clipped=1.0
+2024-08-29 15:47:32,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=191776.0, ans=0.125
+2024-08-29 15:47:33,173 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.95 vs. limit=15.0
+2024-08-29 15:47:43,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=191829.33333333334, ans=0.0
+2024-08-29 15:47:51,085 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.05 vs. limit=8.0
+2024-08-29 15:48:10,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=191936.0, ans=0.0
+2024-08-29 15:48:12,562 INFO [train.py:1114] (0/4) Epoch 15, batch 1150, loss[loss=0.2341, simple_loss=0.2931, pruned_loss=0.06416, ctc_loss=0.1167, over 19572.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2794, pruned_loss=0.05582, ctc_loss=0.1049, over 3828740.94 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:48:14,157 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-36000.pt
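+
+checkpoint.py writes two kinds of snapshots in this run: batch-count files like checkpoint-36000.pt mid-epoch and epoch-N.pt at each epoch boundary (epoch-15.pt appears near the end of this epoch). A sketch of that policy; the state-dict fields and the 36000-batch period are assumptions matched to the filenames, not icefall's exact logic:
+
+import torch
+
+def maybe_save(model, optimizer, batch_idx_train, epoch, exp_dir,
+               save_every_n=36000, epoch_done=False):
+    # Hypothetical field names; real checkpoints carry more state
+    # (scheduler, sampler position, grad scaler, ...).
+    state = {
+        "model": model.state_dict(),
+        "optimizer": optimizer.state_dict(),
+        "batch_idx_train": batch_idx_train,
+        "epoch": epoch,
+    }
+    if batch_idx_train % save_every_n == 0:
+        torch.save(state, f"{exp_dir}/checkpoint-{batch_idx_train}.pt")
+    if epoch_done:
+        torch.save(state, f"{exp_dir}/epoch-{epoch}.pt")
+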
+2024-08-29 15:48:33,080 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.03 vs. limit=12.0
+2024-08-29 15:48:33,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=192042.66666666666, ans=0.125
+2024-08-29 15:48:42,385 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=5.424e-03
+2024-08-29 15:48:59,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=192149.33333333334, ans=0.025
+2024-08-29 15:49:19,923 INFO [train.py:1114] (0/4) Epoch 15, batch 1200, loss[loss=0.2055, simple_loss=0.2801, pruned_loss=0.04722, ctc_loss=0.09125, over 19835.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2801, pruned_loss=0.05592, ctc_loss=0.1055, over 3822545.46 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:49:20,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.54 vs. limit=15.0
+2024-08-29 15:49:24,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=192256.0, ans=0.125
+2024-08-29 15:49:26,210 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.719e+02 2.001e+02 2.349e+02 3.398e+02, threshold=4.002e+02, percent-clipped=0.0
+2024-08-29 15:49:57,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=192362.66666666666, ans=0.125
+2024-08-29 15:50:10,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=192416.0, ans=0.0
+2024-08-29 15:50:24,187 INFO [train.py:1114] (0/4) Epoch 15, batch 1250, loss[loss=0.2371, simple_loss=0.299, pruned_loss=0.06459, ctc_loss=0.115, over 19506.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2807, pruned_loss=0.05596, ctc_loss=0.1053, over 3841818.15 frames. ], batch size: 61, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:50:24,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=192522.66666666666, ans=0.125
+2024-08-29 15:50:39,264 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.64 vs. limit=15.0
+2024-08-29 15:50:44,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=192576.0, ans=0.025
+2024-08-29 15:50:51,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=192629.33333333334, ans=0.2
+2024-08-29 15:51:17,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=192736.0, ans=0.0
+2024-08-29 15:51:18,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=192736.0, ans=0.05
+2024-08-29 15:51:21,945 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=192736.0, ans=0.0
+2024-08-29 15:51:25,192 INFO [train.py:1114] (0/4) Epoch 15, batch 1300, loss[loss=0.2566, simple_loss=0.3086, pruned_loss=0.07423, ctc_loss=0.1404, over 18896.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.28, pruned_loss=0.05565, ctc_loss=0.1049, over 3846791.24 frames. ], batch size: 76, lr: 9.99e-03, grad_scale: 32.0
+2024-08-29 15:51:27,111 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.35 vs. limit=15.0
+2024-08-29 15:52:15,027 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.668e+02 1.955e+02 2.455e+02 4.261e+02, threshold=3.910e+02, percent-clipped=2.0
+2024-08-29 15:52:50,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=192949.33333333334, ans=0.0
+2024-08-29 15:52:54,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=192949.33333333334, ans=0.125
+2024-08-29 15:53:04,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=193002.66666666666, ans=0.2
+2024-08-29 15:53:10,999 INFO [train.py:1114] (0/4) Epoch 15, batch 1350, loss[loss=0.1984, simple_loss=0.2619, pruned_loss=0.0488, ctc_loss=0.09307, over 19767.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2787, pruned_loss=0.05471, ctc_loss=0.103, over 3857434.60 frames. ], batch size: 54, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:53:12,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=193056.0, ans=0.0
+2024-08-29 15:53:19,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=193056.0, ans=0.0
+2024-08-29 15:53:57,673 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.97 vs. limit=10.0
+2024-08-29 15:54:14,980 INFO [train.py:1114] (0/4) Epoch 15, batch 1400, loss[loss=0.1577, simple_loss=0.2283, pruned_loss=0.03145, ctc_loss=0.06026, over 19663.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2785, pruned_loss=0.05484, ctc_loss=0.1032, over 3864045.08 frames. ], batch size: 46, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:54:35,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=193322.66666666666, ans=0.125
+2024-08-29 15:54:37,475 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.658e+02 1.833e+02 2.351e+02 3.730e+02, threshold=3.665e+02, percent-clipped=0.0
+2024-08-29 15:54:45,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=193376.0, ans=0.0
+2024-08-29 15:55:27,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=193482.66666666666, ans=0.125
+2024-08-29 15:55:35,067 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.09 vs. limit=15.0
+2024-08-29 15:55:43,676 INFO [train.py:1114] (0/4) Epoch 15, batch 1450, loss[loss=0.2079, simple_loss=0.2816, pruned_loss=0.04975, ctc_loss=0.08664, over 19645.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2797, pruned_loss=0.05556, ctc_loss=0.1044, over 3861696.22 frames. ], batch size: 63, lr: 9.97e-03, grad_scale: 32.0
+2024-08-29 15:55:50,154 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.24 vs. limit=22.5
+2024-08-29 15:55:55,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=193642.66666666666, ans=0.0
+2024-08-29 15:56:45,766 INFO [train.py:1114] (0/4) Epoch 15, batch 1500, loss[loss=0.2324, simple_loss=0.2992, pruned_loss=0.06031, ctc_loss=0.1125, over 19575.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.28, pruned_loss=0.05547, ctc_loss=0.1042, over 3861285.39 frames. ], batch size: 57, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:56:52,431 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.660e+02 1.885e+02 2.337e+02 4.281e+02, threshold=3.770e+02, percent-clipped=2.0
+2024-08-29 15:56:54,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=193856.0, ans=0.125
+2024-08-29 15:57:28,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=194016.0, ans=0.125
+2024-08-29 15:57:30,938 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.39 vs. limit=12.0
+2024-08-29 15:57:40,294 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:57:49,276 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=2.504e-03
+2024-08-29 15:57:51,457 INFO [train.py:1114] (0/4) Epoch 15, batch 1550, loss[loss=0.2202, simple_loss=0.2874, pruned_loss=0.05595, ctc_loss=0.1027, over 19610.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2794, pruned_loss=0.0552, ctc_loss=0.1037, over 3847267.13 frames. ], batch size: 60, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:57:56,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=194122.66666666666, ans=0.125
+2024-08-29 15:58:15,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=194176.0, ans=0.125
+2024-08-29 15:58:16,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=194229.33333333334, ans=0.2
+2024-08-29 15:58:19,230 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.09 vs. limit=15.0
+2024-08-29 15:58:25,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=194229.33333333334, ans=0.125
+2024-08-29 15:58:29,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=194282.66666666666, ans=0.125
+2024-08-29 15:58:29,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=194282.66666666666, ans=0.125
+2024-08-29 15:58:35,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=194282.66666666666, ans=0.1
+2024-08-29 15:58:47,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=194336.0, ans=0.0
+2024-08-29 15:58:53,438 INFO [train.py:1114] (0/4) Epoch 15, batch 1600, loss[loss=0.2281, simple_loss=0.2967, pruned_loss=0.05748, ctc_loss=0.1112, over 19833.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2796, pruned_loss=0.05534, ctc_loss=0.1041, over 3836125.80 frames. ], batch size: 57, lr: 9.95e-03, grad_scale: 32.0
+2024-08-29 15:58:59,525 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.762e+02 2.164e+02 2.478e+02 4.927e+02, threshold=4.328e+02, percent-clipped=7.0
+2024-08-29 15:59:52,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=194442.66666666666, ans=0.0
+2024-08-29 15:59:59,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=194496.0, ans=0.125
+2024-08-29 16:00:35,210 INFO [train.py:1114] (0/4) Epoch 15, batch 1650, loss[loss=0.2116, simple_loss=0.2844, pruned_loss=0.05079, ctc_loss=0.09311, over 19643.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2796, pruned_loss=0.05554, ctc_loss=0.1045, over 3832052.74 frames. ], batch size: 59, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:00:35,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=194656.0, ans=0.0
+2024-08-29 16:00:46,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=194709.33333333334, ans=0.125
+2024-08-29 16:00:54,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=194709.33333333334, ans=0.0
+2024-08-29 16:00:58,969 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=194762.66666666666, ans=0.0
+2024-08-29 16:01:04,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=194762.66666666666, ans=0.125
+2024-08-29 16:01:11,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=194762.66666666666, ans=0.07
+2024-08-29 16:01:14,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=194816.0, ans=0.0
+2024-08-29 16:01:15,186 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.94 vs. limit=22.5
+2024-08-29 16:01:26,769 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.28 vs. limit=15.0
+2024-08-29 16:01:32,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=194869.33333333334, ans=0.125
+2024-08-29 16:01:38,037 INFO [train.py:1114] (0/4) Epoch 15, batch 1700, loss[loss=0.1783, simple_loss=0.24, pruned_loss=0.04232, ctc_loss=0.07976, over 19682.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2798, pruned_loss=0.05556, ctc_loss=0.1045, over 3846484.66 frames. ], batch size: 46, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:01:44,061 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.696e+02 2.083e+02 2.797e+02 4.802e+02, threshold=4.167e+02, percent-clipped=3.0
+2024-08-29 16:01:52,742 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=194976.0, ans=0.125
+2024-08-29 16:01:54,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=194976.0, ans=0.0
+2024-08-29 16:02:00,965 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=195029.33333333334, ans=0.125
+2024-08-29 16:02:09,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=195029.33333333334, ans=0.0
+2024-08-29 16:02:10,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=195029.33333333334, ans=0.125
+2024-08-29 16:02:18,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=195082.66666666666, ans=0.07
+2024-08-29 16:02:25,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=195082.66666666666, ans=0.125
+2024-08-29 16:02:32,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=195136.0, ans=0.09899494936611666
+2024-08-29 16:02:39,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=195189.33333333334, ans=0.125
+2024-08-29 16:02:40,469 INFO [train.py:1114] (0/4) Epoch 15, batch 1750, loss[loss=0.2011, simple_loss=0.2584, pruned_loss=0.05301, ctc_loss=0.09414, over 19659.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2797, pruned_loss=0.05562, ctc_loss=0.1046, over 3851315.28 frames. ], batch size: 45, lr: 9.93e-03, grad_scale: 32.0
+2024-08-29 16:03:02,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=195296.0, ans=0.125
+2024-08-29 16:03:05,459 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.28 vs. limit=22.5
+2024-08-29 16:03:18,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=195349.33333333334, ans=15.0
+2024-08-29 16:03:18,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=195349.33333333334, ans=0.2
+2024-08-29 16:03:28,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=195402.66666666666, ans=0.125
+2024-08-29 16:03:29,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=195402.66666666666, ans=0.125
+2024-08-29 16:03:37,900 INFO [train.py:1114] (0/4) Epoch 15, batch 1800, loss[loss=0.2222, simple_loss=0.2905, pruned_loss=0.05508, ctc_loss=0.1092, over 19617.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2795, pruned_loss=0.05539, ctc_loss=0.1042, over 3852338.26 frames. ], batch size: 55, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:03:43,639 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.702e+02 2.083e+02 2.690e+02 4.339e+02, threshold=4.166e+02, percent-clipped=1.0
+2024-08-29 16:04:05,408 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.22 vs. limit=15.0
+2024-08-29 16:04:23,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=195669.33333333334, ans=0.07
+2024-08-29 16:04:23,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=195669.33333333334, ans=0.125
+2024-08-29 16:04:26,988 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:04:34,674 INFO [train.py:1114] (0/4) Epoch 15, batch 1850, loss[loss=0.2097, simple_loss=0.2862, pruned_loss=0.04804, ctc_loss=0.09257, over 19592.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2794, pruned_loss=0.05543, ctc_loss=0.1043, over 3856614.83 frames. ], batch size: 57, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:04:50,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=195776.0, ans=0.125
+2024-08-29 16:05:02,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=195829.33333333334, ans=0.125
+2024-08-29 16:05:29,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=195936.0, ans=0.025
+2024-08-29 16:05:35,597 INFO [train.py:1114] (0/4) Epoch 15, batch 1900, loss[loss=0.2077, simple_loss=0.2851, pruned_loss=0.04659, ctc_loss=0.09306, over 19668.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2798, pruned_loss=0.05528, ctc_loss=0.104, over 3862380.04 frames. ], batch size: 59, lr: 9.91e-03, grad_scale: 32.0
+2024-08-29 16:05:40,976 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.724e+02 2.102e+02 3.115e+02 5.340e+02, threshold=4.204e+02, percent-clipped=3.0
+2024-08-29 16:05:50,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=196042.66666666666, ans=0.125
+2024-08-29 16:05:57,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=196042.66666666666, ans=0.95
+2024-08-29 16:06:02,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=196096.0, ans=0.125
+2024-08-29 16:06:02,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=196096.0, ans=0.125
+2024-08-29 16:06:10,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=196149.33333333334, ans=0.0
+2024-08-29 16:06:21,445 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=196202.66666666666, ans=0.1
+2024-08-29 16:06:32,338 INFO [train.py:1114] (0/4) Epoch 15, batch 1950, loss[loss=0.1992, simple_loss=0.2664, pruned_loss=0.04831, ctc_loss=0.08829, over 19589.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2804, pruned_loss=0.05524, ctc_loss=0.1038, over 3870699.90 frames. ], batch size: 52, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:06:55,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=196309.33333333334, ans=0.0
+2024-08-29 16:06:57,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=196309.33333333334, ans=0.0
+2024-08-29 16:06:59,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=196309.33333333334, ans=0.125
+2024-08-29 16:07:35,224 INFO [train.py:1114] (0/4) Epoch 15, batch 2000, loss[loss=0.1953, simple_loss=0.254, pruned_loss=0.04913, ctc_loss=0.09612, over 19619.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2805, pruned_loss=0.05508, ctc_loss=0.1035, over 3855755.56 frames. ], batch size: 45, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:07:41,140 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.618e+02 1.832e+02 2.132e+02 4.362e+02, threshold=3.664e+02, percent-clipped=1.0
+2024-08-29 16:08:10,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=196682.66666666666, ans=0.125
+2024-08-29 16:08:21,725 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.46 vs. limit=15.0
+2024-08-29 16:08:22,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=196736.0, ans=0.125
+2024-08-29 16:08:23,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=196736.0, ans=0.2
+2024-08-29 16:08:32,365 INFO [train.py:1114] (0/4) Epoch 15, batch 2050, loss[loss=0.1991, simple_loss=0.257, pruned_loss=0.05101, ctc_loss=0.09781, over 19726.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2796, pruned_loss=0.05504, ctc_loss=0.1035, over 3851699.63 frames. ], batch size: 47, lr: 9.89e-03, grad_scale: 32.0
+2024-08-29 16:08:44,417 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:09:17,363 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.01 vs. limit=22.5
+2024-08-29 16:09:24,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197002.66666666666, ans=0.1
+2024-08-29 16:09:27,726 INFO [train.py:1114] (0/4) Epoch 15, batch 2100, loss[loss=0.2271, simple_loss=0.2875, pruned_loss=0.05964, ctc_loss=0.1184, over 19763.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2788, pruned_loss=0.05468, ctc_loss=0.1028, over 3858793.46 frames. ], batch size: 54, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:09:33,397 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.691e+02 1.929e+02 2.354e+02 3.359e+02, threshold=3.858e+02, percent-clipped=0.0
+2024-08-29 16:09:38,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=197109.33333333334, ans=0.125
+2024-08-29 16:09:44,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=197109.33333333334, ans=0.125
+2024-08-29 16:10:06,170 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=4.475e-02
+2024-08-29 16:10:16,584 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.52 vs. limit=22.5
+2024-08-29 16:10:19,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=197269.33333333334, ans=0.2
+2024-08-29 16:10:25,583 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=197322.66666666666, ans=0.0
+2024-08-29 16:10:26,364 INFO [train.py:1114] (0/4) Epoch 15, batch 2150, loss[loss=0.2114, simple_loss=0.276, pruned_loss=0.05372, ctc_loss=0.09831, over 19595.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2783, pruned_loss=0.05452, ctc_loss=0.1023, over 3869908.97 frames. ], batch size: 52, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:10:31,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197322.66666666666, ans=0.1
+2024-08-29 16:10:36,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=197322.66666666666, ans=0.0
+2024-08-29 16:10:40,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=197376.0, ans=0.125
+2024-08-29 16:10:44,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197376.0, ans=0.1
+2024-08-29 16:12:14,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=197482.66666666666, ans=0.1
+2024-08-29 16:12:15,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=197482.66666666666, ans=0.025
+2024-08-29 16:12:31,457 INFO [train.py:1114] (0/4) Epoch 15, batch 2200, loss[loss=0.2079, simple_loss=0.2841, pruned_loss=0.04744, ctc_loss=0.09196, over 19603.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.279, pruned_loss=0.05474, ctc_loss=0.1029, over 3866967.17 frames. ], batch size: 57, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:12:36,859 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.787e+02 2.154e+02 2.730e+02 5.047e+02, threshold=4.308e+02, percent-clipped=4.0
+2024-08-29 16:12:54,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=197696.0, ans=0.0
+2024-08-29 16:12:57,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=197696.0, ans=0.125
+2024-08-29 16:13:25,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=197802.66666666666, ans=0.125
+2024-08-29 16:13:25,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=197802.66666666666, ans=0.125
+2024-08-29 16:13:29,255 INFO [train.py:1114] (0/4) Epoch 15, batch 2250, loss[loss=0.2243, simple_loss=0.2964, pruned_loss=0.05642, ctc_loss=0.09872, over 19614.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2795, pruned_loss=0.05509, ctc_loss=0.1034, over 3866993.22 frames. ], batch size: 55, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:13:31,742 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=197856.0, ans=0.125
+2024-08-29 16:13:35,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=197856.0, ans=0.125
+2024-08-29 16:14:45,283 INFO [train.py:1114] (0/4) Epoch 15, batch 2300, loss[loss=0.1824, simple_loss=0.248, pruned_loss=0.04301, ctc_loss=0.07703, over 19503.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2783, pruned_loss=0.05515, ctc_loss=0.1036, over 3861395.04 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 32.0
+2024-08-29 16:14:45,974 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.12 vs. limit=10.0
+2024-08-29 16:14:50,777 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.686e+02 1.986e+02 2.467e+02 4.553e+02, threshold=3.971e+02, percent-clipped=1.0
+2024-08-29 16:14:59,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=198176.0, ans=0.125
+2024-08-29 16:14:59,587 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.28 vs. limit=15.0
+2024-08-29 16:15:06,969 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=198176.0, ans=0.0
+2024-08-29 16:15:16,355 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.14 vs. limit=15.0
+2024-08-29 16:15:20,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=198282.66666666666, ans=0.125
+2024-08-29 16:15:21,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=198282.66666666666, ans=0.0
+2024-08-29 16:15:22,206 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=198282.66666666666, ans=0.0
+2024-08-29 16:15:30,441 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.05 vs. limit=8.0
+2024-08-29 16:15:43,134 INFO [train.py:1114] (0/4) Epoch 15, batch 2350, loss[loss=0.2468, simple_loss=0.3063, pruned_loss=0.06758, ctc_loss=0.1304, over 19705.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2779, pruned_loss=0.05496, ctc_loss=0.103, over 3864267.56 frames. ], batch size: 63, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:15:57,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=198442.66666666666, ans=0.09899494936611666
+2024-08-29 16:15:59,442 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=198442.66666666666, ans=0.125
+2024-08-29 16:16:16,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=198496.0, ans=0.0
+2024-08-29 16:16:38,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=198602.66666666666, ans=0.1
+2024-08-29 16:16:42,890 INFO [train.py:1114] (0/4) Epoch 15, batch 2400, loss[loss=0.2105, simple_loss=0.2844, pruned_loss=0.04959, ctc_loss=0.09354, over 19363.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.28, pruned_loss=0.0555, ctc_loss=0.104, over 3858534.45 frames. ], batch size: 67, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:16:48,397 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.659e+02 1.944e+02 2.492e+02 3.873e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-29 16:16:52,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=198656.0, ans=0.125
+2024-08-29 16:16:52,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=198656.0, ans=0.1
+2024-08-29 16:16:53,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=198709.33333333334, ans=0.0
+2024-08-29 16:16:53,567 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=198709.33333333334, ans=0.125
+2024-08-29 16:16:54,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=198709.33333333334, ans=0.2
+2024-08-29 16:16:59,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=198709.33333333334, ans=0.0
+2024-08-29 16:17:52,431 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.15 vs. limit=15.0
+2024-08-29 16:17:54,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=198709.33333333334, ans=0.1
+2024-08-29 16:18:01,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=198762.66666666666, ans=0.125
+2024-08-29 16:18:10,683 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.89 vs. limit=10.0
+2024-08-29 16:18:33,181 INFO [train.py:1114] (0/4) Epoch 15, batch 2450, loss[loss=0.2701, simple_loss=0.3043, pruned_loss=0.08653, ctc_loss=0.1573, over 13381.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2837, pruned_loss=0.05836, ctc_loss=0.1098, over 3731305.61 frames. ], batch size: 141, lr: 9.84e-03, grad_scale: 32.0
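+
+Note the grad_scale field: it doubled from 32.0 to 64.0 at batch 2350 and is back to 32.0 here at batch 2450, the signature of dynamic loss scaling under mixed precision, where the scale grows after a long overflow-free run and is halved when an inf/nan gradient appears (torch.cuda.amp.GradScaler uses the same halve/double policy). A sketch of that rule; the growth interval is an assumption:
+
+def update_grad_scale(scale, found_inf, steps_since_backoff, growth_interval=2000):
+    # Halve on overflow, double after a sustained overflow-free run.
+    if found_inf:
+        return scale * 0.5, 0
+    steps_since_backoff += 1
+    if steps_since_backoff >= growth_interval:
+        return scale * 2.0, 0
+    return scale, steps_since_backoff
+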
+2024-08-29 16:18:49,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=198976.0, ans=0.1
+2024-08-29 16:18:56,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=199029.33333333334, ans=0.125
+2024-08-29 16:18:56,839 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:19:04,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.09 vs. limit=15.0
+2024-08-29 16:19:16,586 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-15.pt
+2024-08-29 16:20:18,426 INFO [train.py:1114] (0/4) Epoch 16, batch 0, loss[loss=0.1889, simple_loss=0.2524, pruned_loss=0.04568, ctc_loss=0.08503, over 19833.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2524, pruned_loss=0.04568, ctc_loss=0.08503, over 19833.00 frames. ], batch size: 49, lr: 9.52e-03, grad_scale: 32.0
+2024-08-29 16:20:18,427 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-29 16:20:28,423 INFO [train.py:1146] (0/4) Epoch 16, validation: loss=0.1867, simple_loss=0.2755, pruned_loss=0.03636, ctc_loss=0.06317, over 944034.00 frames.
+2024-08-29 16:20:28,424 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13544MB
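+
+At batch 0 of each epoch the trainer pauses to run the full validation set (944034 frames here) and also reports the CUDA allocation high-water mark. A sketch of those two steps, assuming a standard PyTorch loop; the model(batch) interface returning (loss, num_frames) is hypothetical:
+
+import logging
+import torch
+
+@torch.no_grad()
+def compute_validation_loss(model, valid_dl, device):
+    model.eval()
+    loss_sum, frames = 0.0, 0.0
+    for batch in valid_dl:
+        loss, num_frames = model(batch)      # assumed interface
+        loss_sum += loss.item() * num_frames
+        frames += num_frames
+    model.train()
+    mem_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
+    logging.info(f"Maximum memory allocated so far is {mem_mb}MB")
+    return loss_sum / max(frames, 1.0)
+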
+2024-08-29 16:20:28,626 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=199130.66666666666, ans=0.0
+2024-08-29 16:20:37,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=199130.66666666666, ans=0.125
+2024-08-29 16:20:47,355 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.24 vs. limit=22.5
+2024-08-29 16:20:48,967 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.810e+02 1.998e+02 2.276e+02 3.528e+02, threshold=3.997e+02, percent-clipped=0.0
+2024-08-29 16:21:11,762 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:21:32,424 INFO [train.py:1114] (0/4) Epoch 16, batch 50, loss[loss=0.1812, simple_loss=0.2488, pruned_loss=0.04118, ctc_loss=0.0783, over 19702.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2814, pruned_loss=0.0567, ctc_loss=0.1074, over 845293.61 frames. ], batch size: 47, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:21:32,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=199397.33333333334, ans=0.125
+2024-08-29 16:22:27,461 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-29 16:22:30,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=199610.66666666666, ans=0.125
+2024-08-29 16:22:40,111 INFO [train.py:1114] (0/4) Epoch 16, batch 100, loss[loss=0.2142, simple_loss=0.2757, pruned_loss=0.05466, ctc_loss=0.1086, over 19711.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2831, pruned_loss=0.05656, ctc_loss=0.1072, over 1499669.28 frames. ], batch size: 51, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:23:08,063 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 1.815e+02 2.137e+02 2.569e+02 4.869e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-29 16:23:14,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=199717.33333333334, ans=0.125
+2024-08-29 16:23:15,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=199770.66666666666, ans=0.0
+2024-08-29 16:23:23,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=199770.66666666666, ans=0.1
+2024-08-29 16:35:02,696 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.94 vs. limit=22.5
+2024-08-29 16:35:20,728 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.18 vs. limit=15.0
+2024-08-29 16:37:11,209 INFO [train.py:1114] (0/4) Epoch 16, batch 150, loss[loss=0.1901, simple_loss=0.2471, pruned_loss=0.04851, ctc_loss=0.09035, over 19725.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2784, pruned_loss=0.05401, ctc_loss=0.1022, over 2028484.55 frames. ], batch size: 47, lr: 9.50e-03, grad_scale: 32.0
+2024-08-29 16:40:06,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=199984.0, ans=0.0
+2024-08-29 16:45:46,802 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:47:01,760 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.96 vs. limit=15.0
+2024-08-29 16:47:20,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=200144.0, ans=0.0
+2024-08-29 16:48:04,609 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:48:05,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=200197.33333333334, ans=0.0
+2024-08-29 16:48:05,813 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=200197.33333333334, ans=0.125
+2024-08-29 16:48:09,838 INFO [train.py:1114] (0/4) Epoch 16, batch 200, loss[loss=0.2386, simple_loss=0.2913, pruned_loss=0.06769, ctc_loss=0.1261, over 18100.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2773, pruned_loss=0.05353, ctc_loss=0.1012, over 2435806.75 frames. ], batch size: 85, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:49:17,163 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-29 16:49:58,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=200197.33333333334, ans=0.125
+2024-08-29 16:50:01,817 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=200197.33333333334, ans=0.0
+2024-08-29 16:53:29,823 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.834e+02 2.227e+02 2.815e+02 4.534e+02, threshold=4.454e+02, percent-clipped=1.0
+2024-08-29 16:54:13,168 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=200304.0, ans=0.125
+2024-08-29 16:55:31,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=200357.33333333334, ans=0.125
+2024-08-29 16:55:53,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=200357.33333333334, ans=0.2
+2024-08-29 16:56:00,209 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=200357.33333333334, ans=0.1
+2024-08-29 16:56:29,781 INFO [train.py:1114] (0/4) Epoch 16, batch 250, loss[loss=0.2291, simple_loss=0.2902, pruned_loss=0.06038, ctc_loss=0.118, over 19425.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2776, pruned_loss=0.05394, ctc_loss=0.1017, over 2756064.49 frames. ], batch size: 67, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:58:34,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=200517.33333333334, ans=0.025
+2024-08-29 16:58:41,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=200570.66666666666, ans=0.2
+2024-08-29 16:58:56,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=200570.66666666666, ans=0.2
+2024-08-29 16:59:18,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=200624.0, ans=0.125
+2024-08-29 17:03:13,498 INFO [train.py:1114] (0/4) Epoch 16, batch 300, loss[loss=0.2483, simple_loss=0.3013, pruned_loss=0.07164, ctc_loss=0.13, over 19542.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2775, pruned_loss=0.05398, ctc_loss=0.1018, over 3000525.35 frames. ], batch size: 61, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:03:25,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=200730.66666666666, ans=0.125
+2024-08-29 17:03:36,035 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.663e+02 1.972e+02 2.398e+02 4.674e+02, threshold=3.943e+02, percent-clipped=1.0
+2024-08-29 17:07:21,590 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=200890.66666666666, ans=0.0
+2024-08-29 17:07:54,518 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.18 vs. limit=10.0
+2024-08-29 17:08:30,922 INFO [train.py:1114] (0/4) Epoch 16, batch 350, loss[loss=0.189, simple_loss=0.2521, pruned_loss=0.04501, ctc_loss=0.08945, over 19764.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2784, pruned_loss=0.05422, ctc_loss=0.1023, over 3189776.14 frames. ], batch size: 48, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:08:34,701 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:10:15,068 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.81 vs. limit=15.0
+2024-08-29 17:10:17,359 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.31 vs. limit=15.0
+2024-08-29 17:11:34,872 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.07 vs. limit=12.0
+2024-08-29 17:13:16,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=201264.0, ans=0.1
+2024-08-29 17:13:17,607 INFO [train.py:1114] (0/4) Epoch 16, batch 400, loss[loss=0.2288, simple_loss=0.2882, pruned_loss=0.06133, ctc_loss=0.1171, over 19493.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2779, pruned_loss=0.054, ctc_loss=0.1018, over 3341541.24 frames. ], batch size: 54, lr: 9.47e-03, grad_scale: 32.0
+2024-08-29 17:13:20,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=201264.0, ans=0.1
+2024-08-29 17:13:42,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=201317.33333333334, ans=0.0
+2024-08-29 17:15:51,032 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.714e+02 1.905e+02 2.508e+02 3.565e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-29 17:17:00,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=201477.33333333334, ans=0.1
+2024-08-29 17:17:07,844 INFO [train.py:1114] (0/4) Epoch 16, batch 450, loss[loss=0.2373, simple_loss=0.3003, pruned_loss=0.06337, ctc_loss=0.1189, over 19623.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.278, pruned_loss=0.05428, ctc_loss=0.1021, over 3449745.03 frames. ], batch size: 55, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:21:13,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=201690.66666666666, ans=0.2
+2024-08-29 17:21:24,927 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.97 vs. limit=22.5
+2024-08-29 17:21:31,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=201744.0, ans=0.0
+2024-08-29 17:21:57,440 INFO [train.py:1114] (0/4) Epoch 16, batch 500, loss[loss=0.2126, simple_loss=0.2822, pruned_loss=0.05282, ctc_loss=0.0933, over 19663.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2764, pruned_loss=0.05346, ctc_loss=0.1005, over 3545183.97 frames. ], batch size: 63, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:22:03,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=201797.33333333334, ans=0.125
+2024-08-29 17:22:46,947 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.689e+02 2.169e+02 2.570e+02 5.370e+02, threshold=4.338e+02, percent-clipped=3.0
+2024-08-29 17:23:06,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=201850.66666666666, ans=0.125
+2024-08-29 17:23:16,445 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.06 vs. limit=15.0
+2024-08-29 17:23:26,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2.whitening_limit, batch_count=201904.0, ans=15.0
+2024-08-29 17:23:49,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=202010.66666666666, ans=0.05
+2024-08-29 17:24:02,844 INFO [train.py:1114] (0/4) Epoch 16, batch 550, loss[loss=0.1928, simple_loss=0.2731, pruned_loss=0.04042, ctc_loss=0.0789, over 19241.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2763, pruned_loss=0.05344, ctc_loss=0.1006, over 3606918.73 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:24:06,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=202064.0, ans=0.0
+2024-08-29 17:24:41,322 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=202170.66666666666, ans=0.0
+2024-08-29 17:24:50,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=202224.0, ans=0.125
+2024-08-29 17:24:51,148 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.83 vs. limit=15.0
+2024-08-29 17:24:51,505 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.01 vs. limit=15.0
+2024-08-29 17:24:56,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=202224.0, ans=0.125
+2024-08-29 17:25:11,147 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.23 vs. limit=6.0
+2024-08-29 17:25:21,558 INFO [train.py:1114] (0/4) Epoch 16, batch 600, loss[loss=0.2233, simple_loss=0.294, pruned_loss=0.05698, ctc_loss=0.09633, over 19441.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2763, pruned_loss=0.05321, ctc_loss=0.09994, over 3664117.11 frames. ], batch size: 67, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:25:25,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=202330.66666666666, ans=0.2
+2024-08-29 17:26:27,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.86 vs. limit=12.0
+2024-08-29 17:27:04,563 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.652e+02 1.934e+02 2.290e+02 3.719e+02, threshold=3.867e+02, percent-clipped=0.0
+2024-08-29 17:31:02,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-29 17:31:03,783 INFO [train.py:1114] (0/4) Epoch 16, batch 650, loss[loss=0.2092, simple_loss=0.2726, pruned_loss=0.05153, ctc_loss=0.1069, over 19769.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2759, pruned_loss=0.05311, ctc_loss=0.09994, over 3715349.48 frames. ], batch size: 54, lr: 9.44e-03, grad_scale: 32.0
+2024-08-29 17:32:25,225 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.69 vs. limit=22.5
+2024-08-29 17:32:46,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=202704.0, ans=0.0
+2024-08-29 17:32:49,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=202757.33333333334, ans=0.125
+2024-08-29 17:32:49,867 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202757.33333333334, ans=0.1
+2024-08-29 17:32:55,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=202757.33333333334, ans=0.125
+2024-08-29 17:34:02,127 INFO [train.py:1114] (0/4) Epoch 16, batch 700, loss[loss=0.1962, simple_loss=0.262, pruned_loss=0.04739, ctc_loss=0.08921, over 19717.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.276, pruned_loss=0.05293, ctc_loss=0.0995, over 3747773.79 frames. ], batch size: 51, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:34:02,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=202864.0, ans=0.125
+2024-08-29 17:34:09,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=202864.0, ans=0.1
+2024-08-29 17:34:15,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=202917.33333333334, ans=0.0
+2024-08-29 17:35:12,325 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.286e+02 1.755e+02 2.110e+02 2.761e+02 5.047e+02, threshold=4.220e+02, percent-clipped=5.0
+2024-08-29 17:35:35,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=202970.66666666666, ans=0.0
+2024-08-29 17:41:01,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=203077.33333333334, ans=0.125
+2024-08-29 17:41:53,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=203077.33333333334, ans=0.125
+2024-08-29 17:42:01,680 INFO [train.py:1114] (0/4) Epoch 16, batch 750, loss[loss=0.2356, simple_loss=0.2978, pruned_loss=0.06221, ctc_loss=0.1226, over 19493.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2758, pruned_loss=0.0529, ctc_loss=0.09961, over 3772968.21 frames. ], batch size: 54, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:42:08,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=203130.66666666666, ans=0.2
+2024-08-29 17:42:12,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=203130.66666666666, ans=0.1
+2024-08-29 17:42:35,620 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=203237.33333333334, ans=0.2
+2024-08-29 17:42:41,466 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.22 vs. limit=15.0
+2024-08-29 17:44:19,408 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.46 vs. limit=15.0
+2024-08-29 17:46:10,172 INFO [train.py:1114] (0/4) Epoch 16, batch 800, loss[loss=0.1972, simple_loss=0.2635, pruned_loss=0.04698, ctc_loss=0.0921, over 19828.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2757, pruned_loss=0.05294, ctc_loss=0.0997, over 3794945.14 frames. ], batch size: 49, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:46:10,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=203397.33333333334, ans=0.125
+2024-08-29 17:46:10,471 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:47:32,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=203397.33333333334, ans=0.0
+2024-08-29 17:48:02,084 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.05 vs. limit=22.5
+2024-08-29 17:48:15,894 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.745e+02 2.069e+02 2.556e+02 3.770e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-29 17:48:27,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=203504.0, ans=0.0
+2024-08-29 17:48:54,742 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.08 vs. limit=15.0
+2024-08-29 17:48:55,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=203610.66666666666, ans=0.125
+2024-08-29 17:49:06,950 INFO [train.py:1114] (0/4) Epoch 16, batch 850, loss[loss=0.2044, simple_loss=0.2803, pruned_loss=0.04638, ctc_loss=0.08933, over 19643.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2758, pruned_loss=0.05304, ctc_loss=0.0998, over 3814588.77 frames. ], batch size: 59, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:49:09,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=203664.0, ans=0.025
+2024-08-29 17:49:30,281 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.61 vs. limit=15.0
+2024-08-29 17:49:32,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=203770.66666666666, ans=0.125
+2024-08-29 17:50:13,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=203877.33333333334, ans=0.125
+2024-08-29 17:50:21,093 INFO [train.py:1114] (0/4) Epoch 16, batch 900, loss[loss=0.2148, simple_loss=0.2766, pruned_loss=0.05601, ctc_loss=0.1025, over 19403.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2765, pruned_loss=0.05344, ctc_loss=0.1005, over 3818681.23 frames. ], batch size: 48, lr: 9.41e-03, grad_scale: 32.0
+2024-08-29 17:50:34,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=203930.66666666666, ans=0.125
+2024-08-29 17:50:48,717 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.676e+02 1.827e+02 2.350e+02 4.099e+02, threshold=3.653e+02, percent-clipped=0.0
+2024-08-29 17:51:22,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.whiten.whitening_limit, batch_count=204037.33333333334, ans=15.0
+2024-08-29 17:53:37,507 INFO [train.py:1114] (0/4) Epoch 16, batch 950, loss[loss=0.1989, simple_loss=0.2625, pruned_loss=0.04887, ctc_loss=0.0937, over 19500.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2768, pruned_loss=0.05342, ctc_loss=0.1007, over 3821331.88 frames. ], batch size: 49, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:54:06,997 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=204197.33333333334, ans=0.125
+2024-08-29 17:54:21,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=204250.66666666666, ans=0.07
+2024-08-29 17:54:32,968 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.56 vs. limit=10.0
+2024-08-29 17:54:56,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=204357.33333333334, ans=0.125
+2024-08-29 17:54:57,742 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=204357.33333333334, ans=0.125
+2024-08-29 17:54:58,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=204357.33333333334, ans=0.125
+2024-08-29 17:55:46,650 INFO [train.py:1114] (0/4) Epoch 16, batch 1000, loss[loss=0.2144, simple_loss=0.2734, pruned_loss=0.05637, ctc_loss=0.1069, over 19863.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2774, pruned_loss=0.05375, ctc_loss=0.1012, over 3817099.88 frames. ], batch size: 52, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:56:07,211 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.649e+02 1.918e+02 2.268e+02 3.238e+02, threshold=3.836e+02, percent-clipped=0.0
+2024-08-29 17:57:03,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=204570.66666666666, ans=0.1
+2024-08-29 17:57:51,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=204677.33333333334, ans=0.2
+2024-08-29 17:57:54,925 INFO [train.py:1114] (0/4) Epoch 16, batch 1050, loss[loss=0.2117, simple_loss=0.2814, pruned_loss=0.05122, ctc_loss=0.09913, over 19848.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2771, pruned_loss=0.05377, ctc_loss=0.1013, over 3823437.21 frames. ], batch size: 57, lr: 9.39e-03, grad_scale: 32.0
+2024-08-29 17:58:31,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=204730.66666666666, ans=0.2
+2024-08-29 17:58:36,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=204730.66666666666, ans=0.125
+2024-08-29 17:59:17,539 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.14 vs. limit=10.0
+2024-08-29 17:59:50,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=204784.0, ans=0.0
+2024-08-29 17:59:54,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=204784.0, ans=0.0
+2024-08-29 18:00:19,805 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=204890.66666666666, ans=10.0
+2024-08-29 18:00:52,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=204997.33333333334, ans=0.0
+2024-08-29 18:00:53,294 INFO [train.py:1114] (0/4) Epoch 16, batch 1100, loss[loss=0.2029, simple_loss=0.2722, pruned_loss=0.0487, ctc_loss=0.09062, over 19578.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.277, pruned_loss=0.05386, ctc_loss=0.1015, over 3830357.68 frames. ], batch size: 52, lr: 9.39e-03, grad_scale: 16.0
+2024-08-29 18:00:55,803 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=204997.33333333334, ans=0.125
+2024-08-29 18:01:25,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=205050.66666666666, ans=0.1
+2024-08-29 18:01:27,932 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 1.694e+02 1.874e+02 2.325e+02 3.063e+02, threshold=3.748e+02, percent-clipped=0.0
+2024-08-29 18:01:42,188 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=205104.0, ans=0.125
+2024-08-29 18:02:43,504 INFO [train.py:1114] (0/4) Epoch 16, batch 1150, loss[loss=0.2199, simple_loss=0.2754, pruned_loss=0.0592, ctc_loss=0.1147, over 19586.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2769, pruned_loss=0.05409, ctc_loss=0.1019, over 3828742.07 frames. ], batch size: 52, lr: 9.38e-03, grad_scale: 16.0
+2024-08-29 18:03:07,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=205370.66666666666, ans=0.0
+2024-08-29 18:03:08,917 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.98 vs. limit=15.0
+2024-08-29 18:03:31,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=205477.33333333334, ans=0.04949747468305833
+2024-08-29 18:03:43,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=205530.66666666666, ans=0.0
+2024-08-29 18:03:45,116 INFO [train.py:1114] (0/4) Epoch 16, batch 1200, loss[loss=0.216, simple_loss=0.2795, pruned_loss=0.05588, ctc_loss=0.1018, over 19826.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2775, pruned_loss=0.05427, ctc_loss=0.1023, over 3825157.00 frames. ], batch size: 57, lr: 9.38e-03, grad_scale: 32.0
+2024-08-29 18:03:47,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=205530.66666666666, ans=0.07
+2024-08-29 18:04:06,312 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.725e+02 2.012e+02 2.470e+02 3.418e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-29 18:04:12,655 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.04 vs. limit=22.5
+2024-08-29 18:04:18,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=205637.33333333334, ans=0.2
+2024-08-29 18:04:18,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-29 18:04:50,748 INFO [train.py:1114] (0/4) Epoch 16, batch 1250, loss[loss=0.269, simple_loss=0.3141, pruned_loss=0.08125, ctc_loss=0.1535, over 19537.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2782, pruned_loss=0.05428, ctc_loss=0.1022, over 3843181.77 frames. ], batch size: 61, lr: 9.37e-03, grad_scale: 32.0
+2024-08-29 18:04:58,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=205797.33333333334, ans=0.025
+2024-08-29 18:05:35,782 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=205904.0, ans=0.125
+2024-08-29 18:05:55,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=205957.33333333334, ans=0.025
+2024-08-29 18:06:24,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=206010.66666666666, ans=0.2
+2024-08-29 18:06:35,900 INFO [train.py:1114] (0/4) Epoch 16, batch 1300, loss[loss=0.2407, simple_loss=0.296, pruned_loss=0.06776, ctc_loss=0.1248, over 18946.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2771, pruned_loss=0.05355, ctc_loss=0.1011, over 3847064.72 frames. ], batch size: 76, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:06:38,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=206064.0, ans=0.0
+2024-08-29 18:06:41,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=206064.0, ans=0.125
+2024-08-29 18:06:41,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.22 vs. limit=12.0
+2024-08-29 18:06:57,557 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.716e+02 2.090e+02 2.690e+02 4.268e+02, threshold=4.180e+02, percent-clipped=3.0
+2024-08-29 18:07:09,553 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.21 vs. limit=12.0
+2024-08-29 18:07:15,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=206224.0, ans=0.025
+2024-08-29 18:07:19,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=206224.0, ans=0.0
+2024-08-29 18:07:34,536 INFO [train.py:1114] (0/4) Epoch 16, batch 1350, loss[loss=0.2033, simple_loss=0.2743, pruned_loss=0.04793, ctc_loss=0.09084, over 19769.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2767, pruned_loss=0.05321, ctc_loss=0.1004, over 3859016.68 frames. ], batch size: 54, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:08:18,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=206437.33333333334, ans=0.2
+2024-08-29 18:08:23,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=206490.66666666666, ans=0.125
+2024-08-29 18:09:52,340 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=6.0
+2024-08-29 18:10:09,706 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=206544.0, ans=0.125
+2024-08-29 18:10:12,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=206544.0, ans=0.025
+2024-08-29 18:10:12,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=206544.0, ans=0.125
+2024-08-29 18:10:56,014 INFO [train.py:1114] (0/4) Epoch 16, batch 1400, loss[loss=0.1664, simple_loss=0.2294, pruned_loss=0.03752, ctc_loss=0.07102, over 19644.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2764, pruned_loss=0.05318, ctc_loss=0.1005, over 3865651.80 frames. ], batch size: 46, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:11:03,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=206597.33333333334, ans=0.125
+2024-08-29 18:12:41,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=206597.33333333334, ans=0.09899494936611666
+2024-08-29 18:13:15,203 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.659e+02 1.830e+02 2.117e+02 3.619e+02, threshold=3.659e+02, percent-clipped=0.0
+2024-08-29 18:13:31,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=206704.0, ans=0.125
+2024-08-29 18:14:30,016 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=206757.33333333334, ans=0.125
+2024-08-29 18:14:31,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=206757.33333333334, ans=0.125
+2024-08-29 18:14:40,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=206810.66666666666, ans=0.0
+2024-08-29 18:14:43,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=206810.66666666666, ans=0.125
+2024-08-29 18:14:49,675 INFO [train.py:1114] (0/4) Epoch 16, batch 1450, loss[loss=0.2157, simple_loss=0.2826, pruned_loss=0.05385, ctc_loss=0.1026, over 19666.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2771, pruned_loss=0.05348, ctc_loss=0.1011, over 3862662.88 frames. ], batch size: 63, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:15:03,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=206864.0, ans=0.125
+2024-08-29 18:15:44,767 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=207024.0, ans=0.1
+2024-08-29 18:16:04,448 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.16 vs. limit=12.0
+2024-08-29 18:16:10,746 INFO [train.py:1114] (0/4) Epoch 16, batch 1500, loss[loss=0.2058, simple_loss=0.2871, pruned_loss=0.04497, ctc_loss=0.08629, over 19605.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2776, pruned_loss=0.05349, ctc_loss=0.101, over 3862701.02 frames. ], batch size: 57, lr: 9.34e-03, grad_scale: 32.0
+2024-08-29 18:16:32,418 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.680e+02 1.893e+02 2.490e+02 3.994e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-29 18:16:34,099 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.13 vs. limit=6.0
+2024-08-29 18:16:34,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=207237.33333333334, ans=0.1
+2024-08-29 18:16:35,279 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.27 vs. limit=15.0
+2024-08-29 18:16:36,042 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=207237.33333333334, ans=0.5
+2024-08-29 18:16:37,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=207237.33333333334, ans=0.0
+2024-08-29 18:17:19,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=207344.0, ans=0.1
+2024-08-29 18:17:24,227 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.09 vs. limit=15.0
+2024-08-29 18:17:34,469 INFO [train.py:1114] (0/4) Epoch 16, batch 1550, loss[loss=0.2309, simple_loss=0.2942, pruned_loss=0.06051, ctc_loss=0.1167, over 19591.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2778, pruned_loss=0.05378, ctc_loss=0.1016, over 3846710.12 frames. ], batch size: 60, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:17:47,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=207450.66666666666, ans=0.125
+2024-08-29 18:17:59,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=207504.0, ans=0.0
+2024-08-29 18:18:00,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=207504.0, ans=0.2
+2024-08-29 18:19:42,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=207610.66666666666, ans=0.025
+2024-08-29 18:19:55,360 INFO [train.py:1114] (0/4) Epoch 16, batch 1600, loss[loss=0.2088, simple_loss=0.272, pruned_loss=0.0525, ctc_loss=0.1013, over 19829.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.277, pruned_loss=0.05343, ctc_loss=0.101, over 3835215.71 frames. ], batch size: 57, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:20:28,806 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.22 vs. limit=15.0
+2024-08-29 18:21:42,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=207664.0, ans=0.125
+2024-08-29 18:21:55,755 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.773e+02 1.965e+02 2.508e+02 5.321e+02, threshold=3.930e+02, percent-clipped=3.0
+2024-08-29 18:21:55,966 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=207717.33333333334, ans=0.125
+2024-08-29 18:22:14,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=207824.0, ans=0.04949747468305833
+2024-08-29 18:22:52,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=207877.33333333334, ans=0.125
+2024-08-29 18:23:01,487 INFO [train.py:1114] (0/4) Epoch 16, batch 1650, loss[loss=0.1982, simple_loss=0.2782, pruned_loss=0.0426, ctc_loss=0.08261, over 19647.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2773, pruned_loss=0.0539, ctc_loss=0.102, over 3832569.16 frames. ], batch size: 59, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:23:09,832 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.73 vs. limit=15.0
+2024-08-29 18:23:29,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=207984.0, ans=0.125
+2024-08-29 18:24:40,014 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.58 vs. limit=5.0
+2024-08-29 18:24:41,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=208090.66666666666, ans=0.05
+2024-08-29 18:24:56,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=208144.0, ans=0.125
+2024-08-29 18:26:13,696 INFO [train.py:1114] (0/4) Epoch 16, batch 1700, loss[loss=0.1783, simple_loss=0.238, pruned_loss=0.04258, ctc_loss=0.08366, over 19662.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2773, pruned_loss=0.05377, ctc_loss=0.1019, over 3846213.36 frames. ], batch size: 46, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:26:34,625 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.759e+02 2.180e+02 2.878e+02 5.111e+02, threshold=4.361e+02, percent-clipped=4.0
+2024-08-29 18:26:49,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=208357.33333333334, ans=0.125
+2024-08-29 18:27:00,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=208410.66666666666, ans=0.0
+2024-08-29 18:27:13,585 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.64 vs. limit=15.0
+2024-08-29 18:27:13,923 INFO [train.py:1114] (0/4) Epoch 16, batch 1750, loss[loss=0.169, simple_loss=0.2368, pruned_loss=0.03701, ctc_loss=0.06785, over 19699.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2769, pruned_loss=0.05377, ctc_loss=0.1015, over 3850569.47 frames. ], batch size: 45, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:27:15,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=208464.0, ans=0.125
+2024-08-29 18:27:50,810 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.96 vs. limit=12.0
+2024-08-29 18:27:51,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=208517.33333333334, ans=0.125
+2024-08-29 18:27:54,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=208517.33333333334, ans=0.1
+2024-08-29 18:29:17,950 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=208624.0, ans=0.125
+2024-08-29 18:29:20,578 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.72 vs. limit=22.5
+2024-08-29 18:29:22,737 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.49 vs. limit=15.0
+2024-08-29 18:29:23,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=208624.0, ans=0.125
+2024-08-29 18:30:17,798 INFO [train.py:1114] (0/4) Epoch 16, batch 1800, loss[loss=0.2226, simple_loss=0.2805, pruned_loss=0.0589, ctc_loss=0.117, over 19609.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2771, pruned_loss=0.05405, ctc_loss=0.1018, over 3853165.70 frames. ], batch size: 55, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:30:22,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=208730.66666666666, ans=0.0
+2024-08-29 18:30:41,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=208784.0, ans=0.125
+2024-08-29 18:30:45,747 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 1.693e+02 1.985e+02 2.381e+02 4.228e+02, threshold=3.971e+02, percent-clipped=0.0
+2024-08-29 18:31:20,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=208944.0, ans=0.0
+2024-08-29 18:31:23,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=208944.0, ans=0.125
+2024-08-29 18:31:30,931 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten.whitening_limit, batch_count=208944.0, ans=15.0
+2024-08-29 18:31:41,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=208944.0, ans=0.125
+2024-08-29 18:31:45,953 INFO [train.py:1114] (0/4) Epoch 16, batch 1850, loss[loss=0.2382, simple_loss=0.3049, pruned_loss=0.06175, ctc_loss=0.1202, over 19587.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2762, pruned_loss=0.05334, ctc_loss=0.1003, over 3856066.70 frames. ], batch size: 57, lr: 9.30e-03, grad_scale: 32.0
+2024-08-29 18:32:16,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=208997.33333333334, ans=0.1
+2024-08-29 18:32:27,709 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.59 vs. limit=15.0
+2024-08-29 18:32:54,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=209157.33333333334, ans=0.125
+2024-08-29 18:32:57,445 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.whiten.whitening_limit, batch_count=209157.33333333334, ans=15.0
+2024-08-29 18:33:04,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=209157.33333333334, ans=0.125
+2024-08-29 18:33:06,591 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.89 vs. limit=12.0
+2024-08-29 18:33:13,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=209210.66666666666, ans=0.0
+2024-08-29 18:33:17,368 INFO [train.py:1114] (0/4) Epoch 16, batch 1900, loss[loss=0.2187, simple_loss=0.2936, pruned_loss=0.05123, ctc_loss=0.1034, over 19654.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2767, pruned_loss=0.05325, ctc_loss=0.1001, over 3861856.26 frames. ], batch size: 59, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:33:28,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.32 vs. limit=15.0
+2024-08-29 18:33:40,796 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.785e+02 2.354e+02 2.964e+02 6.037e+02, threshold=4.708e+02, percent-clipped=9.0
+2024-08-29 18:33:46,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=209370.66666666666, ans=0.125
+2024-08-29 18:33:47,084 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.04 vs. limit=15.0
+2024-08-29 18:33:55,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=209424.0, ans=0.0
+2024-08-29 18:33:58,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=209424.0, ans=0.125
+2024-08-29 18:34:30,827 INFO [train.py:1114] (0/4) Epoch 16, batch 1950, loss[loss=0.2053, simple_loss=0.2661, pruned_loss=0.05257, ctc_loss=0.09872, over 19579.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2776, pruned_loss=0.05325, ctc_loss=0.1001, over 3870276.46 frames. ], batch size: 52, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:34:37,080 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.54 vs. limit=15.0
+2024-08-29 18:35:33,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=209690.66666666666, ans=0.125
+2024-08-29 18:35:39,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=209744.0, ans=0.1
+2024-08-29 18:35:41,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=209744.0, ans=0.125
+2024-08-29 18:35:42,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209744.0, ans=0.1
+2024-08-29 18:35:48,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=209744.0, ans=0.0
+2024-08-29 18:35:51,684 INFO [train.py:1114] (0/4) Epoch 16, batch 2000, loss[loss=0.1946, simple_loss=0.2479, pruned_loss=0.05156, ctc_loss=0.09541, over 19680.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2782, pruned_loss=0.05359, ctc_loss=0.1008, over 3856251.01 frames. ], batch size: 45, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:35:54,472 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-29 18:35:59,408 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.46 vs. limit=22.5
+2024-08-29 18:36:00,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=209797.33333333334, ans=0.5
+2024-08-29 18:36:05,404 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:36:05,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=209850.66666666666, ans=0.125
+2024-08-29 18:36:05,532 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=209850.66666666666, ans=0.2
+2024-08-29 18:36:07,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=209850.66666666666, ans=0.125
+2024-08-29 18:36:08,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=209850.66666666666, ans=0.2
+2024-08-29 18:36:13,157 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.666e+02 1.888e+02 2.185e+02 3.516e+02, threshold=3.775e+02, percent-clipped=0.0
+2024-08-29 18:37:02,162 INFO [train.py:1114] (0/4) Epoch 16, batch 2050, loss[loss=0.1932, simple_loss=0.2522, pruned_loss=0.04872, ctc_loss=0.09173, over 19724.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2772, pruned_loss=0.05359, ctc_loss=0.1007, over 3852418.76 frames. ], batch size: 47, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:37:47,214 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.09 vs. limit=15.0
+2024-08-29 18:38:21,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=210170.66666666666, ans=10.0
+2024-08-29 18:38:59,620 INFO [train.py:1114] (0/4) Epoch 16, batch 2100, loss[loss=0.2214, simple_loss=0.285, pruned_loss=0.05701, ctc_loss=0.1093, over 19785.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2767, pruned_loss=0.05317, ctc_loss=0.1002, over 3859205.20 frames. ], batch size: 54, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:39:02,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=210330.66666666666, ans=0.1
+2024-08-29 18:39:11,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=210384.0, ans=0.125
+2024-08-29 18:39:22,238 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.792e+02 2.112e+02 2.675e+02 4.176e+02, threshold=4.223e+02, percent-clipped=3.0
+2024-08-29 18:39:40,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=210490.66666666666, ans=0.0
+2024-08-29 18:39:56,495 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.07 vs. limit=15.0
+2024-08-29 18:39:57,981 INFO [train.py:1114] (0/4) Epoch 16, batch 2150, loss[loss=0.2026, simple_loss=0.2657, pruned_loss=0.0516, ctc_loss=0.09064, over 19604.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2756, pruned_loss=0.05266, ctc_loss=0.09911, over 3869138.34 frames. ], batch size: 52, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:39:59,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=210597.33333333334, ans=0.125
+2024-08-29 18:40:09,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=210650.66666666666, ans=0.0
+2024-08-29 18:40:27,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=210650.66666666666, ans=6.0
+2024-08-29 18:40:45,986 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.32 vs. limit=6.0
+2024-08-29 18:41:06,167 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.42 vs. limit=12.0
+2024-08-29 18:41:06,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=210810.66666666666, ans=0.1
+2024-08-29 18:41:08,971 INFO [train.py:1114] (0/4) Epoch 16, batch 2200, loss[loss=0.2045, simple_loss=0.2745, pruned_loss=0.04844, ctc_loss=0.09367, over 19590.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2757, pruned_loss=0.05278, ctc_loss=0.09945, over 3866681.74 frames. ], batch size: 57, lr: 9.26e-03, grad_scale: 32.0
+2024-08-29 18:41:09,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=210864.0, ans=0.0
+2024-08-29 18:41:11,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=210864.0, ans=0.0
+2024-08-29 18:41:22,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=210917.33333333334, ans=0.0
+2024-08-29 18:41:29,787 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.757e+02 2.042e+02 2.598e+02 4.148e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-08-29 18:42:04,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=210970.66666666666, ans=0.0
+2024-08-29 18:42:15,440 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.02 vs. limit=15.0
+2024-08-29 18:42:28,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=211077.33333333334, ans=0.125
+2024-08-29 18:42:31,569 INFO [train.py:1114] (0/4) Epoch 16, batch 2250, loss[loss=0.2206, simple_loss=0.2898, pruned_loss=0.05386, ctc_loss=0.1091, over 19629.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2764, pruned_loss=0.05311, ctc_loss=0.1001, over 3866935.30 frames. ], batch size: 55, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:42:36,795 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.37 vs. limit=15.0
+2024-08-29 18:42:42,983 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.56 vs. limit=15.0
+2024-08-29 18:42:46,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211184.0, ans=0.1
+2024-08-29 18:43:17,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=211344.0, ans=0.0
+2024-08-29 18:43:27,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=211344.0, ans=0.0
+2024-08-29 18:44:22,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=211344.0, ans=0.125
+2024-08-29 18:44:24,268 INFO [train.py:1114] (0/4) Epoch 16, batch 2300, loss[loss=0.2157, simple_loss=0.2691, pruned_loss=0.05912, ctc_loss=0.1101, over 19502.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.276, pruned_loss=0.05349, ctc_loss=0.1007, over 3861191.67 frames. ], batch size: 49, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:44:29,586 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.81 vs. limit=22.5
+2024-08-29 18:45:03,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=211450.66666666666, ans=0.0
+2024-08-29 18:45:09,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=211450.66666666666, ans=0.125
+2024-08-29 18:45:10,438 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.785e+02 2.121e+02 2.618e+02 4.213e+02, threshold=4.241e+02, percent-clipped=2.0
+2024-08-29 18:45:18,431 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:45:26,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=211557.33333333334, ans=0.025
+2024-08-29 18:45:26,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=211557.33333333334, ans=0.0
+2024-08-29 18:45:34,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=211610.66666666666, ans=0.2
+2024-08-29 18:45:39,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=211610.66666666666, ans=0.2
+2024-08-29 18:45:59,061 INFO [train.py:1114] (0/4) Epoch 16, batch 2350, loss[loss=0.2325, simple_loss=0.2935, pruned_loss=0.06398, ctc_loss=0.1091, over 19649.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2763, pruned_loss=0.05378, ctc_loss=0.1008, over 3862969.39 frames. ], batch size: 63, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:46:10,091 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=4.979e-02
+2024-08-29 18:46:40,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=211824.0, ans=0.125
+2024-08-29 18:46:41,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=211824.0, ans=0.0
+2024-08-29 18:46:43,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=211824.0, ans=0.125
+2024-08-29 18:46:57,270 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=211877.33333333334, ans=0.1
+2024-08-29 18:47:00,341 INFO [train.py:1114] (0/4) Epoch 16, batch 2400, loss[loss=0.2214, simple_loss=0.2937, pruned_loss=0.05396, ctc_loss=0.1029, over 19453.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2783, pruned_loss=0.05441, ctc_loss=0.1017, over 3857514.17 frames. ], batch size: 67, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:47:03,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=211930.66666666666, ans=0.125
+2024-08-29 18:47:09,866 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.35 vs. limit=10.0
+2024-08-29 18:47:20,732 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.800e+02 2.132e+02 2.653e+02 4.129e+02, threshold=4.264e+02, percent-clipped=0.0
+2024-08-29 18:47:56,917 INFO [train.py:1114] (0/4) Epoch 16, batch 2450, loss[loss=0.2771, simple_loss=0.3084, pruned_loss=0.08855, ctc_loss=0.1719, over 13465.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2821, pruned_loss=0.05729, ctc_loss=0.1075, over 3733785.23 frames. ], batch size: 140, lr: 9.23e-03, grad_scale: 32.0
+2024-08-29 18:48:07,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=212197.33333333334, ans=0.125
+2024-08-29 18:48:31,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=212304.0, ans=0.025
+2024-08-29 18:48:45,437 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-16.pt
+2024-08-29 18:55:35,482 INFO [train.py:1114] (0/4) Epoch 17, batch 0, loss[loss=0.2431, simple_loss=0.286, pruned_loss=0.07327, ctc_loss=0.1339, over 19800.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.286, pruned_loss=0.07327, ctc_loss=0.1339, over 19800.00 frames. ], batch size: 49, lr: 8.95e-03, grad_scale: 32.0
+2024-08-29 18:55:35,484 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-29 18:56:01,361 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.1628, 4.4569, 3.9691, 4.1827], device='cuda:0')
+2024-08-29 18:56:01,791 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.8841, 2.0599, 3.4462, 3.5285], device='cuda:0')
+2024-08-29 18:56:04,690 INFO [train.py:1146] (0/4) Epoch 17, validation: loss=0.1843, simple_loss=0.2733, pruned_loss=0.03544, ctc_loss=0.06098, over 944034.00 frames.
+2024-08-29 18:56:04,692 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13691MB
+2024-08-29 18:56:54,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-29 18:56:56,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-29 18:58:11,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-29 18:58:20,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=212512.0, ans=0.125
+2024-08-29 18:58:20,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=212512.0, ans=0.125
+2024-08-29 18:58:29,111 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.51 vs. limit=12.0
+2024-08-29 18:58:30,846 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.824e+02 2.030e+02 2.233e+02 3.073e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-29 18:58:34,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.96 vs. limit=22.5
+2024-08-29 19:05:26,875 INFO [train.py:1114] (0/4) Epoch 17, batch 50, loss[loss=0.1819, simple_loss=0.2478, pruned_loss=0.04161, ctc_loss=0.08164, over 19711.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2777, pruned_loss=0.05312, ctc_loss=0.1011, over 844772.04 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:07:29,742 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.60 vs. limit=10.0
+2024-08-29 19:07:31,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=212672.0, ans=0.125
+2024-08-29 19:07:53,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=212725.33333333334, ans=0.0
+2024-08-29 19:08:30,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=212832.0, ans=0.125
+2024-08-29 19:08:52,256 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.30 vs. limit=15.0
+2024-08-29 19:08:52,562 INFO [train.py:1114] (0/4) Epoch 17, batch 100, loss[loss=0.1848, simple_loss=0.255, pruned_loss=0.04151, ctc_loss=0.07903, over 19727.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2796, pruned_loss=0.05439, ctc_loss=0.103, over 1498273.11 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:09:16,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=213045.33333333334, ans=0.125
+2024-08-29 19:09:25,912 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.707e+02 1.910e+02 2.335e+02 3.363e+02, threshold=3.820e+02, percent-clipped=0.0
+2024-08-29 19:09:29,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=213098.66666666666, ans=0.5
+2024-08-29 19:09:37,946 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:09:58,148 INFO [train.py:1114] (0/4) Epoch 17, batch 150, loss[loss=0.2011, simple_loss=0.2579, pruned_loss=0.05256, ctc_loss=0.09771, over 19685.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2774, pruned_loss=0.05373, ctc_loss=0.1016, over 2027311.53 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:12:29,049 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-40000.pt
+2024-08-29 19:16:11,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=213365.33333333334, ans=0.125
+2024-08-29 19:16:25,552 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:16:28,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=213472.0, ans=0.125
+2024-08-29 19:16:29,779 INFO [train.py:1114] (0/4) Epoch 17, batch 200, loss[loss=0.2251, simple_loss=0.2875, pruned_loss=0.05821, ctc_loss=0.1158, over 18082.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2761, pruned_loss=0.05311, ctc_loss=0.1003, over 2434813.02 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:24:57,869 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=213472.0, ans=0.125
+2024-08-29 19:25:04,090 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.17 vs. limit=22.5
+2024-08-29 19:27:12,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213525.33333333334, ans=0.1
+2024-08-29 19:27:13,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=213525.33333333334, ans=0.125
+2024-08-29 19:27:57,285 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.724e+02 1.931e+02 2.405e+02 4.691e+02, threshold=3.862e+02, percent-clipped=4.0
+2024-08-29 19:28:33,442 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=213685.33333333334, ans=0.1
+2024-08-29 19:28:38,487 INFO [train.py:1114] (0/4) Epoch 17, batch 250, loss[loss=0.226, simple_loss=0.2832, pruned_loss=0.06089, ctc_loss=0.1177, over 19421.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2756, pruned_loss=0.05304, ctc_loss=0.09995, over 2755282.98 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:30:01,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=213952.0, ans=0.2
+2024-08-29 19:30:03,451 INFO [train.py:1114] (0/4) Epoch 17, batch 300, loss[loss=0.251, simple_loss=0.3031, pruned_loss=0.07208, ctc_loss=0.1367, over 19530.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2748, pruned_loss=0.05262, ctc_loss=0.09908, over 3000773.43 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:32:02,229 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.689e+02 1.972e+02 2.447e+02 4.331e+02, threshold=3.945e+02, percent-clipped=1.0
+2024-08-29 19:32:19,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214165.33333333334, ans=0.1
+2024-08-29 19:32:41,692 INFO [train.py:1114] (0/4) Epoch 17, batch 350, loss[loss=0.2015, simple_loss=0.2611, pruned_loss=0.05113, ctc_loss=0.09911, over 19745.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2755, pruned_loss=0.05256, ctc_loss=0.09925, over 3190658.25 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:32:51,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=214272.0, ans=0.1
+2024-08-29 19:34:08,229 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.08 vs. limit=15.0
+2024-08-29 19:34:18,290 INFO [train.py:1114] (0/4) Epoch 17, batch 400, loss[loss=0.2022, simple_loss=0.2822, pruned_loss=0.04416, ctc_loss=0.08466, over 19506.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2753, pruned_loss=0.0523, ctc_loss=0.09851, over 3343295.33 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:34:38,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=214538.66666666666, ans=0.125
+2024-08-29 19:34:54,286 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=214592.0, ans=0.0
+2024-08-29 19:35:31,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-29 19:35:39,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-29 19:36:30,690 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.665e+02 1.964e+02 2.553e+02 4.238e+02, threshold=3.929e+02, percent-clipped=2.0
+2024-08-29 19:37:24,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=214698.66666666666, ans=0.0
+2024-08-29 19:37:57,089 INFO [train.py:1114] (0/4) Epoch 17, batch 450, loss[loss=0.2257, simple_loss=0.2935, pruned_loss=0.05695, ctc_loss=0.1102, over 19619.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2756, pruned_loss=0.05251, ctc_loss=0.09866, over 3449981.31 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:37:58,021 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.72 vs. limit=15.0
+2024-08-29 19:38:13,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=214805.33333333334, ans=0.125
+2024-08-29 19:39:29,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=214912.0, ans=0.0
+2024-08-29 19:39:53,966 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=214965.33333333334, ans=0.0
+2024-08-29 19:40:26,604 INFO [train.py:1114] (0/4) Epoch 17, batch 500, loss[loss=0.2231, simple_loss=0.2932, pruned_loss=0.05562, ctc_loss=0.1042, over 19663.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2747, pruned_loss=0.05188, ctc_loss=0.09752, over 3545189.32 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:40:27,339 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.39 vs. limit=15.0
+2024-08-29 19:40:29,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=215072.0, ans=0.125
+2024-08-29 19:40:30,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215072.0, ans=0.1
+2024-08-29 19:40:30,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=215072.0, ans=0.1
+2024-08-29 19:40:36,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=215072.0, ans=0.2
+2024-08-29 19:41:55,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=215178.66666666666, ans=0.035
+2024-08-29 19:42:38,137 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.765e+02 1.983e+02 2.603e+02 4.687e+02, threshold=3.966e+02, percent-clipped=3.0
+2024-08-29 19:43:10,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=215232.0, ans=0.0
+2024-08-29 19:43:45,805 INFO [train.py:1114] (0/4) Epoch 17, batch 550, loss[loss=0.2106, simple_loss=0.2854, pruned_loss=0.04923, ctc_loss=0.09326, over 19206.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2747, pruned_loss=0.05185, ctc_loss=0.09746, over 3607486.88 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-29 19:44:10,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=215338.66666666666, ans=0.5
+2024-08-29 19:45:00,487 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=215392.0, ans=0.125
+2024-08-29 19:45:28,568 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.87 vs. limit=22.5
+2024-08-29 19:46:02,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=215498.66666666666, ans=0.0
+2024-08-29 19:46:13,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_ff3.min_abs, batch_count=215552.0, ans=0.2
+2024-08-29 19:47:01,450 INFO [train.py:1114] (0/4) Epoch 17, batch 600, loss[loss=0.2217, simple_loss=0.2863, pruned_loss=0.05838, ctc_loss=0.1006, over 19446.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2747, pruned_loss=0.05163, ctc_loss=0.09708, over 3664553.91 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:47:39,641 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.81 vs. limit=15.0
+2024-08-29 19:47:49,417 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.74 vs. limit=15.0
+2024-08-29 19:48:09,363 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.31 vs. limit=10.0
+2024-08-29 19:48:19,060 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.690e+02 1.951e+02 2.307e+02 4.172e+02, threshold=3.901e+02, percent-clipped=2.0
+2024-08-29 19:48:27,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=215765.33333333334, ans=0.0
+2024-08-29 19:48:27,630 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.50 vs. limit=22.5
+2024-08-29 19:49:06,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=215818.66666666666, ans=0.125
+2024-08-29 19:49:21,617 INFO [train.py:1114] (0/4) Epoch 17, batch 650, loss[loss=0.1963, simple_loss=0.2638, pruned_loss=0.04705, ctc_loss=0.08668, over 19762.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2739, pruned_loss=0.05141, ctc_loss=0.09665, over 3715413.88 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:49:42,926 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=215925.33333333334, ans=0.125
+2024-08-29 19:49:44,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=215925.33333333334, ans=0.125
+2024-08-29 19:50:28,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=216032.0, ans=0.2
+2024-08-29 19:51:32,034 INFO [train.py:1114] (0/4) Epoch 17, batch 700, loss[loss=0.2105, simple_loss=0.2803, pruned_loss=0.05173, ctc_loss=0.09324, over 19728.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.275, pruned_loss=0.05171, ctc_loss=0.09712, over 3747076.79 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:51:33,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216138.66666666666, ans=0.1
+2024-08-29 19:51:56,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=216245.33333333334, ans=0.2
+2024-08-29 19:52:43,598 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.757e+02 1.978e+02 2.439e+02 3.670e+02, threshold=3.956e+02, percent-clipped=0.0
+2024-08-29 19:52:48,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=216298.66666666666, ans=0.125
+2024-08-29 19:53:32,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=216298.66666666666, ans=0.125
+2024-08-29 19:53:39,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=216352.0, ans=0.09899494936611666
+2024-08-29 19:53:46,884 INFO [train.py:1114] (0/4) Epoch 17, batch 750, loss[loss=0.1971, simple_loss=0.2772, pruned_loss=0.04325, ctc_loss=0.07651, over 19495.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2748, pruned_loss=0.05155, ctc_loss=0.09683, over 3773742.85 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:54:30,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=216405.33333333334, ans=15.0
+2024-08-29 20:06:06,360 INFO [train.py:1050] (0/4) Caught exception: [Rank 0] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=98948, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600000 milliseconds before timing out..
+2024-08-29 20:06:06,361 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-0.pt
+2024-08-29 20:06:09,238 INFO [train.py:1413] (0/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-bd023447-34aa-53ff-3278-e0a594ac807a.pt
+2024-08-29 20:07:54,230 INFO [train.py:1419] (0/4) features shape: torch.Size([53, 1497, 80])
+2024-08-29 20:07:54,232 INFO [train.py:1423] (0/4) num tokens: 4003
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-1
new file mode 100644
index 0000000000000000000000000000000000000000..083bebac6759fd1199f37434b78ebd346fab4a62
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-1
@@ -0,0 +1,1139 @@
+2024-08-29 13:08:38,318 INFO [train.py:1182] (1/4) Training started
+2024-08-29 13:08:38,319 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-29 13:08:38,321 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 13:08:38,322 INFO [train.py:1212] (1/4) About to create model
+2024-08-29 13:08:39,835 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-29 13:08:39,891 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 13:08:59,021 INFO [train.py:1231] (1/4) Using DDP
+2024-08-29 13:09:40,461 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-29 13:09:40,782 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-29 13:09:40,783 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 13:09:40,789 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-29 13:09:40,789 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-29 13:09:40,789 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-29 13:09:40,790 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-29 13:09:40,790 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-29 13:09:40,790 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-29 13:09:42,376 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-29 13:09:42,378 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-29 13:09:42,379 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-29 13:09:42,446 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-29 13:09:42,770 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-29 13:09:42,771 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 13:13:40,052 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.58 vs. limit=3.0
+2024-08-29 13:14:18,565 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-29 13:14:21,278 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 13:14:38,617 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 13:14:45,672 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 13:15:10,806 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 13:15:11,674 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=4.55 vs. limit=3.0
+2024-08-29 13:15:12,353 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 13:15:12,372 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-29 13:16:15,181 INFO [train.py:1114] (1/4) Epoch 14, batch 0, loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2686, pruned_loss=0.05116, ctc_loss=0.1002, over 19403.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:16:15,181 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-29 13:16:31,404 INFO [train.py:1146] (1/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 13:16:31,405 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-29 13:20:30,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.71 vs. limit=15.0
+2024-08-29 13:24:20,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=172688.0, ans=0.125
+2024-08-29 13:24:21,050 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.40 vs. limit=12.0
+2024-08-29 13:24:48,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=172688.0, ans=0.0
+2024-08-29 13:25:58,932 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=172794.66666666666, ans=0.1
+2024-08-29 13:26:36,726 INFO [train.py:1114] (1/4) Epoch 14, batch 50, loss[loss=0.2173, simple_loss=0.2665, pruned_loss=0.06156, ctc_loss=0.1127, over 19699.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2858, pruned_loss=0.05956, ctc_loss=0.1123, over 844835.93 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:26:49,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=172848.0, ans=0.0
+2024-08-29 13:27:08,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=172848.0, ans=0.0
+2024-08-29 13:30:25,789 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=172901.33333333334, ans=0.1
+2024-08-29 13:30:32,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-29 13:30:46,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=172954.66666666666, ans=0.0
+2024-08-29 13:30:46,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=172954.66666666666, ans=0.0
+2024-08-29 13:32:29,768 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.749e+02 1.974e+02 2.504e+02 4.970e+02, threshold=3.948e+02, percent-clipped=4.0
+2024-08-29 13:32:58,194 INFO [train.py:1114] (1/4) Epoch 14, batch 100, loss[loss=0.22, simple_loss=0.2774, pruned_loss=0.05873, ctc_loss=0.1131, over 19712.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.287, pruned_loss=0.05992, ctc_loss=0.1133, over 1499995.38 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:33:31,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=173168.0, ans=0.125
+2024-08-29 13:34:41,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=173274.66666666666, ans=0.125
+2024-08-29 13:35:52,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=173328.0, ans=0.125
+2024-08-29 13:36:02,973 INFO [train.py:1114] (1/4) Epoch 14, batch 150, loss[loss=0.2055, simple_loss=0.2633, pruned_loss=0.05313, ctc_loss=0.1038, over 19717.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2835, pruned_loss=0.05793, ctc_loss=0.1095, over 2027349.01 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:36:03,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=173381.33333333334, ans=0.025
+2024-08-29 13:36:04,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=173381.33333333334, ans=0.2
+2024-08-29 13:36:21,054 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.45 vs. limit=22.5
+2024-08-29 13:36:22,013 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=173434.66666666666, ans=0.04949747468305833
+2024-08-29 13:36:47,030 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=173488.0, ans=0.125
+2024-08-29 13:37:19,620 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.730e+02 2.035e+02 2.422e+02 3.683e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-29 13:37:21,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=173594.66666666666, ans=0.125
+2024-08-29 13:37:30,501 INFO [train.py:1114] (1/4) Epoch 14, batch 200, loss[loss=0.2308, simple_loss=0.2892, pruned_loss=0.06283, ctc_loss=0.1167, over 18213.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2814, pruned_loss=0.05694, ctc_loss=0.1071, over 2434363.68 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:37:31,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.01 vs. limit=15.0
+2024-08-29 13:37:33,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=173648.0, ans=0.2
+2024-08-29 13:37:35,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=173648.0, ans=0.125
+2024-08-29 13:37:43,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=173701.33333333334, ans=0.1
+2024-08-29 13:37:50,008 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=6.41 vs. limit=15.0
+2024-08-29 13:40:14,108 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=2.762e-03
+2024-08-29 13:40:51,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=173808.0, ans=0.125
+2024-08-29 13:41:36,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=173861.33333333334, ans=0.0
+2024-08-29 13:42:16,150 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=13.55 vs. limit=15.0
+2024-08-29 13:42:18,842 INFO [train.py:1114] (1/4) Epoch 14, batch 250, loss[loss=0.2318, simple_loss=0.2932, pruned_loss=0.06315, ctc_loss=0.1102, over 19420.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.282, pruned_loss=0.05716, ctc_loss=0.1079, over 2753707.57 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:43:03,852 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.47 vs. limit=6.0
+2024-08-29 13:43:57,807 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=174021.33333333334, ans=0.2
+2024-08-29 13:44:01,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=174074.66666666666, ans=0.125
+2024-08-29 13:44:11,808 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.51 vs. limit=15.0
+2024-08-29 13:44:13,473 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.787e+02 2.022e+02 2.717e+02 4.953e+02, threshold=4.043e+02, percent-clipped=2.0
+2024-08-29 13:44:52,056 INFO [train.py:1114] (1/4) Epoch 14, batch 300, loss[loss=0.264, simple_loss=0.3105, pruned_loss=0.07908, ctc_loss=0.1484, over 19548.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2822, pruned_loss=0.05737, ctc_loss=0.1081, over 2999566.01 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:45:16,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=174288.0, ans=0.025
+2024-08-29 13:46:17,851 INFO [train.py:1114] (1/4) Epoch 14, batch 350, loss[loss=0.1752, simple_loss=0.2415, pruned_loss=0.04017, ctc_loss=0.07129, over 19763.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2829, pruned_loss=0.05757, ctc_loss=0.1084, over 3190345.80 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 16.0
+2024-08-29 13:46:24,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=174448.0, ans=0.125
+2024-08-29 13:47:25,029 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=174608.0, ans=0.0
+2024-08-29 13:47:33,992 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.62 vs. limit=22.5
+2024-08-29 13:47:39,416 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.793e+02 2.058e+02 2.658e+02 4.429e+02, threshold=4.116e+02, percent-clipped=3.0
+2024-08-29 13:48:31,275 INFO [train.py:1114] (1/4) Epoch 14, batch 400, loss[loss=0.2267, simple_loss=0.2899, pruned_loss=0.05903, ctc_loss=0.1134, over 19500.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2829, pruned_loss=0.05788, ctc_loss=0.109, over 3341930.78 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:50:41,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=174874.66666666666, ans=0.0
+2024-08-29 13:50:54,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=174928.0, ans=0.0
+2024-08-29 13:50:57,540 INFO [train.py:1114] (1/4) Epoch 14, batch 450, loss[loss=0.2046, simple_loss=0.2781, pruned_loss=0.04812, ctc_loss=0.08732, over 19622.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2829, pruned_loss=0.05801, ctc_loss=0.109, over 3450368.11 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:51:48,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=175194.66666666666, ans=0.125
+2024-08-29 13:51:50,579 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.713e+02 1.900e+02 2.415e+02 4.159e+02, threshold=3.800e+02, percent-clipped=2.0
+2024-08-29 13:52:16,314 INFO [train.py:1114] (1/4) Epoch 14, batch 500, loss[loss=0.2297, simple_loss=0.2931, pruned_loss=0.06113, ctc_loss=0.1101, over 19650.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.282, pruned_loss=0.05761, ctc_loss=0.1083, over 3545951.97 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:52:40,716 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=175301.33333333334, ans=0.2
+2024-08-29 13:53:02,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=175408.0, ans=0.2
+2024-08-29 13:53:12,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=175461.33333333334, ans=0.125
+2024-08-29 13:53:17,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=175461.33333333334, ans=0.1
+2024-08-29 13:53:23,922 INFO [train.py:1114] (1/4) Epoch 14, batch 550, loss[loss=0.2261, simple_loss=0.2908, pruned_loss=0.05747, ctc_loss=0.1162, over 19266.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2818, pruned_loss=0.05716, ctc_loss=0.1076, over 3608026.80 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:53:30,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=175514.66666666666, ans=0.05
+2024-08-29 13:53:41,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=175568.0, ans=0.125
+2024-08-29 13:53:41,856 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.81 vs. limit=22.5
+2024-08-29 13:53:50,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=175621.33333333334, ans=0.125
+2024-08-29 13:53:50,893 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=175621.33333333334, ans=0.0
+2024-08-29 13:54:12,500 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.85 vs. limit=6.0
+2024-08-29 13:54:18,075 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.725e+02 1.963e+02 2.348e+02 4.063e+02, threshold=3.927e+02, percent-clipped=2.0
+2024-08-29 13:54:18,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=175728.0, ans=0.0
+2024-08-29 13:54:22,760 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.40 vs. limit=15.0
+2024-08-29 13:54:23,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=175728.0, ans=0.1
+2024-08-29 13:54:27,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=175781.33333333334, ans=0.025
+2024-08-29 13:54:28,216 INFO [train.py:1114] (1/4) Epoch 14, batch 600, loss[loss=0.2724, simple_loss=0.3185, pruned_loss=0.082, ctc_loss=0.1559, over 19321.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2823, pruned_loss=0.05751, ctc_loss=0.1082, over 3664666.00 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:54:32,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=175781.33333333334, ans=0.2
+2024-08-29 13:54:39,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=175834.66666666666, ans=0.1
+2024-08-29 13:54:58,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=175888.0, ans=0.1
+2024-08-29 13:55:04,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=175941.33333333334, ans=0.0
+2024-08-29 13:55:05,811 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.10 vs. limit=15.0
+2024-08-29 13:55:22,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=175994.66666666666, ans=0.1
+2024-08-29 13:55:30,814 INFO [train.py:1114] (1/4) Epoch 14, batch 650, loss[loss=0.2148, simple_loss=0.2849, pruned_loss=0.05261, ctc_loss=0.09861, over 19752.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2823, pruned_loss=0.05771, ctc_loss=0.1087, over 3715941.04 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:55:42,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=176101.33333333334, ans=0.0
+2024-08-29 13:55:45,665 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=176101.33333333334, ans=0.0
+2024-08-29 13:55:59,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=176154.66666666666, ans=0.0
+2024-08-29 13:56:04,291 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=176154.66666666666, ans=0.04949747468305833
+2024-08-29 13:56:14,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=176208.0, ans=0.125
+2024-08-29 13:56:20,649 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.54 vs. limit=15.0
+2024-08-29 13:56:24,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176261.33333333334, ans=0.1
+2024-08-29 13:56:24,636 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.743e+02 2.058e+02 2.560e+02 4.338e+02, threshold=4.116e+02, percent-clipped=4.0
+2024-08-29 13:56:28,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=176261.33333333334, ans=0.2
+2024-08-29 13:56:34,649 INFO [train.py:1114] (1/4) Epoch 14, batch 700, loss[loss=0.1908, simple_loss=0.2601, pruned_loss=0.0447, ctc_loss=0.08039, over 19721.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2824, pruned_loss=0.05746, ctc_loss=0.1082, over 3748643.73 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:56:40,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=176314.66666666666, ans=0.0
+2024-08-29 13:57:35,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=176368.0, ans=0.125
+2024-08-29 13:57:43,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176421.33333333334, ans=0.1
+2024-08-29 13:58:03,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=176528.0, ans=0.125
+2024-08-29 13:58:08,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=176528.0, ans=0.025
+2024-08-29 13:58:09,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=176528.0, ans=0.2
+2024-08-29 13:58:12,845 INFO [train.py:1114] (1/4) Epoch 14, batch 750, loss[loss=0.2208, simple_loss=0.284, pruned_loss=0.05653, ctc_loss=0.1115, over 19855.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2817, pruned_loss=0.05711, ctc_loss=0.1074, over 3776206.67 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:58:20,540 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.82 vs. limit=22.5
+2024-08-29 13:58:25,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=176634.66666666666, ans=0.2
+2024-08-29 13:58:25,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=176634.66666666666, ans=0.2
+2024-08-29 13:58:35,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=176634.66666666666, ans=0.125
+2024-08-29 13:58:35,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=176634.66666666666, ans=0.07
+2024-08-29 13:58:47,353 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=176688.0, ans=0.125
+2024-08-29 13:58:53,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=176741.33333333334, ans=0.125
+2024-08-29 13:59:06,503 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.910e+02 2.277e+02 2.884e+02 4.780e+02, threshold=4.554e+02, percent-clipped=3.0
+2024-08-29 13:59:28,730 INFO [train.py:1114] (1/4) Epoch 14, batch 800, loss[loss=0.1821, simple_loss=0.2497, pruned_loss=0.04109, ctc_loss=0.08083, over 19804.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2821, pruned_loss=0.05739, ctc_loss=0.1081, over 3797126.98 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:59:28,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=176848.0, ans=0.025
+2024-08-29 13:59:31,455 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.02 vs. limit=12.0
+2024-08-29 13:59:38,875 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.25 vs. limit=15.0
+2024-08-29 13:59:38,915 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.27 vs. limit=15.0
+2024-08-29 13:59:44,580 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.37 vs. limit=6.0
+2024-08-29 14:01:15,119 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.56 vs. limit=10.0
+2024-08-29 14:02:36,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=177061.33333333334, ans=0.125
+2024-08-29 14:02:41,444 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=177061.33333333334, ans=0.125
+2024-08-29 14:02:49,620 INFO [train.py:1114] (1/4) Epoch 14, batch 850, loss[loss=0.2069, simple_loss=0.2805, pruned_loss=0.04845, ctc_loss=0.09075, over 19658.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.282, pruned_loss=0.0576, ctc_loss=0.1084, over 3815224.61 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:02:55,211 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.96 vs. limit=15.0
+2024-08-29 14:03:01,946 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:03:04,436 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=177168.0, ans=0.0
+2024-08-29 14:03:40,317 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.703e+02 1.970e+02 2.385e+02 3.831e+02, threshold=3.939e+02, percent-clipped=0.0
+2024-08-29 14:03:49,898 INFO [train.py:1114] (1/4) Epoch 14, batch 900, loss[loss=0.207, simple_loss=0.2675, pruned_loss=0.05334, ctc_loss=0.09936, over 19433.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2824, pruned_loss=0.05798, ctc_loss=0.109, over 3818547.13 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:04:15,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=177488.0, ans=0.125
+2024-08-29 14:04:45,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=177594.66666666666, ans=0.125
+2024-08-29 14:04:52,320 INFO [train.py:1114] (1/4) Epoch 14, batch 950, loss[loss=0.2387, simple_loss=0.2805, pruned_loss=0.0714, ctc_loss=0.1354, over 19512.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2823, pruned_loss=0.05788, ctc_loss=0.1089, over 3821519.80 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:04:54,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=177648.0, ans=0.125
+2024-08-29 14:05:01,232 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=177648.0, ans=0.0
+2024-08-29 14:05:03,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-29 14:05:07,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-29 14:05:13,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=177701.33333333334, ans=0.125
+2024-08-29 14:05:26,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=177754.66666666666, ans=0.125
+2024-08-29 14:06:02,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=177808.0, ans=0.125
+2024-08-29 14:06:18,421 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.94 vs. limit=15.0
+2024-08-29 14:06:19,919 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.335e+02 1.740e+02 1.996e+02 2.581e+02 3.979e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-29 14:07:04,890 INFO [train.py:1114] (1/4) Epoch 14, batch 1000, loss[loss=0.1933, simple_loss=0.2643, pruned_loss=0.04452, ctc_loss=0.08323, over 19863.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2833, pruned_loss=0.0584, ctc_loss=0.1099, over 3818921.51 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:07:10,372 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:07:19,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=177914.66666666666, ans=0.125
+2024-08-29 14:08:29,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=178021.33333333334, ans=0.125
+2024-08-29 14:08:30,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=178074.66666666666, ans=0.05
+2024-08-29 14:08:33,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=178074.66666666666, ans=0.125
+2024-08-29 14:08:56,378 INFO [train.py:1114] (1/4) Epoch 14, batch 1050, loss[loss=0.2189, simple_loss=0.2868, pruned_loss=0.05452, ctc_loss=0.1048, over 19861.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2825, pruned_loss=0.0581, ctc_loss=0.1093, over 3824616.59 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:09:09,206 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.57 vs. limit=10.0
+2024-08-29 14:09:09,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=178234.66666666666, ans=0.125
+2024-08-29 14:09:14,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=178234.66666666666, ans=0.025
+2024-08-29 14:09:20,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=178288.0, ans=0.07
+2024-08-29 14:09:27,842 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=178288.0, ans=0.125
+2024-08-29 14:09:30,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=178288.0, ans=0.125
+2024-08-29 14:09:44,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=178394.66666666666, ans=0.0
+2024-08-29 14:09:46,659 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.811e+02 2.215e+02 2.668e+02 4.320e+02, threshold=4.429e+02, percent-clipped=1.0
+2024-08-29 14:10:24,260 INFO [train.py:1114] (1/4) Epoch 14, batch 1100, loss[loss=0.2085, simple_loss=0.2777, pruned_loss=0.05102, ctc_loss=0.09312, over 19567.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2822, pruned_loss=0.05792, ctc_loss=0.109, over 3832045.48 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:13:37,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.43 vs. limit=15.0
+2024-08-29 14:13:47,690 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.32 vs. limit=15.0
+2024-08-29 14:15:40,954 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.20 vs. limit=12.0
+2024-08-29 14:19:03,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=178661.33333333334, ans=0.125
+2024-08-29 14:19:08,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=178661.33333333334, ans=0.125
+2024-08-29 14:19:15,469 INFO [train.py:1114] (1/4) Epoch 14, batch 1150, loss[loss=0.184, simple_loss=0.2537, pruned_loss=0.04113, ctc_loss=0.08028, over 19594.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2819, pruned_loss=0.05792, ctc_loss=0.1092, over 3830601.15 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:19:46,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=178768.0, ans=0.125
+2024-08-29 14:20:02,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=178768.0, ans=0.125
+2024-08-29 14:20:22,688 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:20:36,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=178821.33333333334, ans=0.125
+2024-08-29 14:21:23,614 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.78 vs. limit=10.0
+2024-08-29 14:21:36,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=178874.66666666666, ans=0.125
+2024-08-29 14:22:13,328 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.701e+02 1.876e+02 2.352e+02 3.362e+02, threshold=3.753e+02, percent-clipped=0.0
+2024-08-29 14:22:33,801 INFO [train.py:1114] (1/4) Epoch 14, batch 1200, loss[loss=0.2405, simple_loss=0.3038, pruned_loss=0.06372, ctc_loss=0.1245, over 19831.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2832, pruned_loss=0.05834, ctc_loss=0.1101, over 3826118.36 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:23:26,052 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.09 vs. limit=12.0
+2024-08-29 14:23:59,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-29 14:24:14,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-29 14:24:14,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=179034.66666666666, ans=0.0
+2024-08-29 14:24:30,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=179088.0, ans=0.125
+2024-08-29 14:24:42,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=179141.33333333334, ans=0.1
+2024-08-29 14:25:06,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=179141.33333333334, ans=0.2
+2024-08-29 14:29:53,986 INFO [train.py:1114] (1/4) Epoch 14, batch 1250, loss[loss=0.2384, simple_loss=0.2996, pruned_loss=0.06547, ctc_loss=0.1157, over 19505.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2832, pruned_loss=0.05783, ctc_loss=0.1089, over 3843744.84 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:29:54,509 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.64 vs. limit=15.0
+2024-08-29 14:30:09,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=179248.0, ans=15.0
+2024-08-29 14:32:08,119 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.03 vs. limit=22.5
+2024-08-29 14:32:11,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_ff2.min_abs, batch_count=179301.33333333334, ans=0.1
+2024-08-29 14:32:12,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=179354.66666666666, ans=0.0
+2024-08-29 14:32:29,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=179408.0, ans=0.2
+2024-08-29 14:32:35,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=179408.0, ans=0.125
+2024-08-29 14:32:39,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=179461.33333333334, ans=15.0
+2024-08-29 14:32:41,060 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.718e+02 2.120e+02 2.679e+02 4.271e+02, threshold=4.240e+02, percent-clipped=3.0
+2024-08-29 14:32:51,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=179461.33333333334, ans=0.0
+2024-08-29 14:33:10,211 INFO [train.py:1114] (1/4) Epoch 14, batch 1300, loss[loss=0.2809, simple_loss=0.3229, pruned_loss=0.088, ctc_loss=0.1575, over 18980.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2824, pruned_loss=0.05739, ctc_loss=0.1081, over 3845894.64 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:34:01,446 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-29 14:34:03,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=179514.66666666666, ans=0.05
+2024-08-29 14:34:09,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179568.0, ans=0.1
+2024-08-29 14:34:40,327 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=179621.33333333334, ans=0.025
+2024-08-29 14:34:41,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179621.33333333334, ans=0.125
+2024-08-29 14:35:27,707 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.97 vs. limit=22.5
+2024-08-29 14:35:42,416 INFO [train.py:1114] (1/4) Epoch 14, batch 1350, loss[loss=0.2211, simple_loss=0.2842, pruned_loss=0.05783, ctc_loss=0.1058, over 19772.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2821, pruned_loss=0.05729, ctc_loss=0.1077, over 3856351.84 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:36:09,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.71 vs. limit=22.5
+2024-08-29 14:36:16,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=179834.66666666666, ans=0.125
+2024-08-29 14:36:28,522 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=179888.0, ans=0.125
+2024-08-29 14:40:29,507 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.654e+02 1.881e+02 2.431e+02 4.376e+02, threshold=3.761e+02, percent-clipped=1.0
+2024-08-29 14:41:36,136 INFO [train.py:1114] (1/4) Epoch 14, batch 1400, loss[loss=0.1907, simple_loss=0.2498, pruned_loss=0.04794, ctc_loss=0.08931, over 19666.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2817, pruned_loss=0.05731, ctc_loss=0.1076, over 3863430.61 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:41:36,409 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=180048.0, ans=0.1
+2024-08-29 14:41:37,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=180048.0, ans=0.0
+2024-08-29 14:41:54,715 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.45 vs. limit=12.0
+2024-08-29 14:42:39,827 INFO [train.py:1114] (1/4) Epoch 14, batch 1450, loss[loss=0.2385, simple_loss=0.2986, pruned_loss=0.06508, ctc_loss=0.1208, over 19663.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2824, pruned_loss=0.0576, ctc_loss=0.1082, over 3861905.87 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:42:41,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=180314.66666666666, ans=0.125
+2024-08-29 14:42:44,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=180314.66666666666, ans=0.0
+2024-08-29 14:42:47,193 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=180314.66666666666, ans=0.07
+2024-08-29 14:42:48,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=180314.66666666666, ans=0.0
+2024-08-29 14:43:05,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=180421.33333333334, ans=0.2
+2024-08-29 14:44:10,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180474.66666666666, ans=0.1
+2024-08-29 14:44:19,128 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.30 vs. limit=12.0
+2024-08-29 14:44:19,806 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.699e+02 1.929e+02 2.254e+02 4.469e+02, threshold=3.859e+02, percent-clipped=1.0
+2024-08-29 14:45:03,439 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=180528.0, ans=0.125
+2024-08-29 14:45:06,818 INFO [train.py:1114] (1/4) Epoch 14, batch 1500, loss[loss=0.2226, simple_loss=0.2898, pruned_loss=0.05664, ctc_loss=0.1055, over 19586.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2828, pruned_loss=0.05767, ctc_loss=0.1082, over 3861529.31 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:45:07,242 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:45:12,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180581.33333333334, ans=0.125
+2024-08-29 14:45:16,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=180581.33333333334, ans=0.125
+2024-08-29 14:45:16,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=12.63 vs. limit=15.0
+2024-08-29 14:45:46,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=180634.66666666666, ans=0.2
+2024-08-29 14:46:22,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=180794.66666666666, ans=0.0
+2024-08-29 14:46:25,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=180794.66666666666, ans=0.035
+2024-08-29 14:46:27,475 INFO [train.py:1114] (1/4) Epoch 14, batch 1550, loss[loss=0.2563, simple_loss=0.3165, pruned_loss=0.07186, ctc_loss=0.1313, over 19621.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2829, pruned_loss=0.05773, ctc_loss=0.1086, over 3846645.55 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:46:55,639 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=180954.66666666666, ans=0.1
+2024-08-29 14:48:27,881 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=181008.0, ans=0.0
+2024-08-29 14:48:37,412 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.702e+02 2.011e+02 2.397e+02 3.479e+02, threshold=4.023e+02, percent-clipped=0.0
+2024-08-29 14:48:47,139 INFO [train.py:1114] (1/4) Epoch 14, batch 1600, loss[loss=0.2097, simple_loss=0.2816, pruned_loss=0.04984, ctc_loss=0.09518, over 19843.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2822, pruned_loss=0.05739, ctc_loss=0.108, over 3835823.69 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:51:09,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=181274.66666666666, ans=0.1
+2024-08-29 14:51:10,901 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.74 vs. limit=15.0
+2024-08-29 14:51:13,537 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:51:21,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=181328.0, ans=0.0
+2024-08-29 14:51:29,810 INFO [train.py:1114] (1/4) Epoch 14, batch 1650, loss[loss=0.232, simple_loss=0.2957, pruned_loss=0.06139, ctc_loss=0.1139, over 19621.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2819, pruned_loss=0.05736, ctc_loss=0.108, over 3832200.11 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:51:46,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-29 14:51:48,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-29 14:51:51,002 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.94 vs. limit=15.0
+2024-08-29 14:51:53,383 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.42 vs. limit=10.0
+2024-08-29 14:52:28,555 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.808e+02 2.247e+02 2.720e+02 5.029e+02, threshold=4.494e+02, percent-clipped=3.0
+2024-08-29 14:52:31,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=181594.66666666666, ans=0.125
+2024-08-29 14:52:38,137 INFO [train.py:1114] (1/4) Epoch 14, batch 1700, loss[loss=0.1954, simple_loss=0.2557, pruned_loss=0.04734, ctc_loss=0.1012, over 19669.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2817, pruned_loss=0.05723, ctc_loss=0.1077, over 3846575.53 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:52:51,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=181701.33333333334, ans=0.5
+2024-08-29 14:52:51,288 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.36 vs. limit=15.0
+2024-08-29 14:53:07,746 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.23 vs. limit=10.0
+2024-08-29 14:53:11,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=181808.0, ans=0.125
+2024-08-29 14:53:13,214 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.61 vs. limit=15.0
+2024-08-29 14:53:24,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=181861.33333333334, ans=0.2
+2024-08-29 14:53:43,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=181861.33333333334, ans=0.0
+2024-08-29 14:53:46,192 INFO [train.py:1114] (1/4) Epoch 14, batch 1750, loss[loss=0.2128, simple_loss=0.266, pruned_loss=0.05882, ctc_loss=0.1046, over 19673.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2813, pruned_loss=0.05692, ctc_loss=0.107, over 3852118.27 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:53:51,222 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.00 vs. limit=15.0
+2024-08-29 14:53:52,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=181914.66666666666, ans=0.07
+2024-08-29 14:53:54,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=181914.66666666666, ans=0.125
+2024-08-29 14:53:54,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=181914.66666666666, ans=0.07
+2024-08-29 14:54:36,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=182021.33333333334, ans=0.2
+2024-08-29 14:54:36,517 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.86 vs. limit=6.0
+2024-08-29 14:55:00,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=182021.33333333334, ans=0.0
+2024-08-29 14:55:02,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=182074.66666666666, ans=0.5
+2024-08-29 14:55:07,779 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:56:25,486 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.791e+02 2.085e+02 2.712e+02 5.021e+02, threshold=4.170e+02, percent-clipped=2.0
+2024-08-29 14:56:30,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=182128.0, ans=0.125
+2024-08-29 14:56:34,697 INFO [train.py:1114] (1/4) Epoch 14, batch 1800, loss[loss=0.1978, simple_loss=0.2758, pruned_loss=0.04307, ctc_loss=0.08433, over 19615.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.282, pruned_loss=0.05724, ctc_loss=0.1078, over 3853355.10 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:57:23,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=182234.66666666666, ans=0.125
+2024-08-29 14:57:43,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=182341.33333333334, ans=0.1
+2024-08-29 14:57:56,277 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=182394.66666666666, ans=0.0
+2024-08-29 14:57:56,438 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.25 vs. limit=12.0
+2024-08-29 14:57:58,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=182394.66666666666, ans=0.07
+2024-08-29 14:58:05,941 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.98 vs. limit=22.5
+2024-08-29 14:58:07,409 INFO [train.py:1114] (1/4) Epoch 14, batch 1850, loss[loss=0.1958, simple_loss=0.274, pruned_loss=0.04251, ctc_loss=0.08157, over 19584.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2816, pruned_loss=0.05706, ctc_loss=0.1074, over 3856493.44 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:58:08,163 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.31 vs. limit=15.0
+2024-08-29 14:58:12,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=182448.0, ans=0.0
+2024-08-29 15:00:44,749 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:03:25,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=182608.0, ans=0.125
+2024-08-29 15:03:29,627 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.679e+02 1.934e+02 2.278e+02 6.084e+02, threshold=3.868e+02, percent-clipped=1.0
+2024-08-29 15:03:40,811 INFO [train.py:1114] (1/4) Epoch 14, batch 1900, loss[loss=0.2211, simple_loss=0.2908, pruned_loss=0.05518, ctc_loss=0.1026, over 19663.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2822, pruned_loss=0.05708, ctc_loss=0.1075, over 3860795.35 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:03:42,551 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.74 vs. limit=12.0
+2024-08-29 15:03:47,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=182714.66666666666, ans=0.125
+2024-08-29 15:03:48,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=182714.66666666666, ans=0.125
+2024-08-29 15:03:54,006 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.36 vs. limit=6.0
+2024-08-29 15:04:54,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=182928.0, ans=0.0
+2024-08-29 15:04:56,086 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=182928.0, ans=0.2
+2024-08-29 15:05:18,945 INFO [train.py:1114] (1/4) Epoch 14, batch 1950, loss[loss=0.2162, simple_loss=0.2737, pruned_loss=0.0582, ctc_loss=0.1056, over 19584.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2829, pruned_loss=0.05716, ctc_loss=0.1075, over 3869924.75 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:05:25,679 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=182981.33333333334, ans=0.1
+2024-08-29 15:05:33,002 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.04 vs. limit=15.0
+2024-08-29 15:05:35,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=183034.66666666666, ans=0.125
+2024-08-29 15:05:39,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=183034.66666666666, ans=0.0
+2024-08-29 15:05:51,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=183088.0, ans=0.125
+2024-08-29 15:05:51,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=183088.0, ans=0.125
+2024-08-29 15:05:56,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=183141.33333333334, ans=0.05
+2024-08-29 15:06:04,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=183194.66666666666, ans=0.125
+2024-08-29 15:06:06,640 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.683e+02 1.939e+02 2.319e+02 3.642e+02, threshold=3.877e+02, percent-clipped=0.0
+2024-08-29 15:06:45,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=183194.66666666666, ans=0.0
+2024-08-29 15:06:48,390 INFO [train.py:1114] (1/4) Epoch 14, batch 2000, loss[loss=0.1776, simple_loss=0.2447, pruned_loss=0.04086, ctc_loss=0.07181, over 19667.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2829, pruned_loss=0.05711, ctc_loss=0.1073, over 3854683.60 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:06:53,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=183248.0, ans=0.0
+2024-08-29 15:06:55,150 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.67 vs. limit=15.0
+2024-08-29 15:07:34,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=183461.33333333334, ans=0.125
+2024-08-29 15:07:45,838 INFO [train.py:1114] (1/4) Epoch 14, batch 2050, loss[loss=0.2029, simple_loss=0.2626, pruned_loss=0.05177, ctc_loss=0.09918, over 19696.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2817, pruned_loss=0.05684, ctc_loss=0.1069, over 3850423.23 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:07:59,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=183568.0, ans=0.0
+2024-08-29 15:08:38,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-29 15:08:38,218 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=183621.33333333334, ans=0.1
+2024-08-29 15:09:20,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-29 15:09:39,970 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.749e+02 1.987e+02 2.455e+02 3.413e+02, threshold=3.973e+02, percent-clipped=0.0
+2024-08-29 15:09:46,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=183728.0, ans=0.125
+2024-08-29 15:09:48,904 INFO [train.py:1114] (1/4) Epoch 14, batch 2100, loss[loss=0.1978, simple_loss=0.2753, pruned_loss=0.04463, ctc_loss=0.07736, over 19783.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2809, pruned_loss=0.05641, ctc_loss=0.1059, over 3857559.04 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:10:05,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=183834.66666666666, ans=0.125
+2024-08-29 15:10:06,307 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.47 vs. limit=15.0
+2024-08-29 15:10:21,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=183834.66666666666, ans=0.1
+2024-08-29 15:10:26,431 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.58 vs. limit=15.0
+2024-08-29 15:10:40,973 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.93 vs. limit=15.0
+2024-08-29 15:10:57,758 INFO [train.py:1114] (1/4) Epoch 14, batch 2150, loss[loss=0.2364, simple_loss=0.2919, pruned_loss=0.06565, ctc_loss=0.1241, over 19581.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2804, pruned_loss=0.05633, ctc_loss=0.1057, over 3868055.93 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:10:59,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=184048.0, ans=0.125
+2024-08-29 15:11:22,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=184154.66666666666, ans=0.125
+2024-08-29 15:11:31,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=184208.0, ans=0.025
+2024-08-29 15:11:44,626 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 1.765e+02 2.209e+02 2.742e+02 6.061e+02, threshold=4.418e+02, percent-clipped=6.0
+2024-08-29 15:12:04,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=184261.33333333334, ans=0.125
+2024-08-29 15:12:09,353 INFO [train.py:1114] (1/4) Epoch 14, batch 2200, loss[loss=0.232, simple_loss=0.2903, pruned_loss=0.06305, ctc_loss=0.119, over 19577.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2799, pruned_loss=0.05589, ctc_loss=0.105, over 3867342.29 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:12:19,045 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.04 vs. limit=15.0
+2024-08-29 15:13:13,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=184421.33333333334, ans=0.0
+2024-08-29 15:13:15,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=184421.33333333334, ans=0.0
+2024-08-29 15:13:19,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=184421.33333333334, ans=0.0
+2024-08-29 15:13:19,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=184421.33333333334, ans=15.0
+2024-08-29 15:13:40,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=184528.0, ans=0.125
+2024-08-29 15:13:47,040 INFO [train.py:1114] (1/4) Epoch 14, batch 2250, loss[loss=0.2245, simple_loss=0.2899, pruned_loss=0.05685, ctc_loss=0.1132, over 19609.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2804, pruned_loss=0.05645, ctc_loss=0.106, over 3867767.48 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:13:58,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=184634.66666666666, ans=0.07
+2024-08-29 15:14:03,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=184634.66666666666, ans=0.0
+2024-08-29 15:14:23,546 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.81 vs. limit=15.0
+2024-08-29 15:14:28,997 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:14:31,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=184794.66666666666, ans=0.125
+2024-08-29 15:14:33,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=184794.66666666666, ans=0.125
+2024-08-29 15:14:34,163 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.796e+02 2.116e+02 2.512e+02 3.767e+02, threshold=4.231e+02, percent-clipped=0.0
+2024-08-29 15:14:39,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=184794.66666666666, ans=0.125
+2024-08-29 15:14:43,287 INFO [train.py:1114] (1/4) Epoch 14, batch 2300, loss[loss=0.1991, simple_loss=0.2607, pruned_loss=0.04998, ctc_loss=0.09384, over 19503.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2799, pruned_loss=0.05676, ctc_loss=0.1064, over 3861437.78 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:14:46,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=184848.0, ans=0.125
+2024-08-29 15:14:58,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=184901.33333333334, ans=0.125
+2024-08-29 15:15:02,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=184901.33333333334, ans=0.0
+2024-08-29 15:15:22,793 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.88 vs. limit=15.0
+2024-08-29 15:15:40,907 INFO [train.py:1114] (1/4) Epoch 14, batch 2350, loss[loss=0.2358, simple_loss=0.2907, pruned_loss=0.06608, ctc_loss=0.1217, over 19691.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.28, pruned_loss=0.05689, ctc_loss=0.1064, over 3864115.47 frames. ], batch size: 63, lr: 1.05e-02, grad_scale: 64.0
+2024-08-29 15:15:46,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=185114.66666666666, ans=0.0
+2024-08-29 15:15:46,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=185114.66666666666, ans=0.125
+2024-08-29 15:16:06,420 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.54 vs. limit=22.5
+2024-08-29 15:16:06,476 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.38 vs. limit=15.0
+2024-08-29 15:16:28,757 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.724e+02 2.017e+02 2.647e+02 4.792e+02, threshold=4.034e+02, percent-clipped=3.0
+2024-08-29 15:16:32,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=185328.0, ans=0.0
+2024-08-29 15:16:36,505 INFO [train.py:1114] (1/4) Epoch 14, batch 2400, loss[loss=0.2476, simple_loss=0.2999, pruned_loss=0.07166, ctc_loss=0.1299, over 19263.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2825, pruned_loss=0.05796, ctc_loss=0.1085, over 3858838.15 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 32.0
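+Note the `grad_scale` field: it sat at 32.0 for most of the epoch, doubled to 64.0 at batch 2350, and is back at 32.0 here (it later dips to 16.0), the signature of dynamic loss scaling that grows after a run of overflow-free steps and halves on overflow. Standard PyTorch AMP reproduces this behaviour; a generic sketch follows (the training script may manage the scale itself, and `batch_loss_fn` is a placeholder):
+
+```python
+import torch
+
+scaler = torch.cuda.amp.GradScaler(init_scale=32.0, growth_factor=2.0,
+                                   backoff_factor=0.5, growth_interval=2000)
+
+def training_step(model, optimizer, batch_loss_fn, batch):
+    optimizer.zero_grad()
+    with torch.cuda.amp.autocast():
+        loss = batch_loss_fn(model, batch)   # fp16/bf16 forward pass
+    scaler.scale(loss).backward()            # scale loss to protect fp16 grads
+    scaler.step(optimizer)                   # unscales; skips step on inf/nan
+    scaler.update()                          # grow or back off the scale
+    return loss.detach(), scaler.get_scale() # logged as `grad_scale: ...`
+```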
+2024-08-29 15:16:36,761 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:17:30,034 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.95 vs. limit=15.0
+2024-08-29 15:17:32,235 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.26 vs. limit=15.0
+2024-08-29 15:17:32,388 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.38 vs. limit=15.0
+2024-08-29 15:17:38,563 INFO [train.py:1114] (1/4) Epoch 14, batch 2450, loss[loss=0.3235, simple_loss=0.3436, pruned_loss=0.1087, ctc_loss=0.2148, over 13328.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.287, pruned_loss=0.06127, ctc_loss=0.1152, over 3731819.87 frames. ], batch size: 140, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:17:41,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=185648.0, ans=0.025
+2024-08-29 15:17:44,706 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=185648.0, ans=0.2
+2024-08-29 15:17:48,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=185648.0, ans=0.0
+2024-08-29 15:18:08,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=185754.66666666666, ans=0.2
+2024-08-29 15:18:09,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=185754.66666666666, ans=0.125
+2024-08-29 15:18:14,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=185808.0, ans=0.0
+2024-08-29 15:18:15,172 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=185808.0, ans=0.1
+2024-08-29 15:19:09,390 INFO [train.py:1114] (1/4) Epoch 15, batch 0, loss[loss=0.2221, simple_loss=0.274, pruned_loss=0.06146, ctc_loss=0.118, over 19792.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.274, pruned_loss=0.06146, ctc_loss=0.118, over 19792.00 frames. ], batch size: 49, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:19:09,391 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-29 15:19:20,886 INFO [train.py:1146] (1/4) Epoch 15, validation: loss=0.1908, simple_loss=0.2785, pruned_loss=0.03825, ctc_loss=0.06651, over 944034.00 frames.
+2024-08-29 15:19:20,887 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13709MB
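+At the epoch boundary the script switches to eval mode, computes the validation figures just logged, and reports peak GPU memory (which maps to `torch.cuda.max_memory_allocated()`). A generic sketch of such a pass (the batch layout and model call signature below are assumptions, not the actual train.py API):
+
+```python
+import torch
+
+def compute_validation_loss(model, valid_loader, device) -> float:
+    """Frame-weighted validation loss behind the `Computing validation loss`
+    and `validation: loss=..., over N frames` lines."""
+    model.eval()
+    tot_loss, tot_frames = 0.0, 0
+    with torch.no_grad():
+        for batch in valid_loader:
+            feats = batch["features"].to(device)       # assumed batch layout
+            targets = batch["targets"].to(device)
+            loss, num_frames = model(feats, targets)   # assumed model API
+            tot_loss += loss.item() * num_frames
+            tot_frames += num_frames
+    model.train()
+    mem_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
+    print(f"validation: loss={tot_loss / tot_frames:.4f}, "
+          f"over {tot_frames} frames; max memory {mem_mb}MB")
+    return tot_loss / tot_frames
+```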
+2024-08-29 15:19:22,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=185856.0, ans=0.125
+2024-08-29 15:19:24,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=185856.0, ans=0.2
+2024-08-29 15:19:25,768 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.942e+02 2.136e+02 2.424e+02 3.799e+02, threshold=4.272e+02, percent-clipped=0.0
+2024-08-29 15:19:56,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=186016.0, ans=0.125
+2024-08-29 15:20:00,901 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.90 vs. limit=22.5
+2024-08-29 15:20:13,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=186069.33333333334, ans=0.2
+2024-08-29 15:20:25,208 INFO [train.py:1114] (1/4) Epoch 15, batch 50, loss[loss=0.1742, simple_loss=0.2402, pruned_loss=0.03922, ctc_loss=0.07426, over 19715.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2834, pruned_loss=0.05894, ctc_loss=0.1123, over 844130.59 frames. ], batch size: 47, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:20:28,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=186122.66666666666, ans=0.025
+2024-08-29 15:21:00,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=186282.66666666666, ans=0.035
+2024-08-29 15:21:06,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=186282.66666666666, ans=0.0
+2024-08-29 15:21:25,442 INFO [train.py:1114] (1/4) Epoch 15, batch 100, loss[loss=0.1884, simple_loss=0.2603, pruned_loss=0.04254, ctc_loss=0.07877, over 19731.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2857, pruned_loss=0.05954, ctc_loss=0.1126, over 1499265.19 frames. ], batch size: 51, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:21:30,085 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.739e+02 1.952e+02 2.450e+02 4.288e+02, threshold=3.904e+02, percent-clipped=1.0
+2024-08-29 15:21:30,443 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=186389.33333333334, ans=0.2
+2024-08-29 15:22:01,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=186549.33333333334, ans=0.2
+2024-08-29 15:22:03,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=186549.33333333334, ans=0.125
+2024-08-29 15:22:29,353 INFO [train.py:1114] (1/4) Epoch 15, batch 150, loss[loss=0.2032, simple_loss=0.2547, pruned_loss=0.05499, ctc_loss=0.1043, over 19736.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2821, pruned_loss=0.05763, ctc_loss=0.1089, over 2027143.42 frames. ], batch size: 47, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:23:05,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=186816.0, ans=0.0
+2024-08-29 15:23:05,565 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.48 vs. limit=6.0
+2024-08-29 15:23:28,627 INFO [train.py:1114] (1/4) Epoch 15, batch 200, loss[loss=0.2464, simple_loss=0.3075, pruned_loss=0.06668, ctc_loss=0.13, over 18371.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2804, pruned_loss=0.05682, ctc_loss=0.1073, over 2433712.99 frames. ], batch size: 85, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:23:44,482 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.690e+02 2.002e+02 2.433e+02 3.884e+02, threshold=4.003e+02, percent-clipped=0.0
+2024-08-29 15:24:00,853 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=186976.0, ans=0.0
+2024-08-29 15:24:16,917 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.50 vs. limit=6.0
+2024-08-29 15:24:20,386 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.70 vs. limit=22.5
+2024-08-29 15:24:22,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=187082.66666666666, ans=0.0
+2024-08-29 15:24:33,782 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.36 vs. limit=6.0
+2024-08-29 15:25:01,148 INFO [train.py:1114] (1/4) Epoch 15, batch 250, loss[loss=0.2394, simple_loss=0.3063, pruned_loss=0.06325, ctc_loss=0.115, over 19426.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2807, pruned_loss=0.05678, ctc_loss=0.1068, over 2754856.41 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:25:31,894 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.86 vs. limit=15.0
+2024-08-29 15:25:42,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=187349.33333333334, ans=0.125
+2024-08-29 15:25:53,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=187402.66666666666, ans=0.125
+2024-08-29 15:25:57,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=187402.66666666666, ans=0.2
+2024-08-29 15:26:33,388 INFO [train.py:1114] (1/4) Epoch 15, batch 300, loss[loss=0.2345, simple_loss=0.2912, pruned_loss=0.06552, ctc_loss=0.1167, over 19546.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2798, pruned_loss=0.05623, ctc_loss=0.106, over 2999580.57 frames. ], batch size: 61, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:26:38,060 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.706e+02 2.088e+02 2.592e+02 3.748e+02, threshold=4.177e+02, percent-clipped=0.0
+2024-08-29 15:26:44,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=187509.33333333334, ans=0.2
+2024-08-29 15:26:45,360 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=187509.33333333334, ans=0.125
+2024-08-29 15:27:12,925 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=187616.0, ans=0.125
+2024-08-29 15:27:29,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=187669.33333333334, ans=0.125
+2024-08-29 15:27:34,910 INFO [train.py:1114] (1/4) Epoch 15, batch 350, loss[loss=0.1985, simple_loss=0.2557, pruned_loss=0.05166, ctc_loss=0.09502, over 19736.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2808, pruned_loss=0.05647, ctc_loss=0.106, over 3189607.36 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 16.0
+2024-08-29 15:27:41,666 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.35 vs. limit=15.0
+2024-08-29 15:27:46,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=187776.0, ans=0.025
+2024-08-29 15:27:52,390 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.88 vs. limit=6.0
+2024-08-29 15:27:58,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.30 vs. limit=15.0
+2024-08-29 15:28:15,917 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.13 vs. limit=15.0
+2024-08-29 15:28:30,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187936.0, ans=0.1
+2024-08-29 15:28:38,893 INFO [train.py:1114] (1/4) Epoch 15, batch 400, loss[loss=0.2093, simple_loss=0.285, pruned_loss=0.04894, ctc_loss=0.08944, over 19491.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2797, pruned_loss=0.05568, ctc_loss=0.1046, over 3341688.53 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:28:44,510 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.706e+02 2.043e+02 2.587e+02 5.210e+02, threshold=4.085e+02, percent-clipped=2.0
+2024-08-29 15:29:14,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187989.33333333334, ans=0.1
+2024-08-29 15:29:16,140 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.55 vs. limit=6.0
+2024-08-29 15:29:31,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=188096.0, ans=0.0
+2024-08-29 15:29:37,823 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.85 vs. limit=15.0
+2024-08-29 15:29:38,728 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.71 vs. limit=15.0
+2024-08-29 15:29:40,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=188149.33333333334, ans=0.125
+2024-08-29 15:29:47,615 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=9.68 vs. limit=22.5
+2024-08-29 15:30:07,910 INFO [train.py:1114] (1/4) Epoch 15, batch 450, loss[loss=0.2045, simple_loss=0.2766, pruned_loss=0.04802, ctc_loss=0.09077, over 19602.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2793, pruned_loss=0.05555, ctc_loss=0.1043, over 3448438.69 frames. ], batch size: 55, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:30:21,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=188309.33333333334, ans=0.025
+2024-08-29 15:30:22,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=188309.33333333334, ans=0.2
+2024-08-29 15:31:09,326 INFO [train.py:1114] (1/4) Epoch 15, batch 500, loss[loss=0.2313, simple_loss=0.2992, pruned_loss=0.06019, ctc_loss=0.1077, over 19663.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2786, pruned_loss=0.05505, ctc_loss=0.1038, over 3544209.62 frames. ], batch size: 63, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:31:10,714 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=188522.66666666666, ans=0.0
+2024-08-29 15:31:15,120 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.681e+02 1.897e+02 2.177e+02 4.545e+02, threshold=3.794e+02, percent-clipped=1.0
+2024-08-29 15:31:23,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=188576.0, ans=0.0
+2024-08-29 15:31:29,448 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=188576.0, ans=0.125
+2024-08-29 15:32:21,485 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=188629.33333333334, ans=0.0
+2024-08-29 15:32:27,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=188629.33333333334, ans=0.125
+2024-08-29 15:32:48,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=188736.0, ans=0.0
+2024-08-29 15:32:54,574 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=188736.0, ans=0.1
+2024-08-29 15:32:55,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=188736.0, ans=0.125
+2024-08-29 15:32:56,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=188736.0, ans=0.125
+2024-08-29 15:32:59,017 INFO [train.py:1114] (1/4) Epoch 15, batch 550, loss[loss=0.2321, simple_loss=0.2939, pruned_loss=0.06092, ctc_loss=0.1211, over 19161.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2787, pruned_loss=0.05491, ctc_loss=0.1034, over 3606403.89 frames. ], batch size: 71, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:33:04,360 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=188789.33333333334, ans=0.125
+2024-08-29 15:33:45,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=188842.66666666666, ans=0.0
+2024-08-29 15:34:02,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=188896.0, ans=0.0
+2024-08-29 15:34:03,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=188896.0, ans=0.125
+2024-08-29 15:34:12,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=188949.33333333334, ans=0.2
+2024-08-29 15:34:30,455 INFO [train.py:1114] (1/4) Epoch 15, batch 600, loss[loss=0.2472, simple_loss=0.3108, pruned_loss=0.06694, ctc_loss=0.1244, over 19405.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2791, pruned_loss=0.05499, ctc_loss=0.1034, over 3665565.83 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:34:34,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189056.0, ans=0.1
+2024-08-29 15:34:36,392 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.830e+02 2.111e+02 2.732e+02 4.380e+02, threshold=4.223e+02, percent-clipped=4.0
+2024-08-29 15:35:11,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=189216.0, ans=0.125
+2024-08-29 15:35:15,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189216.0, ans=0.1
+2024-08-29 15:35:25,985 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.44 vs. limit=22.5
+2024-08-29 15:35:31,329 INFO [train.py:1114] (1/4) Epoch 15, batch 650, loss[loss=0.1885, simple_loss=0.2646, pruned_loss=0.04073, ctc_loss=0.07733, over 19759.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2782, pruned_loss=0.05466, ctc_loss=0.1027, over 3715728.51 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:35:34,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=189322.66666666666, ans=0.125
+2024-08-29 15:35:34,512 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.40 vs. limit=12.0
+2024-08-29 15:35:36,475 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=189322.66666666666, ans=0.125
+2024-08-29 15:35:42,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=189376.0, ans=0.125
+2024-08-29 15:35:48,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=189376.0, ans=0.125
+2024-08-29 15:37:43,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.68 vs. limit=15.0
+2024-08-29 15:38:06,409 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:22,772 INFO [train.py:1114] (1/4) Epoch 15, batch 700, loss[loss=0.186, simple_loss=0.2532, pruned_loss=0.04343, ctc_loss=0.07997, over 19748.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2782, pruned_loss=0.05464, ctc_loss=0.1027, over 3748378.29 frames. ], batch size: 51, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:38:28,535 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.846e+02 2.430e+02 3.057e+02 4.272e+02, threshold=4.860e+02, percent-clipped=1.0
+2024-08-29 15:38:30,025 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:38:38,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=189642.66666666666, ans=0.125
+2024-08-29 15:38:49,310 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.36 vs. limit=15.0
+2024-08-29 15:38:49,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189696.0, ans=0.1
+2024-08-29 15:39:00,414 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.64 vs. limit=15.0
+2024-08-29 15:39:01,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=189749.33333333334, ans=10.0
+2024-08-29 15:39:05,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=189749.33333333334, ans=10.0
+2024-08-29 15:39:10,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=189749.33333333334, ans=0.2
+2024-08-29 15:39:22,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=189802.66666666666, ans=0.05
+2024-08-29 15:39:25,949 INFO [train.py:1114] (1/4) Epoch 15, batch 750, loss[loss=0.22, simple_loss=0.2824, pruned_loss=0.05601, ctc_loss=0.1137, over 19478.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2782, pruned_loss=0.05499, ctc_loss=0.1035, over 3775378.62 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:39:33,297 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=189856.0, ans=10.0
+2024-08-29 15:39:33,555 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.39 vs. limit=10.0
+2024-08-29 15:39:39,788 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.86 vs. limit=15.0
+2024-08-29 15:39:39,903 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.61 vs. limit=15.0
+2024-08-29 15:39:42,495 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.75 vs. limit=15.0
+2024-08-29 15:39:46,015 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.39 vs. limit=15.0
+2024-08-29 15:39:56,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=189962.66666666666, ans=0.125
+2024-08-29 15:40:00,220 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:40:15,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=190069.33333333334, ans=0.0
+2024-08-29 15:40:16,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=190069.33333333334, ans=0.125
+2024-08-29 15:40:18,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys.whitening_limit, batch_count=190069.33333333334, ans=6.0
+2024-08-29 15:40:28,195 INFO [train.py:1114] (1/4) Epoch 15, batch 800, loss[loss=0.1933, simple_loss=0.2553, pruned_loss=0.0474, ctc_loss=0.09119, over 19404.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.279, pruned_loss=0.05542, ctc_loss=0.1041, over 3797168.46 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:40:34,418 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.728e+02 2.068e+02 2.494e+02 4.984e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-29 15:40:35,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=190122.66666666666, ans=0.0
+2024-08-29 15:40:38,191 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=190122.66666666666, ans=0.0
+2024-08-29 15:40:47,966 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=190176.0, ans=0.125
+2024-08-29 15:41:03,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=190282.66666666666, ans=0.0
+2024-08-29 15:41:13,284 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.89 vs. limit=15.0
+2024-08-29 15:41:30,888 INFO [train.py:1114] (1/4) Epoch 15, batch 850, loss[loss=0.2247, simple_loss=0.2927, pruned_loss=0.05646, ctc_loss=0.1094, over 19649.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2788, pruned_loss=0.05524, ctc_loss=0.1039, over 3816716.82 frames. ], batch size: 59, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:41:33,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=190389.33333333334, ans=0.2
+2024-08-29 15:41:51,421 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=190442.66666666666, ans=0.125
+2024-08-29 15:41:56,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=190496.0, ans=0.125
+2024-08-29 15:42:01,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=190496.0, ans=0.025
+2024-08-29 15:42:14,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=190549.33333333334, ans=0.125
+2024-08-29 15:42:22,627 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.05 vs. limit=15.0
+2024-08-29 15:42:23,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=190602.66666666666, ans=0.0
+2024-08-29 15:42:34,681 INFO [train.py:1114] (1/4) Epoch 15, batch 900, loss[loss=0.2028, simple_loss=0.259, pruned_loss=0.05306, ctc_loss=0.1011, over 19801.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2792, pruned_loss=0.05556, ctc_loss=0.1043, over 3819125.90 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:42:40,563 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 1.760e+02 2.061e+02 2.441e+02 4.748e+02, threshold=4.121e+02, percent-clipped=4.0
+2024-08-29 15:43:24,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.54 vs. limit=15.0
+2024-08-29 15:43:38,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=190869.33333333334, ans=0.125
+2024-08-29 15:43:47,837 INFO [train.py:1114] (1/4) Epoch 15, batch 950, loss[loss=0.2075, simple_loss=0.271, pruned_loss=0.05176, ctc_loss=0.1012, over 19487.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2796, pruned_loss=0.05561, ctc_loss=0.1047, over 3820889.42 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:43:57,136 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.94 vs. limit=22.5
+2024-08-29 15:44:19,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=191029.33333333334, ans=0.0
+2024-08-29 15:44:26,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=191082.66666666666, ans=0.1
+2024-08-29 15:44:32,013 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=191082.66666666666, ans=0.125
+2024-08-29 15:44:42,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=191136.0, ans=0.1
+2024-08-29 15:44:47,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=191189.33333333334, ans=0.0
+2024-08-29 15:44:48,254 INFO [train.py:1114] (1/4) Epoch 15, batch 1000, loss[loss=0.1775, simple_loss=0.2477, pruned_loss=0.03963, ctc_loss=0.07025, over 19861.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2799, pruned_loss=0.05573, ctc_loss=0.1049, over 3815966.68 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:44:56,840 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 1.691e+02 1.934e+02 2.300e+02 3.610e+02, threshold=3.869e+02, percent-clipped=0.0
+2024-08-29 15:45:08,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=191242.66666666666, ans=0.125
+2024-08-29 15:45:25,141 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.19 vs. limit=12.0
+2024-08-29 15:45:37,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=191402.66666666666, ans=0.125
+2024-08-29 15:45:47,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=191402.66666666666, ans=0.05
+2024-08-29 15:45:53,382 INFO [train.py:1114] (1/4) Epoch 15, batch 1050, loss[loss=0.2086, simple_loss=0.2814, pruned_loss=0.04917, ctc_loss=0.09373, over 19833.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2797, pruned_loss=0.05587, ctc_loss=0.1052, over 3823073.04 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:46:38,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.18 vs. limit=12.0
+2024-08-29 15:46:39,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=191616.0, ans=0.125
+2024-08-29 15:46:54,815 INFO [train.py:1114] (1/4) Epoch 15, batch 1100, loss[loss=0.2013, simple_loss=0.2731, pruned_loss=0.04669, ctc_loss=0.09038, over 19583.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2796, pruned_loss=0.0558, ctc_loss=0.1052, over 3830141.95 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:47:17,589 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.746e+02 1.965e+02 2.496e+02 3.903e+02, threshold=3.929e+02, percent-clipped=1.0
+2024-08-29 15:47:18,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=191722.66666666666, ans=0.2
+2024-08-29 15:47:21,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=191722.66666666666, ans=0.0
+2024-08-29 15:47:26,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=191776.0, ans=0.125
+2024-08-29 15:47:39,193 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=3.74 vs. limit=12.0
+2024-08-29 15:48:12,563 INFO [train.py:1114] (1/4) Epoch 15, batch 1150, loss[loss=0.2105, simple_loss=0.2757, pruned_loss=0.05332, ctc_loss=0.09671, over 19589.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2787, pruned_loss=0.05515, ctc_loss=0.1039, over 3829266.93 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:48:27,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=192042.66666666666, ans=0.05
+2024-08-29 15:48:34,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=192042.66666666666, ans=0.04949747468305833
+2024-08-29 15:49:19,911 INFO [train.py:1114] (1/4) Epoch 15, batch 1200, loss[loss=0.219, simple_loss=0.2907, pruned_loss=0.05399, ctc_loss=0.09857, over 19846.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2794, pruned_loss=0.05529, ctc_loss=0.1042, over 3825173.40 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:49:26,212 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.719e+02 2.001e+02 2.349e+02 3.398e+02, threshold=4.002e+02, percent-clipped=0.0
+2024-08-29 15:49:26,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=192256.0, ans=0.1
+2024-08-29 15:49:36,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=192309.33333333334, ans=0.125
+2024-08-29 15:50:06,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=192416.0, ans=0.125
+2024-08-29 15:50:24,184 INFO [train.py:1114] (1/4) Epoch 15, batch 1250, loss[loss=0.2384, simple_loss=0.2907, pruned_loss=0.06797, ctc_loss=0.1253, over 19519.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2796, pruned_loss=0.05517, ctc_loss=0.1038, over 3843000.09 frames. ], batch size: 61, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:50:30,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=192522.66666666666, ans=0.0
+2024-08-29 15:51:06,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=192682.66666666666, ans=0.0
+2024-08-29 15:51:12,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=192736.0, ans=0.0
+2024-08-29 15:51:20,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=192736.0, ans=0.2
+2024-08-29 15:51:25,192 INFO [train.py:1114] (1/4) Epoch 15, batch 1300, loss[loss=0.2607, simple_loss=0.3093, pruned_loss=0.07754, ctc_loss=0.1425, over 18934.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2789, pruned_loss=0.05472, ctc_loss=0.1029, over 3846476.85 frames. ], batch size: 76, lr: 9.99e-03, grad_scale: 32.0
+2024-08-29 15:52:15,025 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.668e+02 1.955e+02 2.455e+02 4.261e+02, threshold=3.910e+02, percent-clipped=2.0
+2024-08-29 15:52:16,540 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=192789.33333333334, ans=0.09899494936611666
+2024-08-29 15:52:18,506 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=15.25 vs. limit=22.5
+2024-08-29 15:52:28,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=192842.66666666666, ans=0.125
+2024-08-29 15:52:28,828 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.60 vs. limit=15.0
+2024-08-29 15:52:34,421 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=192896.0, ans=0.1
+2024-08-29 15:52:45,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=192949.33333333334, ans=0.125
+2024-08-29 15:52:54,608 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=192949.33333333334, ans=0.125
+2024-08-29 15:53:10,978 INFO [train.py:1114] (1/4) Epoch 15, batch 1350, loss[loss=0.2072, simple_loss=0.2761, pruned_loss=0.05074, ctc_loss=0.09195, over 19755.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2785, pruned_loss=0.05448, ctc_loss=0.1025, over 3858434.80 frames. ], batch size: 54, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:53:22,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=193109.33333333334, ans=0.2
+2024-08-29 15:53:37,475 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.95 vs. limit=22.5
+2024-08-29 15:53:40,028 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.98 vs. limit=15.0
+2024-08-29 15:53:47,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=193216.0, ans=0.025
+2024-08-29 15:54:14,966 INFO [train.py:1114] (1/4) Epoch 15, batch 1400, loss[loss=0.1795, simple_loss=0.2404, pruned_loss=0.04285, ctc_loss=0.08194, over 19677.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.278, pruned_loss=0.05426, ctc_loss=0.1021, over 3864695.51 frames. ], batch size: 46, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:54:37,465 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.658e+02 1.833e+02 2.351e+02 3.730e+02, threshold=3.665e+02, percent-clipped=0.0
+2024-08-29 15:54:39,389 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.31 vs. limit=15.0
+2024-08-29 15:54:52,032 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn2.whiten.whitening_limit, batch_count=193376.0, ans=22.5
+2024-08-29 15:54:53,599 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.33 vs. limit=15.0
+2024-08-29 15:54:57,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.35 vs. limit=6.0
+2024-08-29 15:54:59,664 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=193429.33333333334, ans=0.07
+2024-08-29 15:55:10,880 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.00 vs. limit=22.5
+2024-08-29 15:55:16,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=193482.66666666666, ans=0.0
+2024-08-29 15:55:21,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=193482.66666666666, ans=0.0
+2024-08-29 15:55:25,538 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.58 vs. limit=22.5
+2024-08-29 15:55:32,371 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:55:42,767 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=193589.33333333334, ans=0.1
+2024-08-29 15:55:43,671 INFO [train.py:1114] (1/4) Epoch 15, batch 1450, loss[loss=0.2163, simple_loss=0.289, pruned_loss=0.05227, ctc_loss=0.09776, over 19683.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2795, pruned_loss=0.0551, ctc_loss=0.1038, over 3862898.74 frames. ], batch size: 63, lr: 9.97e-03, grad_scale: 32.0
+2024-08-29 15:55:48,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=193589.33333333334, ans=0.04949747468305833
+2024-08-29 15:55:51,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=193589.33333333334, ans=0.025
+2024-08-29 15:56:09,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=193696.0, ans=0.2
+2024-08-29 15:56:29,778 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.44 vs. limit=15.0
+2024-08-29 15:56:32,217 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.37 vs. limit=15.0
+2024-08-29 15:56:45,749 INFO [train.py:1114] (1/4) Epoch 15, batch 1500, loss[loss=0.2007, simple_loss=0.2768, pruned_loss=0.0449, ctc_loss=0.08704, over 19570.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2798, pruned_loss=0.05511, ctc_loss=0.1036, over 3862677.43 frames. ], batch size: 57, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:56:52,428 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.660e+02 1.885e+02 2.337e+02 4.281e+02, threshold=3.770e+02, percent-clipped=2.0
+2024-08-29 15:56:55,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=193856.0, ans=0.125
+2024-08-29 15:56:58,614 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=193909.33333333334, ans=0.125
+2024-08-29 15:57:05,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=193909.33333333334, ans=0.125
+2024-08-29 15:57:10,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=193909.33333333334, ans=0.1
+2024-08-29 15:57:23,771 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.25 vs. limit=6.0
+2024-08-29 15:57:25,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=194016.0, ans=0.0
+2024-08-29 15:57:26,994 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=194016.0, ans=0.0
+2024-08-29 15:57:40,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=194069.33333333334, ans=0.2
+2024-08-29 15:57:41,933 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.11 vs. limit=15.0
+2024-08-29 15:57:51,459 INFO [train.py:1114] (1/4) Epoch 15, batch 1550, loss[loss=0.2549, simple_loss=0.3081, pruned_loss=0.07401, ctc_loss=0.1341, over 19586.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.28, pruned_loss=0.05562, ctc_loss=0.1045, over 3845845.14 frames. ], batch size: 60, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:57:51,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=194122.66666666666, ans=0.0
+2024-08-29 15:58:01,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.66 vs. limit=15.0
+2024-08-29 15:58:21,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=194229.33333333334, ans=0.025
+2024-08-29 15:58:29,940 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.12 vs. limit=15.0
+2024-08-29 15:58:34,899 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=194282.66666666666, ans=0.125
+2024-08-29 15:58:53,438 INFO [train.py:1114] (1/4) Epoch 15, batch 1600, loss[loss=0.242, simple_loss=0.301, pruned_loss=0.06656, ctc_loss=0.1244, over 19833.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2802, pruned_loss=0.05575, ctc_loss=0.1049, over 3835219.53 frames. ], batch size: 57, lr: 9.95e-03, grad_scale: 32.0
+2024-08-29 15:58:55,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=194389.33333333334, ans=0.025
+2024-08-29 15:58:59,524 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.762e+02 2.164e+02 2.478e+02 4.927e+02, threshold=4.328e+02, percent-clipped=7.0
+2024-08-29 15:58:59,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=194389.33333333334, ans=0.0
+2024-08-29 16:00:11,013 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.09 vs. limit=6.0
+2024-08-29 16:00:30,011 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.59 vs. limit=15.0
+2024-08-29 16:00:35,206 INFO [train.py:1114] (1/4) Epoch 15, batch 1650, loss[loss=0.2289, simple_loss=0.2922, pruned_loss=0.06001, ctc_loss=0.114, over 19646.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2803, pruned_loss=0.05584, ctc_loss=0.1051, over 3831815.30 frames. ], batch size: 59, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:00:40,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=194656.0, ans=0.125
+2024-08-29 16:01:19,876 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.25 vs. limit=12.0
+2024-08-29 16:01:21,826 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=194816.0, ans=0.125
+2024-08-29 16:01:32,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=194869.33333333334, ans=0.125
+2024-08-29 16:01:38,038 INFO [train.py:1114] (1/4) Epoch 15, batch 1700, loss[loss=0.1995, simple_loss=0.2555, pruned_loss=0.05276, ctc_loss=0.095, over 19675.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2802, pruned_loss=0.05576, ctc_loss=0.1051, over 3846198.71 frames. ], batch size: 46, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:01:40,849 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.66 vs. limit=15.0
+2024-08-29 16:01:43,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=194922.66666666666, ans=0.125
+2024-08-29 16:01:44,059 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.696e+02 2.083e+02 2.797e+02 4.802e+02, threshold=4.167e+02, percent-clipped=3.0
+2024-08-29 16:01:46,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=194922.66666666666, ans=0.0
+2024-08-29 16:01:47,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.23 vs. limit=22.5
+2024-08-29 16:01:47,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=194922.66666666666, ans=0.0
+2024-08-29 16:01:52,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=194976.0, ans=0.125
+2024-08-29 16:01:53,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.out_whiten.whitening_limit, batch_count=194976.0, ans=8.0
+2024-08-29 16:02:09,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=195029.33333333334, ans=0.125
+2024-08-29 16:02:29,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=195136.0, ans=0.025
+2024-08-29 16:02:40,451 INFO [train.py:1114] (1/4) Epoch 15, batch 1750, loss[loss=0.1754, simple_loss=0.2433, pruned_loss=0.03895, ctc_loss=0.07434, over 19658.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2794, pruned_loss=0.05542, ctc_loss=0.1042, over 3851342.47 frames. ], batch size: 45, lr: 9.93e-03, grad_scale: 32.0
+2024-08-29 16:02:41,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=195189.33333333334, ans=0.0
+2024-08-29 16:02:44,558 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=6.537e-02
+2024-08-29 16:02:44,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.79 vs. limit=10.0
+2024-08-29 16:03:10,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=195296.0, ans=0.0
+2024-08-29 16:03:21,464 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.00 vs. limit=12.0
+2024-08-29 16:03:26,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=195402.66666666666, ans=0.0
+2024-08-29 16:03:28,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=195402.66666666666, ans=0.0
+2024-08-29 16:03:31,947 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.99 vs. limit=12.0
+2024-08-29 16:03:33,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=195402.66666666666, ans=0.04949747468305833
+2024-08-29 16:03:37,863 INFO [train.py:1114] (1/4) Epoch 15, batch 1800, loss[loss=0.2336, simple_loss=0.2969, pruned_loss=0.0619, ctc_loss=0.1163, over 19593.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2791, pruned_loss=0.05533, ctc_loss=0.104, over 3852161.23 frames. ], batch size: 55, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:03:43,643 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.702e+02 2.083e+02 2.690e+02 4.339e+02, threshold=4.166e+02, percent-clipped=1.0
+2024-08-29 16:03:57,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=195509.33333333334, ans=0.0
+2024-08-29 16:03:59,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=195562.66666666666, ans=0.125
+2024-08-29 16:04:07,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=195562.66666666666, ans=0.025
+2024-08-29 16:04:21,178 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.17 vs. limit=15.0
+2024-08-29 16:04:31,943 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.38 vs. limit=15.0
+2024-08-29 16:04:32,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=195669.33333333334, ans=0.0
+2024-08-29 16:04:34,674 INFO [train.py:1114] (1/4) Epoch 15, batch 1850, loss[loss=0.2161, simple_loss=0.2916, pruned_loss=0.05158, ctc_loss=0.09373, over 19563.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2789, pruned_loss=0.05507, ctc_loss=0.1035, over 3854557.53 frames. ], batch size: 57, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:04:52,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=195776.0, ans=0.025
+2024-08-29 16:04:53,525 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=195776.0, ans=0.125
+2024-08-29 16:04:53,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=195776.0, ans=0.0
+2024-08-29 16:05:13,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=195882.66666666666, ans=0.125
+2024-08-29 16:05:16,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=195882.66666666666, ans=0.0
+2024-08-29 16:05:35,601 INFO [train.py:1114] (1/4) Epoch 15, batch 1900, loss[loss=0.2064, simple_loss=0.281, pruned_loss=0.04739, ctc_loss=0.09226, over 19630.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2792, pruned_loss=0.05487, ctc_loss=0.1031, over 3859475.40 frames. ], batch size: 59, lr: 9.91e-03, grad_scale: 32.0
+2024-08-29 16:05:36,075 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.69 vs. limit=15.0
+2024-08-29 16:05:40,973 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.724e+02 2.102e+02 3.115e+02 5.340e+02, threshold=4.204e+02, percent-clipped=3.0
+2024-08-29 16:05:59,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=196096.0, ans=0.1
+2024-08-29 16:05:59,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=196096.0, ans=0.125
+2024-08-29 16:06:11,902 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.46 vs. limit=22.5
+2024-08-29 16:06:12,390 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=196149.33333333334, ans=0.125
+2024-08-29 16:06:19,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=196149.33333333334, ans=0.0
+2024-08-29 16:06:32,313 INFO [train.py:1114] (1/4) Epoch 15, batch 1950, loss[loss=0.1829, simple_loss=0.2569, pruned_loss=0.03933, ctc_loss=0.07545, over 19582.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2796, pruned_loss=0.05457, ctc_loss=0.1027, over 3868676.38 frames. ], batch size: 52, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:06:34,029 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=196256.0, ans=0.05
+2024-08-29 16:06:59,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=196309.33333333334, ans=0.125
+2024-08-29 16:07:10,611 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.89 vs. limit=6.0
+2024-08-29 16:07:28,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=196469.33333333334, ans=0.0
+2024-08-29 16:07:35,229 INFO [train.py:1114] (1/4) Epoch 15, batch 2000, loss[loss=0.1889, simple_loss=0.2495, pruned_loss=0.04694, ctc_loss=0.08584, over 19662.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2806, pruned_loss=0.05515, ctc_loss=0.1036, over 3854088.50 frames. ], batch size: 45, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:07:41,140 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.618e+02 1.832e+02 2.132e+02 4.362e+02, threshold=3.664e+02, percent-clipped=1.0
+2024-08-29 16:07:49,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=196576.0, ans=0.125
+2024-08-29 16:08:07,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=196629.33333333334, ans=0.125
+2024-08-29 16:08:32,347 INFO [train.py:1114] (1/4) Epoch 15, batch 2050, loss[loss=0.1715, simple_loss=0.2376, pruned_loss=0.0386, ctc_loss=0.07079, over 19731.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2795, pruned_loss=0.05507, ctc_loss=0.1035, over 3851358.94 frames. ], batch size: 47, lr: 9.89e-03, grad_scale: 32.0
+2024-08-29 16:08:53,821 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.33 vs. limit=15.0
+2024-08-29 16:08:56,332 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.29 vs. limit=15.0
+2024-08-29 16:09:10,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=196949.33333333334, ans=10.0
+2024-08-29 16:09:17,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=15.0
+2024-08-29 16:09:27,728 INFO [train.py:1114] (1/4) Epoch 15, batch 2100, loss[loss=0.2058, simple_loss=0.2708, pruned_loss=0.05181, ctc_loss=0.09275, over 19772.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2788, pruned_loss=0.0548, ctc_loss=0.1032, over 3859328.21 frames. ], batch size: 54, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:09:33,407 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.691e+02 1.929e+02 2.354e+02 3.359e+02, threshold=3.858e+02, percent-clipped=0.0
+2024-08-29 16:09:38,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=197109.33333333334, ans=0.125
+2024-08-29 16:09:43,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=197109.33333333334, ans=0.125
+2024-08-29 16:09:58,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=197162.66666666666, ans=0.0
+2024-08-29 16:09:59,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=197162.66666666666, ans=0.025
+2024-08-29 16:10:12,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=197216.0, ans=0.2
+2024-08-29 16:10:16,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=197269.33333333334, ans=0.125
+2024-08-29 16:10:20,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=197269.33333333334, ans=0.09899494936611666
+2024-08-29 16:10:26,356 INFO [train.py:1114] (1/4) Epoch 15, batch 2150, loss[loss=0.2104, simple_loss=0.2807, pruned_loss=0.05093, ctc_loss=0.09551, over 19579.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.279, pruned_loss=0.05525, ctc_loss=0.1037, over 3869107.04 frames. ], batch size: 52, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:10:33,212 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.40 vs. limit=15.0
+2024-08-29 16:12:01,589 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=197429.33333333334, ans=10.0
+2024-08-29 16:12:08,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=197482.66666666666, ans=0.125
+2024-08-29 16:12:17,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=197482.66666666666, ans=0.125
+2024-08-29 16:12:24,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=197536.0, ans=0.2
+2024-08-29 16:12:31,424 INFO [train.py:1114] (1/4) Epoch 15, batch 2200, loss[loss=0.2421, simple_loss=0.3038, pruned_loss=0.06477, ctc_loss=0.1269, over 19604.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2789, pruned_loss=0.05501, ctc_loss=0.1033, over 3868156.74 frames. ], batch size: 57, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:12:36,856 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.787e+02 2.154e+02 2.730e+02 5.047e+02, threshold=4.308e+02, percent-clipped=4.0
+2024-08-29 16:12:47,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=197642.66666666666, ans=0.125
+2024-08-29 16:13:29,254 INFO [train.py:1114] (1/4) Epoch 15, batch 2250, loss[loss=0.2128, simple_loss=0.2898, pruned_loss=0.04987, ctc_loss=0.09016, over 19619.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2786, pruned_loss=0.05458, ctc_loss=0.1025, over 3868504.27 frames. ], batch size: 55, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:14:15,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=197962.66666666666, ans=0.0
+2024-08-29 16:14:16,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=197962.66666666666, ans=0.025
+2024-08-29 16:14:45,290 INFO [train.py:1114] (1/4) Epoch 15, batch 2300, loss[loss=0.2013, simple_loss=0.2656, pruned_loss=0.04905, ctc_loss=0.09707, over 19482.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2778, pruned_loss=0.05475, ctc_loss=0.1029, over 3861569.81 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 32.0
+2024-08-29 16:14:50,773 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.686e+02 1.986e+02 2.467e+02 4.553e+02, threshold=3.971e+02, percent-clipped=1.0
+2024-08-29 16:14:59,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=198176.0, ans=0.05
+2024-08-29 16:14:59,542 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.48 vs. limit=15.0
+2024-08-29 16:15:04,796 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=198176.0, ans=0.125
+2024-08-29 16:15:10,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=198229.33333333334, ans=0.125
+2024-08-29 16:15:16,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=198229.33333333334, ans=0.125
+2024-08-29 16:15:22,485 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.81 vs. limit=15.0
+2024-08-29 16:15:26,737 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=198282.66666666666, ans=0.1
+2024-08-29 16:15:30,923 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=198336.0, ans=0.1
+2024-08-29 16:15:31,398 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.03 vs. limit=15.0
+2024-08-29 16:15:34,372 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:15:43,139 INFO [train.py:1114] (1/4) Epoch 15, batch 2350, loss[loss=0.2133, simple_loss=0.284, pruned_loss=0.0511, ctc_loss=0.1009, over 19666.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.278, pruned_loss=0.05482, ctc_loss=0.1031, over 3863692.55 frames. ], batch size: 63, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:15:44,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=198389.33333333334, ans=0.0
+2024-08-29 16:15:50,808 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.96 vs. limit=22.5
+2024-08-29 16:16:14,364 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=198496.0, ans=0.125
+2024-08-29 16:16:16,679 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.44 vs. limit=22.5
+2024-08-29 16:16:20,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=198549.33333333334, ans=0.0
+2024-08-29 16:16:23,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=198549.33333333334, ans=0.125
+2024-08-29 16:16:27,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=198549.33333333334, ans=0.0
+2024-08-29 16:16:31,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=198602.66666666666, ans=0.0
+2024-08-29 16:16:42,886 INFO [train.py:1114] (1/4) Epoch 15, batch 2400, loss[loss=0.2262, simple_loss=0.2944, pruned_loss=0.0574, ctc_loss=0.1081, over 19295.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2798, pruned_loss=0.05527, ctc_loss=0.1038, over 3856804.39 frames. ], batch size: 71, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:16:48,396 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.659e+02 1.944e+02 2.492e+02 3.873e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-29 16:17:56,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=198762.66666666666, ans=0.125
+2024-08-29 16:18:10,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=198816.0, ans=0.125
+2024-08-29 16:18:17,863 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=12.09 vs. limit=15.0
+2024-08-29 16:18:18,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=198816.0, ans=0.125
+2024-08-29 16:18:25,639 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=198869.33333333334, ans=0.1
+2024-08-29 16:18:33,178 INFO [train.py:1114] (1/4) Epoch 15, batch 2450, loss[loss=0.3031, simple_loss=0.3291, pruned_loss=0.1001, ctc_loss=0.1923, over 12649.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2837, pruned_loss=0.05813, ctc_loss=0.1095, over 3727997.94 frames. ], batch size: 140, lr: 9.84e-03, grad_scale: 32.0
+2024-08-29 16:20:18,419 INFO [train.py:1114] (1/4) Epoch 16, batch 0, loss[loss=0.2006, simple_loss=0.2618, pruned_loss=0.05249, ctc_loss=0.08623, over 19798.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2618, pruned_loss=0.05249, ctc_loss=0.08623, over 19798.00 frames. ], batch size: 49, lr: 9.52e-03, grad_scale: 32.0
+2024-08-29 16:20:18,420 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-29 16:20:28,428 INFO [train.py:1146] (1/4) Epoch 16, validation: loss=0.1867, simple_loss=0.2755, pruned_loss=0.03636, ctc_loss=0.06317, over 944034.00 frames.
+2024-08-29 16:20:28,429 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13709MB
+2024-08-29 16:20:38,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=199130.66666666666, ans=0.0
+2024-08-29 16:20:44,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=199184.0, ans=0.125
+2024-08-29 16:20:48,968 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.810e+02 1.998e+02 2.276e+02 3.528e+02, threshold=3.997e+02, percent-clipped=0.0
+2024-08-29 16:21:08,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=199290.66666666666, ans=0.125
+2024-08-29 16:21:30,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=199344.0, ans=0.2
+2024-08-29 16:21:32,417 INFO [train.py:1114] (1/4) Epoch 16, batch 50, loss[loss=0.1878, simple_loss=0.2524, pruned_loss=0.04509, ctc_loss=0.08228, over 19724.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2797, pruned_loss=0.05648, ctc_loss=0.1065, over 845315.06 frames. ], batch size: 47, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:22:09,374 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.11 vs. limit=15.0
+2024-08-29 16:22:27,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=199610.66666666666, ans=0.125
+2024-08-29 16:22:28,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=199610.66666666666, ans=0.125
+2024-08-29 16:22:40,106 INFO [train.py:1114] (1/4) Epoch 16, batch 100, loss[loss=0.2069, simple_loss=0.2683, pruned_loss=0.05265, ctc_loss=0.1004, over 19711.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2813, pruned_loss=0.0557, ctc_loss=0.1055, over 1500370.32 frames. ], batch size: 51, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:22:40,923 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.58 vs. limit=22.5
+2024-08-29 16:23:05,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=199717.33333333334, ans=0.2
+2024-08-29 16:23:08,062 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 1.815e+02 2.137e+02 2.569e+02 4.869e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-29 16:23:21,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=199770.66666666666, ans=0.07
+2024-08-29 16:23:25,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=199770.66666666666, ans=0.0
+2024-08-29 16:23:27,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=199824.0, ans=0.0
+2024-08-29 16:37:11,211 INFO [train.py:1114] (1/4) Epoch 16, batch 150, loss[loss=0.1847, simple_loss=0.2424, pruned_loss=0.04628, ctc_loss=0.08619, over 19726.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2791, pruned_loss=0.0546, ctc_loss=0.1024, over 2028336.53 frames. ], batch size: 47, lr: 9.50e-03, grad_scale: 32.0
+2024-08-29 16:40:06,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=199984.0, ans=0.0
+2024-08-29 16:40:58,081 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.02 vs. limit=15.0
+2024-08-29 16:41:12,147 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.02 vs. limit=15.0
+2024-08-29 16:44:45,762 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=200037.33333333334, ans=0.2
+2024-08-29 16:48:09,839 INFO [train.py:1114] (1/4) Epoch 16, batch 200, loss[loss=0.2419, simple_loss=0.2951, pruned_loss=0.06806, ctc_loss=0.1317, over 17985.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2768, pruned_loss=0.05333, ctc_loss=0.1001, over 2435728.98 frames. ], batch size: 85, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:49:58,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=200197.33333333334, ans=0.0
+2024-08-29 16:49:58,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=200197.33333333334, ans=0.05
+2024-08-29 16:53:29,819 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.834e+02 2.227e+02 2.815e+02 4.534e+02, threshold=4.454e+02, percent-clipped=1.0
+2024-08-29 16:55:54,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=200357.33333333334, ans=0.125
+2024-08-29 16:56:29,763 INFO [train.py:1114] (1/4) Epoch 16, batch 250, loss[loss=0.2098, simple_loss=0.2774, pruned_loss=0.05116, ctc_loss=0.09941, over 19429.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2776, pruned_loss=0.05382, ctc_loss=0.1011, over 2754843.08 frames. ], batch size: 67, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:56:42,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=200464.0, ans=0.0
+2024-08-29 16:56:43,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=200464.0, ans=0.1
+2024-08-29 16:56:44,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=200464.0, ans=0.125
+2024-08-29 16:56:47,146 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.20 vs. limit=12.0
+2024-08-29 16:58:32,477 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=200517.33333333334, ans=0.125
+2024-08-29 16:58:42,192 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.89 vs. limit=22.5
+2024-08-29 16:58:55,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=200570.66666666666, ans=0.025
+2024-08-29 16:59:14,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=200624.0, ans=0.1
+2024-08-29 17:01:55,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=200677.33333333334, ans=0.05
+2024-08-29 17:03:13,494 INFO [train.py:1114] (1/4) Epoch 16, batch 300, loss[loss=0.2228, simple_loss=0.2864, pruned_loss=0.05822, ctc_loss=0.1068, over 19518.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2777, pruned_loss=0.05428, ctc_loss=0.1022, over 3000764.20 frames. ], batch size: 61, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:03:24,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=200730.66666666666, ans=0.0
+2024-08-29 17:03:36,040 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.663e+02 1.972e+02 2.398e+02 4.674e+02, threshold=3.943e+02, percent-clipped=1.0
+2024-08-29 17:08:30,914 INFO [train.py:1114] (1/4) Epoch 16, batch 350, loss[loss=0.2104, simple_loss=0.2664, pruned_loss=0.05703, ctc_loss=0.1007, over 19773.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2785, pruned_loss=0.05463, ctc_loss=0.1027, over 3190855.31 frames. ], batch size: 48, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:12:09,156 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.94 vs. limit=12.0
+2024-08-29 17:12:10,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=201104.0, ans=0.125
+2024-08-29 17:13:11,905 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:13:12,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=201210.66666666666, ans=0.1
+2024-08-29 17:13:17,604 INFO [train.py:1114] (1/4) Epoch 16, batch 400, loss[loss=0.2153, simple_loss=0.2772, pruned_loss=0.05562, ctc_loss=0.1053, over 19477.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.278, pruned_loss=0.05428, ctc_loss=0.1023, over 3342904.94 frames. ], batch size: 54, lr: 9.47e-03, grad_scale: 32.0
+2024-08-29 17:15:51,036 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.714e+02 1.905e+02 2.508e+02 3.565e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-29 17:16:24,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=201370.66666666666, ans=0.07
+2024-08-29 17:16:51,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=201477.33333333334, ans=0.125
+2024-08-29 17:17:01,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=201477.33333333334, ans=0.1
+2024-08-29 17:17:07,839 INFO [train.py:1114] (1/4) Epoch 16, batch 450, loss[loss=0.2288, simple_loss=0.2913, pruned_loss=0.06077, ctc_loss=0.1117, over 19611.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2783, pruned_loss=0.05443, ctc_loss=0.1024, over 3450336.79 frames. ], batch size: 55, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:20:49,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=201637.33333333334, ans=0.2
+2024-08-29 17:21:10,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=201690.66666666666, ans=0.125
+2024-08-29 17:21:57,452 INFO [train.py:1114] (1/4) Epoch 16, batch 500, loss[loss=0.2213, simple_loss=0.2954, pruned_loss=0.05465, ctc_loss=0.09452, over 19649.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2778, pruned_loss=0.05449, ctc_loss=0.1025, over 3545536.02 frames. ], batch size: 63, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:21:59,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=201797.33333333334, ans=0.125
+2024-08-29 17:22:46,942 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.689e+02 2.169e+02 2.570e+02 5.370e+02, threshold=4.338e+02, percent-clipped=3.0
+2024-08-29 17:22:47,653 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.22 vs. limit=15.0
+2024-08-29 17:23:10,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=201904.0, ans=0.125
+2024-08-29 17:23:43,703 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.40 vs. limit=12.0
+2024-08-29 17:23:52,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=202010.66666666666, ans=0.0
+2024-08-29 17:24:02,319 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.21 vs. limit=15.0
+2024-08-29 17:24:02,822 INFO [train.py:1114] (1/4) Epoch 16, batch 550, loss[loss=0.2558, simple_loss=0.3144, pruned_loss=0.072, ctc_loss=0.1333, over 19295.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2779, pruned_loss=0.0544, ctc_loss=0.1025, over 3607433.72 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:24:05,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=202064.0, ans=0.125
+2024-08-29 17:24:24,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=202064.0, ans=0.0
+2024-08-29 17:24:29,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=202117.33333333334, ans=0.0
+2024-08-29 17:24:36,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=202170.66666666666, ans=0.025
+2024-08-29 17:24:44,025 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.42 vs. limit=15.0
+2024-08-29 17:25:21,524 INFO [train.py:1114] (1/4) Epoch 16, batch 600, loss[loss=0.2304, simple_loss=0.2968, pruned_loss=0.05941, ctc_loss=0.113, over 19341.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2778, pruned_loss=0.05393, ctc_loss=0.1017, over 3665042.59 frames. ], batch size: 67, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:25:25,324 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:25:27,933 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.40 vs. limit=15.0
+2024-08-29 17:27:03,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=202384.0, ans=0.0
+2024-08-29 17:27:04,566 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.652e+02 1.934e+02 2.290e+02 3.719e+02, threshold=3.867e+02, percent-clipped=0.0
+2024-08-29 17:28:12,563 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=202437.33333333334, ans=0.5
+2024-08-29 17:30:09,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=202490.66666666666, ans=0.125
+2024-08-29 17:30:09,235 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:31:03,786 INFO [train.py:1114] (1/4) Epoch 16, batch 650, loss[loss=0.2008, simple_loss=0.2684, pruned_loss=0.04866, ctc_loss=0.08987, over 19775.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2771, pruned_loss=0.05351, ctc_loss=0.101, over 3716361.67 frames. ], batch size: 54, lr: 9.44e-03, grad_scale: 32.0
+2024-08-29 17:32:04,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-29 17:32:16,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=202597.33333333334, ans=0.0
+2024-08-29 17:32:24,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=202650.66666666666, ans=0.04949747468305833
+2024-08-29 17:32:36,690 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=202704.0, ans=0.125
+2024-08-29 17:33:07,523 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.70 vs. limit=15.0
+2024-08-29 17:33:43,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=202810.66666666666, ans=0.2
+2024-08-29 17:34:02,107 INFO [train.py:1114] (1/4) Epoch 16, batch 700, loss[loss=0.2245, simple_loss=0.2864, pruned_loss=0.05863, ctc_loss=0.1132, over 19726.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.278, pruned_loss=0.05418, ctc_loss=0.1022, over 3748555.01 frames. ], batch size: 51, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:35:12,332 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.286e+02 1.755e+02 2.110e+02 2.761e+02 5.047e+02, threshold=4.220e+02, percent-clipped=5.0
+2024-08-29 17:36:00,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=202970.66666666666, ans=0.1
+2024-08-29 17:36:22,102 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.62 vs. limit=15.0
+2024-08-29 17:36:22,185 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.83 vs. limit=10.0
+2024-08-29 17:36:22,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=202970.66666666666, ans=0.125
+2024-08-29 17:38:14,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=203024.0, ans=0.125
+2024-08-29 17:41:59,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=203077.33333333334, ans=0.125
+2024-08-29 17:42:00,558 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.08 vs. limit=12.0
+2024-08-29 17:42:01,673 INFO [train.py:1114] (1/4) Epoch 16, batch 750, loss[loss=0.1938, simple_loss=0.2683, pruned_loss=0.0438, ctc_loss=0.0794, over 19527.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2771, pruned_loss=0.05376, ctc_loss=0.1013, over 3773192.21 frames. ], batch size: 54, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:42:27,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=203184.0, ans=0.2
+2024-08-29 17:46:04,711 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.02 vs. limit=15.0
+2024-08-29 17:46:10,162 INFO [train.py:1114] (1/4) Epoch 16, batch 800, loss[loss=0.2058, simple_loss=0.2618, pruned_loss=0.05492, ctc_loss=0.1001, over 19808.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2767, pruned_loss=0.05347, ctc_loss=0.1005, over 3794738.18 frames. ], batch size: 49, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:48:03,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=203450.66666666666, ans=0.05
+2024-08-29 17:48:06,791 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.32 vs. limit=15.0
+2024-08-29 17:48:15,895 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.745e+02 2.069e+02 2.556e+02 3.770e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-29 17:49:06,932 INFO [train.py:1114] (1/4) Epoch 16, batch 850, loss[loss=0.2226, simple_loss=0.2896, pruned_loss=0.0572, ctc_loss=0.1032, over 19655.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2762, pruned_loss=0.05338, ctc_loss=0.1002, over 3814579.97 frames. ], batch size: 59, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:49:40,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=203770.66666666666, ans=0.0
+2024-08-29 17:50:21,098 INFO [train.py:1114] (1/4) Epoch 16, batch 900, loss[loss=0.1854, simple_loss=0.2494, pruned_loss=0.04475, ctc_loss=0.07982, over 19423.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2769, pruned_loss=0.05393, ctc_loss=0.1014, over 3817507.33 frames. ], batch size: 48, lr: 9.41e-03, grad_scale: 32.0
+2024-08-29 17:50:48,724 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.676e+02 1.827e+02 2.350e+02 4.099e+02, threshold=3.653e+02, percent-clipped=0.0
+2024-08-29 17:51:16,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=204037.33333333334, ans=0.1
+2024-08-29 17:53:23,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=204144.0, ans=0.0
+2024-08-29 17:53:32,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=204144.0, ans=0.125
+2024-08-29 17:53:37,493 INFO [train.py:1114] (1/4) Epoch 16, batch 950, loss[loss=0.2095, simple_loss=0.2706, pruned_loss=0.05239, ctc_loss=0.1088, over 19489.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.277, pruned_loss=0.05383, ctc_loss=0.1013, over 3820500.71 frames. ], batch size: 49, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:54:15,957 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:54:39,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=204250.66666666666, ans=0.0
+2024-08-29 17:54:54,857 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.25 vs. limit=15.0
+2024-08-29 17:54:59,155 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.77 vs. limit=15.0
+2024-08-29 17:55:35,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=204410.66666666666, ans=0.2
+2024-08-29 17:55:43,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=204410.66666666666, ans=0.125
+2024-08-29 17:55:46,640 INFO [train.py:1114] (1/4) Epoch 16, batch 1000, loss[loss=0.2193, simple_loss=0.2817, pruned_loss=0.05655, ctc_loss=0.1095, over 19869.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2782, pruned_loss=0.0543, ctc_loss=0.1022, over 3817813.39 frames. ], batch size: 52, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:56:07,201 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.649e+02 1.918e+02 2.268e+02 3.238e+02, threshold=3.836e+02, percent-clipped=0.0
+2024-08-29 17:57:00,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=204570.66666666666, ans=0.1
+2024-08-29 17:57:43,276 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.49 vs. limit=15.0
+2024-08-29 17:57:54,926 INFO [train.py:1114] (1/4) Epoch 16, batch 1050, loss[loss=0.2436, simple_loss=0.2993, pruned_loss=0.06845, ctc_loss=0.1272, over 19829.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2775, pruned_loss=0.05414, ctc_loss=0.1017, over 3823563.85 frames. ], batch size: 57, lr: 9.39e-03, grad_scale: 32.0
+2024-08-29 17:57:55,224 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=204730.66666666666, ans=0.125
+2024-08-29 17:58:28,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=204730.66666666666, ans=0.0
+2024-08-29 17:58:29,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=204730.66666666666, ans=0.125
+2024-08-29 17:58:29,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=204730.66666666666, ans=0.1
+2024-08-29 17:58:30,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=204730.66666666666, ans=0.1
+2024-08-29 18:00:49,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=204944.0, ans=0.125
+2024-08-29 18:00:53,273 INFO [train.py:1114] (1/4) Epoch 16, batch 1100, loss[loss=0.2166, simple_loss=0.2754, pruned_loss=0.05747, ctc_loss=0.1071, over 19592.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2774, pruned_loss=0.05422, ctc_loss=0.102, over 3829492.88 frames. ], batch size: 52, lr: 9.39e-03, grad_scale: 16.0
+2024-08-29 18:01:10,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=204997.33333333334, ans=0.125
+2024-08-29 18:01:19,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=205050.66666666666, ans=0.0
+2024-08-29 18:01:27,925 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 1.694e+02 1.874e+02 2.325e+02 3.063e+02, threshold=3.748e+02, percent-clipped=0.0
+2024-08-29 18:02:43,480 INFO [train.py:1114] (1/4) Epoch 16, batch 1150, loss[loss=0.2003, simple_loss=0.2648, pruned_loss=0.0488, ctc_loss=0.0956, over 19567.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2773, pruned_loss=0.05422, ctc_loss=0.1022, over 3828230.62 frames. ], batch size: 52, lr: 9.38e-03, grad_scale: 16.0
+2024-08-29 18:02:46,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=205264.0, ans=0.125
+2024-08-29 18:02:48,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=205264.0, ans=0.125
+2024-08-29 18:03:01,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=205317.33333333334, ans=0.0
+2024-08-29 18:03:02,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=205317.33333333334, ans=0.5
+2024-08-29 18:03:19,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=205424.0, ans=0.125
+2024-08-29 18:03:20,895 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.19 vs. limit=15.0
+2024-08-29 18:03:40,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=205477.33333333334, ans=0.125
+2024-08-29 18:03:45,100 INFO [train.py:1114] (1/4) Epoch 16, batch 1200, loss[loss=0.215, simple_loss=0.286, pruned_loss=0.05197, ctc_loss=0.1001, over 19850.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2786, pruned_loss=0.0544, ctc_loss=0.1026, over 3824031.93 frames. ], batch size: 57, lr: 9.38e-03, grad_scale: 32.0
+2024-08-29 18:03:50,357 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.62 vs. limit=15.0
+2024-08-29 18:04:06,312 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.725e+02 2.012e+02 2.470e+02 3.418e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-29 18:04:07,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=205637.33333333334, ans=0.125
+2024-08-29 18:04:40,675 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.30 vs. limit=15.0
+2024-08-29 18:04:50,727 INFO [train.py:1114] (1/4) Epoch 16, batch 1250, loss[loss=0.2415, simple_loss=0.3028, pruned_loss=0.066, ctc_loss=0.1205, over 19502.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2784, pruned_loss=0.05426, ctc_loss=0.1023, over 3842506.11 frames. ], batch size: 61, lr: 9.37e-03, grad_scale: 32.0
+2024-08-29 18:05:22,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=205850.66666666666, ans=0.125
+2024-08-29 18:05:33,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=205904.0, ans=0.0
+2024-08-29 18:05:35,813 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=205904.0, ans=0.1
+2024-08-29 18:05:49,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=205957.33333333334, ans=0.025
+2024-08-29 18:05:50,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=205957.33333333334, ans=10.0
+2024-08-29 18:06:24,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=206010.66666666666, ans=0.0
+2024-08-29 18:06:29,216 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.05 vs. limit=15.0
+2024-08-29 18:06:31,294 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=206010.66666666666, ans=0.125
+2024-08-29 18:06:33,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=206010.66666666666, ans=0.125
+2024-08-29 18:06:35,884 INFO [train.py:1114] (1/4) Epoch 16, batch 1300, loss[loss=0.2207, simple_loss=0.2892, pruned_loss=0.05525, ctc_loss=0.104, over 18933.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2777, pruned_loss=0.05374, ctc_loss=0.1012, over 3846574.33 frames. ], batch size: 76, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:06:45,580 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.54 vs. limit=22.5
+2024-08-29 18:06:56,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=206117.33333333334, ans=0.5
+2024-08-29 18:06:57,554 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.716e+02 2.090e+02 2.690e+02 4.268e+02, threshold=4.180e+02, percent-clipped=3.0
+2024-08-29 18:07:04,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=206170.66666666666, ans=0.0
+2024-08-29 18:07:07,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.96 vs. limit=22.5
+2024-08-29 18:07:12,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=206224.0, ans=0.125
+2024-08-29 18:07:18,753 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=206224.0, ans=0.2
+2024-08-29 18:07:22,864 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.60 vs. limit=15.0
+2024-08-29 18:07:23,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=206277.33333333334, ans=0.125
+2024-08-29 18:07:34,540 INFO [train.py:1114] (1/4) Epoch 16, batch 1350, loss[loss=0.2193, simple_loss=0.2821, pruned_loss=0.05672, ctc_loss=0.1076, over 19737.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2772, pruned_loss=0.05339, ctc_loss=0.1006, over 3857255.26 frames. ], batch size: 54, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:09:45,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=206490.66666666666, ans=0.1
+2024-08-29 18:09:47,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=206490.66666666666, ans=0.0
+2024-08-29 18:09:47,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=206490.66666666666, ans=0.025
+2024-08-29 18:09:47,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=206490.66666666666, ans=0.125
+2024-08-29 18:10:13,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=206544.0, ans=0.125
+2024-08-29 18:10:56,015 INFO [train.py:1114] (1/4) Epoch 16, batch 1400, loss[loss=0.2021, simple_loss=0.2627, pruned_loss=0.05119, ctc_loss=0.09769, over 19665.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2769, pruned_loss=0.05331, ctc_loss=0.1004, over 3863890.49 frames. ], batch size: 46, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:10:58,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=206597.33333333334, ans=0.025
+2024-08-29 18:10:59,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=206597.33333333334, ans=0.0
+2024-08-29 18:12:49,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=206650.66666666666, ans=0.0
+2024-08-29 18:13:15,196 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.659e+02 1.830e+02 2.117e+02 3.619e+02, threshold=3.659e+02, percent-clipped=0.0
+2024-08-29 18:13:24,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=206704.0, ans=0.025
+2024-08-29 18:13:26,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=206704.0, ans=0.0
+2024-08-29 18:14:30,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=206757.33333333334, ans=0.125
+2024-08-29 18:14:31,298 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.49 vs. limit=12.0
+2024-08-29 18:14:31,520 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.39 vs. limit=12.0
+2024-08-29 18:14:34,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=206757.33333333334, ans=0.0
+2024-08-29 18:14:38,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=206810.66666666666, ans=0.2
+2024-08-29 18:14:41,518 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.04 vs. limit=15.0
+2024-08-29 18:14:49,669 INFO [train.py:1114] (1/4) Epoch 16, batch 1450, loss[loss=0.2204, simple_loss=0.2821, pruned_loss=0.05816, ctc_loss=0.1062, over 19670.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2774, pruned_loss=0.0534, ctc_loss=0.1006, over 3862124.09 frames. ], batch size: 63, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:14:50,200 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.31 vs. limit=15.0
+2024-08-29 18:15:27,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=206970.66666666666, ans=0.07
+2024-08-29 18:15:35,536 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=206970.66666666666, ans=0.125
+2024-08-29 18:15:37,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=207024.0, ans=0.025
+2024-08-29 18:16:06,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=207077.33333333334, ans=0.09899494936611666
+2024-08-29 18:16:06,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=207077.33333333334, ans=0.125
+2024-08-29 18:16:10,749 INFO [train.py:1114] (1/4) Epoch 16, batch 1500, loss[loss=0.2386, simple_loss=0.2999, pruned_loss=0.06431, ctc_loss=0.1217, over 19580.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2776, pruned_loss=0.05328, ctc_loss=0.1005, over 3862803.67 frames. ], batch size: 57, lr: 9.34e-03, grad_scale: 32.0
+2024-08-29 18:16:26,737 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=207184.0, ans=0.2
+2024-08-29 18:16:32,415 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.680e+02 1.893e+02 2.490e+02 3.994e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-29 18:16:41,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=207237.33333333334, ans=0.0
+2024-08-29 18:17:33,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=207397.33333333334, ans=0.125
+2024-08-29 18:17:34,473 INFO [train.py:1114] (1/4) Epoch 16, batch 1550, loss[loss=0.2362, simple_loss=0.2973, pruned_loss=0.06391, ctc_loss=0.1183, over 19604.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2787, pruned_loss=0.05408, ctc_loss=0.1022, over 3847412.94 frames. ], batch size: 60, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:17:35,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=207397.33333333334, ans=0.0
+2024-08-29 18:17:36,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=207397.33333333334, ans=0.0
+2024-08-29 18:17:38,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=207397.33333333334, ans=0.0
+2024-08-29 18:19:24,027 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.23 vs. limit=15.0
+2024-08-29 18:19:55,347 INFO [train.py:1114] (1/4) Epoch 16, batch 1600, loss[loss=0.236, simple_loss=0.3005, pruned_loss=0.06185, ctc_loss=0.1194, over 19845.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2784, pruned_loss=0.05423, ctc_loss=0.1024, over 3835699.28 frames. ], batch size: 57, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:19:55,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=207664.0, ans=0.2
+2024-08-29 18:20:27,219 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=207664.0, ans=0.035
+2024-08-29 18:20:34,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=207664.0, ans=0.125
+2024-08-29 18:21:41,882 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.10 vs. limit=6.0
+2024-08-29 18:21:55,746 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.773e+02 1.965e+02 2.508e+02 5.321e+02, threshold=3.930e+02, percent-clipped=3.0
+2024-08-29 18:21:57,293 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=207770.66666666666, ans=0.125
+2024-08-29 18:22:00,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=207770.66666666666, ans=0.1
+2024-08-29 18:22:06,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=207770.66666666666, ans=0.125
+2024-08-29 18:22:11,791 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.62 vs. limit=10.0
+2024-08-29 18:22:12,484 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=207824.0, ans=0.125
+2024-08-29 18:22:12,632 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=207824.0, ans=0.0
+2024-08-29 18:22:53,651 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:23:00,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=207930.66666666666, ans=0.1
+2024-08-29 18:23:01,481 INFO [train.py:1114] (1/4) Epoch 16, batch 1650, loss[loss=0.2219, simple_loss=0.2963, pruned_loss=0.05367, ctc_loss=0.1004, over 19618.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2785, pruned_loss=0.05465, ctc_loss=0.1032, over 3831939.66 frames. ], batch size: 59, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:23:05,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=207930.66666666666, ans=0.125
+2024-08-29 18:24:25,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=208037.33333333334, ans=0.125
+2024-08-29 18:24:26,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=208037.33333333334, ans=0.125
+2024-08-29 18:24:40,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=208090.66666666666, ans=0.125
+2024-08-29 18:24:53,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=208144.0, ans=0.025
+2024-08-29 18:24:58,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=208197.33333333334, ans=0.025
+2024-08-29 18:26:13,693 INFO [train.py:1114] (1/4) Epoch 16, batch 1700, loss[loss=0.1729, simple_loss=0.2448, pruned_loss=0.03593, ctc_loss=0.0729, over 19656.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2779, pruned_loss=0.05407, ctc_loss=0.102, over 3846742.17 frames. ], batch size: 46, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:26:34,599 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.759e+02 2.180e+02 2.878e+02 5.111e+02, threshold=4.361e+02, percent-clipped=4.0
+2024-08-29 18:26:48,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=208304.0, ans=0.2
+2024-08-29 18:26:50,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=208357.33333333334, ans=0.125
+2024-08-29 18:26:58,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=208357.33333333334, ans=0.125
+2024-08-29 18:27:13,924 INFO [train.py:1114] (1/4) Epoch 16, batch 1750, loss[loss=0.1749, simple_loss=0.2389, pruned_loss=0.04079, ctc_loss=0.07308, over 19637.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2773, pruned_loss=0.05407, ctc_loss=0.1021, over 3850980.22 frames. ], batch size: 45, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:27:14,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=208464.0, ans=0.125
+2024-08-29 18:27:49,482 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.49 vs. limit=22.5
+2024-08-29 18:29:02,202 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.76 vs. limit=15.0
+2024-08-29 18:29:23,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=208624.0, ans=0.0
+2024-08-29 18:30:06,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=208677.33333333334, ans=0.1
+2024-08-29 18:30:17,802 INFO [train.py:1114] (1/4) Epoch 16, batch 1800, loss[loss=0.2122, simple_loss=0.2815, pruned_loss=0.05236, ctc_loss=0.09556, over 19619.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2771, pruned_loss=0.05385, ctc_loss=0.1017, over 3853296.31 frames. ], batch size: 55, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:30:25,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=208730.66666666666, ans=0.125
+2024-08-29 18:30:37,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=208784.0, ans=0.1
+2024-08-29 18:30:45,747 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 1.693e+02 1.985e+02 2.381e+02 4.228e+02, threshold=3.971e+02, percent-clipped=0.0
+2024-08-29 18:31:20,224 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=208944.0, ans=0.0
+2024-08-29 18:31:22,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=208944.0, ans=0.125
+2024-08-29 18:31:45,939 INFO [train.py:1114] (1/4) Epoch 16, batch 1850, loss[loss=0.2497, simple_loss=0.3056, pruned_loss=0.07034, ctc_loss=0.1329, over 19586.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2771, pruned_loss=0.05402, ctc_loss=0.1019, over 3856883.61 frames. ], batch size: 57, lr: 9.30e-03, grad_scale: 32.0
+2024-08-29 18:32:48,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209104.0, ans=0.1
+2024-08-29 18:33:06,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=209210.66666666666, ans=0.125
+2024-08-29 18:33:10,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=209210.66666666666, ans=0.05
+2024-08-29 18:33:13,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=209210.66666666666, ans=0.0
+2024-08-29 18:33:17,353 INFO [train.py:1114] (1/4) Epoch 16, batch 1900, loss[loss=0.2047, simple_loss=0.2799, pruned_loss=0.04699, ctc_loss=0.08852, over 19647.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.278, pruned_loss=0.05439, ctc_loss=0.1024, over 3860956.87 frames. ], batch size: 59, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:33:18,684 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=209264.0, ans=0.035
+2024-08-29 18:33:19,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=209264.0, ans=0.0
+2024-08-29 18:33:40,786 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.785e+02 2.354e+02 2.964e+02 6.037e+02, threshold=4.708e+02, percent-clipped=9.0
+2024-08-29 18:33:52,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=209370.66666666666, ans=0.125
+2024-08-29 18:33:53,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=209424.0, ans=0.125
+2024-08-29 18:34:30,814 INFO [train.py:1114] (1/4) Epoch 16, batch 1950, loss[loss=0.2213, simple_loss=0.2852, pruned_loss=0.05764, ctc_loss=0.1055, over 19605.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2785, pruned_loss=0.05418, ctc_loss=0.1019, over 3870048.19 frames. ], batch size: 52, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:34:35,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=209530.66666666666, ans=0.025
+2024-08-29 18:34:42,901 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.34 vs. limit=6.0
+2024-08-29 18:35:06,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=209584.0, ans=0.0
+2024-08-29 18:35:08,010 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.40 vs. limit=8.0
+2024-08-29 18:35:15,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=209637.33333333334, ans=0.025
+2024-08-29 18:35:22,707 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=209690.66666666666, ans=0.2
+2024-08-29 18:35:38,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=209744.0, ans=0.0
+2024-08-29 18:35:42,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=209744.0, ans=0.125
+2024-08-29 18:35:51,668 INFO [train.py:1114] (1/4) Epoch 16, batch 2000, loss[loss=0.2059, simple_loss=0.2582, pruned_loss=0.05672, ctc_loss=0.1005, over 19660.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2796, pruned_loss=0.0549, ctc_loss=0.1031, over 3855245.04 frames. ], batch size: 45, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:36:03,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=209850.66666666666, ans=0.0
+2024-08-29 18:36:09,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=209850.66666666666, ans=0.125
+2024-08-29 18:36:13,162 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.666e+02 1.888e+02 2.185e+02 3.516e+02, threshold=3.775e+02, percent-clipped=0.0
+2024-08-29 18:36:14,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=209904.0, ans=0.125
+2024-08-29 18:36:18,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=209904.0, ans=0.125
+2024-08-29 18:37:02,153 INFO [train.py:1114] (1/4) Epoch 16, batch 2050, loss[loss=0.2005, simple_loss=0.255, pruned_loss=0.05287, ctc_loss=0.1008, over 19737.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2783, pruned_loss=0.05458, ctc_loss=0.1026, over 3852853.59 frames. ], batch size: 47, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:37:33,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=210064.0, ans=10.0
+2024-08-29 18:37:36,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=210064.0, ans=0.95
+2024-08-29 18:37:46,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=210117.33333333334, ans=0.125
+2024-08-29 18:37:51,241 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.15 vs. limit=22.5
+2024-08-29 18:37:52,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=210170.66666666666, ans=0.025
+2024-08-29 18:37:57,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=210170.66666666666, ans=0.1
+2024-08-29 18:38:16,966 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=210170.66666666666, ans=0.125
+2024-08-29 18:38:32,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=210224.0, ans=0.125
+2024-08-29 18:38:48,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=210277.33333333334, ans=0.2
+2024-08-29 18:38:59,606 INFO [train.py:1114] (1/4) Epoch 16, batch 2100, loss[loss=0.2034, simple_loss=0.2746, pruned_loss=0.04882, ctc_loss=0.08652, over 19766.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2778, pruned_loss=0.05422, ctc_loss=0.102, over 3859400.07 frames. ], batch size: 54, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:39:08,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=210330.66666666666, ans=0.0
+2024-08-29 18:39:22,241 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.792e+02 2.112e+02 2.675e+02 4.176e+02, threshold=4.223e+02, percent-clipped=3.0
+2024-08-29 18:39:28,707 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=210437.33333333334, ans=0.1
+2024-08-29 18:39:33,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=210437.33333333334, ans=0.0
+2024-08-29 18:39:37,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=210490.66666666666, ans=0.025
+2024-08-29 18:39:57,967 INFO [train.py:1114] (1/4) Epoch 16, batch 2150, loss[loss=0.1858, simple_loss=0.2578, pruned_loss=0.0414, ctc_loss=0.07755, over 19576.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2773, pruned_loss=0.05417, ctc_loss=0.1018, over 3869308.08 frames. ], batch size: 52, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:40:05,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=210597.33333333334, ans=0.125
+2024-08-29 18:40:07,054 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.89 vs. limit=15.0
+2024-08-29 18:40:47,976 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.36 vs. limit=15.0
+2024-08-29 18:41:08,955 INFO [train.py:1114] (1/4) Epoch 16, batch 2200, loss[loss=0.2042, simple_loss=0.2731, pruned_loss=0.04846, ctc_loss=0.09591, over 19591.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2768, pruned_loss=0.0537, ctc_loss=0.101, over 3867258.39 frames. ], batch size: 57, lr: 9.26e-03, grad_scale: 32.0
+2024-08-29 18:41:29,789 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.757e+02 2.042e+02 2.598e+02 4.148e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-08-29 18:42:31,175 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.57 vs. limit=22.5
+2024-08-29 18:42:31,565 INFO [train.py:1114] (1/4) Epoch 16, batch 2250, loss[loss=0.2181, simple_loss=0.2856, pruned_loss=0.0553, ctc_loss=0.1, over 19622.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2773, pruned_loss=0.05405, ctc_loss=0.1016, over 3866351.87 frames. ], batch size: 55, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:42:48,294 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=211184.0, ans=0.1
+2024-08-29 18:42:52,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=211184.0, ans=0.1
+2024-08-29 18:43:15,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=211290.66666666666, ans=0.0
+2024-08-29 18:43:17,738 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.94 vs. limit=15.0
+2024-08-29 18:44:22,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.97 vs. limit=6.0
+2024-08-29 18:44:24,272 INFO [train.py:1114] (1/4) Epoch 16, batch 2300, loss[loss=0.1881, simple_loss=0.2587, pruned_loss=0.04296, ctc_loss=0.07883, over 19515.00 frames. ], tot_loss[loss=0.212, simple_loss=0.276, pruned_loss=0.05371, ctc_loss=0.1012, over 3859699.18 frames. ], batch size: 49, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:45:10,437 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.785e+02 2.121e+02 2.618e+02 4.213e+02, threshold=4.241e+02, percent-clipped=2.0
+2024-08-29 18:45:10,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211450.66666666666, ans=0.1
+2024-08-29 18:45:16,239 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:45:59,060 INFO [train.py:1114] (1/4) Epoch 16, batch 2350, loss[loss=0.2311, simple_loss=0.2954, pruned_loss=0.06194, ctc_loss=0.1077, over 19677.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2759, pruned_loss=0.05356, ctc_loss=0.101, over 3862859.07 frames. ], batch size: 63, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:46:17,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=211717.33333333334, ans=0.0
+2024-08-29 18:46:39,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=211824.0, ans=0.2
+2024-08-29 18:46:54,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=211877.33333333334, ans=0.1
+2024-08-29 18:47:00,338 INFO [train.py:1114] (1/4) Epoch 16, batch 2400, loss[loss=0.2201, simple_loss=0.288, pruned_loss=0.05495, ctc_loss=0.1058, over 19323.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2777, pruned_loss=0.05399, ctc_loss=0.102, over 3858282.58 frames. ], batch size: 71, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:47:00,877 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.01 vs. limit=15.0
+2024-08-29 18:47:05,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.86 vs. limit=15.0
+2024-08-29 18:47:13,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=211984.0, ans=0.125
+2024-08-29 18:47:20,731 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.800e+02 2.132e+02 2.653e+02 4.129e+02, threshold=4.264e+02, percent-clipped=0.0
+2024-08-29 18:47:23,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=212037.33333333334, ans=0.0
+2024-08-29 18:47:37,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=212090.66666666666, ans=0.125
+2024-08-29 18:47:55,321 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.81 vs. limit=10.0
+2024-08-29 18:47:56,900 INFO [train.py:1114] (1/4) Epoch 16, batch 2450, loss[loss=0.2555, simple_loss=0.2986, pruned_loss=0.0777, ctc_loss=0.1427, over 13173.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2817, pruned_loss=0.0569, ctc_loss=0.1076, over 3731705.48 frames. ], batch size: 143, lr: 9.23e-03, grad_scale: 32.0
+2024-08-29 18:48:05,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=212197.33333333334, ans=0.0
+2024-08-29 18:48:05,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=212197.33333333334, ans=0.0
+2024-08-29 18:48:10,077 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=18.52 vs. limit=15.0
+2024-08-29 18:48:34,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=212357.33333333334, ans=0.0
+2024-08-29 18:48:34,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212357.33333333334, ans=0.1
+2024-08-29 18:55:35,479 INFO [train.py:1114] (1/4) Epoch 17, batch 0, loss[loss=0.2156, simple_loss=0.2669, pruned_loss=0.05999, ctc_loss=0.1109, over 19820.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2669, pruned_loss=0.05999, ctc_loss=0.1109, over 19820.00 frames. ], batch size: 49, lr: 8.95e-03, grad_scale: 32.0
+2024-08-29 18:55:35,479 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-29 18:56:00,095 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.1579, 4.4619, 3.9628, 4.1698], device='cuda:1')
+2024-08-29 18:56:00,527 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.9178, 2.1115, 3.4138, 3.5431], device='cuda:1')
+2024-08-29 18:56:04,690 INFO [train.py:1146] (1/4) Epoch 17, validation: loss=0.1843, simple_loss=0.2733, pruned_loss=0.03544, ctc_loss=0.06098, over 944034.00 frames.
+2024-08-29 18:56:04,691 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13746MB
+2024-08-29 18:56:54,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=212458.66666666666, ans=0.04949747468305833
+2024-08-29 18:56:56,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=212458.66666666666, ans=0.2
+2024-08-29 18:57:44,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=212458.66666666666, ans=6.0
+2024-08-29 18:58:13,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=212458.66666666666, ans=0.0
+2024-08-29 18:58:17,814 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.35 vs. limit=15.0
+2024-08-29 18:58:22,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=212512.0, ans=0.125
+2024-08-29 18:58:30,839 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.824e+02 2.030e+02 2.233e+02 3.073e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-29 18:58:36,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=212565.33333333334, ans=0.1
+2024-08-29 18:58:39,267 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.44 vs. limit=15.0
+2024-08-29 19:05:26,872 INFO [train.py:1114] (1/4) Epoch 17, batch 50, loss[loss=0.2075, simple_loss=0.2707, pruned_loss=0.05266, ctc_loss=0.09763, over 19696.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2789, pruned_loss=0.05382, ctc_loss=0.103, over 844645.99 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:08:06,409 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=212778.66666666666, ans=0.0
+2024-08-29 19:08:10,282 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.94 vs. limit=22.5
+2024-08-29 19:08:14,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=212778.66666666666, ans=0.05
+2024-08-29 19:08:24,119 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.47 vs. limit=6.0
+2024-08-29 19:08:31,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=212832.0, ans=0.125
+2024-08-29 19:08:52,530 INFO [train.py:1114] (1/4) Epoch 17, batch 100, loss[loss=0.1961, simple_loss=0.2684, pruned_loss=0.04431, ctc_loss=0.08798, over 19711.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2809, pruned_loss=0.05483, ctc_loss=0.1037, over 1499050.01 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:09:13,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=212992.0, ans=0.025
+2024-08-29 19:09:16,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=213045.33333333334, ans=0.1
+2024-08-29 19:09:25,903 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.707e+02 1.910e+02 2.335e+02 3.363e+02, threshold=3.820e+02, percent-clipped=0.0
+2024-08-29 19:09:30,764 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:09:38,206 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.86 vs. limit=10.0
+2024-08-29 19:09:58,145 INFO [train.py:1114] (1/4) Epoch 17, batch 150, loss[loss=0.1623, simple_loss=0.229, pruned_loss=0.03399, ctc_loss=0.06913, over 19703.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2776, pruned_loss=0.05326, ctc_loss=0.1004, over 2028977.84 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:10:52,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=213205.33333333334, ans=0.0
+2024-08-29 19:10:52,510 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.53 vs. limit=15.0
+2024-08-29 19:12:27,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=213312.0, ans=0.125
+2024-08-29 19:16:05,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=213365.33333333334, ans=0.125
+2024-08-29 19:16:15,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=213365.33333333334, ans=0.025
+2024-08-29 19:16:24,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213418.66666666666, ans=0.1
+2024-08-29 19:16:29,778 INFO [train.py:1114] (1/4) Epoch 17, batch 200, loss[loss=0.227, simple_loss=0.289, pruned_loss=0.06031, ctc_loss=0.1113, over 18150.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2769, pruned_loss=0.05285, ctc_loss=0.09965, over 2437042.07 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:16:34,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=213472.0, ans=0.125
+2024-08-29 19:24:57,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=213472.0, ans=0.125
+2024-08-29 19:25:01,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=213472.0, ans=0.1
+2024-08-29 19:27:12,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=213525.33333333334, ans=0.125
+2024-08-29 19:27:23,591 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.72 vs. limit=15.0
+2024-08-29 19:27:57,278 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.724e+02 1.931e+02 2.405e+02 4.691e+02, threshold=3.862e+02, percent-clipped=4.0
+2024-08-29 19:28:20,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=213685.33333333334, ans=0.025
+2024-08-29 19:28:32,681 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.59 vs. limit=15.0
+2024-08-29 19:28:36,483 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=213685.33333333334, ans=0.1
+2024-08-29 19:28:38,490 INFO [train.py:1114] (1/4) Epoch 17, batch 250, loss[loss=0.2089, simple_loss=0.2837, pruned_loss=0.04961, ctc_loss=0.08752, over 19395.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2764, pruned_loss=0.05266, ctc_loss=0.09906, over 2757247.20 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:29:12,343 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.04 vs. limit=12.0
+2024-08-29 19:29:46,681 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.74 vs. limit=22.5
+2024-08-29 19:30:02,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=214005.33333333334, ans=0.125
+2024-08-29 19:30:03,450 INFO [train.py:1114] (1/4) Epoch 17, batch 300, loss[loss=0.2456, simple_loss=0.3028, pruned_loss=0.06936, ctc_loss=0.124, over 19510.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2754, pruned_loss=0.05233, ctc_loss=0.09848, over 3001398.30 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:31:58,268 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.01 vs. limit=15.0
+2024-08-29 19:32:02,219 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.689e+02 1.972e+02 2.447e+02 4.331e+02, threshold=3.945e+02, percent-clipped=1.0
+2024-08-29 19:32:17,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=214165.33333333334, ans=0.125
+2024-08-29 19:32:41,676 INFO [train.py:1114] (1/4) Epoch 17, batch 350, loss[loss=0.2143, simple_loss=0.2706, pruned_loss=0.05697, ctc_loss=0.1099, over 19756.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2765, pruned_loss=0.05266, ctc_loss=0.09935, over 3191743.97 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:32:43,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214272.0, ans=0.1
+2024-08-29 19:33:04,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-29 19:33:45,830 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=214432.0, ans=0.0
+2024-08-29 19:33:54,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214432.0, ans=0.125
+2024-08-29 19:34:03,456 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.82 vs. limit=15.0
+2024-08-29 19:34:08,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=214485.33333333334, ans=0.0
+2024-08-29 19:34:18,289 INFO [train.py:1114] (1/4) Epoch 17, batch 400, loss[loss=0.1982, simple_loss=0.2759, pruned_loss=0.04333, ctc_loss=0.08452, over 19487.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2755, pruned_loss=0.05199, ctc_loss=0.09798, over 3343628.60 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:34:39,684 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=214538.66666666666, ans=0.125
+2024-08-29 19:34:50,726 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.16 vs. limit=22.5
+2024-08-29 19:35:28,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214592.0, ans=0.125
+2024-08-29 19:35:29,919 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214592.0, ans=0.1
+2024-08-29 19:35:33,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214645.33333333334, ans=0.1
+2024-08-29 19:35:37,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-29 19:36:30,687 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.665e+02 1.964e+02 2.553e+02 4.238e+02, threshold=3.929e+02, percent-clipped=2.0
+2024-08-29 19:37:49,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=214752.0, ans=0.125
+2024-08-29 19:37:57,091 INFO [train.py:1114] (1/4) Epoch 17, batch 450, loss[loss=0.205, simple_loss=0.2852, pruned_loss=0.04567, ctc_loss=0.08386, over 19605.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2754, pruned_loss=0.05196, ctc_loss=0.0978, over 3450831.97 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:38:15,133 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=214805.33333333334, ans=0.125
+2024-08-29 19:38:32,853 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=214858.66666666666, ans=0.125
+2024-08-29 19:38:42,641 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:38:58,782 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.23 vs. limit=15.0
+2024-08-29 19:39:30,549 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.49 vs. limit=15.0
+2024-08-29 19:40:14,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_na.min_abs, batch_count=215018.66666666666, ans=0.02
+2024-08-29 19:40:26,570 INFO [train.py:1114] (1/4) Epoch 17, batch 500, loss[loss=0.2001, simple_loss=0.2795, pruned_loss=0.04451, ctc_loss=0.07937, over 19661.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2744, pruned_loss=0.05156, ctc_loss=0.09707, over 3546821.36 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:41:54,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=215178.66666666666, ans=0.0
+2024-08-29 19:42:38,132 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.765e+02 1.983e+02 2.603e+02 4.687e+02, threshold=3.966e+02, percent-clipped=3.0
+2024-08-29 19:43:10,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=215232.0, ans=0.125
+2024-08-29 19:43:32,525 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=215232.0, ans=0.125
+2024-08-29 19:43:33,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=215285.33333333334, ans=0.025
+2024-08-29 19:43:45,810 INFO [train.py:1114] (1/4) Epoch 17, batch 550, loss[loss=0.2209, simple_loss=0.2842, pruned_loss=0.05764, ctc_loss=0.1058, over 19299.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2742, pruned_loss=0.05173, ctc_loss=0.09741, over 3608064.98 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-29 19:44:10,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=215338.66666666666, ans=0.0
+2024-08-29 19:45:00,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=215392.0, ans=0.125
+2024-08-29 19:46:07,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=215552.0, ans=0.2
+2024-08-29 19:46:11,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=215552.0, ans=15.0
+2024-08-29 19:46:12,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=215552.0, ans=0.125
+2024-08-29 19:47:01,419 INFO [train.py:1114] (1/4) Epoch 17, batch 600, loss[loss=0.2426, simple_loss=0.3044, pruned_loss=0.06544, ctc_loss=0.1247, over 19430.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2744, pruned_loss=0.0517, ctc_loss=0.09714, over 3665994.82 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:47:01,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=215605.33333333334, ans=0.125
+2024-08-29 19:47:06,214 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:47:41,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=215658.66666666666, ans=0.0
+2024-08-29 19:47:49,286 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.94 vs. limit=15.0
+2024-08-29 19:48:19,060 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.690e+02 1.951e+02 2.307e+02 4.172e+02, threshold=3.901e+02, percent-clipped=2.0
+2024-08-29 19:48:32,501 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.56 vs. limit=22.5
+2024-08-29 19:49:04,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=215818.66666666666, ans=0.125
+2024-08-29 19:49:05,827 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.80 vs. limit=15.0
+2024-08-29 19:49:17,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=215818.66666666666, ans=0.125
+2024-08-29 19:49:21,622 INFO [train.py:1114] (1/4) Epoch 17, batch 650, loss[loss=0.2027, simple_loss=0.2669, pruned_loss=0.0497, ctc_loss=0.0977, over 19764.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2737, pruned_loss=0.05148, ctc_loss=0.09689, over 3716311.99 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:49:27,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=215872.0, ans=0.0
+2024-08-29 19:49:33,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=215872.0, ans=0.2
+2024-08-29 19:49:39,932 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.78 vs. limit=22.5
+2024-08-29 19:50:19,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=215978.66666666666, ans=0.0
+2024-08-29 19:51:32,001 INFO [train.py:1114] (1/4) Epoch 17, batch 700, loss[loss=0.1928, simple_loss=0.2621, pruned_loss=0.04503, ctc_loss=0.08341, over 19726.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2745, pruned_loss=0.0517, ctc_loss=0.09731, over 3749338.22 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:51:32,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=216138.66666666666, ans=0.125
+2024-08-29 19:52:43,598 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.757e+02 1.978e+02 2.439e+02 3.670e+02, threshold=3.956e+02, percent-clipped=0.0
+2024-08-29 19:53:36,104 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.77 vs. limit=5.0
+2024-08-29 19:53:42,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=216352.0, ans=0.05
+2024-08-29 19:53:46,884 INFO [train.py:1114] (1/4) Epoch 17, batch 750, loss[loss=0.2077, simple_loss=0.2808, pruned_loss=0.04834, ctc_loss=0.09486, over 19479.00 frames. ], tot_loss[loss=0.208, simple_loss=0.274, pruned_loss=0.05158, ctc_loss=0.09707, over 3775726.61 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:54:29,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=216405.33333333334, ans=0.125
+2024-08-29 19:55:25,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=216458.66666666666, ans=0.125
+2024-08-29 19:55:48,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=216458.66666666666, ans=0.125
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-2
new file mode 100644
index 0000000000000000000000000000000000000000..1d4982eea27f10654d1ea4c2a7ae5e4e367da32d
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-2
@@ -0,0 +1,1191 @@
+2024-08-29 13:08:38,314 INFO [train.py:1182] (2/4) Training started
+2024-08-29 13:08:41,303 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-29 13:08:41,306 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 13:08:41,307 INFO [train.py:1212] (2/4) About to create model
+2024-08-29 13:08:41,985 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-29 13:08:41,985 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 13:08:59,021 INFO [train.py:1231] (2/4) Using DDP
+2024-08-29 13:09:40,406 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-29 13:09:40,610 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-29 13:09:40,611 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-29 13:09:42,276 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-29 13:09:42,276 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-29 13:09:42,374 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-29 13:09:42,446 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-29 13:09:42,771 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-29 13:09:42,771 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 13:14:18,572 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-29 13:14:21,282 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-29 13:14:38,616 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 13:14:39,780 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=12.51 vs. limit=7.5
+2024-08-29 13:14:45,668 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 13:15:10,810 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 13:15:12,351 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 13:15:12,373 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-29 13:16:15,184 INFO [train.py:1114] (2/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.256, pruned_loss=0.05389, ctc_loss=0.09807, over 19789.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:16:15,185 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-29 13:16:31,404 INFO [train.py:1146] (2/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 13:16:31,405 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-29 13:21:54,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=172634.66666666666, ans=0.125
+2024-08-29 13:24:29,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=172688.0, ans=0.0
+2024-08-29 13:24:55,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=172688.0, ans=0.0
+2024-08-29 13:25:31,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=172741.33333333334, ans=0.0
+2024-08-29 13:26:36,385 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.07 vs. limit=15.0
+2024-08-29 13:26:36,728 INFO [train.py:1114] (2/4) Epoch 14, batch 50, loss[loss=0.1944, simple_loss=0.262, pruned_loss=0.04625, ctc_loss=0.08578, over 19751.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.288, pruned_loss=0.06104, ctc_loss=0.1157, over 845084.53 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:27:33,928 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-29 13:30:41,138 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.85 vs. limit=15.0
+2024-08-29 13:30:52,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=172954.66666666666, ans=0.125
+2024-08-29 13:30:52,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=172954.66666666666, ans=0.125
+2024-08-29 13:32:29,769 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.749e+02 1.974e+02 2.504e+02 4.970e+02, threshold=3.948e+02, percent-clipped=4.0
+2024-08-29 13:32:58,225 INFO [train.py:1114] (2/4) Epoch 14, batch 100, loss[loss=0.225, simple_loss=0.2822, pruned_loss=0.06166, ctc_loss=0.1112, over 19711.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.2892, pruned_loss=0.06157, ctc_loss=0.1164, over 1499046.71 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:34:18,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=173221.33333333334, ans=0.0
+2024-08-29 13:34:47,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=173274.66666666666, ans=0.0
+2024-08-29 13:36:02,976 INFO [train.py:1114] (2/4) Epoch 14, batch 150, loss[loss=0.1864, simple_loss=0.2532, pruned_loss=0.04365, ctc_loss=0.08054, over 19692.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.2848, pruned_loss=0.05882, ctc_loss=0.1109, over 2028604.36 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:36:03,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=173381.33333333334, ans=0.0
+2024-08-29 13:36:27,316 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.62 vs. limit=15.0
+2024-08-29 13:37:19,618 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.730e+02 2.035e+02 2.422e+02 3.683e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-29 13:37:21,152 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=173594.66666666666, ans=0.0
+2024-08-29 13:37:29,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=173648.0, ans=0.025
+2024-08-29 13:37:30,507 INFO [train.py:1114] (2/4) Epoch 14, batch 200, loss[loss=0.2416, simple_loss=0.2965, pruned_loss=0.06757, ctc_loss=0.1291, over 18454.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2838, pruned_loss=0.05848, ctc_loss=0.1103, over 2436843.10 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:37:42,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=173648.0, ans=0.125
+2024-08-29 13:37:43,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=173701.33333333334, ans=0.0
+2024-08-29 13:38:14,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=173701.33333333334, ans=0.2
+2024-08-29 13:38:46,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=173701.33333333334, ans=0.0
+2024-08-29 13:39:53,314 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.55 vs. limit=10.0
+2024-08-29 13:40:12,291 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.21 vs. limit=15.0
+2024-08-29 13:40:51,996 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.46 vs. limit=10.0
+2024-08-29 13:42:18,839 INFO [train.py:1114] (2/4) Epoch 14, batch 250, loss[loss=0.2443, simple_loss=0.307, pruned_loss=0.06637, ctc_loss=0.1224, over 19384.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.284, pruned_loss=0.05871, ctc_loss=0.1107, over 2757067.39 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:43:11,454 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=173914.66666666666, ans=0.0
+2024-08-29 13:43:44,513 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=173968.0, ans=0.1
+2024-08-29 13:43:56,586 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=174021.33333333334, ans=0.0
+2024-08-29 13:44:13,476 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.787e+02 2.022e+02 2.717e+02 4.953e+02, threshold=4.043e+02, percent-clipped=2.0
+2024-08-29 13:44:51,098 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=174181.33333333334, ans=0.2
+2024-08-29 13:44:52,061 INFO [train.py:1114] (2/4) Epoch 14, batch 300, loss[loss=0.2384, simple_loss=0.3008, pruned_loss=0.06413, ctc_loss=0.1195, over 19542.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2835, pruned_loss=0.05826, ctc_loss=0.1096, over 3001239.55 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:45:02,470 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.39 vs. limit=12.0
+2024-08-29 13:46:17,858 INFO [train.py:1114] (2/4) Epoch 14, batch 350, loss[loss=0.1895, simple_loss=0.2577, pruned_loss=0.04455, ctc_loss=0.08061, over 19738.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2839, pruned_loss=0.05875, ctc_loss=0.1104, over 3191462.47 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 16.0
+2024-08-29 13:47:23,127 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.46 vs. limit=10.0
+2024-08-29 13:47:31,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=174608.0, ans=0.2
+2024-08-29 13:47:32,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=174608.0, ans=0.125
+2024-08-29 13:47:33,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=174608.0, ans=0.025
+2024-08-29 13:47:39,415 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.793e+02 2.058e+02 2.658e+02 4.429e+02, threshold=4.116e+02, percent-clipped=3.0
+2024-08-29 13:48:31,289 INFO [train.py:1114] (2/4) Epoch 14, batch 400, loss[loss=0.2226, simple_loss=0.2834, pruned_loss=0.05753, ctc_loss=0.1169, over 19482.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2826, pruned_loss=0.05798, ctc_loss=0.109, over 3343716.94 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:48:48,941 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=20.27 vs. limit=22.5
+2024-08-29 13:48:49,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=174768.0, ans=10.0
+2024-08-29 13:50:10,912 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=174821.33333333334, ans=0.0
+2024-08-29 13:50:50,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174928.0, ans=0.1
+2024-08-29 13:50:50,939 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.16 vs. limit=15.0
+2024-08-29 13:50:55,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=174928.0, ans=0.025
+2024-08-29 13:50:57,558 INFO [train.py:1114] (2/4) Epoch 14, batch 450, loss[loss=0.2201, simple_loss=0.2812, pruned_loss=0.05784, ctc_loss=0.1085, over 19609.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2828, pruned_loss=0.05797, ctc_loss=0.1091, over 3449888.47 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:51:26,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=175088.0, ans=0.125
+2024-08-29 13:51:34,298 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.23 vs. limit=15.0
+2024-08-29 13:51:35,595 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.47 vs. limit=22.5
+2024-08-29 13:51:39,919 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=175141.33333333334, ans=10.0
+2024-08-29 13:51:46,108 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.20 vs. limit=15.0
+2024-08-29 13:51:50,564 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.713e+02 1.900e+02 2.415e+02 4.159e+02, threshold=3.800e+02, percent-clipped=2.0
+2024-08-29 13:52:14,275 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=175194.66666666666, ans=0.0
+2024-08-29 13:52:16,283 INFO [train.py:1114] (2/4) Epoch 14, batch 500, loss[loss=0.2506, simple_loss=0.3101, pruned_loss=0.06955, ctc_loss=0.1299, over 19681.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2822, pruned_loss=0.0579, ctc_loss=0.1091, over 3545845.53 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:52:45,361 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.02 vs. limit=15.0
+2024-08-29 13:52:46,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=175301.33333333334, ans=0.2
+2024-08-29 13:52:48,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=175354.66666666666, ans=0.0
+2024-08-29 13:52:51,298 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.16 vs. limit=12.0
+2024-08-29 13:52:59,642 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.29 vs. limit=10.0
+2024-08-29 13:53:06,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=175408.0, ans=0.125
+2024-08-29 13:53:12,488 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 13:53:14,958 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.48 vs. limit=15.0
+2024-08-29 13:53:16,439 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.78 vs. limit=15.0
+2024-08-29 13:53:22,876 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=175514.66666666666, ans=0.125
+2024-08-29 13:53:23,921 INFO [train.py:1114] (2/4) Epoch 14, batch 550, loss[loss=0.2655, simple_loss=0.3084, pruned_loss=0.08149, ctc_loss=0.1492, over 19311.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2822, pruned_loss=0.05798, ctc_loss=0.1093, over 3607179.29 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:53:34,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=175514.66666666666, ans=0.0
+2024-08-29 13:53:34,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=175514.66666666666, ans=0.05
+2024-08-29 13:53:34,287 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=175514.66666666666, ans=0.05
+2024-08-29 13:54:00,891 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=20.22 vs. limit=22.5
+2024-08-29 13:54:07,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=175674.66666666666, ans=0.07
+2024-08-29 13:54:09,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=175674.66666666666, ans=0.2
+2024-08-29 13:54:18,078 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.725e+02 1.963e+02 2.348e+02 4.063e+02, threshold=3.927e+02, percent-clipped=2.0
+2024-08-29 13:54:28,212 INFO [train.py:1114] (2/4) Epoch 14, batch 600, loss[loss=0.2636, simple_loss=0.3204, pruned_loss=0.07603, ctc_loss=0.137, over 19341.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2821, pruned_loss=0.05777, ctc_loss=0.1088, over 3663666.03 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:54:49,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-29 13:54:50,519 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.34 vs. limit=6.0
+2024-08-29 13:55:09,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=175941.33333333334, ans=0.1
+2024-08-29 13:55:09,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=175941.33333333334, ans=0.125
+2024-08-29 13:55:22,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=175994.66666666666, ans=0.2
+2024-08-29 13:55:30,806 INFO [train.py:1114] (2/4) Epoch 14, batch 650, loss[loss=0.1931, simple_loss=0.2602, pruned_loss=0.04665, ctc_loss=0.08168, over 19778.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2814, pruned_loss=0.05735, ctc_loss=0.1079, over 3714475.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:55:35,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=176048.0, ans=0.5
+2024-08-29 13:55:45,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=176101.33333333334, ans=0.0
+2024-08-29 13:55:49,334 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.29 vs. limit=15.0
+2024-08-29 13:55:59,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176154.66666666666, ans=0.1
+2024-08-29 13:56:19,021 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=176208.0, ans=0.1
+2024-08-29 13:56:24,635 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.743e+02 2.058e+02 2.560e+02 4.338e+02, threshold=4.116e+02, percent-clipped=4.0
+2024-08-29 13:56:30,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=176261.33333333334, ans=0.125
+2024-08-29 13:56:34,654 INFO [train.py:1114] (2/4) Epoch 14, batch 700, loss[loss=0.2009, simple_loss=0.2719, pruned_loss=0.04738, ctc_loss=0.08777, over 19746.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.281, pruned_loss=0.05665, ctc_loss=0.1067, over 3746176.45 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:56:52,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=176368.0, ans=0.0
+2024-08-29 13:56:53,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176368.0, ans=0.1
+2024-08-29 13:57:37,034 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.09 vs. limit=12.0
+2024-08-29 13:57:48,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=176474.66666666666, ans=0.2
+2024-08-29 13:57:51,146 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.84 vs. limit=10.0
+2024-08-29 13:58:00,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=176528.0, ans=0.025
+2024-08-29 13:58:02,121 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.37 vs. limit=6.0
+2024-08-29 13:58:05,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=176528.0, ans=0.125
+2024-08-29 13:58:06,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=176528.0, ans=0.125
+2024-08-29 13:58:12,842 INFO [train.py:1114] (2/4) Epoch 14, batch 750, loss[loss=0.2264, simple_loss=0.2916, pruned_loss=0.05847, ctc_loss=0.1104, over 19499.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2803, pruned_loss=0.05598, ctc_loss=0.1054, over 3773023.39 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:58:16,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=176581.33333333334, ans=0.0
+2024-08-29 13:58:24,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=176634.66666666666, ans=0.0
+2024-08-29 13:58:24,185 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=176634.66666666666, ans=0.0
+2024-08-29 13:58:26,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=176634.66666666666, ans=0.2
+2024-08-29 13:58:38,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=176688.0, ans=0.035
+2024-08-29 13:58:59,438 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=176741.33333333334, ans=0.2
+2024-08-29 13:59:04,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=176794.66666666666, ans=0.125
+2024-08-29 13:59:06,499 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.910e+02 2.277e+02 2.884e+02 4.780e+02, threshold=4.554e+02, percent-clipped=3.0
+2024-08-29 13:59:23,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=176794.66666666666, ans=0.025
+2024-08-29 13:59:25,438 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=176794.66666666666, ans=0.0
+2024-08-29 13:59:28,726 INFO [train.py:1114] (2/4) Epoch 14, batch 800, loss[loss=0.1979, simple_loss=0.2629, pruned_loss=0.0481, ctc_loss=0.09169, over 19393.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2811, pruned_loss=0.0564, ctc_loss=0.1064, over 3795191.57 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:59:34,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=176848.0, ans=10.0
+2024-08-29 13:59:38,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=176848.0, ans=0.025
+2024-08-29 13:59:44,748 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.34 vs. limit=15.0
+2024-08-29 13:59:45,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=176901.33333333334, ans=0.125
+2024-08-29 13:59:53,013 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=176901.33333333334, ans=0.025
+2024-08-29 14:01:19,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=177008.0, ans=0.0
+2024-08-29 14:01:22,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=177008.0, ans=0.125
+2024-08-29 14:02:30,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=177008.0, ans=0.0
+2024-08-29 14:02:49,613 INFO [train.py:1114] (2/4) Epoch 14, batch 850, loss[loss=0.2079, simple_loss=0.2803, pruned_loss=0.049, ctc_loss=0.09358, over 19643.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2809, pruned_loss=0.05649, ctc_loss=0.1065, over 3814842.26 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:02:52,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=177114.66666666666, ans=0.07
+2024-08-29 14:03:10,288 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=177168.0, ans=0.0
+2024-08-29 14:03:21,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177221.33333333334, ans=0.1
+2024-08-29 14:03:22,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=177221.33333333334, ans=0.1
+2024-08-29 14:03:40,314 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.703e+02 1.970e+02 2.385e+02 3.831e+02, threshold=3.939e+02, percent-clipped=0.0
+2024-08-29 14:03:49,900 INFO [train.py:1114] (2/4) Epoch 14, batch 900, loss[loss=0.2043, simple_loss=0.2573, pruned_loss=0.05479, ctc_loss=0.1042, over 19417.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2811, pruned_loss=0.05687, ctc_loss=0.1072, over 3817936.02 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:04:00,501 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=177434.66666666666, ans=0.0
+2024-08-29 14:04:05,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=177434.66666666666, ans=0.125
+2024-08-29 14:04:16,889 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.65 vs. limit=15.0
+2024-08-29 14:04:22,342 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=177488.0, ans=0.2
+2024-08-29 14:04:34,400 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=177541.33333333334, ans=0.0
+2024-08-29 14:04:34,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-29 14:04:35,643 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-29 14:04:44,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=177594.66666666666, ans=0.2
+2024-08-29 14:04:52,318 INFO [train.py:1114] (2/4) Epoch 14, batch 950, loss[loss=0.1934, simple_loss=0.2577, pruned_loss=0.04707, ctc_loss=0.08743, over 19488.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2815, pruned_loss=0.05718, ctc_loss=0.1078, over 3819119.82 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:04:53,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=177648.0, ans=0.2
+2024-08-29 14:05:03,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-29 14:05:12,249 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177701.33333333334, ans=0.1
+2024-08-29 14:05:22,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=177754.66666666666, ans=0.1
+2024-08-29 14:05:26,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=177754.66666666666, ans=0.125
+2024-08-29 14:05:26,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=177754.66666666666, ans=0.025
+2024-08-29 14:05:30,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=177754.66666666666, ans=0.125
+2024-08-29 14:06:19,921 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.335e+02 1.740e+02 1.996e+02 2.581e+02 3.979e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-29 14:06:21,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=177861.33333333334, ans=15.0
+2024-08-29 14:06:24,814 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=177861.33333333334, ans=0.125
+2024-08-29 14:07:04,899 INFO [train.py:1114] (2/4) Epoch 14, batch 1000, loss[loss=0.2194, simple_loss=0.2774, pruned_loss=0.05777, ctc_loss=0.1146, over 19852.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2826, pruned_loss=0.05785, ctc_loss=0.109, over 3815772.92 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:08:06,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=177968.0, ans=0.1
+2024-08-29 14:08:25,717 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:08:30,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=178074.66666666666, ans=0.125
+2024-08-29 14:08:37,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=178074.66666666666, ans=0.125
+2024-08-29 14:08:42,042 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.67 vs. limit=15.0
+2024-08-29 14:08:56,383 INFO [train.py:1114] (2/4) Epoch 14, batch 1050, loss[loss=0.2395, simple_loss=0.3022, pruned_loss=0.06401, ctc_loss=0.1217, over 19852.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2819, pruned_loss=0.05766, ctc_loss=0.1084, over 3822703.55 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:08:59,599 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.13 vs. limit=15.0
+2024-08-29 14:09:20,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=178288.0, ans=0.125
+2024-08-29 14:09:21,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=178288.0, ans=0.0
+2024-08-29 14:09:36,356 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.74 vs. limit=12.0
+2024-08-29 14:09:46,651 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.811e+02 2.215e+02 2.668e+02 4.320e+02, threshold=4.429e+02, percent-clipped=1.0
+2024-08-29 14:10:20,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=178394.66666666666, ans=0.125
+2024-08-29 14:10:24,267 INFO [train.py:1114] (2/4) Epoch 14, batch 1100, loss[loss=0.1842, simple_loss=0.256, pruned_loss=0.03997, ctc_loss=0.08112, over 19588.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.281, pruned_loss=0.05678, ctc_loss=0.1069, over 3830526.86 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:13:47,949 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=12.0
+2024-08-29 14:14:51,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=178608.0, ans=0.09899494936611666
+2024-08-29 14:15:44,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=178608.0, ans=0.1
+2024-08-29 14:19:15,472 INFO [train.py:1114] (2/4) Epoch 14, batch 1150, loss[loss=0.2052, simple_loss=0.2738, pruned_loss=0.04951, ctc_loss=0.09424, over 19586.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2813, pruned_loss=0.05705, ctc_loss=0.1073, over 3831181.97 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:20:19,720 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=178821.33333333334, ans=0.035
+2024-08-29 14:20:32,902 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.43 vs. limit=10.0
+2024-08-29 14:21:27,807 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:22:13,324 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.701e+02 1.876e+02 2.352e+02 3.362e+02, threshold=3.753e+02, percent-clipped=0.0
+2024-08-29 14:22:14,839 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.04 vs. limit=6.0
+2024-08-29 14:22:33,812 INFO [train.py:1114] (2/4) Epoch 14, batch 1200, loss[loss=0.2178, simple_loss=0.2867, pruned_loss=0.05444, ctc_loss=0.09984, over 19837.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2826, pruned_loss=0.05779, ctc_loss=0.1087, over 3826923.42 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:22:47,378 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=178981.33333333334, ans=0.04949747468305833
+2024-08-29 14:23:24,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=178981.33333333334, ans=0.125
+2024-08-29 14:23:48,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=179034.66666666666, ans=0.2
+2024-08-29 14:24:07,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-29 14:25:21,664 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.66 vs. limit=15.0
+2024-08-29 14:29:53,984 INFO [train.py:1114] (2/4) Epoch 14, batch 1250, loss[loss=0.2319, simple_loss=0.2986, pruned_loss=0.06079, ctc_loss=0.1088, over 19523.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2834, pruned_loss=0.05817, ctc_loss=0.1091, over 3844708.95 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:32:20,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=179354.66666666666, ans=0.125
+2024-08-29 14:32:41,060 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.718e+02 2.120e+02 2.679e+02 4.271e+02, threshold=4.240e+02, percent-clipped=3.0
+2024-08-29 14:32:45,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=179461.33333333334, ans=10.0
+2024-08-29 14:32:52,348 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-29 14:33:10,209 INFO [train.py:1114] (2/4) Epoch 14, batch 1300, loss[loss=0.2749, simple_loss=0.3228, pruned_loss=0.08324, ctc_loss=0.1513, over 18859.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2826, pruned_loss=0.0579, ctc_loss=0.1086, over 3848027.07 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:33:54,989 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:34:06,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=179568.0, ans=0.2
+2024-08-29 14:35:05,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=179674.66666666666, ans=0.2
+2024-08-29 14:35:09,058 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.23 vs. limit=15.0
+2024-08-29 14:35:12,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=179728.0, ans=0.5
+2024-08-29 14:35:31,101 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.30 vs. limit=15.0
+2024-08-29 14:35:39,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=179728.0, ans=0.0
+2024-08-29 14:35:40,089 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=179728.0, ans=0.0
+2024-08-29 14:35:42,425 INFO [train.py:1114] (2/4) Epoch 14, batch 1350, loss[loss=0.2239, simple_loss=0.2803, pruned_loss=0.06077, ctc_loss=0.115, over 19790.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2818, pruned_loss=0.05749, ctc_loss=0.1078, over 3857906.85 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:36:05,435 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=179834.66666666666, ans=0.125
+2024-08-29 14:36:11,255 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=179834.66666666666, ans=0.0
+2024-08-29 14:36:26,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=179888.0, ans=0.0
+2024-08-29 14:40:29,507 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.654e+02 1.881e+02 2.431e+02 4.376e+02, threshold=3.761e+02, percent-clipped=1.0
+2024-08-29 14:41:36,141 INFO [train.py:1114] (2/4) Epoch 14, batch 1400, loss[loss=0.2007, simple_loss=0.2613, pruned_loss=0.05072, ctc_loss=0.0969, over 19660.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2814, pruned_loss=0.0575, ctc_loss=0.1077, over 3865083.37 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:41:54,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=180101.33333333334, ans=0.0
+2024-08-29 14:42:23,018 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.93 vs. limit=22.5
+2024-08-29 14:42:39,827 INFO [train.py:1114] (2/4) Epoch 14, batch 1450, loss[loss=0.2624, simple_loss=0.3146, pruned_loss=0.07781, ctc_loss=0.1366, over 19663.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2827, pruned_loss=0.05808, ctc_loss=0.1088, over 3861354.24 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:42:40,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=180314.66666666666, ans=0.0
+2024-08-29 14:42:41,653 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.94 vs. limit=12.0
+2024-08-29 14:42:47,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=180314.66666666666, ans=0.0
+2024-08-29 14:42:51,992 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-29 14:43:07,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=180421.33333333334, ans=0.0
+2024-08-29 14:43:09,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=180421.33333333334, ans=0.0
+2024-08-29 14:44:06,293 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180474.66666666666, ans=0.1
+2024-08-29 14:44:07,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=180474.66666666666, ans=0.125
+2024-08-29 14:44:08,922 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.22 vs. limit=15.0
+2024-08-29 14:44:17,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=180528.0, ans=0.125
+2024-08-29 14:44:19,812 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.699e+02 1.929e+02 2.254e+02 4.469e+02, threshold=3.859e+02, percent-clipped=1.0
+2024-08-29 14:45:06,807 INFO [train.py:1114] (2/4) Epoch 14, batch 1500, loss[loss=0.2304, simple_loss=0.2932, pruned_loss=0.06126, ctc_loss=0.1125, over 19592.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.283, pruned_loss=0.05786, ctc_loss=0.1086, over 3861121.37 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:45:08,546 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=180581.33333333334, ans=0.125
+2024-08-29 14:45:11,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=180581.33333333334, ans=0.1
+2024-08-29 14:45:23,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=180634.66666666666, ans=10.0
+2024-08-29 14:45:43,286 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:45:57,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=180688.0, ans=0.0
+2024-08-29 14:46:21,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=180794.66666666666, ans=0.0
+2024-08-29 14:46:27,494 INFO [train.py:1114] (2/4) Epoch 14, batch 1550, loss[loss=0.2407, simple_loss=0.3035, pruned_loss=0.06454, ctc_loss=0.1219, over 19599.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2827, pruned_loss=0.0576, ctc_loss=0.1082, over 3844900.86 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:46:27,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=180848.0, ans=0.125
+2024-08-29 14:46:37,001 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=180848.0, ans=0.0
+2024-08-29 14:46:45,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=180901.33333333334, ans=0.1
+2024-08-29 14:46:48,241 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.86 vs. limit=12.0
+2024-08-29 14:46:57,077 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.96 vs. limit=6.0
+2024-08-29 14:48:37,417 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.702e+02 2.011e+02 2.397e+02 3.479e+02, threshold=4.023e+02, percent-clipped=0.0
+2024-08-29 14:48:47,139 INFO [train.py:1114] (2/4) Epoch 14, batch 1600, loss[loss=0.2142, simple_loss=0.291, pruned_loss=0.05048, ctc_loss=0.09136, over 19846.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2828, pruned_loss=0.05796, ctc_loss=0.1086, over 3833861.75 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:48:48,453 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=181114.66666666666, ans=0.125
+2024-08-29 14:49:01,407 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.99 vs. limit=6.0
+2024-08-29 14:49:04,539 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=181168.0, ans=0.125
+2024-08-29 14:49:50,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181221.33333333334, ans=0.1
+2024-08-29 14:51:00,295 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.62 vs. limit=22.5
+2024-08-29 14:51:08,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=181274.66666666666, ans=0.0
+2024-08-29 14:51:29,813 INFO [train.py:1114] (2/4) Epoch 14, batch 1650, loss[loss=0.2336, simple_loss=0.3, pruned_loss=0.06046, ctc_loss=0.1158, over 19648.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2827, pruned_loss=0.05808, ctc_loss=0.1089, over 3830692.64 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:51:33,729 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=181381.33333333334, ans=0.0
+2024-08-29 14:51:44,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181434.66666666666, ans=0.1
+2024-08-29 14:51:46,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-29 14:51:54,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-29 14:51:59,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=181488.0, ans=0.025
+2024-08-29 14:52:11,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=181488.0, ans=0.0
+2024-08-29 14:52:27,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=181594.66666666666, ans=0.125
+2024-08-29 14:52:28,555 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.808e+02 2.247e+02 2.720e+02 5.029e+02, threshold=4.494e+02, percent-clipped=3.0
+2024-08-29 14:52:31,120 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=181594.66666666666, ans=0.1
+2024-08-29 14:52:32,435 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:52:33,467 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=181594.66666666666, ans=0.0
+2024-08-29 14:52:38,159 INFO [train.py:1114] (2/4) Epoch 14, batch 1700, loss[loss=0.1758, simple_loss=0.235, pruned_loss=0.04201, ctc_loss=0.08143, over 19674.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2822, pruned_loss=0.05771, ctc_loss=0.1081, over 3844833.60 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:52:44,665 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.46 vs. limit=10.0
+2024-08-29 14:52:55,066 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.56 vs. limit=22.5
+2024-08-29 14:52:57,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=181701.33333333334, ans=10.0
+2024-08-29 14:53:01,963 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.43 vs. limit=15.0
+2024-08-29 14:53:10,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=181754.66666666666, ans=0.0
+2024-08-29 14:53:12,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=181808.0, ans=0.0
+2024-08-29 14:53:13,212 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.30 vs. limit=12.0
+2024-08-29 14:53:17,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=181808.0, ans=0.125
+2024-08-29 14:53:46,215 INFO [train.py:1114] (2/4) Epoch 14, batch 1750, loss[loss=0.2062, simple_loss=0.2557, pruned_loss=0.05652, ctc_loss=0.1093, over 19631.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2816, pruned_loss=0.0573, ctc_loss=0.1077, over 3850031.43 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:53:46,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=181914.66666666666, ans=0.0
+2024-08-29 14:54:02,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=181968.0, ans=0.025
+2024-08-29 14:54:36,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=182021.33333333334, ans=0.2
+2024-08-29 14:55:03,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=182074.66666666666, ans=0.2
+2024-08-29 14:55:10,232 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182074.66666666666, ans=0.1
+2024-08-29 14:56:25,498 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.791e+02 2.085e+02 2.712e+02 5.021e+02, threshold=4.170e+02, percent-clipped=2.0
+2024-08-29 14:56:27,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=182128.0, ans=0.95
+2024-08-29 14:56:31,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=182128.0, ans=0.0
+2024-08-29 14:56:34,707 INFO [train.py:1114] (2/4) Epoch 14, batch 1800, loss[loss=0.2414, simple_loss=0.2993, pruned_loss=0.06631, ctc_loss=0.1272, over 19616.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2821, pruned_loss=0.05777, ctc_loss=0.1086, over 3851925.79 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:57:14,069 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=182181.33333333334, ans=0.125
+2024-08-29 14:57:15,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=182181.33333333334, ans=0.0
+2024-08-29 14:58:07,427 INFO [train.py:1114] (2/4) Epoch 14, batch 1850, loss[loss=0.2124, simple_loss=0.2823, pruned_loss=0.0521, ctc_loss=0.09589, over 19581.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2815, pruned_loss=0.0573, ctc_loss=0.1077, over 3855220.69 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:58:09,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=182448.0, ans=0.1
+2024-08-29 14:59:45,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=182554.66666666666, ans=0.1
+2024-08-29 14:59:45,530 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.34 vs. limit=15.0
+2024-08-29 15:00:48,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=182608.0, ans=0.125
+2024-08-29 15:03:29,626 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.679e+02 1.934e+02 2.278e+02 6.084e+02, threshold=3.868e+02, percent-clipped=1.0
+2024-08-29 15:03:40,221 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.68 vs. limit=15.0
+2024-08-29 15:03:40,833 INFO [train.py:1114] (2/4) Epoch 14, batch 1900, loss[loss=0.2228, simple_loss=0.29, pruned_loss=0.05598, ctc_loss=0.1089, over 19652.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2819, pruned_loss=0.05714, ctc_loss=0.1075, over 3859926.01 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:03:46,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=182714.66666666666, ans=0.0
+2024-08-29 15:03:51,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=182768.0, ans=0.125
+2024-08-29 15:03:55,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=182768.0, ans=0.125
+2024-08-29 15:04:54,067 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.81 vs. limit=15.0
+2024-08-29 15:05:18,937 INFO [train.py:1114] (2/4) Epoch 14, batch 1950, loss[loss=0.2148, simple_loss=0.2797, pruned_loss=0.05439, ctc_loss=0.1027, over 19597.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2835, pruned_loss=0.0576, ctc_loss=0.1081, over 3869013.35 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:05:38,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183034.66666666666, ans=0.1
+2024-08-29 15:05:39,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=183034.66666666666, ans=0.125
+2024-08-29 15:05:52,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=183141.33333333334, ans=0.125
+2024-08-29 15:05:53,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=183141.33333333334, ans=0.125
+2024-08-29 15:05:55,562 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=183141.33333333334, ans=0.125
+2024-08-29 15:06:06,636 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.683e+02 1.939e+02 2.319e+02 3.642e+02, threshold=3.877e+02, percent-clipped=0.0
+2024-08-29 15:06:44,339 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.45 vs. limit=15.0
+2024-08-29 15:06:48,389 INFO [train.py:1114] (2/4) Epoch 14, batch 2000, loss[loss=0.2042, simple_loss=0.2545, pruned_loss=0.05597, ctc_loss=0.105, over 19640.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2838, pruned_loss=0.05782, ctc_loss=0.1084, over 3853811.42 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:06:49,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183248.0, ans=0.1
+2024-08-29 15:07:00,317 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=183301.33333333334, ans=0.125
+2024-08-29 15:07:32,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=183408.0, ans=0.125
+2024-08-29 15:07:45,827 INFO [train.py:1114] (2/4) Epoch 14, batch 2050, loss[loss=0.1948, simple_loss=0.2566, pruned_loss=0.049, ctc_loss=0.0877, over 19736.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2829, pruned_loss=0.05766, ctc_loss=0.1082, over 3849695.00 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:07:46,418 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.37 vs. limit=22.5
+2024-08-29 15:07:57,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=183568.0, ans=0.0
+2024-08-29 15:08:03,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=183568.0, ans=0.125
+2024-08-29 15:08:39,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-29 15:08:40,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183621.33333333334, ans=0.1
+2024-08-29 15:08:41,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=183621.33333333334, ans=0.125
+2024-08-29 15:09:22,579 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.25 vs. limit=22.5
+2024-08-29 15:09:27,255 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.60 vs. limit=15.0
+2024-08-29 15:09:30,357 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:09:33,848 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.39 vs. limit=22.5
+2024-08-29 15:09:39,965 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.749e+02 1.987e+02 2.455e+02 3.413e+02, threshold=3.973e+02, percent-clipped=0.0
+2024-08-29 15:09:48,926 INFO [train.py:1114] (2/4) Epoch 14, batch 2100, loss[loss=0.2118, simple_loss=0.277, pruned_loss=0.0529, ctc_loss=0.1021, over 19785.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2825, pruned_loss=0.05765, ctc_loss=0.1081, over 3857710.45 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:09:52,004 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.63 vs. limit=15.0
+2024-08-29 15:10:53,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=183994.66666666666, ans=0.07
+2024-08-29 15:10:57,760 INFO [train.py:1114] (2/4) Epoch 14, batch 2150, loss[loss=0.1764, simple_loss=0.254, pruned_loss=0.03595, ctc_loss=0.06709, over 19866.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2814, pruned_loss=0.05707, ctc_loss=0.107, over 3868069.69 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:11:00,273 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=184048.0, ans=0.125
+2024-08-29 15:11:19,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=184154.66666666666, ans=0.2
+2024-08-29 15:11:36,301 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=184208.0, ans=0.125
+2024-08-29 15:11:44,630 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 1.765e+02 2.209e+02 2.742e+02 6.061e+02, threshold=4.418e+02, percent-clipped=6.0
+2024-08-29 15:12:07,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=184261.33333333334, ans=0.025
+2024-08-29 15:12:09,348 INFO [train.py:1114] (2/4) Epoch 14, batch 2200, loss[loss=0.2518, simple_loss=0.3063, pruned_loss=0.07219, ctc_loss=0.1321, over 19569.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2812, pruned_loss=0.05685, ctc_loss=0.1064, over 3866659.28 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:12:12,257 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.48 vs. limit=15.0
+2024-08-29 15:12:14,036 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184314.66666666666, ans=0.1
+2024-08-29 15:13:15,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=184421.33333333334, ans=0.125
+2024-08-29 15:13:17,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=184421.33333333334, ans=0.025
+2024-08-29 15:13:47,045 INFO [train.py:1114] (2/4) Epoch 14, batch 2250, loss[loss=0.2068, simple_loss=0.2751, pruned_loss=0.05087, ctc_loss=0.09203, over 19629.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.281, pruned_loss=0.05664, ctc_loss=0.1062, over 3866444.72 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:14:02,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184634.66666666666, ans=0.1
+2024-08-29 15:14:08,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=184688.0, ans=0.125
+2024-08-29 15:14:11,119 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.97 vs. limit=15.0
+2024-08-29 15:14:15,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=184688.0, ans=0.125
+2024-08-29 15:14:26,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=184741.33333333334, ans=0.1
+2024-08-29 15:14:34,160 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.796e+02 2.116e+02 2.512e+02 3.767e+02, threshold=4.231e+02, percent-clipped=0.0
+2024-08-29 15:14:37,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=184794.66666666666, ans=0.0
+2024-08-29 15:14:43,318 INFO [train.py:1114] (2/4) Epoch 14, batch 2300, loss[loss=0.209, simple_loss=0.2684, pruned_loss=0.05468, ctc_loss=0.1007, over 19501.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2805, pruned_loss=0.0569, ctc_loss=0.1069, over 3860560.42 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:14:49,200 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.00 vs. limit=15.0
+2024-08-29 15:14:53,652 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=184901.33333333334, ans=0.2
+2024-08-29 15:14:56,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=184901.33333333334, ans=0.125
+2024-08-29 15:15:14,723 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.99 vs. limit=10.0
+2024-08-29 15:15:25,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=185008.0, ans=0.125
+2024-08-29 15:15:30,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=185061.33333333334, ans=0.05
+2024-08-29 15:15:40,919 INFO [train.py:1114] (2/4) Epoch 14, batch 2350, loss[loss=0.2295, simple_loss=0.2962, pruned_loss=0.05984, ctc_loss=0.1076, over 19632.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2798, pruned_loss=0.05663, ctc_loss=0.1062, over 3863177.60 frames. ], batch size: 63, lr: 1.05e-02, grad_scale: 64.0
+2024-08-29 15:15:41,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=185114.66666666666, ans=0.125
+2024-08-29 15:15:52,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=185168.0, ans=0.2
+2024-08-29 15:15:59,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=185168.0, ans=0.125
+2024-08-29 15:15:59,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=185168.0, ans=0.1
+2024-08-29 15:16:08,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=185221.33333333334, ans=0.125
+2024-08-29 15:16:23,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=185274.66666666666, ans=0.125
+2024-08-29 15:16:25,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=185328.0, ans=0.0
+2024-08-29 15:16:28,749 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.724e+02 2.017e+02 2.647e+02 4.792e+02, threshold=4.034e+02, percent-clipped=3.0
+2024-08-29 15:16:36,537 INFO [train.py:1114] (2/4) Epoch 14, batch 2400, loss[loss=0.2426, simple_loss=0.3009, pruned_loss=0.067, ctc_loss=0.1259, over 19310.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2827, pruned_loss=0.05786, ctc_loss=0.1083, over 3857715.38 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:16:54,846 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=185434.66666666666, ans=0.025
+2024-08-29 15:17:27,063 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.34 vs. limit=22.5
+2024-08-29 15:17:38,556 INFO [train.py:1114] (2/4) Epoch 14, batch 2450, loss[loss=0.2777, simple_loss=0.3102, pruned_loss=0.08884, ctc_loss=0.169, over 14029.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2861, pruned_loss=0.06026, ctc_loss=0.1133, over 3730481.11 frames. ], batch size: 140, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:17:49,338 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=185701.33333333334, ans=0.125
+2024-08-29 15:17:55,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=185701.33333333334, ans=0.0
+2024-08-29 15:18:02,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=185754.66666666666, ans=0.125
+2024-08-29 15:18:13,061 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:18:20,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=185808.0, ans=0.0
+2024-08-29 15:18:20,929 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.44 vs. limit=6.0
+2024-08-29 15:19:09,362 INFO [train.py:1114] (2/4) Epoch 15, batch 0, loss[loss=0.1964, simple_loss=0.2563, pruned_loss=0.04926, ctc_loss=0.09507, over 19833.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2563, pruned_loss=0.04926, ctc_loss=0.09507, over 19833.00 frames. ], batch size: 49, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:19:09,363 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-29 15:19:20,878 INFO [train.py:1146] (2/4) Epoch 15, validation: loss=0.1908, simple_loss=0.2785, pruned_loss=0.03825, ctc_loss=0.06651, over 944034.00 frames.
+2024-08-29 15:19:20,878 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13710MB
+2024-08-29 15:19:22,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=185856.0, ans=0.125
+2024-08-29 15:19:24,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=185856.0, ans=0.2
+2024-08-29 15:19:25,766 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.942e+02 2.136e+02 2.424e+02 3.799e+02, threshold=4.272e+02, percent-clipped=0.0
+2024-08-29 15:19:37,142 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:19:49,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=185962.66666666666, ans=0.125
+2024-08-29 15:19:56,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=186016.0, ans=0.125
+2024-08-29 15:19:57,209 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:20:15,978 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:20:25,214 INFO [train.py:1114] (2/4) Epoch 15, batch 50, loss[loss=0.1832, simple_loss=0.2494, pruned_loss=0.04233, ctc_loss=0.08059, over 19709.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.2867, pruned_loss=0.06009, ctc_loss=0.1142, over 844718.33 frames. ], batch size: 47, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:20:49,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=186229.33333333334, ans=0.2
+2024-08-29 15:20:59,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=186229.33333333334, ans=0.0
+2024-08-29 15:21:12,731 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.47 vs. limit=6.0
+2024-08-29 15:21:25,451 INFO [train.py:1114] (2/4) Epoch 15, batch 100, loss[loss=0.2268, simple_loss=0.2878, pruned_loss=0.06056, ctc_loss=0.1119, over 19719.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2862, pruned_loss=0.05977, ctc_loss=0.1133, over 1498447.04 frames. ], batch size: 51, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:21:30,082 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.739e+02 1.952e+02 2.450e+02 4.288e+02, threshold=3.904e+02, percent-clipped=1.0
+2024-08-29 15:21:39,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=186442.66666666666, ans=0.2
+2024-08-29 15:21:43,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=186442.66666666666, ans=0.125
+2024-08-29 15:22:03,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=186549.33333333334, ans=0.2
+2024-08-29 15:22:14,075 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.28 vs. limit=22.5
+2024-08-29 15:22:29,350 INFO [train.py:1114] (2/4) Epoch 15, batch 150, loss[loss=0.2191, simple_loss=0.2601, pruned_loss=0.0647, ctc_loss=0.1215, over 19674.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2817, pruned_loss=0.05696, ctc_loss=0.1074, over 2027404.41 frames. ], batch size: 47, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:22:33,738 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.51 vs. limit=12.0
+2024-08-29 15:22:40,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=186709.33333333334, ans=0.125
+2024-08-29 15:22:47,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=186709.33333333334, ans=0.125
+2024-08-29 15:23:00,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=186762.66666666666, ans=0.0
+2024-08-29 15:23:28,623 INFO [train.py:1114] (2/4) Epoch 15, batch 200, loss[loss=0.2522, simple_loss=0.3013, pruned_loss=0.07285, ctc_loss=0.1436, over 18235.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2805, pruned_loss=0.05644, ctc_loss=0.1064, over 2435581.35 frames. ], batch size: 85, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:23:44,481 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.690e+02 2.002e+02 2.433e+02 3.884e+02, threshold=4.003e+02, percent-clipped=0.0
+2024-08-29 15:23:53,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=186976.0, ans=0.125
+2024-08-29 15:24:21,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=187082.66666666666, ans=0.125
+2024-08-29 15:24:53,144 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.47 vs. limit=15.0
+2024-08-29 15:24:57,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=187136.0, ans=0.0
+2024-08-29 15:24:59,371 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_ff2.min_abs, batch_count=187189.33333333334, ans=0.1
+2024-08-29 15:25:01,150 INFO [train.py:1114] (2/4) Epoch 15, batch 250, loss[loss=0.2548, simple_loss=0.3134, pruned_loss=0.07205, ctc_loss=0.13, over 19383.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2809, pruned_loss=0.05678, ctc_loss=0.107, over 2755509.28 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:25:17,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=187242.66666666666, ans=0.1
+2024-08-29 15:25:24,669 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.72 vs. limit=15.0
+2024-08-29 15:25:39,511 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.67 vs. limit=22.5
+2024-08-29 15:25:45,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=187349.33333333334, ans=0.125
+2024-08-29 15:26:00,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=187402.66666666666, ans=0.125
+2024-08-29 15:26:33,390 INFO [train.py:1114] (2/4) Epoch 15, batch 300, loss[loss=0.2356, simple_loss=0.2965, pruned_loss=0.06355, ctc_loss=0.1193, over 19522.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2802, pruned_loss=0.05614, ctc_loss=0.1058, over 3000361.20 frames. ], batch size: 61, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:26:38,053 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.706e+02 2.088e+02 2.592e+02 3.748e+02, threshold=4.177e+02, percent-clipped=0.0
+2024-08-29 15:26:38,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=187456.0, ans=0.0
+2024-08-29 15:26:39,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=187456.0, ans=0.125
+2024-08-29 15:27:07,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=187562.66666666666, ans=0.2
+2024-08-29 15:27:21,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=187616.0, ans=0.025
+2024-08-29 15:27:25,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=187669.33333333334, ans=0.1
+2024-08-29 15:27:34,916 INFO [train.py:1114] (2/4) Epoch 15, batch 350, loss[loss=0.1902, simple_loss=0.252, pruned_loss=0.04709, ctc_loss=0.08577, over 19738.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2807, pruned_loss=0.05627, ctc_loss=0.1059, over 3190095.41 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 16.0
+2024-08-29 15:27:53,189 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=187776.0, ans=0.0
+2024-08-29 15:27:56,231 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.33 vs. limit=10.0
+2024-08-29 15:28:07,206 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.01 vs. limit=12.0
+2024-08-29 15:28:12,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=187829.33333333334, ans=0.95
+2024-08-29 15:28:13,676 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.99 vs. limit=15.0
+2024-08-29 15:28:15,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=187882.66666666666, ans=0.1
+2024-08-29 15:28:38,900 INFO [train.py:1114] (2/4) Epoch 15, batch 400, loss[loss=0.2253, simple_loss=0.2917, pruned_loss=0.05702, ctc_loss=0.1123, over 19479.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2802, pruned_loss=0.05609, ctc_loss=0.1054, over 3342533.88 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:28:39,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=187989.33333333334, ans=0.05
+2024-08-29 15:28:44,516 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.706e+02 2.043e+02 2.587e+02 5.210e+02, threshold=4.085e+02, percent-clipped=2.0
+2024-08-29 15:29:18,006 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=188042.66666666666, ans=0.0
+2024-08-29 15:29:38,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=188096.0, ans=0.09899494936611666
+2024-08-29 15:29:38,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.07 vs. limit=15.0
+2024-08-29 15:29:48,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=188149.33333333334, ans=0.0
+2024-08-29 15:30:07,910 INFO [train.py:1114] (2/4) Epoch 15, batch 450, loss[loss=0.2276, simple_loss=0.299, pruned_loss=0.05719, ctc_loss=0.1045, over 19616.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2808, pruned_loss=0.05655, ctc_loss=0.1063, over 3451192.28 frames. ], batch size: 55, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:30:16,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=188256.0, ans=0.0
+2024-08-29 15:30:19,868 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=188309.33333333334, ans=0.0
+2024-08-29 15:30:31,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=188362.66666666666, ans=0.1
+2024-08-29 15:30:35,711 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=6.67 vs. limit=15.0
+2024-08-29 15:30:40,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=188362.66666666666, ans=0.125
+2024-08-29 15:31:02,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=188469.33333333334, ans=0.125
+2024-08-29 15:31:09,365 INFO [train.py:1114] (2/4) Epoch 15, batch 500, loss[loss=0.2374, simple_loss=0.3027, pruned_loss=0.06264, ctc_loss=0.1173, over 19721.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2797, pruned_loss=0.05599, ctc_loss=0.1052, over 3546389.06 frames. ], batch size: 63, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:31:14,505 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.65 vs. limit=15.0
+2024-08-29 15:31:15,121 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.681e+02 1.897e+02 2.177e+02 4.545e+02, threshold=3.794e+02, percent-clipped=1.0
+2024-08-29 15:31:34,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=188629.33333333334, ans=0.125
+2024-08-29 15:31:34,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=188629.33333333334, ans=0.125
+2024-08-29 15:32:20,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=188629.33333333334, ans=0.0
+2024-08-29 15:32:20,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=188629.33333333334, ans=0.025
+2024-08-29 15:32:26,519 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.31 vs. limit=15.0
+2024-08-29 15:32:31,319 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.61 vs. limit=22.5
+2024-08-29 15:32:42,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=188736.0, ans=0.025
+2024-08-29 15:32:43,128 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.00 vs. limit=15.0
+2024-08-29 15:32:45,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=188736.0, ans=0.0
+2024-08-29 15:32:51,285 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.49 vs. limit=8.0
+2024-08-29 15:32:59,036 INFO [train.py:1114] (2/4) Epoch 15, batch 550, loss[loss=0.2403, simple_loss=0.2983, pruned_loss=0.06631, ctc_loss=0.1241, over 19235.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.28, pruned_loss=0.0561, ctc_loss=0.1054, over 3607735.30 frames. ], batch size: 71, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:33:04,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=188789.33333333334, ans=0.0
+2024-08-29 15:33:05,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=188789.33333333334, ans=0.0
+2024-08-29 15:33:06,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=188789.33333333334, ans=0.125
+2024-08-29 15:33:54,150 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=188896.0, ans=0.125
+2024-08-29 15:33:59,060 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.05 vs. limit=15.0
+2024-08-29 15:34:01,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=188896.0, ans=0.125
+2024-08-29 15:34:30,461 INFO [train.py:1114] (2/4) Epoch 15, batch 600, loss[loss=0.2652, simple_loss=0.3206, pruned_loss=0.07608, ctc_loss=0.1441, over 19372.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2798, pruned_loss=0.05586, ctc_loss=0.1049, over 3666449.23 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:34:30,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=189056.0, ans=0.1
+2024-08-29 15:34:33,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=189056.0, ans=0.0
+2024-08-29 15:34:36,390 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.830e+02 2.111e+02 2.732e+02 4.380e+02, threshold=4.223e+02, percent-clipped=4.0
+2024-08-29 15:34:36,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189056.0, ans=0.1
+2024-08-29 15:34:45,950 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.72 vs. limit=15.0
+2024-08-29 15:34:51,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=189109.33333333334, ans=0.0
+2024-08-29 15:35:26,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=189269.33333333334, ans=0.09899494936611666
+2024-08-29 15:35:29,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189269.33333333334, ans=0.1
+2024-08-29 15:35:31,353 INFO [train.py:1114] (2/4) Epoch 15, batch 650, loss[loss=0.2279, simple_loss=0.2874, pruned_loss=0.06106, ctc_loss=0.1157, over 19753.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2798, pruned_loss=0.05612, ctc_loss=0.1057, over 3717593.53 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:35:35,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=189322.66666666666, ans=0.2
+2024-08-29 15:35:45,851 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=189376.0, ans=0.125
+2024-08-29 15:37:41,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=189376.0, ans=0.0
+2024-08-29 15:37:42,833 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.38 vs. limit=15.0
+2024-08-29 15:37:55,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_na.min_abs, batch_count=189429.33333333334, ans=0.02
+2024-08-29 15:38:08,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:08,610 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=189482.66666666666, ans=0.1
+2024-08-29 15:38:14,737 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.46 vs. limit=15.0
+2024-08-29 15:38:22,771 INFO [train.py:1114] (2/4) Epoch 15, batch 700, loss[loss=0.2265, simple_loss=0.287, pruned_loss=0.06042, ctc_loss=0.1127, over 19734.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2805, pruned_loss=0.05636, ctc_loss=0.1061, over 3749162.36 frames. ], batch size: 51, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:38:24,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=189589.33333333334, ans=0.125
+2024-08-29 15:38:28,537 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.846e+02 2.430e+02 3.057e+02 4.272e+02, threshold=4.860e+02, percent-clipped=1.0
+2024-08-29 15:38:32,473 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=189589.33333333334, ans=0.04949747468305833
+2024-08-29 15:38:55,938 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.07 vs. limit=6.0
+2024-08-29 15:39:01,676 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.17 vs. limit=15.0
+2024-08-29 15:39:03,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=189749.33333333334, ans=0.125
+2024-08-29 15:39:15,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=189802.66666666666, ans=0.125
+2024-08-29 15:39:25,955 INFO [train.py:1114] (2/4) Epoch 15, batch 750, loss[loss=0.2427, simple_loss=0.3011, pruned_loss=0.06657, ctc_loss=0.1277, over 19494.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2795, pruned_loss=0.05589, ctc_loss=0.1051, over 3775088.89 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:39:26,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=189856.0, ans=0.0
+2024-08-29 15:39:28,902 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=189856.0, ans=0.2
+2024-08-29 15:39:30,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=189856.0, ans=0.125
+2024-08-29 15:39:51,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=189962.66666666666, ans=0.125
+2024-08-29 15:39:51,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=189962.66666666666, ans=0.1
+2024-08-29 15:40:02,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=190016.0, ans=0.125
+2024-08-29 15:40:16,958 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.59 vs. limit=15.0
+2024-08-29 15:40:28,196 INFO [train.py:1114] (2/4) Epoch 15, batch 800, loss[loss=0.2287, simple_loss=0.2735, pruned_loss=0.06742, ctc_loss=0.1228, over 19409.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2792, pruned_loss=0.05573, ctc_loss=0.1046, over 3796237.68 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:40:34,422 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.728e+02 2.068e+02 2.494e+02 4.984e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-29 15:40:50,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=190176.0, ans=0.125
+2024-08-29 15:41:21,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=190336.0, ans=0.0
+2024-08-29 15:41:22,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=190336.0, ans=0.125
+2024-08-29 15:41:30,888 INFO [train.py:1114] (2/4) Epoch 15, batch 850, loss[loss=0.2367, simple_loss=0.3061, pruned_loss=0.06064, ctc_loss=0.115, over 19641.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2793, pruned_loss=0.05576, ctc_loss=0.1047, over 3814513.23 frames. ], batch size: 59, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:41:31,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=190389.33333333334, ans=0.0
+2024-08-29 15:41:36,258 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.20 vs. limit=10.0
+2024-08-29 15:41:39,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=190389.33333333334, ans=0.0
+2024-08-29 15:41:42,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=190442.66666666666, ans=0.0
+2024-08-29 15:41:52,680 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.95 vs. limit=15.0
+2024-08-29 15:41:56,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=190496.0, ans=0.125
+2024-08-29 15:41:58,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=190496.0, ans=0.125
+2024-08-29 15:42:05,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=190549.33333333334, ans=0.025
+2024-08-29 15:42:34,686 INFO [train.py:1114] (2/4) Epoch 15, batch 900, loss[loss=0.1983, simple_loss=0.2597, pruned_loss=0.0494, ctc_loss=0.09539, over 19803.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2795, pruned_loss=0.05571, ctc_loss=0.1048, over 3818417.52 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:42:39,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=190656.0, ans=0.0
+2024-08-29 15:42:40,572 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 1.760e+02 2.061e+02 2.441e+02 4.748e+02, threshold=4.121e+02, percent-clipped=4.0
+2024-08-29 15:42:58,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=190709.33333333334, ans=0.125
+2024-08-29 15:43:08,860 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=190762.66666666666, ans=0.2
+2024-08-29 15:43:17,789 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.30 vs. limit=15.0
+2024-08-29 15:43:19,929 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:43:23,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=190816.0, ans=0.2
+2024-08-29 15:43:47,830 INFO [train.py:1114] (2/4) Epoch 15, batch 950, loss[loss=0.1848, simple_loss=0.2499, pruned_loss=0.04397, ctc_loss=0.07963, over 19507.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2801, pruned_loss=0.05591, ctc_loss=0.1053, over 3821132.33 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:43:51,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=190922.66666666666, ans=0.1
+2024-08-29 15:43:59,287 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=190976.0, ans=0.1
+2024-08-29 15:44:09,342 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.21 vs. limit=15.0
+2024-08-29 15:44:14,732 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=191029.33333333334, ans=0.0
+2024-08-29 15:44:20,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=191029.33333333334, ans=0.125
+2024-08-29 15:44:22,446 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=191029.33333333334, ans=0.05
+2024-08-29 15:44:26,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=191082.66666666666, ans=0.95
+2024-08-29 15:44:29,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=191082.66666666666, ans=0.125
+2024-08-29 15:44:48,251 INFO [train.py:1114] (2/4) Epoch 15, batch 1000, loss[loss=0.1911, simple_loss=0.2622, pruned_loss=0.04306, ctc_loss=0.08457, over 19856.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2805, pruned_loss=0.05602, ctc_loss=0.1053, over 3818637.71 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:44:49,161 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.59 vs. limit=12.0
+2024-08-29 15:44:56,848 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 1.691e+02 1.934e+02 2.300e+02 3.610e+02, threshold=3.869e+02, percent-clipped=0.0
+2024-08-29 15:44:59,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=191189.33333333334, ans=0.125
+2024-08-29 15:45:01,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=191242.66666666666, ans=0.125
+2024-08-29 15:45:20,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=191296.0, ans=0.125
+2024-08-29 15:45:22,861 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.38 vs. limit=22.5
+2024-08-29 15:45:44,186 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.91 vs. limit=15.0
+2024-08-29 15:45:52,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=191456.0, ans=0.125
+2024-08-29 15:45:53,379 INFO [train.py:1114] (2/4) Epoch 15, batch 1050, loss[loss=0.207, simple_loss=0.2837, pruned_loss=0.04759, ctc_loss=0.08767, over 19838.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2791, pruned_loss=0.05545, ctc_loss=0.104, over 3825181.74 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:45:58,151 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.14 vs. limit=12.0
+2024-08-29 15:46:22,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=191562.66666666666, ans=0.0
+2024-08-29 15:46:23,073 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=191562.66666666666, ans=0.2
+2024-08-29 15:46:26,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=191562.66666666666, ans=0.2
+2024-08-29 15:46:38,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=191616.0, ans=0.125
+2024-08-29 15:46:42,642 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.75 vs. limit=10.0
+2024-08-29 15:46:53,183 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.17 vs. limit=12.0
+2024-08-29 15:46:54,810 INFO [train.py:1114] (2/4) Epoch 15, batch 1100, loss[loss=0.1951, simple_loss=0.2629, pruned_loss=0.04664, ctc_loss=0.0851, over 19591.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2789, pruned_loss=0.05536, ctc_loss=0.1038, over 3832078.55 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:46:57,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=191722.66666666666, ans=0.1
+2024-08-29 15:47:17,585 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.746e+02 1.965e+02 2.496e+02 3.903e+02, threshold=3.929e+02, percent-clipped=1.0
+2024-08-29 15:47:17,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=191722.66666666666, ans=0.5
+2024-08-29 15:47:23,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=191776.0, ans=0.035
+2024-08-29 15:47:27,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=191776.0, ans=0.125
+2024-08-29 15:47:30,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=191776.0, ans=0.125
+2024-08-29 15:47:31,853 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.18 vs. limit=15.0
+2024-08-29 15:48:09,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=191936.0, ans=0.0
+2024-08-29 15:48:12,567 INFO [train.py:1114] (2/4) Epoch 15, batch 1150, loss[loss=0.2081, simple_loss=0.2745, pruned_loss=0.0515, ctc_loss=0.09673, over 19580.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2791, pruned_loss=0.05553, ctc_loss=0.1043, over 3829412.36 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:48:18,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=191989.33333333334, ans=0.125
+2024-08-29 15:48:18,694 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=191989.33333333334, ans=0.125
+2024-08-29 15:48:20,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=191989.33333333334, ans=0.125
+2024-08-29 15:48:20,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=191989.33333333334, ans=0.1
+2024-08-29 15:48:23,730 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.21 vs. limit=15.0
+2024-08-29 15:48:26,971 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.28 vs. limit=22.5
+2024-08-29 15:48:29,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=192042.66666666666, ans=0.2
+2024-08-29 15:48:29,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=192042.66666666666, ans=0.1
+2024-08-29 15:48:39,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=192096.0, ans=0.125
+2024-08-29 15:49:07,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=192202.66666666666, ans=0.0
+2024-08-29 15:49:11,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=192202.66666666666, ans=0.0
+2024-08-29 15:49:19,909 INFO [train.py:1114] (2/4) Epoch 15, batch 1200, loss[loss=0.2037, simple_loss=0.2715, pruned_loss=0.04996, ctc_loss=0.08978, over 19831.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2796, pruned_loss=0.05558, ctc_loss=0.1046, over 3824886.04 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:49:26,221 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.719e+02 2.001e+02 2.349e+02 3.398e+02, threshold=4.002e+02, percent-clipped=0.0
+2024-08-29 15:49:46,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=192309.33333333334, ans=0.0
+2024-08-29 15:49:47,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=192362.66666666666, ans=0.0
+2024-08-29 15:50:00,955 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.81 vs. limit=15.0
+2024-08-29 15:50:01,911 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=192416.0, ans=0.125
+2024-08-29 15:50:24,206 INFO [train.py:1114] (2/4) Epoch 15, batch 1250, loss[loss=0.2307, simple_loss=0.2943, pruned_loss=0.06236, ctc_loss=0.1059, over 19539.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2799, pruned_loss=0.05533, ctc_loss=0.1041, over 3842529.22 frames. ], batch size: 61, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:50:51,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=192629.33333333334, ans=0.125
+2024-08-29 15:51:00,542 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:51:11,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=192682.66666666666, ans=0.07
+2024-08-29 15:51:12,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=192736.0, ans=0.125
+2024-08-29 15:51:21,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=192736.0, ans=0.0
+2024-08-29 15:51:25,224 INFO [train.py:1114] (2/4) Epoch 15, batch 1300, loss[loss=0.2247, simple_loss=0.2925, pruned_loss=0.05732, ctc_loss=0.1058, over 18994.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2792, pruned_loss=0.05504, ctc_loss=0.1037, over 3846946.16 frames. ], batch size: 76, lr: 9.99e-03, grad_scale: 32.0
+2024-08-29 15:52:15,030 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.668e+02 1.955e+02 2.455e+02 4.261e+02, threshold=3.910e+02, percent-clipped=2.0
+2024-08-29 15:52:27,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=192842.66666666666, ans=0.125
+2024-08-29 15:52:29,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=192842.66666666666, ans=0.025
+2024-08-29 15:52:36,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=192896.0, ans=0.125
+2024-08-29 15:52:59,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=193002.66666666666, ans=0.125
+2024-08-29 15:53:10,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=193056.0, ans=0.125
+2024-08-29 15:53:11,012 INFO [train.py:1114] (2/4) Epoch 15, batch 1350, loss[loss=0.214, simple_loss=0.2827, pruned_loss=0.05244, ctc_loss=0.101, over 19791.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2784, pruned_loss=0.05459, ctc_loss=0.1026, over 3857593.22 frames. ], batch size: 54, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:53:17,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=193056.0, ans=0.125
+2024-08-29 15:53:22,541 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.22 vs. limit=22.5
+2024-08-29 15:53:22,738 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.50 vs. limit=15.0
+2024-08-29 15:53:24,001 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.73 vs. limit=15.0
+2024-08-29 15:53:36,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.00 vs. limit=15.0
+2024-08-29 15:53:50,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=193216.0, ans=0.125
+2024-08-29 15:54:04,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=193269.33333333334, ans=0.125
+2024-08-29 15:54:08,509 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.85 vs. limit=15.0
+2024-08-29 15:54:08,584 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.93 vs. limit=15.0
+2024-08-29 15:54:14,986 INFO [train.py:1114] (2/4) Epoch 15, batch 1400, loss[loss=0.1862, simple_loss=0.2488, pruned_loss=0.0452, ctc_loss=0.08282, over 19685.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2779, pruned_loss=0.05454, ctc_loss=0.1024, over 3864412.35 frames. ], batch size: 46, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:54:36,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=193322.66666666666, ans=0.125
+2024-08-29 15:54:37,466 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.658e+02 1.833e+02 2.351e+02 3.730e+02, threshold=3.665e+02, percent-clipped=0.0
+2024-08-29 15:54:37,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=193322.66666666666, ans=0.125
+2024-08-29 15:54:49,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=193376.0, ans=0.125
+2024-08-29 15:55:19,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=193482.66666666666, ans=0.125
+2024-08-29 15:55:20,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff3.min_abs, batch_count=193482.66666666666, ans=0.2
+2024-08-29 15:55:43,679 INFO [train.py:1114] (2/4) Epoch 15, batch 1450, loss[loss=0.2581, simple_loss=0.3129, pruned_loss=0.07341, ctc_loss=0.1412, over 19631.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.279, pruned_loss=0.05514, ctc_loss=0.1038, over 3862827.17 frames. ], batch size: 63, lr: 9.97e-03, grad_scale: 32.0
+2024-08-29 15:56:03,449 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=193642.66666666666, ans=0.04949747468305833
+2024-08-29 15:56:06,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=193642.66666666666, ans=0.09899494936611666
+2024-08-29 15:56:34,131 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=193802.66666666666, ans=0.125
+2024-08-29 15:56:45,752 INFO [train.py:1114] (2/4) Epoch 15, batch 1500, loss[loss=0.2343, simple_loss=0.3054, pruned_loss=0.05908, ctc_loss=0.1126, over 19600.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2798, pruned_loss=0.05557, ctc_loss=0.1045, over 3862635.72 frames. ], batch size: 57, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:56:50,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=193856.0, ans=0.125
+2024-08-29 15:56:52,436 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.660e+02 1.885e+02 2.337e+02 4.281e+02, threshold=3.770e+02, percent-clipped=2.0
+2024-08-29 15:57:02,365 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=193909.33333333334, ans=0.125
+2024-08-29 15:57:04,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=193909.33333333334, ans=0.125
+2024-08-29 15:57:51,480 INFO [train.py:1114] (2/4) Epoch 15, batch 1550, loss[loss=0.246, simple_loss=0.3022, pruned_loss=0.06823, ctc_loss=0.1334, over 19585.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2802, pruned_loss=0.05583, ctc_loss=0.1056, over 3846467.81 frames. ], batch size: 60, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:57:51,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=194122.66666666666, ans=0.0
+2024-08-29 15:58:13,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=194176.0, ans=0.0
+2024-08-29 15:58:21,002 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=194229.33333333334, ans=0.125
+2024-08-29 15:58:22,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=194229.33333333334, ans=0.0
+2024-08-29 15:58:34,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=194282.66666666666, ans=0.125
+2024-08-29 15:58:53,431 INFO [train.py:1114] (2/4) Epoch 15, batch 1600, loss[loss=0.2094, simple_loss=0.2783, pruned_loss=0.05062, ctc_loss=0.09817, over 19824.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2799, pruned_loss=0.05556, ctc_loss=0.105, over 3835679.45 frames. ], batch size: 57, lr: 9.95e-03, grad_scale: 32.0
+2024-08-29 15:58:59,523 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.762e+02 2.164e+02 2.478e+02 4.927e+02, threshold=4.328e+02, percent-clipped=7.0
+2024-08-29 15:58:59,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=194389.33333333334, ans=0.07
+2024-08-29 16:00:03,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=194496.0, ans=0.0
+2024-08-29 16:00:25,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=194602.66666666666, ans=0.2
+2024-08-29 16:00:35,225 INFO [train.py:1114] (2/4) Epoch 15, batch 1650, loss[loss=0.2143, simple_loss=0.2895, pruned_loss=0.05157, ctc_loss=0.08966, over 19655.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2794, pruned_loss=0.05541, ctc_loss=0.1046, over 3831237.10 frames. ], batch size: 59, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:00:55,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=194709.33333333334, ans=0.2
+2024-08-29 16:01:01,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=194762.66666666666, ans=0.125
+2024-08-29 16:01:24,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=194816.0, ans=0.125
+2024-08-29 16:01:29,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=194869.33333333334, ans=0.0
+2024-08-29 16:01:34,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=194869.33333333334, ans=0.125
+2024-08-29 16:01:38,040 INFO [train.py:1114] (2/4) Epoch 15, batch 1700, loss[loss=0.1731, simple_loss=0.2369, pruned_loss=0.03976, ctc_loss=0.07424, over 19696.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2792, pruned_loss=0.05539, ctc_loss=0.1043, over 3845903.82 frames. ], batch size: 46, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:01:44,063 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.696e+02 2.083e+02 2.797e+02 4.802e+02, threshold=4.167e+02, percent-clipped=3.0
+2024-08-29 16:01:45,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=194922.66666666666, ans=0.125
+2024-08-29 16:01:45,558 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=194922.66666666666, ans=0.1
+2024-08-29 16:01:50,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=194976.0, ans=0.0
+2024-08-29 16:01:57,692 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.64 vs. limit=15.0
+2024-08-29 16:01:58,755 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.14 vs. limit=6.0
+2024-08-29 16:01:59,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=194976.0, ans=0.2
+2024-08-29 16:02:02,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=195029.33333333334, ans=0.0
+2024-08-29 16:02:04,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=195029.33333333334, ans=0.2
+2024-08-29 16:02:23,239 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=195082.66666666666, ans=0.125
+2024-08-29 16:02:32,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=195136.0, ans=0.1
+2024-08-29 16:02:40,447 INFO [train.py:1114] (2/4) Epoch 15, batch 1750, loss[loss=0.1776, simple_loss=0.2427, pruned_loss=0.04107, ctc_loss=0.07602, over 19637.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2787, pruned_loss=0.05514, ctc_loss=0.1039, over 3851631.43 frames. ], batch size: 45, lr: 9.93e-03, grad_scale: 32.0
+2024-08-29 16:02:40,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=195189.33333333334, ans=0.0
+2024-08-29 16:02:47,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=195189.33333333334, ans=0.125
+2024-08-29 16:02:47,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=195189.33333333334, ans=0.125
+2024-08-29 16:03:01,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=195242.66666666666, ans=0.125
+2024-08-29 16:03:06,118 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=195296.0, ans=0.2
+2024-08-29 16:03:10,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=195296.0, ans=0.1
+2024-08-29 16:03:19,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=195349.33333333334, ans=0.2
+2024-08-29 16:03:21,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=195349.33333333334, ans=10.0
+2024-08-29 16:03:37,862 INFO [train.py:1114] (2/4) Epoch 15, batch 1800, loss[loss=0.2086, simple_loss=0.2788, pruned_loss=0.05049, ctc_loss=0.09367, over 19621.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2793, pruned_loss=0.05544, ctc_loss=0.1045, over 3853079.40 frames. ], batch size: 55, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:03:43,647 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.702e+02 2.083e+02 2.690e+02 4.339e+02, threshold=4.166e+02, percent-clipped=1.0
+2024-08-29 16:03:50,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=195509.33333333334, ans=0.0
+2024-08-29 16:03:57,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=195509.33333333334, ans=0.125
+2024-08-29 16:03:58,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=195509.33333333334, ans=0.1
+2024-08-29 16:04:09,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=195562.66666666666, ans=0.0
+2024-08-29 16:04:33,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=195722.66666666666, ans=0.07
+2024-08-29 16:04:34,690 INFO [train.py:1114] (2/4) Epoch 15, batch 1850, loss[loss=0.2293, simple_loss=0.2974, pruned_loss=0.05852, ctc_loss=0.1102, over 19593.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.279, pruned_loss=0.05541, ctc_loss=0.1043, over 3856062.68 frames. ], batch size: 57, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:04:40,819 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.91 vs. limit=22.5
+2024-08-29 16:04:46,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=195776.0, ans=0.1
+2024-08-29 16:05:02,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=195829.33333333334, ans=0.0
+2024-08-29 16:05:05,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=195829.33333333334, ans=0.2
+2024-08-29 16:05:11,772 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.54 vs. limit=15.0
+2024-08-29 16:05:14,562 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=195882.66666666666, ans=0.035
+2024-08-29 16:05:15,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=195882.66666666666, ans=0.025
+2024-08-29 16:05:35,629 INFO [train.py:1114] (2/4) Epoch 15, batch 1900, loss[loss=0.2004, simple_loss=0.2839, pruned_loss=0.04155, ctc_loss=0.08457, over 19673.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2793, pruned_loss=0.05531, ctc_loss=0.1042, over 3860767.44 frames. ], batch size: 59, lr: 9.91e-03, grad_scale: 32.0
+2024-08-29 16:05:35,787 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=195989.33333333334, ans=0.125
+2024-08-29 16:05:36,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195989.33333333334, ans=0.1
+2024-08-29 16:05:40,981 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.724e+02 2.102e+02 3.115e+02 5.340e+02, threshold=4.204e+02, percent-clipped=3.0
+2024-08-29 16:05:46,230 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.29 vs. limit=15.0
+2024-08-29 16:05:55,937 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=196042.66666666666, ans=0.125
+2024-08-29 16:06:01,868 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.43 vs. limit=15.0
+2024-08-29 16:06:12,546 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=196149.33333333334, ans=0.5
+2024-08-29 16:06:32,317 INFO [train.py:1114] (2/4) Epoch 15, batch 1950, loss[loss=0.1898, simple_loss=0.2597, pruned_loss=0.04342, ctc_loss=0.08289, over 19582.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2804, pruned_loss=0.05547, ctc_loss=0.1043, over 3870328.65 frames. ], batch size: 52, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:06:35,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=196256.0, ans=0.125
+2024-08-29 16:06:52,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=196309.33333333334, ans=0.125
+2024-08-29 16:07:35,229 INFO [train.py:1114] (2/4) Epoch 15, batch 2000, loss[loss=0.1816, simple_loss=0.2425, pruned_loss=0.0434, ctc_loss=0.08482, over 19658.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2809, pruned_loss=0.0559, ctc_loss=0.1052, over 3855587.52 frames. ], batch size: 45, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:07:41,140 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.618e+02 1.832e+02 2.132e+02 4.362e+02, threshold=3.664e+02, percent-clipped=1.0
+2024-08-29 16:07:42,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=196522.66666666666, ans=0.125
+2024-08-29 16:08:32,344 INFO [train.py:1114] (2/4) Epoch 15, batch 2050, loss[loss=0.1999, simple_loss=0.2554, pruned_loss=0.05251, ctc_loss=0.09842, over 19698.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2797, pruned_loss=0.0555, ctc_loss=0.1045, over 3851679.79 frames. ], batch size: 47, lr: 9.89e-03, grad_scale: 32.0
+2024-08-29 16:08:48,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=196842.66666666666, ans=0.125
+2024-08-29 16:09:15,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197002.66666666666, ans=0.1
+2024-08-29 16:09:16,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=197002.66666666666, ans=0.125
+2024-08-29 16:09:17,561 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=17.53 vs. limit=22.5
+2024-08-29 16:09:20,296 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=197002.66666666666, ans=0.125
+2024-08-29 16:09:27,758 INFO [train.py:1114] (2/4) Epoch 15, batch 2100, loss[loss=0.2023, simple_loss=0.2783, pruned_loss=0.04464, ctc_loss=0.09274, over 19786.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.279, pruned_loss=0.05493, ctc_loss=0.1033, over 3858826.06 frames. ], batch size: 54, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:09:33,398 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.691e+02 1.929e+02 2.354e+02 3.359e+02, threshold=3.858e+02, percent-clipped=0.0
+2024-08-29 16:09:46,227 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=197109.33333333334, ans=0.1
+2024-08-29 16:09:49,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=197162.66666666666, ans=0.125
+2024-08-29 16:09:51,267 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.44 vs. limit=22.5
+2024-08-29 16:10:26,363 INFO [train.py:1114] (2/4) Epoch 15, batch 2150, loss[loss=0.1985, simple_loss=0.2646, pruned_loss=0.04798, ctc_loss=0.09123, over 19846.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2786, pruned_loss=0.05498, ctc_loss=0.1032, over 3869447.86 frames. ], batch size: 52, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:10:36,172 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=197322.66666666666, ans=0.0
+2024-08-29 16:10:37,487 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.22 vs. limit=15.0
+2024-08-29 16:10:39,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=197376.0, ans=0.2
+2024-08-29 16:11:58,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=197429.33333333334, ans=0.125
+2024-08-29 16:11:58,465 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.62 vs. limit=15.0
+2024-08-29 16:12:11,992 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=197482.66666666666, ans=0.125
+2024-08-29 16:12:14,669 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.36 vs. limit=15.0
+2024-08-29 16:12:18,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=197482.66666666666, ans=0.125
+2024-08-29 16:12:27,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=197536.0, ans=0.125
+2024-08-29 16:12:31,431 INFO [train.py:1114] (2/4) Epoch 15, batch 2200, loss[loss=0.2221, simple_loss=0.2927, pruned_loss=0.05426, ctc_loss=0.1076, over 19603.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2787, pruned_loss=0.05505, ctc_loss=0.1035, over 3868541.93 frames. ], batch size: 57, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:12:36,863 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.787e+02 2.154e+02 2.730e+02 5.047e+02, threshold=4.308e+02, percent-clipped=4.0
+2024-08-29 16:12:38,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=197589.33333333334, ans=0.0
+2024-08-29 16:13:29,266 INFO [train.py:1114] (2/4) Epoch 15, batch 2250, loss[loss=0.2184, simple_loss=0.2852, pruned_loss=0.05588, ctc_loss=0.09942, over 19601.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.279, pruned_loss=0.05501, ctc_loss=0.1034, over 3867770.91 frames. ], batch size: 55, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:13:35,173 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=197856.0, ans=0.025
+2024-08-29 16:14:00,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=197909.33333333334, ans=0.125
+2024-08-29 16:14:03,332 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.13 vs. limit=22.5
+2024-08-29 16:14:09,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=197962.66666666666, ans=0.95
+2024-08-29 16:14:15,912 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.50 vs. limit=6.0
+2024-08-29 16:14:35,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=198069.33333333334, ans=0.2
+2024-08-29 16:14:39,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=198069.33333333334, ans=0.125
+2024-08-29 16:14:45,316 INFO [train.py:1114] (2/4) Epoch 15, batch 2300, loss[loss=0.1915, simple_loss=0.2676, pruned_loss=0.04184, ctc_loss=0.0795, over 19491.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2786, pruned_loss=0.05524, ctc_loss=0.1038, over 3860747.25 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 32.0
+2024-08-29 16:14:45,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=198122.66666666666, ans=0.1
+2024-08-29 16:14:46,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=198122.66666666666, ans=0.025
+2024-08-29 16:14:50,773 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.686e+02 1.986e+02 2.467e+02 4.553e+02, threshold=3.971e+02, percent-clipped=1.0
+2024-08-29 16:14:55,937 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.03 vs. limit=15.0
+2024-08-29 16:15:23,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=198282.66666666666, ans=0.0
+2024-08-29 16:15:43,152 INFO [train.py:1114] (2/4) Epoch 15, batch 2350, loss[loss=0.2244, simple_loss=0.2853, pruned_loss=0.05981, ctc_loss=0.1095, over 19656.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2784, pruned_loss=0.05521, ctc_loss=0.1038, over 3863738.90 frames. ], batch size: 63, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:15:52,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=198389.33333333334, ans=0.0
+2024-08-29 16:16:01,828 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=198442.66666666666, ans=0.125
+2024-08-29 16:16:42,894 INFO [train.py:1114] (2/4) Epoch 15, batch 2400, loss[loss=0.2204, simple_loss=0.2922, pruned_loss=0.054, ctc_loss=0.1014, over 19323.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2809, pruned_loss=0.05616, ctc_loss=0.1054, over 3857244.99 frames. ], batch size: 71, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:16:47,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=198656.0, ans=0.125
+2024-08-29 16:16:48,397 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.659e+02 1.944e+02 2.492e+02 3.873e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-29 16:16:49,641 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=198656.0, ans=0.125
+2024-08-29 16:16:54,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=198709.33333333334, ans=0.1
+2024-08-29 16:17:55,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=198709.33333333334, ans=0.125
+2024-08-29 16:17:55,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=198709.33333333334, ans=0.125
+2024-08-29 16:17:59,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=198762.66666666666, ans=0.0
+2024-08-29 16:18:13,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=198816.0, ans=0.125
+2024-08-29 16:18:33,178 INFO [train.py:1114] (2/4) Epoch 15, batch 2450, loss[loss=0.2908, simple_loss=0.3216, pruned_loss=0.09428, ctc_loss=0.1788, over 13574.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2849, pruned_loss=0.05891, ctc_loss=0.1114, over 3730810.64 frames. ], batch size: 140, lr: 9.84e-03, grad_scale: 32.0
+2024-08-29 16:18:41,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=198922.66666666666, ans=0.0
+2024-08-29 16:18:44,394 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.79 vs. limit=15.0
+2024-08-29 16:18:55,159 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=12.04 vs. limit=15.0
+2024-08-29 16:19:00,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=199029.33333333334, ans=0.5
+2024-08-29 16:19:13,405 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.32 vs. limit=15.0
+2024-08-29 16:20:18,425 INFO [train.py:1114] (2/4) Epoch 16, batch 0, loss[loss=0.2139, simple_loss=0.2757, pruned_loss=0.05488, ctc_loss=0.106, over 19810.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2757, pruned_loss=0.05488, ctc_loss=0.106, over 19810.00 frames. ], batch size: 49, lr: 9.52e-03, grad_scale: 32.0
+2024-08-29 16:20:18,425 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-29 16:20:28,424 INFO [train.py:1146] (2/4) Epoch 16, validation: loss=0.1867, simple_loss=0.2755, pruned_loss=0.03636, ctc_loss=0.06317, over 944034.00 frames.
+2024-08-29 16:20:28,424 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13710MB
+2024-08-29 16:20:34,695 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=199130.66666666666, ans=0.125
+2024-08-29 16:20:48,964 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.810e+02 1.998e+02 2.276e+02 3.528e+02, threshold=3.997e+02, percent-clipped=0.0
+2024-08-29 16:21:26,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=199344.0, ans=0.125
+2024-08-29 16:21:28,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=199344.0, ans=0.125
+2024-08-29 16:21:30,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=199344.0, ans=0.125
+2024-08-29 16:21:32,416 INFO [train.py:1114] (2/4) Epoch 16, batch 50, loss[loss=0.1996, simple_loss=0.2655, pruned_loss=0.04806, ctc_loss=0.09386, over 19693.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.284, pruned_loss=0.05776, ctc_loss=0.11, over 845655.20 frames. ], batch size: 47, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:21:36,296 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=199397.33333333334, ans=0.0
+2024-08-29 16:21:55,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=199504.0, ans=0.0
+2024-08-29 16:22:03,629 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=18.00 vs. limit=22.5
+2024-08-29 16:22:27,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=199610.66666666666, ans=0.2
+2024-08-29 16:22:40,107 INFO [train.py:1114] (2/4) Epoch 16, batch 100, loss[loss=0.1939, simple_loss=0.2585, pruned_loss=0.04715, ctc_loss=0.08736, over 19733.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2835, pruned_loss=0.05693, ctc_loss=0.1078, over 1500622.48 frames. ], batch size: 51, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:23:05,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=199717.33333333334, ans=0.125
+2024-08-29 16:23:06,386 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.97 vs. limit=15.0
+2024-08-29 16:23:08,066 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 1.815e+02 2.137e+02 2.569e+02 4.869e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-29 16:23:22,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=199770.66666666666, ans=6.0
+2024-08-29 16:23:28,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=199824.0, ans=0.0
+2024-08-29 16:23:29,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=199824.0, ans=0.125
+2024-08-29 16:30:57,494 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.47 vs. limit=15.0
+2024-08-29 16:37:11,226 INFO [train.py:1114] (2/4) Epoch 16, batch 150, loss[loss=0.2009, simple_loss=0.2533, pruned_loss=0.05439, ctc_loss=0.09929, over 19735.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.28, pruned_loss=0.05486, ctc_loss=0.1036, over 2028424.02 frames. ], batch size: 47, lr: 9.50e-03, grad_scale: 32.0
+2024-08-29 16:43:33,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=200037.33333333334, ans=0.0
+2024-08-29 16:47:05,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=200144.0, ans=0.125
+2024-08-29 16:48:09,841 INFO [train.py:1114] (2/4) Epoch 16, batch 200, loss[loss=0.248, simple_loss=0.3041, pruned_loss=0.06928, ctc_loss=0.1334, over 18244.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2796, pruned_loss=0.05486, ctc_loss=0.1032, over 2435460.16 frames. ], batch size: 85, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:49:57,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=200197.33333333334, ans=0.125
+2024-08-29 16:49:57,623 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=200197.33333333334, ans=0.125
+2024-08-29 16:49:58,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=200197.33333333334, ans=0.0
+2024-08-29 16:53:29,822 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.834e+02 2.227e+02 2.815e+02 4.534e+02, threshold=4.454e+02, percent-clipped=1.0
+2024-08-29 16:53:35,445 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.63 vs. limit=15.0
+2024-08-29 16:54:00,202 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=200304.0, ans=0.1
+2024-08-29 16:54:08,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=200304.0, ans=0.0
+2024-08-29 16:54:29,213 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.39 vs. limit=15.0
+2024-08-29 16:55:30,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=200357.33333333334, ans=0.125
+2024-08-29 16:56:00,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=200357.33333333334, ans=0.2
+2024-08-29 16:56:29,762 INFO [train.py:1114] (2/4) Epoch 16, batch 250, loss[loss=0.2318, simple_loss=0.3022, pruned_loss=0.05891, ctc_loss=0.1088, over 19404.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2799, pruned_loss=0.05514, ctc_loss=0.1033, over 2755380.50 frames. ], batch size: 67, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:56:47,504 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.26 vs. limit=12.0
+2024-08-29 16:58:24,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=200464.0, ans=0.0
+2024-08-29 16:59:04,418 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.31 vs. limit=15.0
+2024-08-29 17:01:55,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=200677.33333333334, ans=0.125
+2024-08-29 17:03:13,502 INFO [train.py:1114] (2/4) Epoch 16, batch 300, loss[loss=0.2498, simple_loss=0.3113, pruned_loss=0.069, ctc_loss=0.1257, over 19539.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2781, pruned_loss=0.05411, ctc_loss=0.1018, over 3000463.33 frames. ], batch size: 61, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:03:36,033 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.663e+02 1.972e+02 2.398e+02 4.674e+02, threshold=3.943e+02, percent-clipped=1.0
+2024-08-29 17:04:35,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=200837.33333333334, ans=0.125
+2024-08-29 17:08:30,909 INFO [train.py:1114] (2/4) Epoch 16, batch 350, loss[loss=0.1857, simple_loss=0.2519, pruned_loss=0.04225, ctc_loss=0.08741, over 19766.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2784, pruned_loss=0.05417, ctc_loss=0.102, over 3190334.08 frames. ], batch size: 48, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:12:34,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=201157.33333333334, ans=0.2
+2024-08-29 17:13:14,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=201210.66666666666, ans=10.0
+2024-08-29 17:13:14,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=201210.66666666666, ans=0.2
+2024-08-29 17:13:17,636 INFO [train.py:1114] (2/4) Epoch 16, batch 400, loss[loss=0.228, simple_loss=0.2928, pruned_loss=0.05946, ctc_loss=0.1109, over 19496.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2778, pruned_loss=0.05402, ctc_loss=0.1016, over 3342254.71 frames. ], batch size: 54, lr: 9.47e-03, grad_scale: 32.0
+2024-08-29 17:13:45,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=201317.33333333334, ans=0.125
+2024-08-29 17:14:43,813 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.32 vs. limit=15.0
+2024-08-29 17:15:51,037 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.714e+02 1.905e+02 2.508e+02 3.565e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-29 17:16:24,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=201370.66666666666, ans=0.1
+2024-08-29 17:17:07,856 INFO [train.py:1114] (2/4) Epoch 16, batch 450, loss[loss=0.212, simple_loss=0.2857, pruned_loss=0.04991, ctc_loss=0.09618, over 19604.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2784, pruned_loss=0.05423, ctc_loss=0.102, over 3451391.03 frames. ], batch size: 55, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:21:12,275 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=201690.66666666666, ans=0.5
+2024-08-29 17:21:57,475 INFO [train.py:1114] (2/4) Epoch 16, batch 500, loss[loss=0.2285, simple_loss=0.2895, pruned_loss=0.06116, ctc_loss=0.1127, over 19690.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2771, pruned_loss=0.0536, ctc_loss=0.101, over 3546751.85 frames. ], batch size: 63, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:22:02,152 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=201797.33333333334, ans=0.125
+2024-08-29 17:22:46,941 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.689e+02 2.169e+02 2.570e+02 5.370e+02, threshold=4.338e+02, percent-clipped=3.0
+2024-08-29 17:23:43,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=201957.33333333334, ans=0.125
+2024-08-29 17:23:54,683 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.50 vs. limit=15.0
+2024-08-29 17:23:58,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=202010.66666666666, ans=0.0
+2024-08-29 17:24:00,239 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.18 vs. limit=15.0
+2024-08-29 17:24:02,393 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.70 vs. limit=22.5
+2024-08-29 17:24:02,823 INFO [train.py:1114] (2/4) Epoch 16, batch 550, loss[loss=0.224, simple_loss=0.2926, pruned_loss=0.05677, ctc_loss=0.1046, over 19263.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2781, pruned_loss=0.05407, ctc_loss=0.1019, over 3607782.23 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:24:07,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202064.0, ans=0.1
+2024-08-29 17:24:26,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=202117.33333333334, ans=0.2
+2024-08-29 17:24:48,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=202170.66666666666, ans=0.0
+2024-08-29 17:25:01,332 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:25:07,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=202277.33333333334, ans=0.0
+2024-08-29 17:25:15,251 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.24 vs. limit=22.5
+2024-08-29 17:25:21,518 INFO [train.py:1114] (2/4) Epoch 16, batch 600, loss[loss=0.2229, simple_loss=0.2816, pruned_loss=0.06015, ctc_loss=0.1097, over 19406.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2775, pruned_loss=0.05362, ctc_loss=0.1012, over 3666050.09 frames. ], batch size: 67, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:25:21,712 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=202330.66666666666, ans=10.0
+2024-08-29 17:25:21,956 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.60 vs. limit=12.0
+2024-08-29 17:26:15,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten.whitening_limit, batch_count=202330.66666666666, ans=15.0
+2024-08-29 17:26:28,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202384.0, ans=0.1
+2024-08-29 17:27:04,571 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.652e+02 1.934e+02 2.290e+02 3.719e+02, threshold=3.867e+02, percent-clipped=0.0
+2024-08-29 17:27:59,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=202437.33333333334, ans=0.0
+2024-08-29 17:31:03,799 INFO [train.py:1114] (2/4) Epoch 16, batch 650, loss[loss=0.2005, simple_loss=0.2713, pruned_loss=0.04744, ctc_loss=0.08696, over 19768.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2764, pruned_loss=0.05311, ctc_loss=0.1, over 3716557.92 frames. ], batch size: 54, lr: 9.44e-03, grad_scale: 32.0
+2024-08-29 17:32:10,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-29 17:32:16,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=202597.33333333334, ans=0.07
+2024-08-29 17:32:51,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=202757.33333333334, ans=0.0
+2024-08-29 17:33:12,864 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=202810.66666666666, ans=0.0
+2024-08-29 17:33:47,573 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=202810.66666666666, ans=0.125
+2024-08-29 17:33:59,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=202810.66666666666, ans=0.0
+2024-08-29 17:34:02,139 INFO [train.py:1114] (2/4) Epoch 16, batch 700, loss[loss=0.1926, simple_loss=0.2673, pruned_loss=0.0425, ctc_loss=0.08229, over 19714.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2766, pruned_loss=0.05315, ctc_loss=0.1003, over 3747994.16 frames. ], batch size: 51, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:35:12,329 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.286e+02 1.755e+02 2.110e+02 2.761e+02 5.047e+02, threshold=4.220e+02, percent-clipped=5.0
+2024-08-29 17:35:36,305 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.43 vs. limit=10.0
+2024-08-29 17:36:26,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=202970.66666666666, ans=0.2
+2024-08-29 17:41:52,269 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=203077.33333333334, ans=0.125
+2024-08-29 17:42:01,660 INFO [train.py:1114] (2/4) Epoch 16, batch 750, loss[loss=0.2288, simple_loss=0.2881, pruned_loss=0.06193, ctc_loss=0.1141, over 19509.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2757, pruned_loss=0.05286, ctc_loss=0.09961, over 3774184.24 frames. ], batch size: 54, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:42:07,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=203130.66666666666, ans=0.125
+2024-08-29 17:42:07,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=203130.66666666666, ans=0.0
+2024-08-29 17:42:15,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=203130.66666666666, ans=0.125
+2024-08-29 17:42:33,500 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=203237.33333333334, ans=0.2
+2024-08-29 17:46:05,955 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.90 vs. limit=10.0
+2024-08-29 17:46:10,166 INFO [train.py:1114] (2/4) Epoch 16, batch 800, loss[loss=0.1791, simple_loss=0.2471, pruned_loss=0.04081, ctc_loss=0.0735, over 19413.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2759, pruned_loss=0.05308, ctc_loss=0.1001, over 3795165.12 frames. ], batch size: 48, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:48:15,893 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.745e+02 2.069e+02 2.556e+02 3.770e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-29 17:49:06,932 INFO [train.py:1114] (2/4) Epoch 16, batch 850, loss[loss=0.2089, simple_loss=0.2795, pruned_loss=0.05105, ctc_loss=0.0903, over 19676.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2765, pruned_loss=0.05346, ctc_loss=0.1008, over 3814723.49 frames. ], batch size: 59, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:49:08,595 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.61 vs. limit=15.0
+2024-08-29 17:49:10,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=203664.0, ans=0.025
+2024-08-29 17:49:21,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=203717.33333333334, ans=0.125
+2024-08-29 17:49:24,007 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=203717.33333333334, ans=0.0
+2024-08-29 17:49:46,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=203824.0, ans=0.05
+2024-08-29 17:50:21,093 INFO [train.py:1114] (2/4) Epoch 16, batch 900, loss[loss=0.2114, simple_loss=0.268, pruned_loss=0.05651, ctc_loss=0.1047, over 19797.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2773, pruned_loss=0.05408, ctc_loss=0.1018, over 3819248.79 frames. ], batch size: 49, lr: 9.41e-03, grad_scale: 32.0
+2024-08-29 17:50:31,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=203930.66666666666, ans=0.1
+2024-08-29 17:50:38,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=203930.66666666666, ans=0.125
+2024-08-29 17:50:48,718 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.676e+02 1.827e+02 2.350e+02 4.099e+02, threshold=3.653e+02, percent-clipped=0.0
+2024-08-29 17:53:24,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=204144.0, ans=0.1
+2024-08-29 17:53:28,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=204144.0, ans=0.0
+2024-08-29 17:53:34,438 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.58 vs. limit=15.0
+2024-08-29 17:53:36,853 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.whiten.whitening_limit, batch_count=204197.33333333334, ans=12.0
+2024-08-29 17:53:37,487 INFO [train.py:1114] (2/4) Epoch 16, batch 950, loss[loss=0.2416, simple_loss=0.2811, pruned_loss=0.07375, ctc_loss=0.1366, over 19492.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2783, pruned_loss=0.05491, ctc_loss=0.1033, over 3821051.82 frames. ], batch size: 49, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:54:09,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=204197.33333333334, ans=0.0
+2024-08-29 17:54:09,702 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.19 vs. limit=15.0
+2024-08-29 17:55:45,616 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=204464.0, ans=0.0
+2024-08-29 17:55:46,643 INFO [train.py:1114] (2/4) Epoch 16, batch 1000, loss[loss=0.1847, simple_loss=0.2602, pruned_loss=0.03905, ctc_loss=0.0776, over 19853.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2788, pruned_loss=0.05503, ctc_loss=0.1037, over 3817152.03 frames. ], batch size: 52, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:55:59,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=204517.33333333334, ans=0.0
+2024-08-29 17:56:07,199 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.649e+02 1.918e+02 2.268e+02 3.238e+02, threshold=3.836e+02, percent-clipped=0.0
+2024-08-29 17:56:53,350 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=204570.66666666666, ans=0.0
+2024-08-29 17:57:39,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=204624.0, ans=0.0
+2024-08-29 17:57:49,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=204677.33333333334, ans=0.2
+2024-08-29 17:57:54,935 INFO [train.py:1114] (2/4) Epoch 16, batch 1050, loss[loss=0.2255, simple_loss=0.2934, pruned_loss=0.05762, ctc_loss=0.1062, over 19846.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2783, pruned_loss=0.05492, ctc_loss=0.1033, over 3822679.86 frames. ], batch size: 57, lr: 9.39e-03, grad_scale: 32.0
+2024-08-29 17:59:15,963 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=204784.0, ans=0.0
+2024-08-29 17:59:20,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=204784.0, ans=0.025
+2024-08-29 17:59:50,030 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:00:15,655 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.21 vs. limit=15.0
+2024-08-29 18:00:18,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=204890.66666666666, ans=0.95
+2024-08-29 18:00:29,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=204890.66666666666, ans=10.0
+2024-08-29 18:00:30,494 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.66 vs. limit=22.5
+2024-08-29 18:00:37,277 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=204944.0, ans=0.2
+2024-08-29 18:00:45,146 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.23 vs. limit=15.0
+2024-08-29 18:00:53,274 INFO [train.py:1114] (2/4) Epoch 16, batch 1100, loss[loss=0.1937, simple_loss=0.2653, pruned_loss=0.04562, ctc_loss=0.07705, over 19591.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2776, pruned_loss=0.05435, ctc_loss=0.1023, over 3829363.67 frames. ], batch size: 52, lr: 9.39e-03, grad_scale: 16.0
+2024-08-29 18:00:53,867 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.00 vs. limit=22.5
+2024-08-29 18:01:14,286 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.16 vs. limit=22.5
+2024-08-29 18:01:16,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=205050.66666666666, ans=0.125
+2024-08-29 18:01:21,322 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=205050.66666666666, ans=0.1
+2024-08-29 18:01:27,926 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 1.694e+02 1.874e+02 2.325e+02 3.063e+02, threshold=3.748e+02, percent-clipped=0.0
+2024-08-29 18:01:40,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=205104.0, ans=0.0
+2024-08-29 18:02:31,906 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=205210.66666666666, ans=0.125
+2024-08-29 18:02:43,482 INFO [train.py:1114] (2/4) Epoch 16, batch 1150, loss[loss=0.1997, simple_loss=0.2617, pruned_loss=0.0494, ctc_loss=0.09744, over 19594.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2773, pruned_loss=0.05428, ctc_loss=0.102, over 3828213.04 frames. ], batch size: 52, lr: 9.38e-03, grad_scale: 16.0
+2024-08-29 18:02:49,208 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.85 vs. limit=15.0
+2024-08-29 18:03:01,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=205317.33333333334, ans=0.025
+2024-08-29 18:03:01,747 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.85 vs. limit=10.0
+2024-08-29 18:03:09,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=205370.66666666666, ans=0.0
+2024-08-29 18:03:25,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=205424.0, ans=0.125
+2024-08-29 18:03:26,279 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=205424.0, ans=0.125
+2024-08-29 18:03:33,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=205477.33333333334, ans=0.125
+2024-08-29 18:03:34,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=205477.33333333334, ans=0.125
+2024-08-29 18:03:40,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=205477.33333333334, ans=0.125
+2024-08-29 18:03:45,087 INFO [train.py:1114] (2/4) Epoch 16, batch 1200, loss[loss=0.2324, simple_loss=0.2974, pruned_loss=0.06049, ctc_loss=0.1163, over 19854.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2779, pruned_loss=0.05427, ctc_loss=0.1021, over 3824553.84 frames. ], batch size: 57, lr: 9.38e-03, grad_scale: 32.0
+2024-08-29 18:03:47,667 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=205530.66666666666, ans=0.125
+2024-08-29 18:03:54,174 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.15 vs. limit=22.5
+2024-08-29 18:04:06,319 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.725e+02 2.012e+02 2.470e+02 3.418e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-29 18:04:12,362 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:04:50,730 INFO [train.py:1114] (2/4) Epoch 16, batch 1250, loss[loss=0.2212, simple_loss=0.2862, pruned_loss=0.05793, ctc_loss=0.1008, over 19543.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2781, pruned_loss=0.05422, ctc_loss=0.1019, over 3843199.45 frames. ], batch size: 61, lr: 9.37e-03, grad_scale: 32.0
+2024-08-29 18:05:16,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=205797.33333333334, ans=0.0
+2024-08-29 18:05:32,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=205904.0, ans=0.2
+2024-08-29 18:05:38,185 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=205904.0, ans=0.0
+2024-08-29 18:05:39,446 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=205904.0, ans=0.125
+2024-08-29 18:05:45,843 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.81 vs. limit=10.0
+2024-08-29 18:05:58,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=205957.33333333334, ans=0.025
+2024-08-29 18:06:21,281 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.47 vs. limit=15.0
+2024-08-29 18:06:22,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=205957.33333333334, ans=0.0
+2024-08-29 18:06:35,888 INFO [train.py:1114] (2/4) Epoch 16, batch 1300, loss[loss=0.2713, simple_loss=0.3177, pruned_loss=0.08123, ctc_loss=0.1563, over 18896.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2773, pruned_loss=0.05386, ctc_loss=0.1012, over 3846464.53 frames. ], batch size: 76, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:06:38,342 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=206064.0, ans=0.1
+2024-08-29 18:06:46,580 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.20 vs. limit=15.0
+2024-08-29 18:06:57,547 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.716e+02 2.090e+02 2.690e+02 4.268e+02, threshold=4.180e+02, percent-clipped=3.0
+2024-08-29 18:07:34,536 INFO [train.py:1114] (2/4) Epoch 16, batch 1350, loss[loss=0.2206, simple_loss=0.2834, pruned_loss=0.05728, ctc_loss=0.1084, over 19779.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2771, pruned_loss=0.05369, ctc_loss=0.101, over 3856788.89 frames. ], batch size: 54, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:07:54,346 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.82 vs. limit=6.0
+2024-08-29 18:08:14,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=206437.33333333334, ans=0.1
+2024-08-29 18:09:47,309 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.65 vs. limit=15.0
+2024-08-29 18:10:08,574 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=206490.66666666666, ans=0.0
+2024-08-29 18:10:12,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=206544.0, ans=0.125
+2024-08-29 18:10:56,042 INFO [train.py:1114] (2/4) Epoch 16, batch 1400, loss[loss=0.1717, simple_loss=0.2377, pruned_loss=0.03885, ctc_loss=0.07008, over 19676.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2772, pruned_loss=0.05392, ctc_loss=0.1013, over 3863689.80 frames. ], batch size: 46, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:11:02,537 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.61 vs. limit=15.0
+2024-08-29 18:13:15,203 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.659e+02 1.830e+02 2.117e+02 3.619e+02, threshold=3.659e+02, percent-clipped=0.0
+2024-08-29 18:13:25,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=206704.0, ans=0.125
+2024-08-29 18:13:26,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=206704.0, ans=0.125
+2024-08-29 18:14:17,904 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.51 vs. limit=15.0
+2024-08-29 18:14:29,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=206757.33333333334, ans=0.0
+2024-08-29 18:14:35,444 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=206810.66666666666, ans=0.0
+2024-08-29 18:14:36,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=206810.66666666666, ans=0.125
+2024-08-29 18:14:42,496 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:14:49,664 INFO [train.py:1114] (2/4) Epoch 16, batch 1450, loss[loss=0.2283, simple_loss=0.2874, pruned_loss=0.06094, ctc_loss=0.1182, over 19694.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2777, pruned_loss=0.05415, ctc_loss=0.1018, over 3862915.83 frames. ], batch size: 63, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:15:17,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=206917.33333333334, ans=0.05
+2024-08-29 18:15:22,861 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.30 vs. limit=15.0
+2024-08-29 18:15:28,643 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=206970.66666666666, ans=0.125
+2024-08-29 18:16:10,738 INFO [train.py:1114] (2/4) Epoch 16, batch 1500, loss[loss=0.2188, simple_loss=0.2884, pruned_loss=0.05343, ctc_loss=0.1059, over 19577.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2775, pruned_loss=0.05383, ctc_loss=0.1012, over 3862313.53 frames. ], batch size: 57, lr: 9.34e-03, grad_scale: 32.0
+2024-08-29 18:16:22,288 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.06 vs. limit=6.0
+2024-08-29 18:16:32,421 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.680e+02 1.893e+02 2.490e+02 3.994e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-29 18:16:50,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=207290.66666666666, ans=0.1
+2024-08-29 18:17:17,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=207344.0, ans=0.0
+2024-08-29 18:17:19,469 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff2.min_abs, batch_count=207344.0, ans=0.1
+2024-08-29 18:17:31,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=207344.0, ans=0.0
+2024-08-29 18:17:34,476 INFO [train.py:1114] (2/4) Epoch 16, batch 1550, loss[loss=0.2143, simple_loss=0.2844, pruned_loss=0.05306, ctc_loss=0.09529, over 19614.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2777, pruned_loss=0.05406, ctc_loss=0.1017, over 3846657.29 frames. ], batch size: 60, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:17:45,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=207450.66666666666, ans=0.125
+2024-08-29 18:17:47,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=207450.66666666666, ans=0.1
+2024-08-29 18:18:00,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=207504.0, ans=0.0
+2024-08-29 18:19:22,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=207504.0, ans=0.125
+2024-08-29 18:19:46,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=207610.66666666666, ans=0.125
+2024-08-29 18:19:55,343 INFO [train.py:1114] (2/4) Epoch 16, batch 1600, loss[loss=0.2272, simple_loss=0.2937, pruned_loss=0.0584, ctc_loss=0.1098, over 19857.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.277, pruned_loss=0.05363, ctc_loss=0.101, over 3836976.80 frames. ], batch size: 57, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:21:45,090 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.98 vs. limit=15.0
+2024-08-29 18:21:48,821 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=207717.33333333334, ans=0.125
+2024-08-29 18:21:55,744 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.773e+02 1.965e+02 2.508e+02 5.321e+02, threshold=3.930e+02, percent-clipped=3.0
+2024-08-29 18:21:56,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=207717.33333333334, ans=0.125
+2024-08-29 18:22:13,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=207824.0, ans=0.0
+2024-08-29 18:22:14,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=207824.0, ans=0.2
+2024-08-29 18:22:21,949 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.86 vs. limit=10.0
+2024-08-29 18:22:51,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=207877.33333333334, ans=0.07
+2024-08-29 18:23:01,504 INFO [train.py:1114] (2/4) Epoch 16, batch 1650, loss[loss=0.2119, simple_loss=0.2813, pruned_loss=0.05271, ctc_loss=0.09248, over 19662.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.277, pruned_loss=0.05346, ctc_loss=0.1006, over 3832569.65 frames. ], batch size: 59, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:23:06,976 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.08 vs. limit=6.0
+2024-08-29 18:23:09,187 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=207930.66666666666, ans=0.2
+2024-08-29 18:23:47,521 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=207984.0, ans=0.125
+2024-08-29 18:23:48,060 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.10 vs. limit=15.0
+2024-08-29 18:24:17,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=208037.33333333334, ans=0.0
+2024-08-29 18:24:40,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=208090.66666666666, ans=0.2
+2024-08-29 18:24:58,900 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.73 vs. limit=12.0
+2024-08-29 18:26:13,690 INFO [train.py:1114] (2/4) Epoch 16, batch 1700, loss[loss=0.2063, simple_loss=0.2625, pruned_loss=0.0544, ctc_loss=0.1031, over 19681.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2767, pruned_loss=0.05316, ctc_loss=0.1, over 3846790.89 frames. ], batch size: 46, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:26:17,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=208197.33333333334, ans=0.125
+2024-08-29 18:26:26,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=208250.66666666666, ans=0.1
+2024-08-29 18:26:30,618 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.36 vs. limit=15.0
+2024-08-29 18:26:34,594 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.759e+02 2.180e+02 2.878e+02 5.111e+02, threshold=4.361e+02, percent-clipped=4.0
+2024-08-29 18:26:35,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=208304.0, ans=0.125
+2024-08-29 18:27:10,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=208410.66666666666, ans=0.0
+2024-08-29 18:27:13,922 INFO [train.py:1114] (2/4) Epoch 16, batch 1750, loss[loss=0.191, simple_loss=0.2499, pruned_loss=0.04782, ctc_loss=0.09117, over 19663.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2763, pruned_loss=0.053, ctc_loss=0.09979, over 3850963.00 frames. ], batch size: 45, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:27:52,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.whiten.whitening_limit, batch_count=208517.33333333334, ans=12.0
+2024-08-29 18:29:08,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=208570.66666666666, ans=0.125
+2024-08-29 18:29:11,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=208570.66666666666, ans=0.1
+2024-08-29 18:29:19,266 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.24 vs. limit=15.0
+2024-08-29 18:29:20,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=208624.0, ans=0.2
+2024-08-29 18:30:02,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=208624.0, ans=0.125
+2024-08-29 18:30:17,047 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=208730.66666666666, ans=0.125
+2024-08-29 18:30:17,807 INFO [train.py:1114] (2/4) Epoch 16, batch 1800, loss[loss=0.2243, simple_loss=0.2916, pruned_loss=0.05658, ctc_loss=0.1096, over 19613.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2766, pruned_loss=0.05304, ctc_loss=0.09997, over 3852362.47 frames. ], batch size: 55, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:30:34,917 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=208784.0, ans=0.125
+2024-08-29 18:30:45,742 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 1.693e+02 1.985e+02 2.381e+02 4.228e+02, threshold=3.971e+02, percent-clipped=0.0
+2024-08-29 18:30:58,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=208837.33333333334, ans=0.1
+2024-08-29 18:31:26,475 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.25 vs. limit=6.0
+2024-08-29 18:31:45,937 INFO [train.py:1114] (2/4) Epoch 16, batch 1850, loss[loss=0.2274, simple_loss=0.2931, pruned_loss=0.05955, ctc_loss=0.1063, over 19584.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2765, pruned_loss=0.053, ctc_loss=0.09977, over 3855814.81 frames. ], batch size: 57, lr: 9.30e-03, grad_scale: 32.0
+2024-08-29 18:31:57,183 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.39 vs. limit=22.5
+2024-08-29 18:32:10,534 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=208997.33333333334, ans=0.125
+2024-08-29 18:32:13,097 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=208997.33333333334, ans=0.035
+2024-08-29 18:32:51,677 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=209157.33333333334, ans=0.125
+2024-08-29 18:32:51,944 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.30 vs. limit=15.0
+2024-08-29 18:32:53,753 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=209157.33333333334, ans=0.025
+2024-08-29 18:33:06,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=209210.66666666666, ans=22.5
+2024-08-29 18:33:08,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=209210.66666666666, ans=0.125
+2024-08-29 18:33:17,354 INFO [train.py:1114] (2/4) Epoch 16, batch 1900, loss[loss=0.2241, simple_loss=0.2963, pruned_loss=0.05482, ctc_loss=0.1054, over 19668.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2774, pruned_loss=0.05345, ctc_loss=0.1005, over 3860371.95 frames. ], batch size: 59, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:33:28,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=209317.33333333334, ans=0.125
+2024-08-29 18:33:36,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=209317.33333333334, ans=0.125
+2024-08-29 18:33:36,244 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:33:40,790 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.785e+02 2.354e+02 2.964e+02 6.037e+02, threshold=4.708e+02, percent-clipped=9.0
+2024-08-29 18:34:03,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=209424.0, ans=0.04949747468305833
+2024-08-29 18:34:11,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=209477.33333333334, ans=0.125
+2024-08-29 18:34:30,807 INFO [train.py:1114] (2/4) Epoch 16, batch 1950, loss[loss=0.2008, simple_loss=0.2748, pruned_loss=0.04581, ctc_loss=0.08767, over 19581.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2785, pruned_loss=0.05372, ctc_loss=0.1011, over 3869552.69 frames. ], batch size: 52, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:34:40,178 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=209530.66666666666, ans=0.125
+2024-08-29 18:35:03,424 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.89 vs. limit=15.0
+2024-08-29 18:35:21,779 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.29 vs. limit=22.5
+2024-08-29 18:35:23,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=209690.66666666666, ans=0.125
+2024-08-29 18:35:27,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=209690.66666666666, ans=0.125
+2024-08-29 18:35:34,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=209744.0, ans=0.2
+2024-08-29 18:35:42,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=209744.0, ans=0.125
+2024-08-29 18:35:46,778 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.05 vs. limit=15.0
+2024-08-29 18:35:48,767 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209744.0, ans=0.1
+2024-08-29 18:35:51,674 INFO [train.py:1114] (2/4) Epoch 16, batch 2000, loss[loss=0.1655, simple_loss=0.234, pruned_loss=0.03539, ctc_loss=0.06541, over 19647.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2787, pruned_loss=0.05387, ctc_loss=0.1016, over 3853865.43 frames. ], batch size: 45, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:35:52,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-29 18:35:54,623 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-29 18:36:03,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=209850.66666666666, ans=0.1
+2024-08-29 18:36:13,158 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.666e+02 1.888e+02 2.185e+02 3.516e+02, threshold=3.775e+02, percent-clipped=0.0
+2024-08-29 18:36:30,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=209957.33333333334, ans=0.0
+2024-08-29 18:36:30,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=209957.33333333334, ans=0.025
+2024-08-29 18:36:31,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=209957.33333333334, ans=0.125
+2024-08-29 18:36:37,679 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.52 vs. limit=10.0
+2024-08-29 18:36:49,201 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=209957.33333333334, ans=0.125
+2024-08-29 18:37:02,151 INFO [train.py:1114] (2/4) Epoch 16, batch 2050, loss[loss=0.2107, simple_loss=0.2642, pruned_loss=0.05787, ctc_loss=0.1038, over 19714.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2778, pruned_loss=0.05387, ctc_loss=0.1016, over 3850267.81 frames. ], batch size: 47, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:37:41,619 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.25 vs. limit=6.0
+2024-08-29 18:38:17,904 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=210170.66666666666, ans=0.0
+2024-08-29 18:38:44,388 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.33 vs. limit=15.0
+2024-08-29 18:38:59,605 INFO [train.py:1114] (2/4) Epoch 16, batch 2100, loss[loss=0.2271, simple_loss=0.2879, pruned_loss=0.06084, ctc_loss=0.1114, over 19770.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2778, pruned_loss=0.05408, ctc_loss=0.1021, over 3857183.94 frames. ], batch size: 54, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:39:09,341 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=210384.0, ans=0.125
+2024-08-29 18:39:19,479 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.00 vs. limit=6.0
+2024-08-29 18:39:22,238 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.792e+02 2.112e+02 2.675e+02 4.176e+02, threshold=4.223e+02, percent-clipped=3.0
+2024-08-29 18:39:23,732 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=210437.33333333334, ans=0.125
+2024-08-29 18:39:52,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=210544.0, ans=0.1
+2024-08-29 18:39:53,649 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=210544.0, ans=0.0
+2024-08-29 18:39:57,960 INFO [train.py:1114] (2/4) Epoch 16, batch 2150, loss[loss=0.1979, simple_loss=0.2634, pruned_loss=0.04814, ctc_loss=0.09041, over 19853.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2766, pruned_loss=0.05342, ctc_loss=0.1007, over 3867734.26 frames. ], batch size: 52, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:40:05,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=210597.33333333334, ans=0.0
+2024-08-29 18:40:55,491 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.96 vs. limit=15.0
+2024-08-29 18:41:00,749 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.54 vs. limit=12.0
+2024-08-29 18:41:01,662 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.74 vs. limit=15.0
+2024-08-29 18:41:04,882 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.02 vs. limit=15.0
+2024-08-29 18:41:06,023 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=210810.66666666666, ans=15.0
+2024-08-29 18:41:08,950 INFO [train.py:1114] (2/4) Epoch 16, batch 2200, loss[loss=0.239, simple_loss=0.3015, pruned_loss=0.06409, ctc_loss=0.1204, over 19582.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.277, pruned_loss=0.05366, ctc_loss=0.1009, over 3865825.84 frames. ], batch size: 57, lr: 9.26e-03, grad_scale: 32.0
+2024-08-29 18:41:29,791 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.757e+02 2.042e+02 2.598e+02 4.148e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-08-29 18:41:58,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=210970.66666666666, ans=0.0
+2024-08-29 18:42:13,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=211024.0, ans=0.125
+2024-08-29 18:42:31,579 INFO [train.py:1114] (2/4) Epoch 16, batch 2250, loss[loss=0.2055, simple_loss=0.2823, pruned_loss=0.04758, ctc_loss=0.08367, over 19607.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2779, pruned_loss=0.05424, ctc_loss=0.1019, over 3865772.78 frames. ], batch size: 55, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:42:40,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211130.66666666666, ans=0.1
+2024-08-29 18:42:42,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=211184.0, ans=0.125
+2024-08-29 18:43:00,676 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.50 vs. limit=15.0
+2024-08-29 18:43:13,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=211290.66666666666, ans=0.125
+2024-08-29 18:44:24,290 INFO [train.py:1114] (2/4) Epoch 16, batch 2300, loss[loss=0.1897, simple_loss=0.255, pruned_loss=0.04568, ctc_loss=0.08266, over 19504.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2771, pruned_loss=0.05418, ctc_loss=0.1017, over 3860309.09 frames. ], batch size: 49, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:44:29,163 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:44:58,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=211397.33333333334, ans=0.0
+2024-08-29 18:44:59,712 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=211397.33333333334, ans=0.0
+2024-08-29 18:45:01,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=211450.66666666666, ans=0.2
+2024-08-29 18:45:05,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=211450.66666666666, ans=0.0
+2024-08-29 18:45:10,441 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.785e+02 2.121e+02 2.618e+02 4.213e+02, threshold=4.241e+02, percent-clipped=2.0
+2024-08-29 18:45:16,585 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.47 vs. limit=15.0
+2024-08-29 18:45:31,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=211557.33333333334, ans=0.2
+2024-08-29 18:45:34,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=211610.66666666666, ans=0.2
+2024-08-29 18:45:59,063 INFO [train.py:1114] (2/4) Epoch 16, batch 2350, loss[loss=0.2154, simple_loss=0.2906, pruned_loss=0.05129, ctc_loss=0.09405, over 19678.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2771, pruned_loss=0.05414, ctc_loss=0.1018, over 3863352.78 frames. ], batch size: 63, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:46:05,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=211664.0, ans=0.0
+2024-08-29 18:46:11,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=211717.33333333334, ans=0.0
+2024-08-29 18:46:39,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=211824.0, ans=0.125
+2024-08-29 18:46:45,253 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.06 vs. limit=15.0
+2024-08-29 18:46:47,276 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.63 vs. limit=15.0
+2024-08-29 18:46:52,711 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=211877.33333333334, ans=0.1
+2024-08-29 18:47:00,335 INFO [train.py:1114] (2/4) Epoch 16, batch 2400, loss[loss=0.2439, simple_loss=0.2983, pruned_loss=0.06797, ctc_loss=0.1338, over 19443.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2791, pruned_loss=0.05482, ctc_loss=0.1029, over 3857038.45 frames. ], batch size: 71, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:47:07,365 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.33 vs. limit=15.0
+2024-08-29 18:47:08,325 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:47:20,729 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.800e+02 2.132e+02 2.653e+02 4.129e+02, threshold=4.264e+02, percent-clipped=0.0
+2024-08-29 18:47:22,152 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212037.33333333334, ans=0.1
+2024-08-29 18:47:27,677 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.23 vs. limit=15.0
+2024-08-29 18:47:50,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=212144.0, ans=0.0
+2024-08-29 18:47:56,903 INFO [train.py:1114] (2/4) Epoch 16, batch 2450, loss[loss=0.271, simple_loss=0.3128, pruned_loss=0.08227, ctc_loss=0.1618, over 12918.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2826, pruned_loss=0.05731, ctc_loss=0.1081, over 3728353.95 frames. ], batch size: 140, lr: 9.23e-03, grad_scale: 32.0
+2024-08-29 18:48:00,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=212197.33333333334, ans=0.125
+2024-08-29 18:48:18,668 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.61 vs. limit=15.0
+2024-08-29 18:48:32,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=212304.0, ans=0.0
+2024-08-29 18:55:35,507 INFO [train.py:1114] (2/4) Epoch 17, batch 0, loss[loss=0.1952, simple_loss=0.2559, pruned_loss=0.04946, ctc_loss=0.08907, over 19416.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2559, pruned_loss=0.04946, ctc_loss=0.08907, over 19416.00 frames. ], batch size: 48, lr: 8.95e-03, grad_scale: 32.0
+2024-08-29 18:55:35,507 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-29 18:55:47,290 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.5471, 3.1733, 2.2076, 2.8463], device='cuda:2')
+2024-08-29 18:56:01,940 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.0126, 4.3107, 3.7867, 4.0314], device='cuda:2')
+2024-08-29 18:56:02,371 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.9238, 2.1456, 3.4846, 3.6289], device='cuda:2')
+2024-08-29 18:56:04,687 INFO [train.py:1146] (2/4) Epoch 17, validation: loss=0.1843, simple_loss=0.2733, pruned_loss=0.03544, ctc_loss=0.06098, over 944034.00 frames.
+2024-08-29 18:56:04,687 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13710MB
+2024-08-29 18:56:11,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.07 vs. limit=22.5
+2024-08-29 18:56:56,932 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-29 18:57:44,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-29 18:58:11,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212458.66666666666, ans=0.1
+2024-08-29 18:58:20,420 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=212512.0, ans=10.0
+2024-08-29 18:58:22,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.80 vs. limit=15.0
+2024-08-29 18:58:29,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212512.0, ans=0.1
+2024-08-29 18:58:30,847 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.824e+02 2.030e+02 2.233e+02 3.073e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-29 18:58:38,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=212565.33333333334, ans=0.125
+2024-08-29 18:58:55,897 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=212618.66666666666, ans=0.0
+2024-08-29 19:05:21,284 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.07 vs. limit=22.5
+2024-08-29 19:05:21,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=212618.66666666666, ans=0.2
+2024-08-29 19:05:26,891 INFO [train.py:1114] (2/4) Epoch 17, batch 50, loss[loss=0.1845, simple_loss=0.2497, pruned_loss=0.04289, ctc_loss=0.08394, over 19722.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2781, pruned_loss=0.05418, ctc_loss=0.1026, over 845315.44 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:07:29,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=212672.0, ans=0.125
+2024-08-29 19:07:37,575 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=212672.0, ans=0.125
+2024-08-29 19:07:54,495 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.91 vs. limit=15.0
+2024-08-29 19:08:15,521 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=212778.66666666666, ans=0.05
+2024-08-29 19:08:31,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=212832.0, ans=0.125
+2024-08-29 19:08:52,527 INFO [train.py:1114] (2/4) Epoch 17, batch 100, loss[loss=0.1877, simple_loss=0.2638, pruned_loss=0.04047, ctc_loss=0.07672, over 19753.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2776, pruned_loss=0.0535, ctc_loss=0.1012, over 1498877.26 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:08:57,930 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.02 vs. limit=10.0
+2024-08-29 19:09:04,521 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=212992.0, ans=0.0
+2024-08-29 19:09:06,761 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=212992.0, ans=0.125
+2024-08-29 19:09:25,905 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.707e+02 1.910e+02 2.335e+02 3.363e+02, threshold=3.820e+02, percent-clipped=0.0
+2024-08-29 19:09:34,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=213098.66666666666, ans=0.0
+2024-08-29 19:09:36,586 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=213098.66666666666, ans=0.125
+2024-08-29 19:09:53,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=213152.0, ans=0.125
+2024-08-29 19:09:55,289 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.39 vs. limit=22.5
+2024-08-29 19:09:58,140 INFO [train.py:1114] (2/4) Epoch 17, batch 150, loss[loss=0.1768, simple_loss=0.2476, pruned_loss=0.03841, ctc_loss=0.07289, over 19722.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2759, pruned_loss=0.0524, ctc_loss=0.09899, over 2027544.43 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:12:14,614 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.58 vs. limit=22.5
+2024-08-29 19:12:24,360 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=213312.0, ans=0.025
+2024-08-29 19:16:09,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=213365.33333333334, ans=0.0
+2024-08-29 19:16:10,390 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=213365.33333333334, ans=0.025
+2024-08-29 19:16:17,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-29 19:16:20,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-29 19:16:29,773 INFO [train.py:1114] (2/4) Epoch 17, batch 200, loss[loss=0.2228, simple_loss=0.2834, pruned_loss=0.05948, ctc_loss=0.1082, over 18397.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2749, pruned_loss=0.05217, ctc_loss=0.09843, over 2435139.54 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:25:01,400 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=213472.0, ans=0.125
+2024-08-29 19:26:30,513 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.59 vs. limit=6.0
+2024-08-29 19:27:13,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=213525.33333333334, ans=0.2
+2024-08-29 19:27:57,285 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.724e+02 1.931e+02 2.405e+02 4.691e+02, threshold=3.862e+02, percent-clipped=4.0
+2024-08-29 19:28:22,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=213685.33333333334, ans=0.09899494936611666
+2024-08-29 19:28:31,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213685.33333333334, ans=0.125
+2024-08-29 19:28:32,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=213685.33333333334, ans=0.04949747468305833
+2024-08-29 19:28:38,496 INFO [train.py:1114] (2/4) Epoch 17, batch 250, loss[loss=0.2113, simple_loss=0.2798, pruned_loss=0.0518, ctc_loss=0.09803, over 19307.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2749, pruned_loss=0.05209, ctc_loss=0.09832, over 2755756.17 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:28:38,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=213738.66666666666, ans=0.2
+2024-08-29 19:29:14,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=213792.0, ans=0.125
+2024-08-29 19:29:31,723 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.22 vs. limit=15.0
+2024-08-29 19:29:40,014 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.30 vs. limit=10.0
+2024-08-29 19:29:44,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=213898.66666666666, ans=0.125
+2024-08-29 19:30:03,484 INFO [train.py:1114] (2/4) Epoch 17, batch 300, loss[loss=0.2625, simple_loss=0.3106, pruned_loss=0.07829, ctc_loss=0.1445, over 19521.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2754, pruned_loss=0.05218, ctc_loss=0.09871, over 3000078.39 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:30:03,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=214005.33333333334, ans=0.125
+2024-08-29 19:30:03,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=214005.33333333334, ans=0.0
+2024-08-29 19:30:25,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=214058.66666666666, ans=0.125
+2024-08-29 19:31:15,943 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=214112.0, ans=0.0
+2024-08-29 19:32:02,220 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.689e+02 1.972e+02 2.447e+02 4.331e+02, threshold=3.945e+02, percent-clipped=1.0
+2024-08-29 19:32:29,316 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.71 vs. limit=15.0
+2024-08-29 19:32:41,672 INFO [train.py:1114] (2/4) Epoch 17, batch 350, loss[loss=0.1969, simple_loss=0.2561, pruned_loss=0.04954, ctc_loss=0.09658, over 19751.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2759, pruned_loss=0.05248, ctc_loss=0.09934, over 3189055.41 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:32:48,341 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.34 vs. limit=10.0
+2024-08-29 19:33:32,197 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.21 vs. limit=15.0
+2024-08-29 19:33:43,915 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.49 vs. limit=12.0
+2024-08-29 19:34:13,751 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=214485.33333333334, ans=0.125
+2024-08-29 19:34:18,285 INFO [train.py:1114] (2/4) Epoch 17, batch 400, loss[loss=0.211, simple_loss=0.2806, pruned_loss=0.05164, ctc_loss=0.09515, over 19496.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2751, pruned_loss=0.05201, ctc_loss=0.09856, over 3340401.63 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:34:21,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=214538.66666666666, ans=0.2
+2024-08-29 19:34:39,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=214538.66666666666, ans=0.125
+2024-08-29 19:34:44,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=214592.0, ans=0.1
+2024-08-29 19:35:29,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214592.0, ans=0.1
+2024-08-29 19:35:31,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-29 19:35:39,776 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214645.33333333334, ans=0.1
+2024-08-29 19:35:43,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-29 19:36:30,693 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.665e+02 1.964e+02 2.553e+02 4.238e+02, threshold=3.929e+02, percent-clipped=2.0
+2024-08-29 19:37:35,490 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.83 vs. limit=12.0
+2024-08-29 19:37:38,742 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=214698.66666666666, ans=0.1
+2024-08-29 19:37:57,110 INFO [train.py:1114] (2/4) Epoch 17, batch 450, loss[loss=0.2365, simple_loss=0.2915, pruned_loss=0.06436, ctc_loss=0.1319, over 19617.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2752, pruned_loss=0.05186, ctc_loss=0.09807, over 3449884.10 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:38:13,932 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=214805.33333333334, ans=0.125
+2024-08-29 19:38:16,239 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:38:16,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=214805.33333333334, ans=0.2
+2024-08-29 19:38:43,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=214858.66666666666, ans=0.125
+2024-08-29 19:38:47,968 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:39:40,608 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.16 vs. limit=15.0
+2024-08-29 19:40:13,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=215018.66666666666, ans=0.0
+2024-08-29 19:40:15,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215018.66666666666, ans=0.1
+2024-08-29 19:40:17,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=215018.66666666666, ans=0.0
+2024-08-29 19:40:26,571 INFO [train.py:1114] (2/4) Epoch 17, batch 500, loss[loss=0.2139, simple_loss=0.2847, pruned_loss=0.05203, ctc_loss=0.09735, over 19708.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2745, pruned_loss=0.05158, ctc_loss=0.09746, over 3545931.45 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:40:28,006 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=215072.0, ans=0.2
+2024-08-29 19:40:33,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=215072.0, ans=0.125
+2024-08-29 19:41:41,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=215125.33333333334, ans=0.125
+2024-08-29 19:41:41,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=215125.33333333334, ans=10.0
+2024-08-29 19:42:01,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=215178.66666666666, ans=0.125
+2024-08-29 19:42:03,261 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.46 vs. limit=15.0
+2024-08-29 19:42:27,304 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=215178.66666666666, ans=0.0
+2024-08-29 19:42:38,137 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.765e+02 1.983e+02 2.603e+02 4.687e+02, threshold=3.966e+02, percent-clipped=3.0
+2024-08-29 19:42:38,918 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.40 vs. limit=15.0
+2024-08-29 19:43:34,737 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=215285.33333333334, ans=0.025
+2024-08-29 19:43:45,815 INFO [train.py:1114] (2/4) Epoch 17, batch 550, loss[loss=0.212, simple_loss=0.2756, pruned_loss=0.05377, ctc_loss=0.1021, over 19279.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2741, pruned_loss=0.05138, ctc_loss=0.0971, over 3608240.69 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-29 19:45:01,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=215392.0, ans=0.0
+2024-08-29 19:45:58,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=215498.66666666666, ans=0.125
+2024-08-29 19:46:13,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=215552.0, ans=0.125
+2024-08-29 19:46:53,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=215552.0, ans=0.125
+2024-08-29 19:47:01,417 INFO [train.py:1114] (2/4) Epoch 17, batch 600, loss[loss=0.247, simple_loss=0.3067, pruned_loss=0.06831, ctc_loss=0.1265, over 19456.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2746, pruned_loss=0.05162, ctc_loss=0.09735, over 3666029.34 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:47:08,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=215605.33333333334, ans=0.0
+2024-08-29 19:47:48,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=215658.66666666666, ans=0.0
+2024-08-29 19:48:03,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=215712.0, ans=0.2
+2024-08-29 19:48:19,070 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.690e+02 1.951e+02 2.307e+02 4.172e+02, threshold=3.901e+02, percent-clipped=2.0
+2024-08-29 19:49:21,617 INFO [train.py:1114] (2/4) Epoch 17, batch 650, loss[loss=0.2126, simple_loss=0.2809, pruned_loss=0.05252, ctc_loss=0.09804, over 19776.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2742, pruned_loss=0.05145, ctc_loss=0.09681, over 3716225.09 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:49:44,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=215925.33333333334, ans=0.0
+2024-08-29 19:51:27,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=216085.33333333334, ans=0.0
+2024-08-29 19:51:32,000 INFO [train.py:1114] (2/4) Epoch 17, batch 700, loss[loss=0.202, simple_loss=0.269, pruned_loss=0.04898, ctc_loss=0.09263, over 19707.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2751, pruned_loss=0.05211, ctc_loss=0.09801, over 3747768.85 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:51:35,437 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216138.66666666666, ans=0.1
+2024-08-29 19:51:36,932 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.19 vs. limit=15.0
+2024-08-29 19:51:42,772 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.19 vs. limit=15.0
+2024-08-29 19:52:43,602 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.757e+02 1.978e+02 2.439e+02 3.670e+02, threshold=3.956e+02, percent-clipped=0.0
+2024-08-29 19:53:30,412 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.26 vs. limit=12.0
+2024-08-29 19:53:31,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216298.66666666666, ans=0.1
+2024-08-29 19:53:46,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=216405.33333333334, ans=0.2
+2024-08-29 19:53:46,882 INFO [train.py:1114] (2/4) Epoch 17, batch 750, loss[loss=0.179, simple_loss=0.2559, pruned_loss=0.03701, ctc_loss=0.07007, over 19516.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2745, pruned_loss=0.0519, ctc_loss=0.09765, over 3774513.43 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:54:28,783 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.46 vs. limit=15.0
+2024-08-29 19:55:53,840 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=216512.0, ans=0.125
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-3
new file mode 100644
index 0000000000000000000000000000000000000000..a819b776d3bc4bf1105e74276aa574012e838552
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-29-13-08-38-3
@@ -0,0 +1,1138 @@
+2024-08-29 13:08:38,566 INFO [train.py:1182] (3/4) Training started
+2024-08-29 13:09:26,759 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-29 13:09:26,762 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2549.int.cedar.computecanada.ca', 'IP address': '172.16.145.242'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 14, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-29 13:09:26,762 INFO [train.py:1212] (3/4) About to create model
+2024-08-29 13:09:27,468 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-29 13:09:27,468 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-13.pt
+2024-08-29 13:09:28,644 INFO [train.py:1231] (3/4) Using DDP
+2024-08-29 13:09:40,407 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-29 13:09:40,535 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-29 13:09:40,535 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-29 13:09:40,690 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-29 13:09:40,691 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-29 13:09:42,272 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-29 13:09:42,276 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-29 13:09:42,374 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-29 13:09:42,447 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-29 13:09:42,768 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-29 13:09:42,877 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-29 13:14:18,566 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-29 13:14:21,279 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-29 13:14:38,612 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-29 13:14:39,590 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=256, metric=7.92 vs. limit=7.5
+2024-08-29 13:14:45,670 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 13:15:10,807 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 13:15:12,348 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 13:15:12,368 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-29 13:16:12,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=172581.33333333334, ans=0.04949747468305833
+2024-08-29 13:16:15,181 INFO [train.py:1114] (3/4) Epoch 14, batch 0, loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2615, pruned_loss=0.05192, ctc_loss=0.09419, over 19809.00 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:16:15,181 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-29 13:16:26,276 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([0.1110, 2.9698, 3.4716, 2.6793], device='cuda:3')
+2024-08-29 13:16:31,394 INFO [train.py:1146] (3/4) Epoch 14, validation: loss=0.1913, simple_loss=0.2789, pruned_loss=0.03846, ctc_loss=0.06724, over 944034.00 frames.
+2024-08-29 13:16:31,395 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-29 13:24:21,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=172688.0, ans=0.025
+2024-08-29 13:26:36,728 INFO [train.py:1114] (3/4) Epoch 14, batch 50, loss[loss=0.1855, simple_loss=0.2522, pruned_loss=0.04355, ctc_loss=0.07898, over 19744.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.287, pruned_loss=0.06114, ctc_loss=0.1164, over 844617.17 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:27:17,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=172901.33333333334, ans=0.1
+2024-08-29 13:30:21,683 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.70 vs. limit=15.0
+2024-08-29 13:32:00,551 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=19.07 vs. limit=22.5
+2024-08-29 13:32:29,768 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.749e+02 1.974e+02 2.504e+02 4.970e+02, threshold=3.948e+02, percent-clipped=4.0
+2024-08-29 13:32:58,201 INFO [train.py:1114] (3/4) Epoch 14, batch 100, loss[loss=0.223, simple_loss=0.2842, pruned_loss=0.05935, ctc_loss=0.1077, over 19736.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.2877, pruned_loss=0.06091, ctc_loss=0.1149, over 1499497.26 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:33:33,729 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.33 vs. limit=22.5
+2024-08-29 13:34:41,855 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=173274.66666666666, ans=0.125
+2024-08-29 13:35:59,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=173328.0, ans=0.125
+2024-08-29 13:36:02,982 INFO [train.py:1114] (3/4) Epoch 14, batch 150, loss[loss=0.1819, simple_loss=0.2454, pruned_loss=0.04306, ctc_loss=0.08076, over 19682.00 frames. ], tot_loss[loss=0.224, simple_loss=0.285, pruned_loss=0.05922, ctc_loss=0.1115, over 2027234.41 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:36:04,476 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=173381.33333333334, ans=0.125
+2024-08-29 13:36:05,468 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=173381.33333333334, ans=0.0
+2024-08-29 13:36:21,921 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=173434.66666666666, ans=0.09899494936611666
+2024-08-29 13:36:24,570 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.01 vs. limit=15.0
+2024-08-29 13:37:07,845 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=173541.33333333334, ans=0.0
+2024-08-29 13:37:15,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=173541.33333333334, ans=0.025
+2024-08-29 13:37:16,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=173541.33333333334, ans=0.1
+2024-08-29 13:37:19,616 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.730e+02 2.035e+02 2.422e+02 3.683e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-29 13:37:22,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=173594.66666666666, ans=0.0
+2024-08-29 13:37:30,510 INFO [train.py:1114] (3/4) Epoch 14, batch 200, loss[loss=0.2587, simple_loss=0.3053, pruned_loss=0.07685, ctc_loss=0.146, over 18104.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.2834, pruned_loss=0.05883, ctc_loss=0.1107, over 2434650.11 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:37:40,193 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.24 vs. limit=15.0
+2024-08-29 13:37:44,398 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=173701.33333333334, ans=15.0
+2024-08-29 13:37:48,410 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=173701.33333333334, ans=0.0
+2024-08-29 13:38:48,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=173754.66666666666, ans=0.0
+2024-08-29 13:41:34,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=173861.33333333334, ans=0.1
+2024-08-29 13:42:18,842 INFO [train.py:1114] (3/4) Epoch 14, batch 250, loss[loss=0.2527, simple_loss=0.3082, pruned_loss=0.07224, ctc_loss=0.1318, over 19411.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2833, pruned_loss=0.05876, ctc_loss=0.1106, over 2753935.14 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:43:10,601 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=173914.66666666666, ans=0.0
+2024-08-29 13:43:15,084 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=173914.66666666666, ans=0.125
+2024-08-29 13:43:49,657 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.08 vs. limit=12.0
+2024-08-29 13:43:53,343 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.69 vs. limit=15.0
+2024-08-29 13:44:12,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=174128.0, ans=0.035
+2024-08-29 13:44:13,475 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.787e+02 2.022e+02 2.717e+02 4.953e+02, threshold=4.043e+02, percent-clipped=2.0
+2024-08-29 13:44:52,057 INFO [train.py:1114] (3/4) Epoch 14, batch 300, loss[loss=0.2255, simple_loss=0.2851, pruned_loss=0.06144, ctc_loss=0.1077, over 19518.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2821, pruned_loss=0.05811, ctc_loss=0.109, over 2999168.00 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 32.0
+2024-08-29 13:45:55,729 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=174394.66666666666, ans=0.025
+2024-08-29 13:46:17,856 INFO [train.py:1114] (3/4) Epoch 14, batch 350, loss[loss=0.1967, simple_loss=0.2568, pruned_loss=0.05007, ctc_loss=0.09137, over 19738.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2825, pruned_loss=0.05811, ctc_loss=0.1089, over 3190097.11 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 16.0
+2024-08-29 13:47:22,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-29 13:47:31,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=174608.0, ans=0.2
+2024-08-29 13:47:39,425 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.793e+02 2.058e+02 2.658e+02 4.429e+02, threshold=4.116e+02, percent-clipped=3.0
+2024-08-29 13:48:31,277 INFO [train.py:1114] (3/4) Epoch 14, batch 400, loss[loss=0.2272, simple_loss=0.2911, pruned_loss=0.0604, ctc_loss=0.1059, over 19499.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2822, pruned_loss=0.05793, ctc_loss=0.1086, over 3342697.91 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:48:51,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=174768.0, ans=0.125
+2024-08-29 13:50:13,245 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=174821.33333333334, ans=0.125
+2024-08-29 13:50:47,576 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.98 vs. limit=15.0
+2024-08-29 13:50:57,540 INFO [train.py:1114] (3/4) Epoch 14, batch 450, loss[loss=0.2348, simple_loss=0.292, pruned_loss=0.06435, ctc_loss=0.1222, over 19617.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2822, pruned_loss=0.05793, ctc_loss=0.1088, over 3451257.46 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:51:01,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=174981.33333333334, ans=0.0
+2024-08-29 13:51:10,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=175034.66666666666, ans=0.0
+2024-08-29 13:51:38,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=175141.33333333334, ans=0.025
+2024-08-29 13:51:42,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=175141.33333333334, ans=0.125
+2024-08-29 13:51:50,568 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.713e+02 1.900e+02 2.415e+02 4.159e+02, threshold=3.800e+02, percent-clipped=2.0
+2024-08-29 13:52:16,300 INFO [train.py:1114] (3/4) Epoch 14, batch 500, loss[loss=0.2389, simple_loss=0.2998, pruned_loss=0.06514, ctc_loss=0.1196, over 19678.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2817, pruned_loss=0.05785, ctc_loss=0.1089, over 3547006.36 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:52:47,349 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=175354.66666666666, ans=0.95
+2024-08-29 13:52:56,723 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=175354.66666666666, ans=0.0
+2024-08-29 13:53:06,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff2.min_abs, batch_count=175408.0, ans=0.1
+2024-08-29 13:53:11,315 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=175461.33333333334, ans=0.07
+2024-08-29 13:53:23,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=175514.66666666666, ans=0.125
+2024-08-29 13:53:23,935 INFO [train.py:1114] (3/4) Epoch 14, batch 550, loss[loss=0.2486, simple_loss=0.3011, pruned_loss=0.07156, ctc_loss=0.1324, over 19229.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.282, pruned_loss=0.05805, ctc_loss=0.1092, over 3608381.59 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:53:26,093 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.72 vs. limit=15.0
+2024-08-29 13:53:35,361 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=175568.0, ans=0.0
+2024-08-29 13:54:10,964 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=175674.66666666666, ans=0.1
+2024-08-29 13:54:18,068 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.725e+02 1.963e+02 2.348e+02 4.063e+02, threshold=3.927e+02, percent-clipped=2.0
+2024-08-29 13:54:20,964 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=175728.0, ans=0.025
+2024-08-29 13:54:28,216 INFO [train.py:1114] (3/4) Epoch 14, batch 600, loss[loss=0.2343, simple_loss=0.2965, pruned_loss=0.06367, ctc_loss=0.1122, over 19367.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2821, pruned_loss=0.05784, ctc_loss=0.1087, over 3664729.30 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:54:32,235 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=175781.33333333334, ans=0.2
+2024-08-29 13:54:34,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=175781.33333333334, ans=0.2
+2024-08-29 13:54:40,443 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-29 13:54:55,677 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=175888.0, ans=0.2
+2024-08-29 13:54:58,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=175888.0, ans=0.5
+2024-08-29 13:55:23,462 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.78 vs. limit=15.0
+2024-08-29 13:55:30,812 INFO [train.py:1114] (3/4) Epoch 14, batch 650, loss[loss=0.2182, simple_loss=0.282, pruned_loss=0.05675, ctc_loss=0.1024, over 19768.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2805, pruned_loss=0.05671, ctc_loss=0.1066, over 3715303.48 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:55:42,090 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=176101.33333333334, ans=0.2
+2024-08-29 13:55:45,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=176101.33333333334, ans=0.035
+2024-08-29 13:55:46,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=176101.33333333334, ans=0.015
+2024-08-29 13:56:06,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=176154.66666666666, ans=0.125
+2024-08-29 13:56:23,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=176261.33333333334, ans=10.0
+2024-08-29 13:56:24,632 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.743e+02 2.058e+02 2.560e+02 4.338e+02, threshold=4.116e+02, percent-clipped=4.0
+2024-08-29 13:56:33,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=176314.66666666666, ans=0.0
+2024-08-29 13:56:34,654 INFO [train.py:1114] (3/4) Epoch 14, batch 700, loss[loss=0.2153, simple_loss=0.2742, pruned_loss=0.05627, ctc_loss=0.1095, over 19748.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2809, pruned_loss=0.05674, ctc_loss=0.1068, over 3747071.82 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:56:42,585 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.71 vs. limit=15.0
+2024-08-29 13:57:36,591 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=176421.33333333334, ans=0.125
+2024-08-29 13:57:59,079 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=176474.66666666666, ans=0.2
+2024-08-29 13:58:12,843 INFO [train.py:1114] (3/4) Epoch 14, batch 750, loss[loss=0.2152, simple_loss=0.2786, pruned_loss=0.0547, ctc_loss=0.1058, over 19497.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.281, pruned_loss=0.05692, ctc_loss=0.107, over 3774604.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:58:14,473 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=176581.33333333334, ans=0.0
+2024-08-29 13:58:15,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=176581.33333333334, ans=0.0
+2024-08-29 13:58:19,498 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.39 vs. limit=10.0
+2024-08-29 13:58:22,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=176581.33333333334, ans=0.0
+2024-08-29 13:58:24,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=176634.66666666666, ans=0.025
+2024-08-29 13:58:53,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=176741.33333333334, ans=0.125
+2024-08-29 13:59:06,505 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.910e+02 2.277e+02 2.884e+02 4.780e+02, threshold=4.554e+02, percent-clipped=3.0
+2024-08-29 13:59:20,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=176794.66666666666, ans=0.025
+2024-08-29 13:59:27,098 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.90 vs. limit=22.5
+2024-08-29 13:59:28,726 INFO [train.py:1114] (3/4) Epoch 14, batch 800, loss[loss=0.1844, simple_loss=0.2463, pruned_loss=0.0456, ctc_loss=0.07821, over 19821.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2808, pruned_loss=0.05683, ctc_loss=0.1066, over 3796395.50 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 13:59:39,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=176848.0, ans=0.2
+2024-08-29 14:01:03,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=176954.66666666666, ans=0.025
+2024-08-29 14:01:13,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=176954.66666666666, ans=0.025
+2024-08-29 14:02:27,996 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177008.0, ans=0.1
+2024-08-29 14:02:31,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=177008.0, ans=0.1
+2024-08-29 14:02:49,613 INFO [train.py:1114] (3/4) Epoch 14, batch 850, loss[loss=0.2321, simple_loss=0.3057, pruned_loss=0.05655, ctc_loss=0.1137, over 19649.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2811, pruned_loss=0.05717, ctc_loss=0.1073, over 3815210.30 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:03:11,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=177168.0, ans=0.125
+2024-08-29 14:03:40,323 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.703e+02 1.970e+02 2.385e+02 3.831e+02, threshold=3.939e+02, percent-clipped=0.0
+2024-08-29 14:03:46,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=177328.0, ans=0.125
+2024-08-29 14:03:49,900 INFO [train.py:1114] (3/4) Epoch 14, batch 900, loss[loss=0.1848, simple_loss=0.2528, pruned_loss=0.04288, ctc_loss=0.0775, over 19814.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.282, pruned_loss=0.05782, ctc_loss=0.1082, over 3820149.52 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:03:50,590 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.94 vs. limit=15.0
+2024-08-29 14:04:12,009 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.53 vs. limit=15.0
+2024-08-29 14:04:24,970 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-29 14:04:35,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177541.33333333334, ans=0.1
+2024-08-29 14:04:46,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=177594.66666666666, ans=0.0
+2024-08-29 14:04:52,321 INFO [train.py:1114] (3/4) Epoch 14, batch 950, loss[loss=0.2287, simple_loss=0.2813, pruned_loss=0.06452, ctc_loss=0.1176, over 19503.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2823, pruned_loss=0.0578, ctc_loss=0.1083, over 3821034.01 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-29 14:05:06,045 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=177701.33333333334, ans=0.035
+2024-08-29 14:05:08,504 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177701.33333333334, ans=0.1
+2024-08-29 14:05:13,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=177701.33333333334, ans=0.1
+2024-08-29 14:05:13,573 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=177701.33333333334, ans=15.0
+2024-08-29 14:05:26,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177754.66666666666, ans=0.1
+2024-08-29 14:05:30,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177754.66666666666, ans=0.1
+2024-08-29 14:06:19,917 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.335e+02 1.740e+02 1.996e+02 2.581e+02 3.979e+02, threshold=3.992e+02, percent-clipped=2.0
+2024-08-29 14:07:04,893 INFO [train.py:1114] (3/4) Epoch 14, batch 1000, loss[loss=0.2216, simple_loss=0.282, pruned_loss=0.05833, ctc_loss=0.1115, over 19846.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2838, pruned_loss=0.0586, ctc_loss=0.1099, over 3817118.64 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:07:14,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=177914.66666666666, ans=0.0
+2024-08-29 14:08:08,172 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177968.0, ans=0.1
+2024-08-29 14:08:25,843 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=178021.33333333334, ans=0.2
+2024-08-29 14:08:30,659 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=178074.66666666666, ans=0.0
+2024-08-29 14:08:30,978 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.24 vs. limit=15.0
+2024-08-29 14:08:34,515 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.70 vs. limit=10.0
+2024-08-29 14:08:46,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=178128.0, ans=0.0
+2024-08-29 14:08:56,372 INFO [train.py:1114] (3/4) Epoch 14, batch 1050, loss[loss=0.2197, simple_loss=0.2838, pruned_loss=0.05698, ctc_loss=0.1039, over 19823.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2829, pruned_loss=0.05817, ctc_loss=0.109, over 3824209.19 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:09:01,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=178181.33333333334, ans=0.125
+2024-08-29 14:09:03,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=178181.33333333334, ans=0.5
+2024-08-29 14:09:08,802 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.27 vs. limit=22.5
+2024-08-29 14:09:37,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=178341.33333333334, ans=0.125
+2024-08-29 14:09:42,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=178341.33333333334, ans=0.125
+2024-08-29 14:09:46,184 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.45 vs. limit=10.0
+2024-08-29 14:09:46,658 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.811e+02 2.215e+02 2.668e+02 4.320e+02, threshold=4.429e+02, percent-clipped=1.0
+2024-08-29 14:10:24,264 INFO [train.py:1114] (3/4) Epoch 14, batch 1100, loss[loss=0.2127, simple_loss=0.2816, pruned_loss=0.05215, ctc_loss=0.09853, over 19572.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2824, pruned_loss=0.05782, ctc_loss=0.1086, over 3832090.41 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:13:47,435 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=178501.33333333334, ans=0.0
+2024-08-29 14:16:19,653 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:18:53,741 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=178661.33333333334, ans=0.125
+2024-08-29 14:19:15,474 INFO [train.py:1114] (3/4) Epoch 14, batch 1150, loss[loss=0.238, simple_loss=0.2916, pruned_loss=0.0673, ctc_loss=0.1242, over 19592.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.282, pruned_loss=0.05762, ctc_loss=0.1083, over 3830063.13 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:19:28,580 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178714.66666666666, ans=0.1
+2024-08-29 14:19:46,475 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=178768.0, ans=0.0
+2024-08-29 14:19:46,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=178768.0, ans=0.125
+2024-08-29 14:22:13,328 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 1.701e+02 1.876e+02 2.352e+02 3.362e+02, threshold=3.753e+02, percent-clipped=0.0
+2024-08-29 14:22:33,806 INFO [train.py:1114] (3/4) Epoch 14, batch 1200, loss[loss=0.2031, simple_loss=0.278, pruned_loss=0.04622, ctc_loss=0.08951, over 19835.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2832, pruned_loss=0.0583, ctc_loss=0.1096, over 3826323.75 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:23:27,234 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178981.33333333334, ans=0.1
+2024-08-29 14:24:14,678 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:24:14,819 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-29 14:24:21,804 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=179088.0, ans=0.125
+2024-08-29 14:24:24,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.93 vs. limit=15.0
+2024-08-29 14:24:27,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=179088.0, ans=0.125
+2024-08-29 14:24:33,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=179088.0, ans=0.125
+2024-08-29 14:25:42,514 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.66 vs. limit=15.0
+2024-08-29 14:29:53,988 INFO [train.py:1114] (3/4) Epoch 14, batch 1250, loss[loss=0.2471, simple_loss=0.3037, pruned_loss=0.06953, ctc_loss=0.1283, over 19526.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2837, pruned_loss=0.05835, ctc_loss=0.1096, over 3844490.95 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:30:29,406 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=179248.0, ans=0.125
+2024-08-29 14:31:44,067 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=179248.0, ans=0.125
+2024-08-29 14:31:59,559 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=179301.33333333334, ans=0.025
+2024-08-29 14:32:26,316 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.45 vs. limit=15.0
+2024-08-29 14:32:30,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=179408.0, ans=0.125
+2024-08-29 14:32:41,058 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.718e+02 2.120e+02 2.679e+02 4.271e+02, threshold=4.240e+02, percent-clipped=3.0
+2024-08-29 14:32:48,996 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.72 vs. limit=22.5
+2024-08-29 14:32:52,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-29 14:33:10,211 INFO [train.py:1114] (3/4) Epoch 14, batch 1300, loss[loss=0.2511, simple_loss=0.3026, pruned_loss=0.07268, ctc_loss=0.1357, over 18977.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.282, pruned_loss=0.05726, ctc_loss=0.1076, over 3849016.59 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:33:58,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=179514.66666666666, ans=0.1
+2024-08-29 14:33:59,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-29 14:34:06,193 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=179568.0, ans=0.2
+2024-08-29 14:34:15,193 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=179568.0, ans=0.1
+2024-08-29 14:35:11,194 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179674.66666666666, ans=0.125
+2024-08-29 14:35:41,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=179781.33333333334, ans=0.125
+2024-08-29 14:35:42,424 INFO [train.py:1114] (3/4) Epoch 14, batch 1350, loss[loss=0.202, simple_loss=0.2723, pruned_loss=0.04809, ctc_loss=0.0885, over 19782.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2817, pruned_loss=0.05715, ctc_loss=0.1073, over 3860373.05 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:36:00,146 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:36:04,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=179781.33333333334, ans=0.0
+2024-08-29 14:36:12,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=179834.66666666666, ans=0.0
+2024-08-29 14:36:14,003 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.46 vs. limit=6.0
+2024-08-29 14:40:27,057 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:40:29,505 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.654e+02 1.881e+02 2.431e+02 4.376e+02, threshold=3.761e+02, percent-clipped=1.0
+2024-08-29 14:40:34,587 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=179994.66666666666, ans=0.125
+2024-08-29 14:41:36,143 INFO [train.py:1114] (3/4) Epoch 14, batch 1400, loss[loss=0.1769, simple_loss=0.2384, pruned_loss=0.04185, ctc_loss=0.07916, over 19689.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2816, pruned_loss=0.05728, ctc_loss=0.1076, over 3866720.71 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:41:52,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=180101.33333333334, ans=0.125
+2024-08-29 14:42:00,347 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 14:42:17,927 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=180208.0, ans=0.125
+2024-08-29 14:42:25,315 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.93 vs. limit=15.0
+2024-08-29 14:42:35,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=180261.33333333334, ans=0.0
+2024-08-29 14:42:37,761 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=180261.33333333334, ans=0.0
+2024-08-29 14:42:38,123 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.27 vs. limit=15.0
+2024-08-29 14:42:39,832 INFO [train.py:1114] (3/4) Epoch 14, batch 1450, loss[loss=0.2168, simple_loss=0.2834, pruned_loss=0.05551, ctc_loss=0.09833, over 19664.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2826, pruned_loss=0.05775, ctc_loss=0.1085, over 3864940.50 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:42:54,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=180368.0, ans=0.125
+2024-08-29 14:43:05,176 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=180421.33333333334, ans=0.05
+2024-08-29 14:44:05,187 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=180474.66666666666, ans=0.0
+2024-08-29 14:44:06,457 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=180474.66666666666, ans=0.2
+2024-08-29 14:44:19,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=180528.0, ans=0.0
+2024-08-29 14:44:19,802 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.272e+02 1.699e+02 1.929e+02 2.254e+02 4.469e+02, threshold=3.859e+02, percent-clipped=1.0
+2024-08-29 14:45:04,652 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=180528.0, ans=0.05
+2024-08-29 14:45:06,812 INFO [train.py:1114] (3/4) Epoch 14, batch 1500, loss[loss=0.2386, simple_loss=0.3042, pruned_loss=0.06322, ctc_loss=0.1166, over 19584.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2823, pruned_loss=0.05748, ctc_loss=0.1079, over 3864728.47 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:45:24,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=180634.66666666666, ans=0.025
+2024-08-29 14:45:58,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=180688.0, ans=0.1
+2024-08-29 14:46:08,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=180741.33333333334, ans=0.0
+2024-08-29 14:46:12,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=180741.33333333334, ans=0.2
+2024-08-29 14:46:24,184 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=180794.66666666666, ans=0.125
+2024-08-29 14:46:27,471 INFO [train.py:1114] (3/4) Epoch 14, batch 1550, loss[loss=0.2506, simple_loss=0.3044, pruned_loss=0.07213, ctc_loss=0.1315, over 19609.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2824, pruned_loss=0.05758, ctc_loss=0.1082, over 3848764.53 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:46:46,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=180901.33333333334, ans=0.2
+2024-08-29 14:46:56,932 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.67 vs. limit=15.0
+2024-08-29 14:47:01,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=180954.66666666666, ans=0.95
+2024-08-29 14:47:48,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=181008.0, ans=0.0
+2024-08-29 14:48:37,408 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.702e+02 2.011e+02 2.397e+02 3.479e+02, threshold=4.023e+02, percent-clipped=0.0
+2024-08-29 14:48:47,138 INFO [train.py:1114] (3/4) Epoch 14, batch 1600, loss[loss=0.2187, simple_loss=0.2823, pruned_loss=0.05514, ctc_loss=0.1118, over 19841.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2818, pruned_loss=0.05749, ctc_loss=0.1079, over 3837124.52 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-29 14:49:03,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=181168.0, ans=0.2
+2024-08-29 14:49:03,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=181168.0, ans=0.04949747468305833
+2024-08-29 14:49:51,881 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.54 vs. limit=15.0
+2024-08-29 14:50:20,257 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=181221.33333333334, ans=0.025
+2024-08-29 14:51:28,765 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=181381.33333333334, ans=0.0
+2024-08-29 14:51:29,815 INFO [train.py:1114] (3/4) Epoch 14, batch 1650, loss[loss=0.2281, simple_loss=0.2885, pruned_loss=0.05934, ctc_loss=0.1226, over 19635.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2821, pruned_loss=0.05769, ctc_loss=0.1084, over 3834583.61 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:51:40,938 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=181381.33333333334, ans=0.125
+2024-08-29 14:52:19,831 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.07 vs. limit=15.0
+2024-08-29 14:52:28,554 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.808e+02 2.247e+02 2.720e+02 5.029e+02, threshold=4.494e+02, percent-clipped=3.0
+2024-08-29 14:52:29,389 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.59 vs. limit=15.0
+2024-08-29 14:52:35,002 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.67 vs. limit=22.5
+2024-08-29 14:52:38,137 INFO [train.py:1114] (3/4) Epoch 14, batch 1700, loss[loss=0.2017, simple_loss=0.2567, pruned_loss=0.05132, ctc_loss=0.1103, over 19673.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2818, pruned_loss=0.05741, ctc_loss=0.1081, over 3848471.89 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:52:41,017 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.09 vs. limit=15.0
+2024-08-29 14:52:45,387 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=181648.0, ans=0.125
+2024-08-29 14:52:49,269 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.89 vs. limit=10.0
+2024-08-29 14:53:19,590 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=181808.0, ans=0.2
+2024-08-29 14:53:23,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=181861.33333333334, ans=0.0
+2024-08-29 14:53:23,281 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.01 vs. limit=15.0
+2024-08-29 14:53:46,197 INFO [train.py:1114] (3/4) Epoch 14, batch 1750, loss[loss=0.2285, simple_loss=0.2731, pruned_loss=0.06732, ctc_loss=0.123, over 19696.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2814, pruned_loss=0.05725, ctc_loss=0.1078, over 3854184.62 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:53:58,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=181968.0, ans=0.2
+2024-08-29 14:54:37,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=182021.33333333334, ans=0.125
+2024-08-29 14:55:00,509 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=182021.33333333334, ans=0.0
+2024-08-29 14:55:03,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=182074.66666666666, ans=0.025
+2024-08-29 14:55:10,231 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=182074.66666666666, ans=0.2
+2024-08-29 14:56:25,491 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.791e+02 2.085e+02 2.712e+02 5.021e+02, threshold=4.170e+02, percent-clipped=2.0
+2024-08-29 14:56:34,703 INFO [train.py:1114] (3/4) Epoch 14, batch 1800, loss[loss=0.2298, simple_loss=0.2925, pruned_loss=0.06015, ctc_loss=0.1169, over 19601.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2814, pruned_loss=0.05704, ctc_loss=0.1072, over 3855590.03 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:57:15,605 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.75 vs. limit=15.0
+2024-08-29 14:57:22,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182234.66666666666, ans=0.1
+2024-08-29 14:57:26,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=182234.66666666666, ans=0.125
+2024-08-29 14:57:31,015 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-29 14:57:42,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182288.0, ans=0.1
+2024-08-29 14:57:54,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=182394.66666666666, ans=0.0
+2024-08-29 14:58:03,232 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=182394.66666666666, ans=0.1
+2024-08-29 14:58:05,519 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=182394.66666666666, ans=0.025
+2024-08-29 14:58:07,414 INFO [train.py:1114] (3/4) Epoch 14, batch 1850, loss[loss=0.2225, simple_loss=0.2851, pruned_loss=0.05743, ctc_loss=0.1128, over 19599.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2815, pruned_loss=0.05695, ctc_loss=0.107, over 3858221.10 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 14:58:16,971 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=182448.0, ans=0.0
+2024-08-29 15:00:44,653 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182608.0, ans=0.1
+2024-08-29 15:03:25,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=182608.0, ans=0.0
+2024-08-29 15:03:29,627 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.679e+02 1.934e+02 2.278e+02 6.084e+02, threshold=3.868e+02, percent-clipped=1.0
+2024-08-29 15:03:40,813 INFO [train.py:1114] (3/4) Epoch 14, batch 1900, loss[loss=0.2269, simple_loss=0.2936, pruned_loss=0.05862, ctc_loss=0.1076, over 19678.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2822, pruned_loss=0.05703, ctc_loss=0.1074, over 3862304.89 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:03:51,537 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.52 vs. limit=15.0
+2024-08-29 15:03:55,601 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=182768.0, ans=0.125
+2024-08-29 15:04:35,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=182821.33333333334, ans=0.125
+2024-08-29 15:04:43,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=182874.66666666666, ans=0.0
+2024-08-29 15:04:52,389 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=182928.0, ans=0.0
+2024-08-29 15:05:18,943 INFO [train.py:1114] (3/4) Epoch 14, batch 1950, loss[loss=0.2073, simple_loss=0.268, pruned_loss=0.05295, ctc_loss=0.1015, over 19580.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2834, pruned_loss=0.05746, ctc_loss=0.108, over 3871186.13 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:05:39,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=183034.66666666666, ans=0.0
+2024-08-29 15:05:41,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=183088.0, ans=0.0
+2024-08-29 15:05:42,351 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.83 vs. limit=22.5
+2024-08-29 15:05:57,860 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=183141.33333333334, ans=0.125
+2024-08-29 15:06:06,635 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.683e+02 1.939e+02 2.319e+02 3.642e+02, threshold=3.877e+02, percent-clipped=0.0
+2024-08-29 15:06:44,090 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=183194.66666666666, ans=0.125
+2024-08-29 15:06:48,390 INFO [train.py:1114] (3/4) Epoch 14, batch 2000, loss[loss=0.1939, simple_loss=0.2571, pruned_loss=0.04752, ctc_loss=0.08904, over 19655.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2836, pruned_loss=0.0576, ctc_loss=0.1085, over 3856216.08 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:07:00,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=183301.33333333334, ans=0.125
+2024-08-29 15:07:06,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=183301.33333333334, ans=0.5
+2024-08-29 15:07:19,213 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=183354.66666666666, ans=0.05
+2024-08-29 15:07:24,456 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-29 15:07:43,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183461.33333333334, ans=0.1
+2024-08-29 15:07:45,831 INFO [train.py:1114] (3/4) Epoch 14, batch 2050, loss[loss=0.1959, simple_loss=0.2567, pruned_loss=0.04964, ctc_loss=0.08948, over 19725.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2831, pruned_loss=0.05789, ctc_loss=0.1092, over 3853148.84 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:07:52,316 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.34 vs. limit=10.0
+2024-08-29 15:08:39,274 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183621.33333333334, ans=0.1
+2024-08-29 15:09:21,512 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.69 vs. limit=22.5
+2024-08-29 15:09:33,016 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.50 vs. limit=22.5
+2024-08-29 15:09:39,965 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.749e+02 1.987e+02 2.455e+02 3.413e+02, threshold=3.973e+02, percent-clipped=0.0
+2024-08-29 15:09:47,048 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=3.666e-02
+2024-08-29 15:09:48,913 INFO [train.py:1114] (3/4) Epoch 14, batch 2100, loss[loss=0.2216, simple_loss=0.2838, pruned_loss=0.05831, ctc_loss=0.107, over 19763.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2819, pruned_loss=0.05712, ctc_loss=0.1078, over 3859523.11 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:10:21,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=183834.66666666666, ans=0.0
+2024-08-29 15:10:33,852 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=183888.0, ans=0.1
+2024-08-29 15:10:52,285 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=183994.66666666666, ans=0.0
+2024-08-29 15:10:57,751 INFO [train.py:1114] (3/4) Epoch 14, batch 2150, loss[loss=0.215, simple_loss=0.2825, pruned_loss=0.05304, ctc_loss=0.1034, over 19831.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2813, pruned_loss=0.05689, ctc_loss=0.1072, over 3870758.51 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:11:00,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=184048.0, ans=0.125
+2024-08-29 15:11:10,497 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=3.83 vs. limit=12.0
+2024-08-29 15:11:16,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=184101.33333333334, ans=0.125
+2024-08-29 15:11:24,129 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.22 vs. limit=22.5
+2024-08-29 15:11:35,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=184208.0, ans=0.025
+2024-08-29 15:11:44,623 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 1.765e+02 2.209e+02 2.742e+02 6.061e+02, threshold=4.418e+02, percent-clipped=6.0
+2024-08-29 15:12:09,346 INFO [train.py:1114] (3/4) Epoch 14, batch 2200, loss[loss=0.2476, simple_loss=0.3099, pruned_loss=0.06692, ctc_loss=0.1285, over 19593.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2814, pruned_loss=0.05687, ctc_loss=0.1072, over 3869156.33 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:12:10,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=184314.66666666666, ans=0.0
+2024-08-29 15:12:25,112 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=184368.0, ans=0.95
+2024-08-29 15:13:23,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=184474.66666666666, ans=0.1
+2024-08-29 15:13:29,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=184474.66666666666, ans=0.125
+2024-08-29 15:13:33,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=184474.66666666666, ans=0.125
+2024-08-29 15:13:34,617 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=184528.0, ans=0.0
+2024-08-29 15:13:40,085 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=10.25 vs. limit=15.0
+2024-08-29 15:13:47,033 INFO [train.py:1114] (3/4) Epoch 14, batch 2250, loss[loss=0.2288, simple_loss=0.2925, pruned_loss=0.05975, ctc_loss=0.114, over 19619.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2814, pruned_loss=0.05673, ctc_loss=0.107, over 3868179.89 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:13:55,328 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.60 vs. limit=22.5
+2024-08-29 15:14:11,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=184688.0, ans=0.125
+2024-08-29 15:14:13,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=184688.0, ans=0.125
+2024-08-29 15:14:16,368 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.00 vs. limit=15.0
+2024-08-29 15:14:19,610 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184741.33333333334, ans=0.1
+2024-08-29 15:14:28,081 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.78 vs. limit=15.0
+2024-08-29 15:14:33,559 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.78 vs. limit=15.0
+2024-08-29 15:14:34,167 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.796e+02 2.116e+02 2.512e+02 3.767e+02, threshold=4.231e+02, percent-clipped=0.0
+2024-08-29 15:14:34,450 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=184794.66666666666, ans=0.0
+2024-08-29 15:14:39,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=184794.66666666666, ans=0.125
+2024-08-29 15:14:43,288 INFO [train.py:1114] (3/4) Epoch 14, batch 2300, loss[loss=0.2378, simple_loss=0.2894, pruned_loss=0.06759, ctc_loss=0.1278, over 19511.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2812, pruned_loss=0.05736, ctc_loss=0.108, over 3862283.03 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 32.0
+2024-08-29 15:14:49,978 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=184848.0, ans=0.125
+2024-08-29 15:14:52,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184848.0, ans=0.1
+2024-08-29 15:14:56,525 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.25 vs. limit=15.0
+2024-08-29 15:14:58,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=184901.33333333334, ans=0.125
+2024-08-29 15:15:03,313 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=184901.33333333334, ans=0.2
+2024-08-29 15:15:17,572 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.58 vs. limit=22.5
+2024-08-29 15:15:40,915 INFO [train.py:1114] (3/4) Epoch 14, batch 2350, loss[loss=0.2209, simple_loss=0.2846, pruned_loss=0.05787, ctc_loss=0.1038, over 19685.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.281, pruned_loss=0.05727, ctc_loss=0.1076, over 3864424.63 frames. ], batch size: 63, lr: 1.05e-02, grad_scale: 64.0
+2024-08-29 15:15:42,690 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.43 vs. limit=12.0
+2024-08-29 15:15:50,349 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.35 vs. limit=22.5
+2024-08-29 15:16:01,586 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=185168.0, ans=0.125
+2024-08-29 15:16:10,238 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=185221.33333333334, ans=0.125
+2024-08-29 15:16:11,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=185221.33333333334, ans=0.0
+2024-08-29 15:16:18,958 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=185274.66666666666, ans=0.0
+2024-08-29 15:16:28,743 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.724e+02 2.017e+02 2.647e+02 4.792e+02, threshold=4.034e+02, percent-clipped=3.0
+2024-08-29 15:16:36,505 INFO [train.py:1114] (3/4) Epoch 14, batch 2400, loss[loss=0.2076, simple_loss=0.2835, pruned_loss=0.04765, ctc_loss=0.09089, over 19265.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2829, pruned_loss=0.05793, ctc_loss=0.1087, over 3858364.47 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:17:10,420 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=185488.0, ans=0.125
+2024-08-29 15:17:30,948 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=185594.66666666666, ans=0.1
+2024-08-29 15:17:38,575 INFO [train.py:1114] (3/4) Epoch 14, batch 2450, loss[loss=0.2401, simple_loss=0.2918, pruned_loss=0.06864, ctc_loss=0.1279, over 13755.00 frames. ], tot_loss[loss=0.226, simple_loss=0.2863, pruned_loss=0.06023, ctc_loss=0.1132, over 3730663.52 frames. ], batch size: 140, lr: 1.05e-02, grad_scale: 32.0
+2024-08-29 15:17:41,373 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.39 vs. limit=10.0
+2024-08-29 15:17:48,083 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=185648.0, ans=0.2
+2024-08-29 15:17:48,399 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.40 vs. limit=10.0
+2024-08-29 15:17:54,933 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:18:00,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=185754.66666666666, ans=0.0
+2024-08-29 15:18:03,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=185754.66666666666, ans=0.025
+2024-08-29 15:18:18,835 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=10.76 vs. limit=15.0
+2024-08-29 15:19:09,366 INFO [train.py:1114] (3/4) Epoch 15, batch 0, loss[loss=0.2154, simple_loss=0.2655, pruned_loss=0.06015, ctc_loss=0.1124, over 19803.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2655, pruned_loss=0.06015, ctc_loss=0.1124, over 19803.00 frames. ], batch size: 49, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:19:09,367 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-29 15:19:20,880 INFO [train.py:1146] (3/4) Epoch 15, validation: loss=0.1908, simple_loss=0.2785, pruned_loss=0.03825, ctc_loss=0.06651, over 944034.00 frames.
+2024-08-29 15:19:20,880 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13716MB
+2024-08-29 15:19:22,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=185856.0, ans=0.125
+2024-08-29 15:19:25,766 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 1.942e+02 2.136e+02 2.424e+02 3.799e+02, threshold=4.272e+02, percent-clipped=0.0
+2024-08-29 15:19:32,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=185909.33333333334, ans=0.125
+2024-08-29 15:19:38,586 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.22 vs. limit=15.0
+2024-08-29 15:19:56,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=186016.0, ans=0.0
+2024-08-29 15:20:15,882 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:20:25,217 INFO [train.py:1114] (3/4) Epoch 15, batch 50, loss[loss=0.1826, simple_loss=0.2529, pruned_loss=0.0408, ctc_loss=0.07666, over 19728.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2841, pruned_loss=0.05873, ctc_loss=0.1109, over 845438.59 frames. ], batch size: 47, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:20:48,266 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=12.31 vs. limit=15.0
+2024-08-29 15:20:59,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=186229.33333333334, ans=0.0
+2024-08-29 15:21:05,168 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=186282.66666666666, ans=0.125
+2024-08-29 15:21:08,995 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.27 vs. limit=10.0
+2024-08-29 15:21:11,246 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=186282.66666666666, ans=0.09899494936611666
+2024-08-29 15:21:25,436 INFO [train.py:1114] (3/4) Epoch 15, batch 100, loss[loss=0.1999, simple_loss=0.2707, pruned_loss=0.04725, ctc_loss=0.08665, over 19720.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2853, pruned_loss=0.05833, ctc_loss=0.1101, over 1500617.17 frames. ], batch size: 51, lr: 1.02e-02, grad_scale: 32.0
+2024-08-29 15:21:30,077 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.739e+02 1.952e+02 2.450e+02 4.288e+02, threshold=3.904e+02, percent-clipped=1.0
+2024-08-29 15:21:32,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=186389.33333333334, ans=0.0
+2024-08-29 15:21:43,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=186442.66666666666, ans=0.125
+2024-08-29 15:21:46,682 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.79 vs. limit=15.0
+2024-08-29 15:21:50,469 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.67 vs. limit=12.0
+2024-08-29 15:21:59,699 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=186496.0, ans=0.1
+2024-08-29 15:22:29,354 INFO [train.py:1114] (3/4) Epoch 15, batch 150, loss[loss=0.2192, simple_loss=0.2726, pruned_loss=0.06102, ctc_loss=0.1094, over 19726.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2827, pruned_loss=0.05713, ctc_loss=0.1078, over 2029400.20 frames. ], batch size: 47, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:22:36,258 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.94 vs. limit=22.5
+2024-08-29 15:23:12,318 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=186816.0, ans=0.0
+2024-08-29 15:23:28,625 INFO [train.py:1114] (3/4) Epoch 15, batch 200, loss[loss=0.2107, simple_loss=0.2789, pruned_loss=0.05141, ctc_loss=0.09941, over 18291.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2808, pruned_loss=0.0564, ctc_loss=0.1066, over 2436424.74 frames. ], batch size: 85, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:23:29,088 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.04 vs. limit=15.0
+2024-08-29 15:23:44,479 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.690e+02 2.002e+02 2.433e+02 3.884e+02, threshold=4.003e+02, percent-clipped=0.0
+2024-08-29 15:23:57,322 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=186976.0, ans=0.125
+2024-08-29 15:25:01,144 INFO [train.py:1114] (3/4) Epoch 15, batch 250, loss[loss=0.247, simple_loss=0.3024, pruned_loss=0.06897, ctc_loss=0.1339, over 19362.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2819, pruned_loss=0.0572, ctc_loss=0.1081, over 2756645.61 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:25:45,177 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.51 vs. limit=22.5
+2024-08-29 15:25:48,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=187349.33333333334, ans=0.1
+2024-08-29 15:26:33,393 INFO [train.py:1114] (3/4) Epoch 15, batch 300, loss[loss=0.2533, simple_loss=0.3093, pruned_loss=0.07137, ctc_loss=0.1362, over 19527.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2815, pruned_loss=0.0571, ctc_loss=0.1079, over 3001399.26 frames. ], batch size: 61, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:26:38,059 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.706e+02 2.088e+02 2.592e+02 3.748e+02, threshold=4.177e+02, percent-clipped=0.0
+2024-08-29 15:27:24,439 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=187669.33333333334, ans=0.125
+2024-08-29 15:27:30,555 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=187669.33333333334, ans=0.125
+2024-08-29 15:27:34,913 INFO [train.py:1114] (3/4) Epoch 15, batch 350, loss[loss=0.164, simple_loss=0.2346, pruned_loss=0.03411, ctc_loss=0.06262, over 19756.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2814, pruned_loss=0.05687, ctc_loss=0.1075, over 3191349.76 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 16.0
+2024-08-29 15:27:46,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187776.0, ans=0.1
+2024-08-29 15:28:12,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=187829.33333333334, ans=0.125
+2024-08-29 15:28:28,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=187936.0, ans=0.2
+2024-08-29 15:28:32,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=187936.0, ans=0.125
+2024-08-29 15:28:32,244 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=187936.0, ans=0.0
+2024-08-29 15:28:38,897 INFO [train.py:1114] (3/4) Epoch 15, batch 400, loss[loss=0.2196, simple_loss=0.2894, pruned_loss=0.05281, ctc_loss=0.1104, over 19495.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2805, pruned_loss=0.05615, ctc_loss=0.1061, over 3343169.69 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:28:44,507 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.706e+02 2.043e+02 2.587e+02 5.210e+02, threshold=4.085e+02, percent-clipped=2.0
+2024-08-29 15:29:22,825 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.99 vs. limit=15.0
+2024-08-29 15:29:34,933 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.87 vs. limit=15.0
+2024-08-29 15:29:41,955 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:30:07,911 INFO [train.py:1114] (3/4) Epoch 15, batch 450, loss[loss=0.1945, simple_loss=0.2724, pruned_loss=0.042, ctc_loss=0.08143, over 19615.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2807, pruned_loss=0.05627, ctc_loss=0.1062, over 3451307.41 frames. ], batch size: 55, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:30:10,300 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=188256.0, ans=0.04949747468305833
+2024-08-29 15:30:17,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=188256.0, ans=0.025
+2024-08-29 15:30:23,265 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=188309.33333333334, ans=0.125
+2024-08-29 15:30:43,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=188416.0, ans=0.125
+2024-08-29 15:31:07,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=188469.33333333334, ans=0.025
+2024-08-29 15:31:09,330 INFO [train.py:1114] (3/4) Epoch 15, batch 500, loss[loss=0.2358, simple_loss=0.2971, pruned_loss=0.06482, ctc_loss=0.1123, over 19672.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2797, pruned_loss=0.05582, ctc_loss=0.1052, over 3546011.40 frames. ], batch size: 63, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:31:15,115 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.681e+02 1.897e+02 2.177e+02 4.545e+02, threshold=3.794e+02, percent-clipped=1.0
+2024-08-29 15:31:20,317 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=188576.0, ans=0.125
+2024-08-29 15:31:23,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=188576.0, ans=0.0
+2024-08-29 15:31:31,910 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=188629.33333333334, ans=0.125
+2024-08-29 15:32:29,637 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=188682.66666666666, ans=0.2
+2024-08-29 15:32:37,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=188682.66666666666, ans=0.025
+2024-08-29 15:32:45,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=188736.0, ans=0.025
+2024-08-29 15:32:46,788 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.38 vs. limit=15.0
+2024-08-29 15:32:59,020 INFO [train.py:1114] (3/4) Epoch 15, batch 550, loss[loss=0.2556, simple_loss=0.3083, pruned_loss=0.07355, ctc_loss=0.1398, over 19330.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2798, pruned_loss=0.05598, ctc_loss=0.1053, over 3607152.83 frames. ], batch size: 71, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:33:06,871 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=188789.33333333334, ans=0.125
+2024-08-29 15:33:39,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=188789.33333333334, ans=0.025
+2024-08-29 15:33:52,225 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.60 vs. limit=15.0
+2024-08-29 15:33:55,366 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=188896.0, ans=0.09899494936611666
+2024-08-29 15:34:03,197 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=188896.0, ans=0.0
+2024-08-29 15:34:09,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=188949.33333333334, ans=0.0
+2024-08-29 15:34:16,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=188949.33333333334, ans=0.1
+2024-08-29 15:34:30,453 INFO [train.py:1114] (3/4) Epoch 15, batch 600, loss[loss=0.2281, simple_loss=0.2957, pruned_loss=0.05904, ctc_loss=0.1062, over 19413.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2801, pruned_loss=0.05595, ctc_loss=0.105, over 3665156.89 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:34:36,387 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.830e+02 2.111e+02 2.732e+02 4.380e+02, threshold=4.223e+02, percent-clipped=4.0
+2024-08-29 15:34:55,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=189162.66666666666, ans=0.0
+2024-08-29 15:34:58,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=189162.66666666666, ans=0.2
+2024-08-29 15:35:01,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=189162.66666666666, ans=0.1
+2024-08-29 15:35:04,722 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=189162.66666666666, ans=0.07
+2024-08-29 15:35:19,013 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.02 vs. limit=15.0
+2024-08-29 15:35:22,271 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.98 vs. limit=22.5
+2024-08-29 15:35:31,329 INFO [train.py:1114] (3/4) Epoch 15, batch 650, loss[loss=0.2038, simple_loss=0.2714, pruned_loss=0.04943, ctc_loss=0.0932, over 19787.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2789, pruned_loss=0.05506, ctc_loss=0.1037, over 3715693.55 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:35:36,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=189322.66666666666, ans=0.125
+2024-08-29 15:35:40,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=189322.66666666666, ans=0.0
+2024-08-29 15:35:41,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=189322.66666666666, ans=0.125
+2024-08-29 15:35:44,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=189376.0, ans=0.2
+2024-08-29 15:37:58,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:00,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:03,039 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.53 vs. limit=15.0
+2024-08-29 15:38:08,651 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-29 15:38:22,779 INFO [train.py:1114] (3/4) Epoch 15, batch 700, loss[loss=0.2081, simple_loss=0.2704, pruned_loss=0.05327, ctc_loss=0.09811, over 19715.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.28, pruned_loss=0.05574, ctc_loss=0.1048, over 3748440.39 frames. ], batch size: 51, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:38:28,534 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.846e+02 2.430e+02 3.057e+02 4.272e+02, threshold=4.860e+02, percent-clipped=1.0
+2024-08-29 15:38:51,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=189696.0, ans=0.125
+2024-08-29 15:39:25,952 INFO [train.py:1114] (3/4) Epoch 15, batch 750, loss[loss=0.2383, simple_loss=0.2943, pruned_loss=0.06593, ctc_loss=0.1261, over 19493.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2793, pruned_loss=0.05543, ctc_loss=0.1041, over 3775284.96 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:39:30,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=189856.0, ans=0.125
+2024-08-29 15:39:41,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=189909.33333333334, ans=0.0
+2024-08-29 15:39:43,274 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=189909.33333333334, ans=0.125
+2024-08-29 15:39:59,083 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=189962.66666666666, ans=0.0
+2024-08-29 15:40:10,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=190016.0, ans=0.125
+2024-08-29 15:40:15,480 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=6.152e-02
+2024-08-29 15:40:28,197 INFO [train.py:1114] (3/4) Epoch 15, batch 800, loss[loss=0.1963, simple_loss=0.255, pruned_loss=0.04942, ctc_loss=0.09701, over 19789.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2789, pruned_loss=0.05515, ctc_loss=0.1038, over 3796497.36 frames. ], batch size: 49, lr: 1.01e-02, grad_scale: 32.0
+2024-08-29 15:40:34,420 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.728e+02 2.068e+02 2.494e+02 4.984e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-29 15:40:48,019 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=190176.0, ans=0.0
+2024-08-29 15:41:03,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=190282.66666666666, ans=0.1
+2024-08-29 15:41:21,405 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=190336.0, ans=0.1
+2024-08-29 15:41:30,889 INFO [train.py:1114] (3/4) Epoch 15, batch 850, loss[loss=0.2429, simple_loss=0.3039, pruned_loss=0.06593, ctc_loss=0.125, over 19675.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2787, pruned_loss=0.05499, ctc_loss=0.1036, over 3815182.91 frames. ], batch size: 59, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:41:39,439 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=190389.33333333334, ans=0.0
+2024-08-29 15:41:48,851 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=190442.66666666666, ans=0.0
+2024-08-29 15:41:53,973 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.80 vs. limit=15.0
+2024-08-29 15:41:58,267 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:42:13,313 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.69 vs. limit=15.0
+2024-08-29 15:42:34,688 INFO [train.py:1114] (3/4) Epoch 15, batch 900, loss[loss=0.1921, simple_loss=0.2605, pruned_loss=0.04502, ctc_loss=0.08434, over 19821.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2788, pruned_loss=0.05516, ctc_loss=0.104, over 3818290.68 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:42:40,569 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 1.760e+02 2.061e+02 2.441e+02 4.748e+02, threshold=4.121e+02, percent-clipped=4.0
+2024-08-29 15:43:10,275 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=190762.66666666666, ans=0.2
+2024-08-29 15:43:18,988 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.35 vs. limit=22.5
+2024-08-29 15:43:28,547 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=190816.0, ans=0.0
+2024-08-29 15:43:44,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=190869.33333333334, ans=0.1
+2024-08-29 15:43:45,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=190922.66666666666, ans=0.07
+2024-08-29 15:43:47,839 INFO [train.py:1114] (3/4) Epoch 15, batch 950, loss[loss=0.2196, simple_loss=0.2736, pruned_loss=0.06054, ctc_loss=0.1113, over 19516.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2795, pruned_loss=0.05575, ctc_loss=0.1053, over 3821023.95 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:44:03,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=190976.0, ans=0.0
+2024-08-29 15:44:07,533 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=190976.0, ans=0.125
+2024-08-29 15:44:08,012 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=16.95 vs. limit=22.5
+2024-08-29 15:44:21,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=191029.33333333334, ans=0.125
+2024-08-29 15:44:22,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=191029.33333333334, ans=0.125
+2024-08-29 15:44:26,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=191082.66666666666, ans=0.0
+2024-08-29 15:44:40,904 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.92 vs. limit=22.5
+2024-08-29 15:44:48,249 INFO [train.py:1114] (3/4) Epoch 15, batch 1000, loss[loss=0.1821, simple_loss=0.2542, pruned_loss=0.04024, ctc_loss=0.07365, over 19857.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2801, pruned_loss=0.05618, ctc_loss=0.1063, over 3817004.52 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:44:54,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=191189.33333333334, ans=0.0
+2024-08-29 15:44:56,839 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 1.691e+02 1.934e+02 2.300e+02 3.610e+02, threshold=3.869e+02, percent-clipped=0.0
+2024-08-29 15:45:29,742 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.17 vs. limit=15.0
+2024-08-29 15:45:41,591 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=191402.66666666666, ans=0.125
+2024-08-29 15:45:42,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=191402.66666666666, ans=0.125
+2024-08-29 15:45:52,352 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=191456.0, ans=0.0
+2024-08-29 15:45:53,376 INFO [train.py:1114] (3/4) Epoch 15, batch 1050, loss[loss=0.2148, simple_loss=0.2883, pruned_loss=0.05133, ctc_loss=0.09686, over 19829.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2794, pruned_loss=0.05601, ctc_loss=0.1058, over 3822843.87 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:46:21,806 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=191562.66666666666, ans=0.125
+2024-08-29 15:46:25,089 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=191562.66666666666, ans=0.125
+2024-08-29 15:46:36,962 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=191616.0, ans=0.125
+2024-08-29 15:46:53,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=191722.66666666666, ans=0.0
+2024-08-29 15:46:54,816 INFO [train.py:1114] (3/4) Epoch 15, batch 1100, loss[loss=0.1992, simple_loss=0.2714, pruned_loss=0.0457, ctc_loss=0.08912, over 19589.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2797, pruned_loss=0.05596, ctc_loss=0.1057, over 3831076.80 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:46:57,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=191722.66666666666, ans=0.125
+2024-08-29 15:47:17,581 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.746e+02 1.965e+02 2.496e+02 3.903e+02, threshold=3.929e+02, percent-clipped=1.0
+2024-08-29 15:47:17,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=191722.66666666666, ans=0.2
+2024-08-29 15:47:30,067 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=191776.0, ans=0.0
+2024-08-29 15:47:45,791 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=191829.33333333334, ans=0.1
+2024-08-29 15:48:02,186 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=191936.0, ans=0.0
+2024-08-29 15:48:08,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=191936.0, ans=0.125
+2024-08-29 15:48:09,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=191936.0, ans=0.1
+2024-08-29 15:48:09,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=191936.0, ans=0.0
+2024-08-29 15:48:12,572 INFO [train.py:1114] (3/4) Epoch 15, batch 1150, loss[loss=0.2333, simple_loss=0.2886, pruned_loss=0.06437, ctc_loss=0.1231, over 19581.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2798, pruned_loss=0.05598, ctc_loss=0.1055, over 3828649.12 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:48:18,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=191989.33333333334, ans=0.2
+2024-08-29 15:48:21,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=191989.33333333334, ans=0.0
+2024-08-29 15:48:22,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=191989.33333333334, ans=0.0
+2024-08-29 15:48:25,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=192042.66666666666, ans=0.025
+2024-08-29 15:48:29,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=192042.66666666666, ans=0.04949747468305833
+2024-08-29 15:48:31,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=192042.66666666666, ans=0.125
+2024-08-29 15:49:04,857 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.75 vs. limit=22.5
+2024-08-29 15:49:08,002 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=192202.66666666666, ans=0.0
+2024-08-29 15:49:11,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=192202.66666666666, ans=0.1
+2024-08-29 15:49:16,657 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=192202.66666666666, ans=0.1
+2024-08-29 15:49:17,918 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=192202.66666666666, ans=0.0
+2024-08-29 15:49:19,908 INFO [train.py:1114] (3/4) Epoch 15, batch 1200, loss[loss=0.2343, simple_loss=0.2996, pruned_loss=0.06097, ctc_loss=0.1176, over 19844.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2812, pruned_loss=0.05674, ctc_loss=0.107, over 3825254.90 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:49:26,217 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.719e+02 2.001e+02 2.349e+02 3.398e+02, threshold=4.002e+02, percent-clipped=0.0
+2024-08-29 15:49:28,472 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.08 vs. limit=22.5
+2024-08-29 15:49:36,452 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=192309.33333333334, ans=0.1
+2024-08-29 15:50:01,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=192416.0, ans=0.0
+2024-08-29 15:50:04,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=192416.0, ans=0.125
+2024-08-29 15:50:04,328 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.56 vs. limit=15.0
+2024-08-29 15:50:18,609 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=192469.33333333334, ans=0.0
+2024-08-29 15:50:24,185 INFO [train.py:1114] (3/4) Epoch 15, batch 1250, loss[loss=0.268, simple_loss=0.3139, pruned_loss=0.08021, ctc_loss=0.1544, over 19533.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2817, pruned_loss=0.05671, ctc_loss=0.1069, over 3842670.75 frames. ], batch size: 61, lr: 1.00e-02, grad_scale: 32.0
+2024-08-29 15:50:24,709 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.97 vs. limit=22.5
+2024-08-29 15:50:50,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=192629.33333333334, ans=0.0
+2024-08-29 15:51:02,972 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=192682.66666666666, ans=0.0
+2024-08-29 15:51:13,378 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=192736.0, ans=0.0
+2024-08-29 15:51:25,187 INFO [train.py:1114] (3/4) Epoch 15, batch 1300, loss[loss=0.2156, simple_loss=0.2829, pruned_loss=0.05371, ctc_loss=0.1021, over 18844.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2805, pruned_loss=0.05602, ctc_loss=0.1057, over 3846304.33 frames. ], batch size: 76, lr: 9.99e-03, grad_scale: 32.0
+2024-08-29 15:51:30,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=192789.33333333334, ans=0.1
+2024-08-29 15:52:15,028 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.668e+02 1.955e+02 2.455e+02 4.261e+02, threshold=3.910e+02, percent-clipped=2.0
+2024-08-29 15:52:20,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=192842.66666666666, ans=0.0
+2024-08-29 15:52:28,513 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=192842.66666666666, ans=0.1
+2024-08-29 15:52:34,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=192896.0, ans=0.0
+2024-08-29 15:52:36,713 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=192896.0, ans=0.125
+2024-08-29 15:52:36,734 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=192896.0, ans=0.1
+2024-08-29 15:53:03,186 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=193002.66666666666, ans=0.1
+2024-08-29 15:53:07,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=193002.66666666666, ans=0.125
+2024-08-29 15:53:10,983 INFO [train.py:1114] (3/4) Epoch 15, batch 1350, loss[loss=0.2185, simple_loss=0.2877, pruned_loss=0.05406, ctc_loss=0.1028, over 19755.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2794, pruned_loss=0.05542, ctc_loss=0.1044, over 3857938.98 frames. ], batch size: 54, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:53:15,067 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=193056.0, ans=0.2
+2024-08-29 15:53:28,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=193109.33333333334, ans=0.2
+2024-08-29 15:53:36,341 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.22 vs. limit=15.0
+2024-08-29 15:54:14,978 INFO [train.py:1114] (3/4) Epoch 15, batch 1400, loss[loss=0.1799, simple_loss=0.2446, pruned_loss=0.04175, ctc_loss=0.07907, over 19659.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2789, pruned_loss=0.05525, ctc_loss=0.1041, over 3864298.37 frames. ], batch size: 46, lr: 9.98e-03, grad_scale: 32.0
+2024-08-29 15:54:36,722 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.20 vs. limit=22.5
+2024-08-29 15:54:37,471 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.658e+02 1.833e+02 2.351e+02 3.730e+02, threshold=3.665e+02, percent-clipped=0.0
+2024-08-29 15:55:08,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=193429.33333333334, ans=0.125
+2024-08-29 15:55:11,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=193429.33333333334, ans=0.0
+2024-08-29 15:55:43,694 INFO [train.py:1114] (3/4) Epoch 15, batch 1450, loss[loss=0.2404, simple_loss=0.3049, pruned_loss=0.06375, ctc_loss=0.1209, over 19675.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2799, pruned_loss=0.05592, ctc_loss=0.1053, over 3862218.51 frames. ], batch size: 63, lr: 9.97e-03, grad_scale: 32.0
+2024-08-29 15:55:43,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=193589.33333333334, ans=0.2
+2024-08-29 15:55:52,027 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 15:56:35,564 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.18 vs. limit=6.0
+2024-08-29 15:56:43,523 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=193802.66666666666, ans=0.0
+2024-08-29 15:56:45,749 INFO [train.py:1114] (3/4) Epoch 15, batch 1500, loss[loss=0.2268, simple_loss=0.2902, pruned_loss=0.05967, ctc_loss=0.1104, over 19575.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2801, pruned_loss=0.05592, ctc_loss=0.1055, over 3862151.23 frames. ], batch size: 57, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:56:52,420 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 1.660e+02 1.885e+02 2.337e+02 4.281e+02, threshold=3.770e+02, percent-clipped=2.0
+2024-08-29 15:57:03,892 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.78 vs. limit=12.0
+2024-08-29 15:57:05,277 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.91 vs. limit=22.5
+2024-08-29 15:57:25,325 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.13 vs. limit=6.0
+2024-08-29 15:57:36,736 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=194069.33333333334, ans=0.0
+2024-08-29 15:57:51,459 INFO [train.py:1114] (3/4) Epoch 15, batch 1550, loss[loss=0.2528, simple_loss=0.3069, pruned_loss=0.0718, ctc_loss=0.138, over 19612.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2802, pruned_loss=0.05615, ctc_loss=0.1061, over 3846859.26 frames. ], batch size: 60, lr: 9.96e-03, grad_scale: 32.0
+2024-08-29 15:57:58,760 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.12 vs. limit=15.0
+2024-08-29 15:58:19,835 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=194229.33333333334, ans=0.125
+2024-08-29 15:58:21,066 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=194229.33333333334, ans=0.05
+2024-08-29 15:58:23,984 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.18 vs. limit=15.0
+2024-08-29 15:58:29,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=194282.66666666666, ans=0.0
+2024-08-29 15:58:31,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=194282.66666666666, ans=0.125
+2024-08-29 15:58:35,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=194282.66666666666, ans=0.025
+2024-08-29 15:58:52,397 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=194389.33333333334, ans=0.0
+2024-08-29 15:58:53,438 INFO [train.py:1114] (3/4) Epoch 15, batch 1600, loss[loss=0.2103, simple_loss=0.2843, pruned_loss=0.04943, ctc_loss=0.09369, over 19843.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2796, pruned_loss=0.05597, ctc_loss=0.1055, over 3836220.94 frames. ], batch size: 57, lr: 9.95e-03, grad_scale: 32.0
+2024-08-29 15:58:59,523 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.762e+02 2.164e+02 2.478e+02 4.927e+02, threshold=4.328e+02, percent-clipped=7.0
+2024-08-29 15:59:53,268 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.28 vs. limit=15.0
+2024-08-29 15:59:57,535 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=194442.66666666666, ans=0.2
+2024-08-29 16:00:05,068 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.81 vs. limit=22.5
+2024-08-29 16:00:10,521 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:00:21,493 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=194549.33333333334, ans=0.0
+2024-08-29 16:00:35,212 INFO [train.py:1114] (3/4) Epoch 15, batch 1650, loss[loss=0.2232, simple_loss=0.2823, pruned_loss=0.05951, ctc_loss=0.1126, over 19672.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2794, pruned_loss=0.05582, ctc_loss=0.1052, over 3833576.65 frames. ], batch size: 59, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:00:40,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=194656.0, ans=0.125
+2024-08-29 16:00:44,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=194656.0, ans=0.125
+2024-08-29 16:00:51,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=194709.33333333334, ans=0.025
+2024-08-29 16:01:03,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=194762.66666666666, ans=0.07
+2024-08-29 16:01:05,819 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=194762.66666666666, ans=0.1
+2024-08-29 16:01:20,642 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=194816.0, ans=0.0
+2024-08-29 16:01:21,899 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=194816.0, ans=0.125
+2024-08-29 16:01:30,008 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=194869.33333333334, ans=0.025
+2024-08-29 16:01:38,034 INFO [train.py:1114] (3/4) Epoch 15, batch 1700, loss[loss=0.1963, simple_loss=0.2552, pruned_loss=0.04992, ctc_loss=0.09407, over 19681.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2792, pruned_loss=0.05537, ctc_loss=0.1043, over 3847382.16 frames. ], batch size: 46, lr: 9.94e-03, grad_scale: 32.0
+2024-08-29 16:01:44,058 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.696e+02 2.083e+02 2.797e+02 4.802e+02, threshold=4.167e+02, percent-clipped=3.0
+2024-08-29 16:01:47,933 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=194922.66666666666, ans=0.1
+2024-08-29 16:01:59,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=194976.0, ans=0.125
+2024-08-29 16:02:10,642 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.48 vs. limit=15.0
+2024-08-29 16:02:17,936 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.14 vs. limit=15.0
+2024-08-29 16:02:40,456 INFO [train.py:1114] (3/4) Epoch 15, batch 1750, loss[loss=0.1845, simple_loss=0.2473, pruned_loss=0.04466, ctc_loss=0.08095, over 19631.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2791, pruned_loss=0.05554, ctc_loss=0.1044, over 3852360.31 frames. ], batch size: 45, lr: 9.93e-03, grad_scale: 32.0
+2024-08-29 16:02:57,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=195242.66666666666, ans=0.125
+2024-08-29 16:02:57,201 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195242.66666666666, ans=0.1
+2024-08-29 16:02:59,689 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=195242.66666666666, ans=0.0
+2024-08-29 16:03:16,489 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195349.33333333334, ans=0.1
+2024-08-29 16:03:16,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=195349.33333333334, ans=0.125
+2024-08-29 16:03:21,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=195349.33333333334, ans=0.09899494936611666
+2024-08-29 16:03:31,611 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=195402.66666666666, ans=0.0
+2024-08-29 16:03:32,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=195402.66666666666, ans=0.125
+2024-08-29 16:03:37,864 INFO [train.py:1114] (3/4) Epoch 15, batch 1800, loss[loss=0.2241, simple_loss=0.2856, pruned_loss=0.05822, ctc_loss=0.1155, over 19619.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2791, pruned_loss=0.05543, ctc_loss=0.1042, over 3854242.67 frames. ], batch size: 55, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:03:43,641 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.702e+02 2.083e+02 2.690e+02 4.339e+02, threshold=4.166e+02, percent-clipped=1.0
+2024-08-29 16:03:57,193 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=195509.33333333334, ans=0.0
+2024-08-29 16:04:00,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=195562.66666666666, ans=0.0
+2024-08-29 16:04:13,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=195616.0, ans=0.125
+2024-08-29 16:04:20,374 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.02 vs. limit=15.0
+2024-08-29 16:04:28,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=195669.33333333334, ans=0.5
+2024-08-29 16:04:34,673 INFO [train.py:1114] (3/4) Epoch 15, batch 1850, loss[loss=0.2337, simple_loss=0.3017, pruned_loss=0.06078, ctc_loss=0.1106, over 19596.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2792, pruned_loss=0.05546, ctc_loss=0.1041, over 3857016.34 frames. ], batch size: 57, lr: 9.92e-03, grad_scale: 32.0
+2024-08-29 16:04:36,465 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.70 vs. limit=15.0
+2024-08-29 16:04:55,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=195829.33333333334, ans=0.0
+2024-08-29 16:04:59,754 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.24 vs. limit=15.0
+2024-08-29 16:05:16,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=195882.66666666666, ans=0.0
+2024-08-29 16:05:21,624 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.93 vs. limit=15.0
+2024-08-29 16:05:23,666 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.21 vs. limit=6.0
+2024-08-29 16:05:28,440 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=195936.0, ans=22.5
+2024-08-29 16:05:35,600 INFO [train.py:1114] (3/4) Epoch 15, batch 1900, loss[loss=0.1888, simple_loss=0.2737, pruned_loss=0.03713, ctc_loss=0.07414, over 19674.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2794, pruned_loss=0.05541, ctc_loss=0.1041, over 3861337.71 frames. ], batch size: 59, lr: 9.91e-03, grad_scale: 32.0
+2024-08-29 16:05:40,971 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.724e+02 2.102e+02 3.115e+02 5.340e+02, threshold=4.204e+02, percent-clipped=3.0
+2024-08-29 16:05:46,880 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:05:51,280 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=196042.66666666666, ans=0.1
+2024-08-29 16:06:03,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=196096.0, ans=0.125
+2024-08-29 16:06:04,717 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:06:05,844 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=196096.0, ans=0.0
+2024-08-29 16:06:11,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=196149.33333333334, ans=0.0
+2024-08-29 16:06:12,641 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.35 vs. limit=22.5
+2024-08-29 16:06:32,319 INFO [train.py:1114] (3/4) Epoch 15, batch 1950, loss[loss=0.1998, simple_loss=0.2673, pruned_loss=0.04744, ctc_loss=0.0938, over 19588.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2803, pruned_loss=0.05537, ctc_loss=0.1038, over 3870605.46 frames. ], batch size: 52, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:06:45,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=196256.0, ans=0.125
+2024-08-29 16:06:46,175 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.90 vs. limit=15.0
+2024-08-29 16:06:56,403 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.10 vs. limit=6.0
+2024-08-29 16:07:18,261 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=196416.0, ans=0.07
+2024-08-29 16:07:35,223 INFO [train.py:1114] (3/4) Epoch 15, batch 2000, loss[loss=0.1731, simple_loss=0.2412, pruned_loss=0.03823, ctc_loss=0.07157, over 19650.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2805, pruned_loss=0.05535, ctc_loss=0.1038, over 3854597.70 frames. ], batch size: 45, lr: 9.90e-03, grad_scale: 32.0
+2024-08-29 16:07:41,135 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.618e+02 1.832e+02 2.132e+02 4.362e+02, threshold=3.664e+02, percent-clipped=1.0
+2024-08-29 16:08:13,237 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:08:23,478 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=196736.0, ans=0.125
+2024-08-29 16:08:32,352 INFO [train.py:1114] (3/4) Epoch 15, batch 2050, loss[loss=0.2242, simple_loss=0.2718, pruned_loss=0.06372, ctc_loss=0.1228, over 19729.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2799, pruned_loss=0.05554, ctc_loss=0.1043, over 3850197.16 frames. ], batch size: 47, lr: 9.89e-03, grad_scale: 32.0
+2024-08-29 16:08:42,761 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.48 vs. limit=22.5
+2024-08-29 16:08:45,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=196842.66666666666, ans=0.95
+2024-08-29 16:09:00,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=196896.0, ans=0.125
+2024-08-29 16:09:05,224 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.90 vs. limit=15.0
+2024-08-29 16:09:05,467 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.16 vs. limit=12.0
+2024-08-29 16:09:07,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=196949.33333333334, ans=0.125
+2024-08-29 16:09:11,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=196949.33333333334, ans=0.1
+2024-08-29 16:09:27,721 INFO [train.py:1114] (3/4) Epoch 15, batch 2100, loss[loss=0.231, simple_loss=0.2922, pruned_loss=0.06283, ctc_loss=0.1102, over 19757.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2793, pruned_loss=0.05518, ctc_loss=0.1036, over 3858054.45 frames. ], batch size: 54, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:09:33,396 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.265e+02 1.691e+02 1.929e+02 2.354e+02 3.359e+02, threshold=3.858e+02, percent-clipped=0.0
+2024-08-29 16:09:40,564 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=197109.33333333334, ans=0.2
+2024-08-29 16:10:06,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=197216.0, ans=0.125
+2024-08-29 16:10:22,934 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=197269.33333333334, ans=0.0
+2024-08-29 16:10:26,361 INFO [train.py:1114] (3/4) Epoch 15, batch 2150, loss[loss=0.2106, simple_loss=0.2756, pruned_loss=0.05246, ctc_loss=0.1014, over 19846.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2787, pruned_loss=0.05507, ctc_loss=0.1034, over 3870024.16 frames. ], batch size: 52, lr: 9.88e-03, grad_scale: 32.0
+2024-08-29 16:10:34,972 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=197322.66666666666, ans=0.0
+2024-08-29 16:12:01,895 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.49 vs. limit=15.0
+2024-08-29 16:12:03,976 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=197429.33333333334, ans=0.125
+2024-08-29 16:12:14,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197482.66666666666, ans=0.1
+2024-08-29 16:12:20,821 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.41 vs. limit=12.0
+2024-08-29 16:12:25,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197536.0, ans=0.1
+2024-08-29 16:12:31,434 INFO [train.py:1114] (3/4) Epoch 15, batch 2200, loss[loss=0.221, simple_loss=0.2887, pruned_loss=0.05551, ctc_loss=0.1055, over 19591.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2788, pruned_loss=0.05503, ctc_loss=0.1034, over 3868630.81 frames. ], batch size: 57, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:12:36,854 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.412e+02 1.787e+02 2.154e+02 2.730e+02 5.047e+02, threshold=4.308e+02, percent-clipped=4.0
+2024-08-29 16:12:48,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=197642.66666666666, ans=0.125
+2024-08-29 16:12:49,884 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.03 vs. limit=12.0
+2024-08-29 16:12:55,091 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=197696.0, ans=0.07
+2024-08-29 16:13:23,816 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=197802.66666666666, ans=0.0
+2024-08-29 16:13:29,260 INFO [train.py:1114] (3/4) Epoch 15, batch 2250, loss[loss=0.1955, simple_loss=0.2769, pruned_loss=0.04144, ctc_loss=0.07798, over 19617.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2792, pruned_loss=0.05517, ctc_loss=0.1038, over 3868730.69 frames. ], batch size: 55, lr: 9.87e-03, grad_scale: 32.0
+2024-08-29 16:13:29,481 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=197856.0, ans=0.0
+2024-08-29 16:13:32,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=197856.0, ans=0.125
+2024-08-29 16:13:56,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=197856.0, ans=0.125
+2024-08-29 16:14:32,535 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=198069.33333333334, ans=0.125
+2024-08-29 16:14:38,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=198069.33333333334, ans=0.125
+2024-08-29 16:14:43,821 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.18 vs. limit=10.0
+2024-08-29 16:14:45,279 INFO [train.py:1114] (3/4) Epoch 15, batch 2300, loss[loss=0.1973, simple_loss=0.2634, pruned_loss=0.04813, ctc_loss=0.08737, over 19511.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2783, pruned_loss=0.0552, ctc_loss=0.1036, over 3862189.96 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 32.0
+2024-08-29 16:14:50,767 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.686e+02 1.986e+02 2.467e+02 4.553e+02, threshold=3.971e+02, percent-clipped=1.0
+2024-08-29 16:14:55,636 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=5.232e-03
+2024-08-29 16:15:12,607 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=198229.33333333334, ans=0.125
+2024-08-29 16:15:19,929 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=198282.66666666666, ans=0.5
+2024-08-29 16:15:43,140 INFO [train.py:1114] (3/4) Epoch 15, batch 2350, loss[loss=0.2245, simple_loss=0.2961, pruned_loss=0.05562, ctc_loss=0.1039, over 19665.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2777, pruned_loss=0.05487, ctc_loss=0.103, over 3864387.38 frames. ], batch size: 63, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:15:45,854 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=198389.33333333334, ans=0.0
+2024-08-29 16:15:49,324 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=198389.33333333334, ans=0.125
+2024-08-29 16:15:50,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=198389.33333333334, ans=0.0
+2024-08-29 16:15:53,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=198442.66666666666, ans=0.125
+2024-08-29 16:16:08,070 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.96 vs. limit=15.0
+2024-08-29 16:16:10,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.40 vs. limit=15.0
+2024-08-29 16:16:13,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=198496.0, ans=0.125
+2024-08-29 16:16:25,077 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:16:42,892 INFO [train.py:1114] (3/4) Epoch 15, batch 2400, loss[loss=0.2445, simple_loss=0.3031, pruned_loss=0.06856, ctc_loss=0.1217, over 19298.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2802, pruned_loss=0.05587, ctc_loss=0.1047, over 3858996.48 frames. ], batch size: 71, lr: 9.85e-03, grad_scale: 64.0
+2024-08-29 16:16:46,737 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.20 vs. limit=10.0
+2024-08-29 16:16:48,392 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.659e+02 1.944e+02 2.492e+02 3.873e+02, threshold=3.888e+02, percent-clipped=0.0
+2024-08-29 16:16:48,655 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=198656.0, ans=0.2
+2024-08-29 16:16:48,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=198656.0, ans=0.1
+2024-08-29 16:16:58,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=198709.33333333334, ans=0.0
+2024-08-29 16:16:59,098 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=198709.33333333334, ans=0.125
+2024-08-29 16:18:01,379 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=198762.66666666666, ans=0.95
+2024-08-29 16:18:30,987 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.80 vs. limit=22.5
+2024-08-29 16:18:33,172 INFO [train.py:1114] (3/4) Epoch 15, batch 2450, loss[loss=0.294, simple_loss=0.3207, pruned_loss=0.09749, ctc_loss=0.181, over 13070.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.2843, pruned_loss=0.0588, ctc_loss=0.1107, over 3732883.61 frames. ], batch size: 140, lr: 9.84e-03, grad_scale: 32.0
+2024-08-29 16:18:53,295 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=198976.0, ans=0.0
+2024-08-29 16:19:04,235 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=199029.33333333334, ans=0.2
+2024-08-29 16:19:05,337 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=199029.33333333334, ans=0.025
+2024-08-29 16:19:05,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=199029.33333333334, ans=0.125
+2024-08-29 16:19:06,278 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=199082.66666666666, ans=0.0
+2024-08-29 16:19:12,393 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.31 vs. limit=15.0
+2024-08-29 16:20:18,424 INFO [train.py:1114] (3/4) Epoch 16, batch 0, loss[loss=0.1853, simple_loss=0.2482, pruned_loss=0.04497, ctc_loss=0.08101, over 19393.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2482, pruned_loss=0.04497, ctc_loss=0.08101, over 19393.00 frames. ], batch size: 48, lr: 9.52e-03, grad_scale: 32.0
+2024-08-29 16:20:18,424 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-29 16:20:28,425 INFO [train.py:1146] (3/4) Epoch 16, validation: loss=0.1867, simple_loss=0.2755, pruned_loss=0.03636, ctc_loss=0.06317, over 944034.00 frames.
+2024-08-29 16:20:28,426 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13716MB
+2024-08-29 16:20:28,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=199130.66666666666, ans=0.0
+2024-08-29 16:20:34,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=199130.66666666666, ans=0.125
+2024-08-29 16:20:40,608 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=199184.0, ans=0.125
+2024-08-29 16:20:48,958 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.810e+02 1.998e+02 2.276e+02 3.528e+02, threshold=3.997e+02, percent-clipped=0.0
+2024-08-29 16:20:50,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=199184.0, ans=0.125
+2024-08-29 16:21:07,187 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=199290.66666666666, ans=0.2
+2024-08-29 16:21:08,359 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=199290.66666666666, ans=0.025
+2024-08-29 16:21:32,417 INFO [train.py:1114] (3/4) Epoch 16, batch 50, loss[loss=0.2027, simple_loss=0.2594, pruned_loss=0.05301, ctc_loss=0.09983, over 19689.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2804, pruned_loss=0.05563, ctc_loss=0.1051, over 843817.09 frames. ], batch size: 47, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:21:32,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=199397.33333333334, ans=0.0
+2024-08-29 16:22:28,631 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.21 vs. limit=15.0
+2024-08-29 16:22:40,105 INFO [train.py:1114] (3/4) Epoch 16, batch 100, loss[loss=0.1886, simple_loss=0.2613, pruned_loss=0.04167, ctc_loss=0.0814, over 19715.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2833, pruned_loss=0.05667, ctc_loss=0.107, over 1497155.71 frames. ], batch size: 51, lr: 9.51e-03, grad_scale: 32.0
+2024-08-29 16:23:04,943 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten.whitening_limit, batch_count=199717.33333333334, ans=15.0
+2024-08-29 16:23:07,137 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=199717.33333333334, ans=0.125
+2024-08-29 16:23:08,063 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 1.815e+02 2.137e+02 2.569e+02 4.869e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-29 16:23:13,167 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=199717.33333333334, ans=0.125
+2024-08-29 16:30:49,566 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 16:37:11,211 INFO [train.py:1114] (3/4) Epoch 16, batch 150, loss[loss=0.1851, simple_loss=0.2432, pruned_loss=0.04602, ctc_loss=0.08715, over 19693.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.279, pruned_loss=0.05463, ctc_loss=0.1031, over 2025780.12 frames. ], batch size: 47, lr: 9.50e-03, grad_scale: 32.0
+2024-08-29 16:45:46,777 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=200090.66666666666, ans=0.2
+2024-08-29 16:46:51,373 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=200144.0, ans=0.0
+2024-08-29 16:47:03,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=200144.0, ans=0.125
+2024-08-29 16:48:03,536 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=200144.0, ans=0.1
+2024-08-29 16:48:04,008 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.92 vs. limit=15.0
+2024-08-29 16:48:09,836 INFO [train.py:1114] (3/4) Epoch 16, batch 200, loss[loss=0.2428, simple_loss=0.3015, pruned_loss=0.06637, ctc_loss=0.1284, over 18381.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2781, pruned_loss=0.05396, ctc_loss=0.1018, over 2433521.47 frames. ], batch size: 85, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:49:16,962 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=200197.33333333334, ans=0.125
+2024-08-29 16:49:56,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=200197.33333333334, ans=0.0
+2024-08-29 16:49:56,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=200197.33333333334, ans=0.025
+2024-08-29 16:50:14,925 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=200250.66666666666, ans=0.025
+2024-08-29 16:53:29,825 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.321e+02 1.834e+02 2.227e+02 2.815e+02 4.534e+02, threshold=4.454e+02, percent-clipped=1.0
+2024-08-29 16:53:55,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=200304.0, ans=0.1
+2024-08-29 16:56:29,762 INFO [train.py:1114] (3/4) Epoch 16, batch 250, loss[loss=0.2679, simple_loss=0.3151, pruned_loss=0.08053, ctc_loss=0.1491, over 19394.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2787, pruned_loss=0.05419, ctc_loss=0.1023, over 2754594.03 frames. ], batch size: 67, lr: 9.49e-03, grad_scale: 32.0
+2024-08-29 16:56:40,469 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=200464.0, ans=0.125
+2024-08-29 16:58:34,374 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.99 vs. limit=15.0
+2024-08-29 16:58:51,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=200570.66666666666, ans=0.035
+2024-08-29 16:58:54,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=200570.66666666666, ans=0.1
+2024-08-29 16:59:04,414 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=200624.0, ans=0.0
+2024-08-29 16:59:34,470 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=200677.33333333334, ans=0.2
+2024-08-29 17:01:56,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=200677.33333333334, ans=0.95
+2024-08-29 17:02:02,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.30 vs. limit=15.0
+2024-08-29 17:02:22,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=200677.33333333334, ans=0.2
+2024-08-29 17:03:13,497 INFO [train.py:1114] (3/4) Epoch 16, batch 300, loss[loss=0.2191, simple_loss=0.287, pruned_loss=0.05477, ctc_loss=0.104, over 19561.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.278, pruned_loss=0.05396, ctc_loss=0.102, over 3000245.92 frames. ], batch size: 61, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:03:15,008 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.86 vs. limit=10.0
+2024-08-29 17:03:15,174 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.00 vs. limit=10.0
+2024-08-29 17:03:36,032 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.663e+02 1.972e+02 2.398e+02 4.674e+02, threshold=3.943e+02, percent-clipped=1.0
+2024-08-29 17:03:37,609 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=200784.0, ans=0.2
+2024-08-29 17:04:38,870 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.54 vs. limit=15.0
+2024-08-29 17:06:05,412 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=200837.33333333334, ans=0.0
+2024-08-29 17:07:54,975 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=200890.66666666666, ans=0.0
+2024-08-29 17:08:30,915 INFO [train.py:1114] (3/4) Epoch 16, batch 350, loss[loss=0.1888, simple_loss=0.2524, pruned_loss=0.04695, ctc_loss=0.07831, over 19765.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2786, pruned_loss=0.05403, ctc_loss=0.1019, over 3190397.90 frames. ], batch size: 48, lr: 9.48e-03, grad_scale: 32.0
+2024-08-29 17:08:33,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=200997.33333333334, ans=0.125
+2024-08-29 17:12:11,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=201104.0, ans=0.125
+2024-08-29 17:12:33,788 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=201157.33333333334, ans=0.125
+2024-08-29 17:13:10,707 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=201210.66666666666, ans=0.1
+2024-08-29 17:13:11,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=201210.66666666666, ans=0.1
+2024-08-29 17:13:12,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=201210.66666666666, ans=0.125
+2024-08-29 17:13:17,606 INFO [train.py:1114] (3/4) Epoch 16, batch 400, loss[loss=0.21, simple_loss=0.2793, pruned_loss=0.05131, ctc_loss=0.09505, over 19500.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2771, pruned_loss=0.05331, ctc_loss=0.1006, over 3343357.30 frames. ], batch size: 54, lr: 9.47e-03, grad_scale: 32.0
+2024-08-29 17:13:28,209 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.10 vs. limit=22.5
+2024-08-29 17:13:36,238 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.60 vs. limit=15.0
+2024-08-29 17:13:45,753 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.35 vs. limit=15.0
+2024-08-29 17:15:22,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=201317.33333333334, ans=0.125
+2024-08-29 17:15:51,033 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.714e+02 1.905e+02 2.508e+02 3.565e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-29 17:16:23,300 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=201370.66666666666, ans=0.125
+2024-08-29 17:16:35,593 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=12.0
+2024-08-29 17:16:38,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=201424.0, ans=0.125
+2024-08-29 17:16:49,127 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.whiten.whitening_limit, batch_count=201477.33333333334, ans=15.0
+2024-08-29 17:17:07,840 INFO [train.py:1114] (3/4) Epoch 16, batch 450, loss[loss=0.207, simple_loss=0.2811, pruned_loss=0.04721, ctc_loss=0.09633, over 19621.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2778, pruned_loss=0.05385, ctc_loss=0.1015, over 3450908.43 frames. ], batch size: 55, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:18:11,441 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.30 vs. limit=15.0
+2024-08-29 17:20:51,372 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.47 vs. limit=15.0
+2024-08-29 17:21:30,730 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.57 vs. limit=15.0
+2024-08-29 17:21:35,072 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=201744.0, ans=0.0
+2024-08-29 17:21:57,452 INFO [train.py:1114] (3/4) Epoch 16, batch 500, loss[loss=0.2327, simple_loss=0.3048, pruned_loss=0.05828, ctc_loss=0.1102, over 19654.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2775, pruned_loss=0.05373, ctc_loss=0.1014, over 3546959.70 frames. ], batch size: 63, lr: 9.46e-03, grad_scale: 32.0
+2024-08-29 17:21:57,594 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=201797.33333333334, ans=0.125
+2024-08-29 17:22:46,943 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 1.689e+02 2.169e+02 2.570e+02 5.370e+02, threshold=4.338e+02, percent-clipped=3.0
+2024-08-29 17:23:27,246 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.86 vs. limit=15.0
+2024-08-29 17:23:57,685 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.94 vs. limit=15.0
+2024-08-29 17:23:58,751 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=202010.66666666666, ans=0.125
+2024-08-29 17:24:02,823 INFO [train.py:1114] (3/4) Epoch 16, batch 550, loss[loss=0.2366, simple_loss=0.2953, pruned_loss=0.06411, ctc_loss=0.1242, over 19186.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2767, pruned_loss=0.05312, ctc_loss=0.1005, over 3608772.35 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:24:16,473 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=202064.0, ans=0.0
+2024-08-29 17:24:32,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=202117.33333333334, ans=0.125
+2024-08-29 17:24:33,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=202117.33333333334, ans=0.07
+2024-08-29 17:24:34,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=202117.33333333334, ans=0.2
+2024-08-29 17:24:49,643 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202170.66666666666, ans=0.1
+2024-08-29 17:24:58,134 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.10 vs. limit=22.5
+2024-08-29 17:25:11,757 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=202277.33333333334, ans=0.2
+2024-08-29 17:25:21,525 INFO [train.py:1114] (3/4) Epoch 16, batch 600, loss[loss=0.2605, simple_loss=0.3058, pruned_loss=0.07822, ctc_loss=0.147, over 19353.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2767, pruned_loss=0.05319, ctc_loss=0.1005, over 3666381.67 frames. ], batch size: 67, lr: 9.45e-03, grad_scale: 32.0
+2024-08-29 17:26:15,424 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=202330.66666666666, ans=0.125
+2024-08-29 17:26:49,875 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=202384.0, ans=0.07
+2024-08-29 17:27:03,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=202384.0, ans=0.1
+2024-08-29 17:27:04,568 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.652e+02 1.934e+02 2.290e+02 3.719e+02, threshold=3.867e+02, percent-clipped=0.0
+2024-08-29 17:28:13,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=202437.33333333334, ans=0.0
+2024-08-29 17:28:46,559 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.91 vs. limit=22.5
+2024-08-29 17:29:44,527 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=202490.66666666666, ans=0.025
+2024-08-29 17:30:10,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202490.66666666666, ans=0.1
+2024-08-29 17:30:20,266 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=202544.0, ans=0.125
+2024-08-29 17:30:44,114 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=202544.0, ans=0.1
+2024-08-29 17:31:03,780 INFO [train.py:1114] (3/4) Epoch 16, batch 650, loss[loss=0.2253, simple_loss=0.2814, pruned_loss=0.06199, ctc_loss=0.1132, over 19773.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2759, pruned_loss=0.05285, ctc_loss=0.09992, over 3716583.34 frames. ], batch size: 54, lr: 9.44e-03, grad_scale: 32.0
+2024-08-29 17:32:04,620 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:32:55,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=202757.33333333334, ans=0.125
+2024-08-29 17:33:46,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=202810.66666666666, ans=0.125
+2024-08-29 17:34:02,109 INFO [train.py:1114] (3/4) Epoch 16, batch 700, loss[loss=0.2052, simple_loss=0.2716, pruned_loss=0.05088, ctc_loss=0.0925, over 19721.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2765, pruned_loss=0.05306, ctc_loss=0.1002, over 3748262.13 frames. ], batch size: 51, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:34:17,167 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=202917.33333333334, ans=0.025
+2024-08-29 17:35:12,327 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.286e+02 1.755e+02 2.110e+02 2.761e+02 5.047e+02, threshold=4.220e+02, percent-clipped=5.0
+2024-08-29 17:36:00,935 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.40 vs. limit=15.0
+2024-08-29 17:36:24,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=202970.66666666666, ans=0.0
+2024-08-29 17:36:32,830 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 17:38:14,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=203024.0, ans=0.125
+2024-08-29 17:38:16,526 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.72 vs. limit=15.0
+2024-08-29 17:42:01,665 INFO [train.py:1114] (3/4) Epoch 16, batch 750, loss[loss=0.2213, simple_loss=0.2884, pruned_loss=0.05615, ctc_loss=0.1045, over 19488.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2759, pruned_loss=0.05282, ctc_loss=0.09953, over 3773674.92 frames. ], batch size: 54, lr: 9.43e-03, grad_scale: 32.0
+2024-08-29 17:42:03,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=203130.66666666666, ans=0.1
+2024-08-29 17:42:03,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=203130.66666666666, ans=0.0
+2024-08-29 17:42:12,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=203130.66666666666, ans=0.025
+2024-08-29 17:42:32,529 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=203184.0, ans=0.0
+2024-08-29 17:42:48,134 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.93 vs. limit=15.0
+2024-08-29 17:46:05,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=203344.0, ans=0.125
+2024-08-29 17:46:10,162 INFO [train.py:1114] (3/4) Epoch 16, batch 800, loss[loss=0.1928, simple_loss=0.2654, pruned_loss=0.04245, ctc_loss=0.08811, over 19796.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2755, pruned_loss=0.05271, ctc_loss=0.09925, over 3795955.34 frames. ], batch size: 49, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:48:05,196 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=203450.66666666666, ans=0.2
+2024-08-29 17:48:15,896 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.745e+02 2.069e+02 2.556e+02 3.770e+02, threshold=4.138e+02, percent-clipped=0.0
+2024-08-29 17:48:24,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=203504.0, ans=0.025
+2024-08-29 17:48:31,955 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.92 vs. limit=15.0
+2024-08-29 17:48:42,491 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.58 vs. limit=15.0
+2024-08-29 17:49:06,936 INFO [train.py:1114] (3/4) Epoch 16, batch 850, loss[loss=0.2056, simple_loss=0.277, pruned_loss=0.04808, ctc_loss=0.09494, over 19678.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2758, pruned_loss=0.05289, ctc_loss=0.09981, over 3815245.16 frames. ], batch size: 59, lr: 9.42e-03, grad_scale: 32.0
+2024-08-29 17:49:18,211 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=203664.0, ans=0.1
+2024-08-29 17:49:40,384 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.35 vs. limit=22.5
+2024-08-29 17:50:21,093 INFO [train.py:1114] (3/4) Epoch 16, batch 900, loss[loss=0.2062, simple_loss=0.2619, pruned_loss=0.05546, ctc_loss=0.09898, over 19440.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2763, pruned_loss=0.05348, ctc_loss=0.1007, over 3818838.11 frames. ], batch size: 48, lr: 9.41e-03, grad_scale: 32.0
+2024-08-29 17:50:30,005 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=203930.66666666666, ans=0.0
+2024-08-29 17:50:33,217 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=203930.66666666666, ans=0.1
+2024-08-29 17:50:37,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=203930.66666666666, ans=0.125
+2024-08-29 17:50:48,727 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.676e+02 1.827e+02 2.350e+02 4.099e+02, threshold=3.653e+02, percent-clipped=0.0
+2024-08-29 17:51:19,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=204037.33333333334, ans=0.125
+2024-08-29 17:53:28,353 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=204144.0, ans=0.0
+2024-08-29 17:53:37,491 INFO [train.py:1114] (3/4) Epoch 16, batch 950, loss[loss=0.1943, simple_loss=0.2581, pruned_loss=0.04797, ctc_loss=0.08628, over 19475.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2769, pruned_loss=0.05383, ctc_loss=0.1011, over 3819271.02 frames. ], batch size: 49, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:54:04,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=204197.33333333334, ans=0.0
+2024-08-29 17:54:06,003 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=204197.33333333334, ans=0.2
+2024-08-29 17:54:33,828 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.49 vs. limit=10.0
+2024-08-29 17:54:34,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=204250.66666666666, ans=0.2
+2024-08-29 17:54:57,722 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=204357.33333333334, ans=0.125
+2024-08-29 17:55:46,647 INFO [train.py:1114] (3/4) Epoch 16, batch 1000, loss[loss=0.2103, simple_loss=0.2702, pruned_loss=0.05417, ctc_loss=0.1053, over 19848.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2783, pruned_loss=0.05446, ctc_loss=0.1024, over 3815685.28 frames. ], batch size: 52, lr: 9.40e-03, grad_scale: 32.0
+2024-08-29 17:56:04,620 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.89 vs. limit=22.5
+2024-08-29 17:56:07,202 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.649e+02 1.918e+02 2.268e+02 3.238e+02, threshold=3.836e+02, percent-clipped=0.0
+2024-08-29 17:56:58,390 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.46 vs. limit=15.0
+2024-08-29 17:57:31,265 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=204624.0, ans=0.125
+2024-08-29 17:57:39,296 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=204624.0, ans=0.0
+2024-08-29 17:57:54,923 INFO [train.py:1114] (3/4) Epoch 16, batch 1050, loss[loss=0.2358, simple_loss=0.3024, pruned_loss=0.06251, ctc_loss=0.1105, over 19848.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2777, pruned_loss=0.0544, ctc_loss=0.1022, over 3822643.35 frames. ], batch size: 57, lr: 9.39e-03, grad_scale: 32.0
+2024-08-29 17:58:32,634 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=204730.66666666666, ans=0.125
+2024-08-29 17:58:33,069 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.98 vs. limit=15.0
+2024-08-29 17:58:35,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=204730.66666666666, ans=0.025
+2024-08-29 17:59:51,134 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=204784.0, ans=0.0
+2024-08-29 18:00:09,106 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=204837.33333333334, ans=0.0
+2024-08-29 18:00:21,173 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.01 vs. limit=15.0
+2024-08-29 18:00:53,277 INFO [train.py:1114] (3/4) Epoch 16, batch 1100, loss[loss=0.2242, simple_loss=0.2877, pruned_loss=0.05941, ctc_loss=0.1046, over 19573.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.278, pruned_loss=0.05464, ctc_loss=0.1026, over 3830346.76 frames. ], batch size: 52, lr: 9.39e-03, grad_scale: 16.0
+2024-08-29 18:01:10,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=204997.33333333334, ans=0.125
+2024-08-29 18:01:21,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=205050.66666666666, ans=0.0
+2024-08-29 18:01:27,924 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 1.694e+02 1.874e+02 2.325e+02 3.063e+02, threshold=3.748e+02, percent-clipped=0.0
+2024-08-29 18:01:53,174 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.09 vs. limit=10.0
+2024-08-29 18:01:56,580 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.67 vs. limit=15.0
+2024-08-29 18:02:27,560 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.50 vs. limit=6.0
+2024-08-29 18:02:38,217 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.34 vs. limit=15.0
+2024-08-29 18:02:38,892 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=205210.66666666666, ans=0.125
+2024-08-29 18:02:43,481 INFO [train.py:1114] (3/4) Epoch 16, batch 1150, loss[loss=0.2022, simple_loss=0.2655, pruned_loss=0.05062, ctc_loss=0.09409, over 19595.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2779, pruned_loss=0.05486, ctc_loss=0.1031, over 3828951.50 frames. ], batch size: 52, lr: 9.38e-03, grad_scale: 16.0
+2024-08-29 18:02:50,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=205264.0, ans=0.125
+2024-08-29 18:03:00,358 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=205317.33333333334, ans=0.0
+2024-08-29 18:03:14,411 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=205370.66666666666, ans=0.0
+2024-08-29 18:03:31,383 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=205477.33333333334, ans=0.0
+2024-08-29 18:03:41,602 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=205477.33333333334, ans=0.0
+2024-08-29 18:03:45,089 INFO [train.py:1114] (3/4) Epoch 16, batch 1200, loss[loss=0.2286, simple_loss=0.2945, pruned_loss=0.05886, ctc_loss=0.1122, over 19837.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.279, pruned_loss=0.05503, ctc_loss=0.1035, over 3825450.87 frames. ], batch size: 57, lr: 9.38e-03, grad_scale: 32.0
+2024-08-29 18:03:48,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=205530.66666666666, ans=0.125
+2024-08-29 18:03:52,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=205530.66666666666, ans=0.125
+2024-08-29 18:03:52,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=205530.66666666666, ans=0.2
+2024-08-29 18:03:59,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=205584.0, ans=0.0
+2024-08-29 18:04:02,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=205584.0, ans=0.125
+2024-08-29 18:04:06,045 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.98 vs. limit=15.0
+2024-08-29 18:04:06,318 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.725e+02 2.012e+02 2.470e+02 3.418e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-29 18:04:08,915 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=205637.33333333334, ans=0.2
+2024-08-29 18:04:45,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=205744.0, ans=0.125
+2024-08-29 18:04:50,733 INFO [train.py:1114] (3/4) Epoch 16, batch 1250, loss[loss=0.2429, simple_loss=0.2998, pruned_loss=0.06801, ctc_loss=0.1248, over 19533.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.279, pruned_loss=0.05475, ctc_loss=0.1029, over 3843629.75 frames. ], batch size: 61, lr: 9.37e-03, grad_scale: 32.0
+2024-08-29 18:04:55,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=205797.33333333334, ans=0.0
+2024-08-29 18:05:48,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=205957.33333333334, ans=0.125
+2024-08-29 18:06:32,847 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.84 vs. limit=10.0
+2024-08-29 18:06:33,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=206010.66666666666, ans=0.0
+2024-08-29 18:06:35,884 INFO [train.py:1114] (3/4) Epoch 16, batch 1300, loss[loss=0.236, simple_loss=0.2915, pruned_loss=0.06604, ctc_loss=0.1211, over 18726.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2777, pruned_loss=0.05402, ctc_loss=0.1015, over 3847465.37 frames. ], batch size: 76, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:06:47,258 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=206117.33333333334, ans=0.0
+2024-08-29 18:06:57,555 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.716e+02 2.090e+02 2.690e+02 4.268e+02, threshold=4.180e+02, percent-clipped=3.0
+2024-08-29 18:07:22,331 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=206277.33333333334, ans=0.125
+2024-08-29 18:07:31,349 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=206277.33333333334, ans=0.1
+2024-08-29 18:07:34,544 INFO [train.py:1114] (3/4) Epoch 16, batch 1350, loss[loss=0.2105, simple_loss=0.2817, pruned_loss=0.05093, ctc_loss=0.09367, over 19771.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.278, pruned_loss=0.05431, ctc_loss=0.1021, over 3856232.56 frames. ], batch size: 54, lr: 9.36e-03, grad_scale: 32.0
+2024-08-29 18:07:40,799 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=206330.66666666666, ans=0.125
+2024-08-29 18:07:54,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=206384.0, ans=0.0
+2024-08-29 18:07:57,539 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:10:13,282 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=206544.0, ans=0.1
+2024-08-29 18:10:22,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=206597.33333333334, ans=0.0
+2024-08-29 18:10:56,016 INFO [train.py:1114] (3/4) Epoch 16, batch 1400, loss[loss=0.1763, simple_loss=0.2408, pruned_loss=0.0403, ctc_loss=0.07789, over 19667.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2781, pruned_loss=0.05445, ctc_loss=0.1023, over 3863166.97 frames. ], batch size: 46, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:10:57,926 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.35 vs. limit=15.0
+2024-08-29 18:11:02,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=206597.33333333334, ans=0.125
+2024-08-29 18:13:15,197 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.659e+02 1.830e+02 2.117e+02 3.619e+02, threshold=3.659e+02, percent-clipped=0.0
+2024-08-29 18:14:20,342 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.15 vs. limit=15.0
+2024-08-29 18:14:49,672 INFO [train.py:1114] (3/4) Epoch 16, batch 1450, loss[loss=0.218, simple_loss=0.2807, pruned_loss=0.05661, ctc_loss=0.1054, over 19669.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2781, pruned_loss=0.05418, ctc_loss=0.1017, over 3862211.86 frames. ], batch size: 63, lr: 9.35e-03, grad_scale: 32.0
+2024-08-29 18:15:10,110 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=206864.0, ans=0.1
+2024-08-29 18:15:16,083 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.04 vs. limit=22.5
+2024-08-29 18:15:29,906 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=206970.66666666666, ans=0.05
+2024-08-29 18:15:40,066 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=207024.0, ans=0.125
+2024-08-29 18:15:55,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=207024.0, ans=0.0
+2024-08-29 18:15:56,922 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=207024.0, ans=0.125
+2024-08-29 18:16:10,740 INFO [train.py:1114] (3/4) Epoch 16, batch 1500, loss[loss=0.1954, simple_loss=0.2782, pruned_loss=0.04008, ctc_loss=0.08101, over 19576.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2781, pruned_loss=0.05398, ctc_loss=0.1012, over 3862602.86 frames. ], batch size: 57, lr: 9.34e-03, grad_scale: 32.0
+2024-08-29 18:16:20,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=207130.66666666666, ans=0.05
+2024-08-29 18:16:32,416 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.680e+02 1.893e+02 2.490e+02 3.994e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-29 18:16:49,525 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=207290.66666666666, ans=0.125
+2024-08-29 18:17:34,470 INFO [train.py:1114] (3/4) Epoch 16, batch 1550, loss[loss=0.2278, simple_loss=0.2923, pruned_loss=0.05991, ctc_loss=0.1086, over 19608.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.278, pruned_loss=0.05406, ctc_loss=0.1018, over 3847341.01 frames. ], batch size: 60, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:17:38,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff2.min_abs, batch_count=207397.33333333334, ans=0.1
+2024-08-29 18:17:52,779 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.60 vs. limit=22.5
+2024-08-29 18:17:59,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=207504.0, ans=0.125
+2024-08-29 18:18:00,595 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=207504.0, ans=0.1
+2024-08-29 18:19:22,737 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:19:32,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=207557.33333333334, ans=0.0
+2024-08-29 18:19:44,731 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=207610.66666666666, ans=0.025
+2024-08-29 18:19:55,348 INFO [train.py:1114] (3/4) Epoch 16, batch 1600, loss[loss=0.2209, simple_loss=0.2872, pruned_loss=0.05686, ctc_loss=0.1024, over 19828.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.278, pruned_loss=0.05421, ctc_loss=0.1021, over 3837493.35 frames. ], batch size: 57, lr: 9.33e-03, grad_scale: 32.0
+2024-08-29 18:21:55,747 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.773e+02 1.965e+02 2.508e+02 5.321e+02, threshold=3.930e+02, percent-clipped=3.0
+2024-08-29 18:22:20,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=207877.33333333334, ans=0.0
+2024-08-29 18:22:20,737 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=207877.33333333334, ans=0.0
+2024-08-29 18:22:53,544 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=207877.33333333334, ans=0.125
+2024-08-29 18:23:01,490 INFO [train.py:1114] (3/4) Epoch 16, batch 1650, loss[loss=0.1923, simple_loss=0.2724, pruned_loss=0.04045, ctc_loss=0.07838, over 19646.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2773, pruned_loss=0.05373, ctc_loss=0.1014, over 3834750.07 frames. ], batch size: 59, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:24:27,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=208037.33333333334, ans=0.2
+2024-08-29 18:24:29,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=208037.33333333334, ans=0.125
+2024-08-29 18:24:33,862 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.86 vs. limit=15.0
+2024-08-29 18:24:35,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=208090.66666666666, ans=0.1
+2024-08-29 18:24:49,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=208144.0, ans=0.125
+2024-08-29 18:24:51,250 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.39 vs. limit=15.0
+2024-08-29 18:24:53,248 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=208144.0, ans=0.025
+2024-08-29 18:26:13,695 INFO [train.py:1114] (3/4) Epoch 16, batch 1700, loss[loss=0.1905, simple_loss=0.253, pruned_loss=0.04637, ctc_loss=0.08794, over 19643.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2767, pruned_loss=0.05317, ctc_loss=0.1006, over 3848173.81 frames. ], batch size: 46, lr: 9.32e-03, grad_scale: 32.0
+2024-08-29 18:26:21,145 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=208197.33333333334, ans=0.2
+2024-08-29 18:26:34,605 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.759e+02 2.180e+02 2.878e+02 5.111e+02, threshold=4.361e+02, percent-clipped=4.0
+2024-08-29 18:26:43,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=208304.0, ans=0.2
+2024-08-29 18:26:43,739 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.85 vs. limit=10.0
+2024-08-29 18:26:47,103 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=208304.0, ans=0.2
+2024-08-29 18:26:50,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=208357.33333333334, ans=0.125
+2024-08-29 18:26:58,509 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=208357.33333333334, ans=0.0
+2024-08-29 18:27:03,873 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=208410.66666666666, ans=0.1
+2024-08-29 18:27:08,507 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.76 vs. limit=22.5
+2024-08-29 18:27:13,925 INFO [train.py:1114] (3/4) Epoch 16, batch 1750, loss[loss=0.1891, simple_loss=0.2482, pruned_loss=0.04801, ctc_loss=0.0849, over 19636.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2764, pruned_loss=0.05313, ctc_loss=0.1006, over 3852413.95 frames. ], batch size: 45, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:27:14,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=208464.0, ans=0.1
+2024-08-29 18:27:40,012 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=208464.0, ans=0.125
+2024-08-29 18:27:44,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=208464.0, ans=0.125
+2024-08-29 18:27:47,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=208517.33333333334, ans=0.125
+2024-08-29 18:27:50,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=208517.33333333334, ans=0.1
+2024-08-29 18:27:51,191 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=208517.33333333334, ans=0.125
+2024-08-29 18:29:16,849 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=208624.0, ans=0.2
+2024-08-29 18:30:09,185 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 18:30:12,495 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=208677.33333333334, ans=0.025
+2024-08-29 18:30:12,950 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.67 vs. limit=6.0
+2024-08-29 18:30:14,781 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=208677.33333333334, ans=0.0
+2024-08-29 18:30:17,798 INFO [train.py:1114] (3/4) Epoch 16, batch 1800, loss[loss=0.1844, simple_loss=0.2651, pruned_loss=0.03759, ctc_loss=0.07119, over 19627.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2767, pruned_loss=0.05309, ctc_loss=0.1005, over 3853919.08 frames. ], batch size: 55, lr: 9.31e-03, grad_scale: 32.0
+2024-08-29 18:30:26,151 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.73 vs. limit=22.5
+2024-08-29 18:30:32,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=208784.0, ans=0.0
+2024-08-29 18:30:41,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=208784.0, ans=0.0
+2024-08-29 18:30:45,743 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 1.693e+02 1.985e+02 2.381e+02 4.228e+02, threshold=3.971e+02, percent-clipped=0.0
+2024-08-29 18:30:59,039 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.50 vs. limit=15.0
+2024-08-29 18:31:03,723 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=208890.66666666666, ans=0.125
+2024-08-29 18:31:10,840 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.27 vs. limit=15.0
+2024-08-29 18:31:43,419 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.46 vs. limit=15.0
+2024-08-29 18:31:44,993 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=208997.33333333334, ans=0.125
+2024-08-29 18:31:44,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=208997.33333333334, ans=0.05
+2024-08-29 18:31:45,936 INFO [train.py:1114] (3/4) Epoch 16, batch 1850, loss[loss=0.2081, simple_loss=0.2844, pruned_loss=0.049, ctc_loss=0.08461, over 19584.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2768, pruned_loss=0.0532, ctc_loss=0.1005, over 3855947.60 frames. ], batch size: 57, lr: 9.30e-03, grad_scale: 32.0
+2024-08-29 18:31:55,885 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=208997.33333333334, ans=0.125
+2024-08-29 18:32:36,525 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=209050.66666666666, ans=0.0
+2024-08-29 18:32:40,806 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=209104.0, ans=0.125
+2024-08-29 18:32:53,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=209157.33333333334, ans=0.125
+2024-08-29 18:32:54,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209157.33333333334, ans=0.1
+2024-08-29 18:33:17,358 INFO [train.py:1114] (3/4) Epoch 16, batch 1900, loss[loss=0.2091, simple_loss=0.2779, pruned_loss=0.05064, ctc_loss=0.09732, over 19658.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2771, pruned_loss=0.05348, ctc_loss=0.1009, over 3860573.92 frames. ], batch size: 59, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:33:29,166 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.54 vs. limit=15.0
+2024-08-29 18:33:36,153 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=209317.33333333334, ans=0.2
+2024-08-29 18:33:40,786 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.785e+02 2.354e+02 2.964e+02 6.037e+02, threshold=4.708e+02, percent-clipped=9.0
+2024-08-29 18:33:41,953 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=209370.66666666666, ans=0.1
+2024-08-29 18:33:45,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=209370.66666666666, ans=0.125
+2024-08-29 18:33:47,043 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.35 vs. limit=22.5
+2024-08-29 18:34:08,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=209477.33333333334, ans=0.025
+2024-08-29 18:34:30,809 INFO [train.py:1114] (3/4) Epoch 16, batch 1950, loss[loss=0.2277, simple_loss=0.283, pruned_loss=0.06336, ctc_loss=0.1141, over 19597.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2778, pruned_loss=0.05369, ctc_loss=0.1011, over 3869340.09 frames. ], batch size: 52, lr: 9.29e-03, grad_scale: 32.0
+2024-08-29 18:34:51,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=209530.66666666666, ans=0.125
+2024-08-29 18:34:52,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=209530.66666666666, ans=0.125
+2024-08-29 18:35:22,876 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.41 vs. limit=22.5
+2024-08-29 18:35:45,583 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=209744.0, ans=0.125
+2024-08-29 18:35:49,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=209744.0, ans=0.125
+2024-08-29 18:35:51,672 INFO [train.py:1114] (3/4) Epoch 16, batch 2000, loss[loss=0.1858, simple_loss=0.2511, pruned_loss=0.0432, ctc_loss=0.08509, over 19634.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2777, pruned_loss=0.05342, ctc_loss=0.1009, over 3853325.83 frames. ], batch size: 45, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:35:56,708 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-29 18:36:00,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-29 18:36:13,169 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.666e+02 1.888e+02 2.185e+02 3.516e+02, threshold=3.775e+02, percent-clipped=0.0
+2024-08-29 18:36:17,627 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=209904.0, ans=0.0
+2024-08-29 18:36:19,755 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=209904.0, ans=0.125
+2024-08-29 18:36:31,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=209957.33333333334, ans=0.125
+2024-08-29 18:37:02,154 INFO [train.py:1114] (3/4) Epoch 16, batch 2050, loss[loss=0.2076, simple_loss=0.2572, pruned_loss=0.0584, ctc_loss=0.1029, over 19677.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2771, pruned_loss=0.05377, ctc_loss=0.1016, over 3850064.81 frames. ], batch size: 47, lr: 9.28e-03, grad_scale: 32.0
+2024-08-29 18:37:03,511 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=210064.0, ans=0.0
+2024-08-29 18:37:51,944 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=210170.66666666666, ans=0.2
+2024-08-29 18:38:39,933 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=210224.0, ans=0.5
+2024-08-29 18:38:59,605 INFO [train.py:1114] (3/4) Epoch 16, batch 2100, loss[loss=0.1917, simple_loss=0.2623, pruned_loss=0.04469, ctc_loss=0.07934, over 19758.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2761, pruned_loss=0.05308, ctc_loss=0.1003, over 3858504.80 frames. ], batch size: 54, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:38:59,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=210330.66666666666, ans=0.125
+2024-08-29 18:39:22,234 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.792e+02 2.112e+02 2.675e+02 4.176e+02, threshold=4.223e+02, percent-clipped=3.0
+2024-08-29 18:39:41,639 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=210490.66666666666, ans=0.1
+2024-08-29 18:39:43,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=210490.66666666666, ans=0.2
+2024-08-29 18:39:46,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=210544.0, ans=0.125
+2024-08-29 18:39:55,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=210544.0, ans=0.125
+2024-08-29 18:39:57,958 INFO [train.py:1114] (3/4) Epoch 16, batch 2150, loss[loss=0.1844, simple_loss=0.2483, pruned_loss=0.04396, ctc_loss=0.08131, over 19857.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2751, pruned_loss=0.05249, ctc_loss=0.09901, over 3870464.23 frames. ], batch size: 52, lr: 9.27e-03, grad_scale: 32.0
+2024-08-29 18:40:05,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=210597.33333333334, ans=0.125
+2024-08-29 18:40:31,013 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=210650.66666666666, ans=0.95
+2024-08-29 18:40:37,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=210704.0, ans=0.025
+2024-08-29 18:40:56,046 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=210757.33333333334, ans=0.125
+2024-08-29 18:41:02,772 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.81 vs. limit=15.0
+2024-08-29 18:41:08,443 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.57 vs. limit=22.5
+2024-08-29 18:41:08,958 INFO [train.py:1114] (3/4) Epoch 16, batch 2200, loss[loss=0.1989, simple_loss=0.2649, pruned_loss=0.04712, ctc_loss=0.09674, over 19609.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2753, pruned_loss=0.05263, ctc_loss=0.09912, over 3868836.05 frames. ], batch size: 57, lr: 9.26e-03, grad_scale: 32.0
+2024-08-29 18:41:17,207 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=210864.0, ans=0.125
+2024-08-29 18:41:27,677 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.04 vs. limit=22.5
+2024-08-29 18:41:29,786 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.757e+02 2.042e+02 2.598e+02 4.148e+02, threshold=4.084e+02, percent-clipped=0.0
+2024-08-29 18:42:31,571 INFO [train.py:1114] (3/4) Epoch 16, batch 2250, loss[loss=0.222, simple_loss=0.292, pruned_loss=0.05515, ctc_loss=0.1045, over 19617.00 frames. ], tot_loss[loss=0.211, simple_loss=0.276, pruned_loss=0.05301, ctc_loss=0.09973, over 3868508.86 frames. ], batch size: 55, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:42:43,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=211184.0, ans=0.0
+2024-08-29 18:42:57,254 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=211237.33333333334, ans=0.125
+2024-08-29 18:42:58,400 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=211237.33333333334, ans=0.125
+2024-08-29 18:43:08,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=211290.66666666666, ans=0.0
+2024-08-29 18:44:24,271 INFO [train.py:1114] (3/4) Epoch 16, batch 2300, loss[loss=0.1672, simple_loss=0.2366, pruned_loss=0.0355, ctc_loss=0.06695, over 19500.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.275, pruned_loss=0.05307, ctc_loss=0.0999, over 3861386.43 frames. ], batch size: 49, lr: 9.25e-03, grad_scale: 32.0
+2024-08-29 18:44:29,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=211397.33333333334, ans=0.125
+2024-08-29 18:44:29,243 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=211397.33333333334, ans=0.1
+2024-08-29 18:44:56,364 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=211397.33333333334, ans=0.125
+2024-08-29 18:45:10,434 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.785e+02 2.121e+02 2.618e+02 4.213e+02, threshold=4.241e+02, percent-clipped=2.0
+2024-08-29 18:45:14,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=211504.0, ans=0.0
+2024-08-29 18:45:33,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=211557.33333333334, ans=0.0
+2024-08-29 18:45:34,693 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.04 vs. limit=15.0
+2024-08-29 18:45:40,096 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.13 vs. limit=15.0
+2024-08-29 18:45:59,066 INFO [train.py:1114] (3/4) Epoch 16, batch 2350, loss[loss=0.2199, simple_loss=0.2906, pruned_loss=0.05404, ctc_loss=0.1027, over 19654.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2754, pruned_loss=0.05334, ctc_loss=0.1004, over 3863092.72 frames. ], batch size: 63, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:45:59,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=211664.0, ans=0.125
+2024-08-29 18:46:02,687 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.20 vs. limit=15.0
+2024-08-29 18:47:00,334 INFO [train.py:1114] (3/4) Epoch 16, batch 2400, loss[loss=0.2102, simple_loss=0.2779, pruned_loss=0.05197, ctc_loss=0.09643, over 19287.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2777, pruned_loss=0.0544, ctc_loss=0.1022, over 3857264.20 frames. ], batch size: 71, lr: 9.24e-03, grad_scale: 32.0
+2024-08-29 18:47:05,952 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=211930.66666666666, ans=0.125
+2024-08-29 18:47:12,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=211984.0, ans=0.0
+2024-08-29 18:47:20,729 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.800e+02 2.132e+02 2.653e+02 4.129e+02, threshold=4.264e+02, percent-clipped=0.0
+2024-08-29 18:47:28,853 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.30 vs. limit=10.0
+2024-08-29 18:47:42,289 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=212090.66666666666, ans=0.0
+2024-08-29 18:47:54,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=212144.0, ans=0.2
+2024-08-29 18:47:56,898 INFO [train.py:1114] (3/4) Epoch 16, batch 2450, loss[loss=0.2648, simple_loss=0.3085, pruned_loss=0.08038, ctc_loss=0.1508, over 13699.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2814, pruned_loss=0.05703, ctc_loss=0.1075, over 3730318.33 frames. ], batch size: 140, lr: 9.23e-03, grad_scale: 32.0
+2024-08-29 18:48:14,650 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=212250.66666666666, ans=0.125
+2024-08-29 18:48:24,212 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.77 vs. limit=15.0
+2024-08-29 18:48:25,007 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=212304.0, ans=0.025
+2024-08-29 18:48:38,233 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=212357.33333333334, ans=0.125
+2024-08-29 18:55:35,478 INFO [train.py:1114] (3/4) Epoch 17, batch 0, loss[loss=0.2025, simple_loss=0.2612, pruned_loss=0.05238, ctc_loss=0.09771, over 19407.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2612, pruned_loss=0.05238, ctc_loss=0.09771, over 19407.00 frames. ], batch size: 48, lr: 8.95e-03, grad_scale: 32.0
+2024-08-29 18:55:35,479 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-29 18:55:50,207 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.5304, 3.1551, 2.2166, 2.8777], device='cuda:3')
+2024-08-29 18:56:00,898 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.0061, 4.3021, 3.7727, 4.0091], device='cuda:3')
+2024-08-29 18:56:01,327 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.8984, 2.0968, 3.4919, 3.5788], device='cuda:3')
+2024-08-29 18:56:04,690 INFO [train.py:1146] (3/4) Epoch 17, validation: loss=0.1843, simple_loss=0.2733, pruned_loss=0.03544, ctc_loss=0.06098, over 944034.00 frames.
+2024-08-29 18:56:04,691 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13732MB
+2024-08-29 18:56:54,573 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=212458.66666666666, ans=0.0
+2024-08-29 18:58:11,761 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-29 18:58:20,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=212512.0, ans=0.025
+2024-08-29 18:58:21,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=212512.0, ans=0.0
+2024-08-29 18:58:30,844 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 1.824e+02 2.030e+02 2.233e+02 3.073e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-29 18:58:34,755 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.78 vs. limit=22.5
+2024-08-29 18:58:40,252 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=212565.33333333334, ans=0.0
+2024-08-29 19:05:26,875 INFO [train.py:1114] (3/4) Epoch 17, batch 50, loss[loss=0.1911, simple_loss=0.2521, pruned_loss=0.04759, ctc_loss=0.08721, over 19722.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.279, pruned_loss=0.05533, ctc_loss=0.1046, over 844761.44 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:07:29,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=212672.0, ans=0.125
+2024-08-29 19:07:49,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=212672.0, ans=0.0
+2024-08-29 19:07:51,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=212725.33333333334, ans=0.1
+2024-08-29 19:08:18,619 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=212778.66666666666, ans=0.125
+2024-08-29 19:08:25,738 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.96 vs. limit=6.0
+2024-08-29 19:08:42,647 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.65 vs. limit=15.0
+2024-08-29 19:08:47,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=212885.33333333334, ans=0.125
+2024-08-29 19:08:52,525 INFO [train.py:1114] (3/4) Epoch 17, batch 100, loss[loss=0.188, simple_loss=0.2601, pruned_loss=0.04181, ctc_loss=0.08056, over 19722.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2794, pruned_loss=0.05451, ctc_loss=0.1033, over 1498730.66 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-29 19:09:03,373 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=212992.0, ans=0.125
+2024-08-29 19:09:04,460 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=212992.0, ans=0.0
+2024-08-29 19:09:17,209 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=213045.33333333334, ans=0.125
+2024-08-29 19:09:25,907 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.707e+02 1.910e+02 2.335e+02 3.363e+02, threshold=3.820e+02, percent-clipped=0.0
+2024-08-29 19:09:49,578 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.43 vs. limit=6.0
+2024-08-29 19:09:58,139 INFO [train.py:1114] (3/4) Epoch 17, batch 150, loss[loss=0.1753, simple_loss=0.2434, pruned_loss=0.03915, ctc_loss=0.07237, over 19720.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2766, pruned_loss=0.0528, ctc_loss=0.1002, over 2026289.94 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:10:52,078 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=213205.33333333334, ans=0.125
+2024-08-29 19:10:55,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=213205.33333333334, ans=0.1
+2024-08-29 19:12:23,559 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.63 vs. limit=15.0
+2024-08-29 19:16:16,392 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=213365.33333333334, ans=0.125
+2024-08-29 19:16:19,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-29 19:16:25,522 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-29 19:16:29,776 INFO [train.py:1114] (3/4) Epoch 17, batch 200, loss[loss=0.2501, simple_loss=0.2997, pruned_loss=0.07368, ctc_loss=0.1328, over 18235.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2752, pruned_loss=0.052, ctc_loss=0.09824, over 2433635.33 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-29 19:24:58,950 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=213472.0, ans=0.2
+2024-08-29 19:26:28,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=213525.33333333334, ans=0.025
+2024-08-29 19:27:11,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=213525.33333333334, ans=0.2
+2024-08-29 19:27:57,283 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.724e+02 1.931e+02 2.405e+02 4.691e+02, threshold=3.862e+02, percent-clipped=4.0
+2024-08-29 19:28:21,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=213685.33333333334, ans=0.125
+2024-08-29 19:28:32,759 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.73 vs. limit=22.5
+2024-08-29 19:28:38,488 INFO [train.py:1114] (3/4) Epoch 17, batch 250, loss[loss=0.2238, simple_loss=0.2894, pruned_loss=0.05786, ctc_loss=0.1064, over 19400.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.275, pruned_loss=0.05153, ctc_loss=0.09742, over 2755065.21 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:29:27,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=213845.33333333334, ans=0.05
+2024-08-29 19:30:02,636 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.18 vs. limit=15.0
+2024-08-29 19:30:03,449 INFO [train.py:1114] (3/4) Epoch 17, batch 300, loss[loss=0.247, simple_loss=0.3024, pruned_loss=0.07035, ctc_loss=0.127, over 19534.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2748, pruned_loss=0.0516, ctc_loss=0.09749, over 2999463.88 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-29 19:30:18,973 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=214058.66666666666, ans=0.09899494936611666
+2024-08-29 19:31:09,109 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=214112.0, ans=0.0
+2024-08-29 19:31:57,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=214112.0, ans=0.0
+2024-08-29 19:32:01,454 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=214112.0, ans=0.125
+2024-08-29 19:32:02,225 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.689e+02 1.972e+02 2.447e+02 4.331e+02, threshold=3.945e+02, percent-clipped=1.0
+2024-08-29 19:32:41,675 INFO [train.py:1114] (3/4) Epoch 17, batch 350, loss[loss=0.1916, simple_loss=0.2523, pruned_loss=0.04675, ctc_loss=0.09314, over 19779.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2759, pruned_loss=0.05244, ctc_loss=0.09878, over 3190999.16 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:33:00,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=214325.33333333334, ans=0.0
+2024-08-29 19:33:08,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=214325.33333333334, ans=0.5
+2024-08-29 19:33:57,691 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=214432.0, ans=0.025
+2024-08-29 19:34:01,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=214432.0, ans=0.0
+2024-08-29 19:34:04,510 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=214485.33333333334, ans=0.125
+2024-08-29 19:34:10,705 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.16 vs. limit=15.0
+2024-08-29 19:34:18,295 INFO [train.py:1114] (3/4) Epoch 17, batch 400, loss[loss=0.2065, simple_loss=0.2757, pruned_loss=0.05032, ctc_loss=0.0919, over 19500.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2754, pruned_loss=0.0522, ctc_loss=0.09822, over 3343531.34 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-29 19:34:39,660 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=214538.66666666666, ans=0.0
+2024-08-29 19:35:44,472 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=214645.33333333334, ans=0.2
+2024-08-29 19:36:30,683 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.299e+02 1.665e+02 1.964e+02 2.553e+02 4.238e+02, threshold=3.929e+02, percent-clipped=2.0
+2024-08-29 19:37:57,082 INFO [train.py:1114] (3/4) Epoch 17, batch 450, loss[loss=0.2184, simple_loss=0.2884, pruned_loss=0.05357, ctc_loss=0.1033, over 19614.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2763, pruned_loss=0.05259, ctc_loss=0.09908, over 3451227.36 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:38:14,230 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.10 vs. limit=22.5
+2024-08-29 19:40:16,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=215018.66666666666, ans=10.0
+2024-08-29 19:40:17,352 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.16 vs. limit=22.5
+2024-08-29 19:40:20,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=215018.66666666666, ans=0.0
+2024-08-29 19:40:26,571 INFO [train.py:1114] (3/4) Epoch 17, batch 500, loss[loss=0.2207, simple_loss=0.2909, pruned_loss=0.05551, ctc_loss=0.09866, over 19684.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2759, pruned_loss=0.05254, ctc_loss=0.09905, over 3546818.49 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-29 19:40:30,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=215072.0, ans=0.125
+2024-08-29 19:42:38,140 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.765e+02 1.983e+02 2.603e+02 4.687e+02, threshold=3.966e+02, percent-clipped=3.0
+2024-08-29 19:43:26,470 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.76 vs. limit=6.0
+2024-08-29 19:43:45,807 INFO [train.py:1114] (3/4) Epoch 17, batch 550, loss[loss=0.2507, simple_loss=0.304, pruned_loss=0.07231, ctc_loss=0.1317, over 19302.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2759, pruned_loss=0.05263, ctc_loss=0.09925, over 3608612.57 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-29 19:44:04,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=215338.66666666666, ans=0.125
+2024-08-29 19:44:50,186 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-29 19:44:58,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=215338.66666666666, ans=0.5
+2024-08-29 19:45:01,632 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=215392.0, ans=0.2
+2024-08-29 19:46:01,935 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.24 vs. limit=15.0
+2024-08-29 19:46:53,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=215552.0, ans=0.1
+2024-08-29 19:46:57,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=215552.0, ans=0.125
+2024-08-29 19:47:01,424 INFO [train.py:1114] (3/4) Epoch 17, batch 600, loss[loss=0.2518, simple_loss=0.2991, pruned_loss=0.07511, ctc_loss=0.1355, over 19313.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2758, pruned_loss=0.05258, ctc_loss=0.0992, over 3666177.61 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:47:07,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=215605.33333333334, ans=0.0
+2024-08-29 19:47:11,390 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.85 vs. limit=15.0
+2024-08-29 19:48:19,079 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.690e+02 1.951e+02 2.307e+02 4.172e+02, threshold=3.901e+02, percent-clipped=2.0
+2024-08-29 19:48:25,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=215765.33333333334, ans=0.125
+2024-08-29 19:48:32,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=215818.66666666666, ans=0.0
+2024-08-29 19:49:17,788 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.82 vs. limit=22.5
+2024-08-29 19:49:21,615 INFO [train.py:1114] (3/4) Epoch 17, batch 650, loss[loss=0.2145, simple_loss=0.2757, pruned_loss=0.05577, ctc_loss=0.1046, over 19764.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2752, pruned_loss=0.0524, ctc_loss=0.09876, over 3716226.04 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 64.0
+2024-08-29 19:49:38,572 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=215925.33333333334, ans=0.07
+2024-08-29 19:50:09,093 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.03 vs. limit=15.0
+2024-08-29 19:50:20,447 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.78 vs. limit=15.0
+2024-08-29 19:51:21,436 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=216085.33333333334, ans=0.125
+2024-08-29 19:51:32,005 INFO [train.py:1114] (3/4) Epoch 17, batch 700, loss[loss=0.1982, simple_loss=0.2659, pruned_loss=0.04686, ctc_loss=0.09199, over 19711.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2756, pruned_loss=0.05251, ctc_loss=0.0988, over 3749092.86 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:51:32,467 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.69 vs. limit=22.5
+2024-08-29 19:51:42,749 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.50 vs. limit=12.0
+2024-08-29 19:51:58,591 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=216245.33333333334, ans=0.05
+2024-08-29 19:52:43,598 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.757e+02 1.978e+02 2.439e+02 3.670e+02, threshold=3.956e+02, percent-clipped=0.0
+2024-08-29 19:53:44,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=216352.0, ans=0.125
+2024-08-29 19:53:46,878 INFO [train.py:1114] (3/4) Epoch 17, batch 750, loss[loss=0.2171, simple_loss=0.2886, pruned_loss=0.05215, ctc_loss=0.103, over 19502.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2753, pruned_loss=0.05234, ctc_loss=0.09856, over 3775151.58 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 64.0
+2024-08-29 19:54:34,363 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.90 vs. limit=15.0
+2024-08-29 19:55:07,963 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.13 vs. limit=15.0
+2024-08-29 19:55:11,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216405.33333333334, ans=0.1
+2024-08-29 19:55:48,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=216458.66666666666, ans=0.125
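Each log shard above interleaves per-module ScheduledFloat and Whitening diagnostics with `[train.py:1114]` progress lines. A minimal sketch, assuming only the line format visible in these logs, of a parser that recovers epoch, batch, running loss, and learning rate from such a shard; the regex and helper name are illustrative, not part of icefall.

```python
import re

# Regex written against the "Epoch N, batch M, ... tot_loss[...] ..." format
# visible in the log lines above; other icefall versions may differ.
BATCH_RE = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), .*?"
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), .*?\], "
    r"batch size: (?P<bs>\d+), lr: (?P<lr>[\d.e+-]+)"
)

def iter_batch_stats(path):
    """Yield (epoch, batch, running_loss, lr) from one training log shard."""
    with open(path) as f:
        for line in f:
            m = BATCH_RE.search(line)
            if m:
                yield (int(m["epoch"]), int(m["batch"]),
                       float(m["tot_loss"]), float(m["lr"]))

# Usage (path illustrative):
# for epoch, batch, tot, lr in iter_batch_stats("log-train-2024-08-29"):
#     print(epoch, batch, tot, lr)
```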
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-0
new file mode 100644
index 0000000000000000000000000000000000000000..b0b51f050bee6b14e38de69bd49ca6b14ba4a513
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-0
@@ -0,0 +1,564 @@
+2024-08-30 12:44:46,451 INFO [train.py:1182] (0/4) Training started
+2024-08-30 12:44:46,908 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-30 12:44:47,090 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 17, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
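The configuration dump above encodes per-encoder-stack settings as comma-separated strings ('num_encoder_layers': '2,2,3,4,3,2', 'encoder_dim': '192,256,384,512,384,256', and so on), one entry per Zipformer stack. A minimal sketch of expanding them into per-stack integer lists; `split_int_csv` is a hypothetical helper, not icefall's own argument parsing.

```python
# Expand the comma-separated per-stack settings from the config dump above
# into integer lists, one entry per Zipformer encoder stack.
def split_int_csv(value: str) -> list[int]:
    return [int(tok) for tok in value.split(",")]

cfg = {
    "num_encoder_layers": "2,2,3,4,3,2",
    "downsampling_factor": "1,2,4,8,4,2",
    "encoder_dim": "192,256,384,512,384,256",
    "num_heads": "4,4,4,8,4,4",
}
per_stack = {k: split_int_csv(v) for k, v in cfg.items()}
assert len({len(v) for v in per_stack.values()}) == 1  # six stacks everywhere
print(per_stack["encoder_dim"])  # [192, 256, 384, 512, 384, 256]
```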
+2024-08-30 12:44:47,091 INFO [train.py:1212] (0/4) About to create model
+2024-08-30 12:44:48,184 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-30 12:44:48,766 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-16.pt
+2024-08-30 12:45:01,679 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-30 12:45:02,095 INFO [train.py:1231] (0/4) Using DDP
+2024-08-30 12:45:06,256 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-30 12:45:06,460 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-30 12:45:06,460 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-30 12:45:06,666 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-30 12:45:08,257 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-30 12:45:08,264 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-30 12:45:08,444 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-30 12:45:08,576 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-30 12:45:08,904 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-30 12:45:08,904 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 12:51:17,012 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-30 12:51:18,479 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-30 12:53:02,305 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-30 12:53:03,308 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.25 vs. limit=5.0
+2024-08-30 12:53:03,666 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 12:54:12,750 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 12:54:14,354 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 12:54:14,375 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-30 12:55:06,918 INFO [train.py:1114] (0/4) Epoch 17, batch 0, loss[loss=0.2431, simple_loss=0.286, pruned_loss=0.07327, ctc_loss=0.1339, over 19800.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.286, pruned_loss=0.07327, ctc_loss=0.1339, over 19800.00 frames. ], batch size: 49, lr: 8.95e-03, grad_scale: 32.0
+2024-08-30 12:55:06,919 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-30 12:55:26,654 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.7976, 2.9573, 4.0454, 4.1840], device='cuda:0')
+2024-08-30 12:55:27,223 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.7737, 2.2076, 1.6605, 2.0207, 2.2583, 2.3793, 2.3037, 1.7608], device='cuda:0')
+2024-08-30 12:55:31,713 INFO [train.py:1146] (0/4) Epoch 17, validation: loss=0.185, simple_loss=0.2737, pruned_loss=0.03584, ctc_loss=0.06176, over 944034.00 frames.
+2024-08-30 12:55:31,713 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 12:55:32,459 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.47 vs. limit=22.5
+2024-08-30 12:56:12,782 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.93 vs. limit=15.0
+2024-08-30 13:01:17,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=212512.0, ans=0.0
+2024-08-30 13:06:19,314 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.860e+02 2.030e+02 2.233e+02 2.993e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-30 13:06:53,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=212565.33333333334, ans=0.125
+2024-08-30 13:07:11,022 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=212565.33333333334, ans=0.125
+2024-08-30 13:09:56,386 INFO [train.py:1114] (0/4) Epoch 17, batch 50, loss[loss=0.1764, simple_loss=0.244, pruned_loss=0.03928, ctc_loss=0.0757, over 19711.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2783, pruned_loss=0.0537, ctc_loss=0.1018, over 844772.04 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:09:56,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=212672.0, ans=0.125
+2024-08-30 13:16:09,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=212725.33333333334, ans=0.0
+2024-08-30 13:16:44,134 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:18:37,724 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.25 vs. limit=15.0
+2024-08-30 13:18:56,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=212885.33333333334, ans=0.125
+2024-08-30 13:19:01,169 INFO [train.py:1114] (0/4) Epoch 17, batch 100, loss[loss=0.1973, simple_loss=0.2612, pruned_loss=0.04844, ctc_loss=0.09124, over 19727.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2798, pruned_loss=0.05447, ctc_loss=0.103, over 1498273.11 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:19:24,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=212992.0, ans=0.125
+2024-08-30 13:19:25,371 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.69 vs. limit=6.0
+2024-08-30 13:19:26,196 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.68 vs. limit=22.5
+2024-08-30 13:20:07,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=212992.0, ans=0.125
+2024-08-30 13:23:11,083 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.706e+02 1.953e+02 2.287e+02 3.713e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-30 13:24:10,825 INFO [train.py:1114] (0/4) Epoch 17, batch 150, loss[loss=0.2364, simple_loss=0.2768, pruned_loss=0.07104, ctc_loss=0.1349, over 19685.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2779, pruned_loss=0.0541, ctc_loss=0.1023, over 2027311.53 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:24:43,783 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-40000.pt
+2024-08-30 13:25:06,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=213312.0, ans=0.125
+2024-08-30 13:27:19,148 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.01 vs. limit=15.0
+2024-08-30 13:27:35,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=213365.33333333334, ans=0.0
+2024-08-30 13:27:36,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213365.33333333334, ans=0.1
+2024-08-30 13:27:51,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=213365.33333333334, ans=0.1
+2024-08-30 13:27:55,282 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-30 13:28:02,238 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:28:05,732 INFO [train.py:1114] (0/4) Epoch 17, batch 200, loss[loss=0.2567, simple_loss=0.3049, pruned_loss=0.07503, ctc_loss=0.146, over 18082.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2767, pruned_loss=0.05355, ctc_loss=0.1014, over 2434813.02 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:28:12,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=213472.0, ans=0.2
+2024-08-30 13:28:15,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=213472.0, ans=0.125
+2024-08-30 13:28:17,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=213525.33333333334, ans=0.1
+2024-08-30 13:28:19,773 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:28:24,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=213525.33333333334, ans=0.2
+2024-08-30 13:28:28,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=213578.66666666666, ans=0.0
+2024-08-30 13:28:40,335 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.384e+02 1.731e+02 1.992e+02 2.666e+02 4.093e+02, threshold=3.983e+02, percent-clipped=1.0
+2024-08-30 13:28:40,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=213632.0, ans=0.125
+2024-08-30 13:28:42,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213632.0, ans=0.1
+2024-08-30 13:29:02,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=213685.33333333334, ans=0.0
+2024-08-30 13:29:05,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=213685.33333333334, ans=0.125
+2024-08-30 13:29:06,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=213738.66666666666, ans=0.1
+2024-08-30 13:29:07,531 INFO [train.py:1114] (0/4) Epoch 17, batch 250, loss[loss=0.2387, simple_loss=0.2913, pruned_loss=0.06706, ctc_loss=0.13, over 19421.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2758, pruned_loss=0.0531, ctc_loss=0.1005, over 2755282.98 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:29:07,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=213738.66666666666, ans=0.0
+2024-08-30 13:29:13,931 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=213738.66666666666, ans=0.2
+2024-08-30 13:29:13,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=213738.66666666666, ans=0.0
+2024-08-30 13:29:24,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=213792.0, ans=0.95
+2024-08-30 13:29:39,228 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.03 vs. limit=15.0
+2024-08-30 13:29:53,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=213898.66666666666, ans=0.125
+2024-08-30 13:29:57,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=213898.66666666666, ans=0.125
+2024-08-30 13:30:13,960 INFO [train.py:1114] (0/4) Epoch 17, batch 300, loss[loss=0.2388, simple_loss=0.2943, pruned_loss=0.06636, ctc_loss=0.1264, over 19530.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2746, pruned_loss=0.05232, ctc_loss=0.09883, over 3000773.43 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:30:19,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=214005.33333333334, ans=0.125
+2024-08-30 13:30:28,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=214058.66666666666, ans=0.125
+2024-08-30 13:30:40,783 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.67 vs. limit=15.0
+2024-08-30 13:30:51,813 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.663e+02 1.872e+02 2.298e+02 3.693e+02, threshold=3.744e+02, percent-clipped=0.0
+2024-08-30 13:31:12,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=214218.66666666666, ans=0.125
+2024-08-30 13:31:25,596 INFO [train.py:1114] (0/4) Epoch 17, batch 350, loss[loss=0.1931, simple_loss=0.2561, pruned_loss=0.04653, ctc_loss=0.09266, over 19745.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2755, pruned_loss=0.05258, ctc_loss=0.09958, over 3190658.25 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:31:27,607 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.66 vs. limit=6.0
+2024-08-30 13:31:37,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-30 13:31:39,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-30 13:31:41,459 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.79 vs. limit=10.0
+2024-08-30 13:32:07,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214432.0, ans=0.1
+2024-08-30 13:32:08,981 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.90 vs. limit=15.0
+2024-08-30 13:32:10,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=214432.0, ans=0.125
+2024-08-30 13:32:20,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=214485.33333333334, ans=0.0
+2024-08-30 13:32:24,589 INFO [train.py:1114] (0/4) Epoch 17, batch 400, loss[loss=0.1967, simple_loss=0.2789, pruned_loss=0.04147, ctc_loss=0.07919, over 19506.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2751, pruned_loss=0.05215, ctc_loss=0.09842, over 3343295.33 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:32:29,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=214538.66666666666, ans=0.125
+2024-08-30 13:32:50,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=214645.33333333334, ans=0.0
+2024-08-30 13:33:00,602 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.11 vs. limit=6.0
+2024-08-30 13:33:01,006 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.640e+02 1.901e+02 2.325e+02 4.074e+02, threshold=3.801e+02, percent-clipped=1.0
+2024-08-30 13:33:21,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=214752.0, ans=0.015
+2024-08-30 13:33:24,177 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=214752.0, ans=0.025
+2024-08-30 13:33:26,243 INFO [train.py:1114] (0/4) Epoch 17, batch 450, loss[loss=0.199, simple_loss=0.2789, pruned_loss=0.0429, ctc_loss=0.08315, over 19619.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2749, pruned_loss=0.05187, ctc_loss=0.09762, over 3449981.31 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:33:58,471 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.85 vs. limit=15.0
+2024-08-30 13:34:03,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=214858.66666666666, ans=0.0
+2024-08-30 13:38:32,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=214912.0, ans=0.2
+2024-08-30 13:38:36,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.20 vs. limit=15.0
+2024-08-30 13:43:52,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=215018.66666666666, ans=0.0
+2024-08-30 13:43:56,048 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=215018.66666666666, ans=0.125
+2024-08-30 13:44:05,809 INFO [train.py:1114] (0/4) Epoch 17, batch 500, loss[loss=0.2189, simple_loss=0.2915, pruned_loss=0.05321, ctc_loss=0.09982, over 19663.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2742, pruned_loss=0.05137, ctc_loss=0.09665, over 3545189.32 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:44:38,546 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.78 vs. limit=15.0
+2024-08-30 13:44:48,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=215125.33333333334, ans=0.125
+2024-08-30 13:44:48,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=215125.33333333334, ans=0.125
+2024-08-30 13:44:48,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=215125.33333333334, ans=0.0
+2024-08-30 13:45:06,446 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.780e+02 2.026e+02 2.589e+02 4.105e+02, threshold=4.052e+02, percent-clipped=2.0
+2024-08-30 13:45:08,130 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.78 vs. limit=15.0
+2024-08-30 13:45:31,469 INFO [train.py:1114] (0/4) Epoch 17, batch 550, loss[loss=0.2128, simple_loss=0.2858, pruned_loss=0.05059, ctc_loss=0.09647, over 19206.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2746, pruned_loss=0.05167, ctc_loss=0.09726, over 3607486.88 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-30 13:45:31,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=215338.66666666666, ans=0.07
+2024-08-30 13:45:59,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=215445.33333333334, ans=0.0
+2024-08-30 13:46:20,236 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=215552.0, ans=0.0
+2024-08-30 13:47:16,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=215552.0, ans=0.125
+2024-08-30 13:47:20,214 INFO [train.py:1114] (0/4) Epoch 17, batch 600, loss[loss=0.2332, simple_loss=0.2927, pruned_loss=0.06459, ctc_loss=0.1111, over 19446.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2745, pruned_loss=0.05148, ctc_loss=0.09687, over 3664553.91 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-30 13:47:22,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=215605.33333333334, ans=0.125
+2024-08-30 13:47:53,791 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.647e+02 1.940e+02 2.383e+02 4.124e+02, threshold=3.879e+02, percent-clipped=1.0
+2024-08-30 13:48:27,126 INFO [train.py:1114] (0/4) Epoch 17, batch 650, loss[loss=0.2001, simple_loss=0.2664, pruned_loss=0.04901, ctc_loss=0.08935, over 19762.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2738, pruned_loss=0.05121, ctc_loss=0.09637, over 3715413.88 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 32.0
+2024-08-30 13:51:32,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=215925.33333333334, ans=0.125
+2024-08-30 13:52:18,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=216032.0, ans=0.025
+2024-08-30 13:52:36,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=216032.0, ans=0.125
+2024-08-30 13:53:17,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=216085.33333333334, ans=0.125
+2024-08-30 13:54:00,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=216085.33333333334, ans=0.125
+2024-08-30 14:00:36,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216085.33333333334, ans=0.1
+2024-08-30 14:06:19,750 INFO [train.py:1114] (0/4) Epoch 17, batch 700, loss[loss=0.2137, simple_loss=0.2823, pruned_loss=0.05325, ctc_loss=0.09654, over 19728.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2748, pruned_loss=0.0515, ctc_loss=0.09689, over 3747076.79 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:06:25,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=216138.66666666666, ans=0.125
+2024-08-30 14:08:55,349 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.82 vs. limit=15.0
+2024-08-30 14:09:33,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=216245.33333333334, ans=0.0
+2024-08-30 14:09:58,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=216245.33333333334, ans=0.0
+2024-08-30 14:12:13,786 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 1.667e+02 2.137e+02 2.601e+02 4.284e+02, threshold=4.274e+02, percent-clipped=4.0
+2024-08-30 14:17:27,437 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.16 vs. limit=15.0
+2024-08-30 14:17:28,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=216352.0, ans=0.0
+2024-08-30 14:17:35,091 INFO [train.py:1114] (0/4) Epoch 17, batch 750, loss[loss=0.2182, simple_loss=0.2887, pruned_loss=0.05451, ctc_loss=0.09658, over 19495.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2745, pruned_loss=0.05123, ctc_loss=0.09622, over 3773742.85 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:17:42,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=216405.33333333334, ans=0.125
+2024-08-30 14:17:45,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.41 vs. limit=15.0
+2024-08-30 14:17:58,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=216458.66666666666, ans=0.09899494936611666
+2024-08-30 14:19:07,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=216512.0, ans=0.125
+2024-08-30 14:19:33,904 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.81 vs. limit=10.0
+2024-08-30 14:19:43,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=216565.33333333334, ans=0.1
+2024-08-30 14:19:44,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=216565.33333333334, ans=0.0
+2024-08-30 14:19:50,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=216618.66666666666, ans=0.0
+2024-08-30 14:20:37,986 INFO [train.py:1114] (0/4) Epoch 17, batch 800, loss[loss=0.1776, simple_loss=0.2456, pruned_loss=0.04007, ctc_loss=0.07368, over 19403.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2745, pruned_loss=0.05156, ctc_loss=0.09689, over 3795895.10 frames. ], batch size: 48, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:27:07,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=216725.33333333334, ans=0.125
+2024-08-30 14:29:23,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=216725.33333333334, ans=0.125
+2024-08-30 14:29:49,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=216725.33333333334, ans=0.125
+2024-08-30 14:30:46,455 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:31:18,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=216778.66666666666, ans=0.125
+2024-08-30 14:31:25,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=216778.66666666666, ans=0.2
+2024-08-30 14:31:32,323 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.715e+02 2.071e+02 2.537e+02 3.967e+02, threshold=4.143e+02, percent-clipped=0.0
+2024-08-30 14:32:27,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216885.33333333334, ans=0.1
+2024-08-30 14:32:34,223 INFO [train.py:1114] (0/4) Epoch 17, batch 850, loss[loss=0.2197, simple_loss=0.2925, pruned_loss=0.05341, ctc_loss=0.1002, over 19634.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2747, pruned_loss=0.05174, ctc_loss=0.09728, over 3815942.73 frames. ], batch size: 59, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:32:39,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=216938.66666666666, ans=0.0
+2024-08-30 14:32:53,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216992.0, ans=0.1
+2024-08-30 14:33:00,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=216992.0, ans=0.0
+2024-08-30 14:33:02,482 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.86 vs. limit=15.0
+2024-08-30 14:33:07,719 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=217045.33333333334, ans=0.05
+2024-08-30 14:33:12,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=217045.33333333334, ans=0.0
+2024-08-30 14:34:07,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.93 vs. limit=15.0
+2024-08-30 14:34:26,357 INFO [train.py:1114] (0/4) Epoch 17, batch 900, loss[loss=0.1797, simple_loss=0.2473, pruned_loss=0.04139, ctc_loss=0.07326, over 19407.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2753, pruned_loss=0.05233, ctc_loss=0.09825, over 3820019.15 frames. ], batch size: 48, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:35:33,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=217205.33333333334, ans=0.125
+2024-08-30 14:36:59,134 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.623e+02 1.810e+02 2.233e+02 4.039e+02, threshold=3.621e+02, percent-clipped=0.0
+2024-08-30 14:37:24,543 INFO [train.py:1114] (0/4) Epoch 17, batch 950, loss[loss=0.2161, simple_loss=0.2748, pruned_loss=0.05674, ctc_loss=0.1099, over 19522.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2753, pruned_loss=0.05209, ctc_loss=0.09803, over 3821118.76 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:38:29,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=217525.33333333334, ans=0.125
+2024-08-30 14:38:53,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=217632.0, ans=10.0
+2024-08-30 14:39:16,466 INFO [train.py:1114] (0/4) Epoch 17, batch 1000, loss[loss=0.2146, simple_loss=0.2739, pruned_loss=0.05678, ctc_loss=0.1046, over 19843.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2759, pruned_loss=0.05226, ctc_loss=0.09851, over 3816399.91 frames. ], batch size: 52, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:39:23,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=217738.66666666666, ans=0.2
+2024-08-30 14:39:25,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=217738.66666666666, ans=0.125
+2024-08-30 14:39:30,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=217792.0, ans=0.1
+2024-08-30 14:39:33,714 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=217792.0, ans=0.125
+2024-08-30 14:39:52,706 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.648e+02 1.905e+02 2.181e+02 3.196e+02, threshold=3.810e+02, percent-clipped=0.0
+2024-08-30 14:39:54,203 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:39:55,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=217898.66666666666, ans=0.125
+2024-08-30 14:39:59,375 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.01 vs. limit=22.5
+2024-08-30 14:40:08,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=217952.0, ans=0.1
+2024-08-30 14:40:20,762 INFO [train.py:1114] (0/4) Epoch 17, batch 1050, loss[loss=0.2268, simple_loss=0.292, pruned_loss=0.0594, ctc_loss=0.1072, over 19845.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2757, pruned_loss=0.05246, ctc_loss=0.09866, over 3821538.54 frames. ], batch size: 57, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:40:27,661 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=218005.33333333334, ans=0.025
+2024-08-30 14:40:32,489 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=218058.66666666666, ans=0.07
+2024-08-30 14:40:43,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=218058.66666666666, ans=0.0
+2024-08-30 14:40:53,295 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:40:59,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=218165.33333333334, ans=0.125
+2024-08-30 14:41:17,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=218218.66666666666, ans=0.2
+2024-08-30 14:41:24,159 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.69 vs. limit=15.0
+2024-08-30 14:41:24,730 INFO [train.py:1114] (0/4) Epoch 17, batch 1100, loss[loss=0.2019, simple_loss=0.2731, pruned_loss=0.04692, ctc_loss=0.09225, over 19593.00 frames. ], tot_loss[loss=0.209, simple_loss=0.275, pruned_loss=0.05195, ctc_loss=0.09782, over 3829762.59 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:41:26,196 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:41:48,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=218378.66666666666, ans=0.0
+2024-08-30 14:41:49,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=218378.66666666666, ans=0.0
+2024-08-30 14:42:23,467 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.634e+02 1.909e+02 2.238e+02 3.833e+02, threshold=3.817e+02, percent-clipped=1.0
+2024-08-30 14:42:24,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=218432.0, ans=0.0
+2024-08-30 14:43:03,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.41 vs. limit=12.0
+2024-08-30 14:43:15,271 INFO [train.py:1114] (0/4) Epoch 17, batch 1150, loss[loss=0.2131, simple_loss=0.2753, pruned_loss=0.05501, ctc_loss=0.1022, over 19563.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2757, pruned_loss=0.05255, ctc_loss=0.09904, over 3829113.45 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:43:28,298 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.97 vs. limit=22.5
+2024-08-30 14:43:45,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=218645.33333333334, ans=0.95
+2024-08-30 14:43:48,005 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:44:08,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=218752.0, ans=0.0
+2024-08-30 14:44:20,107 INFO [train.py:1114] (0/4) Epoch 17, batch 1200, loss[loss=0.2089, simple_loss=0.2819, pruned_loss=0.05022, ctc_loss=0.08873, over 19840.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2762, pruned_loss=0.05271, ctc_loss=0.09928, over 3824969.65 frames. ], batch size: 57, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:44:34,578 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=218858.66666666666, ans=0.125
+2024-08-30 14:46:08,768 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 1.734e+02 1.937e+02 2.235e+02 3.279e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-30 14:46:09,061 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=218965.33333333334, ans=0.125
+2024-08-30 14:46:21,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=219018.66666666666, ans=0.1
+2024-08-30 14:46:22,997 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=219018.66666666666, ans=0.125
+2024-08-30 14:46:25,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=219018.66666666666, ans=0.0
+2024-08-30 14:46:31,314 INFO [train.py:1114] (0/4) Epoch 17, batch 1250, loss[loss=0.2317, simple_loss=0.2958, pruned_loss=0.06126, ctc_loss=0.1126, over 19509.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2763, pruned_loss=0.05251, ctc_loss=0.09884, over 3843027.80 frames. ], batch size: 61, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:46:34,236 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.55 vs. limit=10.0
+2024-08-30 14:46:56,794 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.24 vs. limit=6.0
+2024-08-30 14:46:58,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=219178.66666666666, ans=0.125
+2024-08-30 14:48:27,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=219232.0, ans=0.0
+2024-08-30 14:48:46,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=219285.33333333334, ans=0.025
+2024-08-30 14:48:52,365 INFO [train.py:1114] (0/4) Epoch 17, batch 1300, loss[loss=0.2228, simple_loss=0.2937, pruned_loss=0.05481, ctc_loss=0.1058, over 18867.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2749, pruned_loss=0.05177, ctc_loss=0.09724, over 3845692.99 frames. ], batch size: 76, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:48:58,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=219338.66666666666, ans=0.125
+2024-08-30 14:49:07,922 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-30 14:49:23,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=219445.33333333334, ans=0.125
+2024-08-30 14:49:29,487 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.750e+02 2.054e+02 2.564e+02 3.826e+02, threshold=4.108e+02, percent-clipped=0.0
+2024-08-30 14:49:30,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=219498.66666666666, ans=0.125
+2024-08-30 14:49:41,826 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:49:42,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=219552.0, ans=0.0
+2024-08-30 14:49:45,175 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=219552.0, ans=0.125
+2024-08-30 14:50:08,947 INFO [train.py:1114] (0/4) Epoch 17, batch 1350, loss[loss=0.195, simple_loss=0.2619, pruned_loss=0.04701, ctc_loss=0.08509, over 19764.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2745, pruned_loss=0.0518, ctc_loss=0.0972, over 3857308.50 frames. ], batch size: 54, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:50:13,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=219605.33333333334, ans=0.125
+2024-08-30 14:50:34,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=219712.0, ans=0.0
+2024-08-30 14:50:35,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=219712.0, ans=0.125
+2024-08-30 14:50:58,093 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.90 vs. limit=6.0
+2024-08-30 14:51:09,340 INFO [train.py:1114] (0/4) Epoch 17, batch 1400, loss[loss=0.1849, simple_loss=0.2507, pruned_loss=0.04286, ctc_loss=0.08332, over 19668.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2748, pruned_loss=0.05217, ctc_loss=0.09791, over 3864290.68 frames. ], batch size: 46, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:51:28,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=219872.0, ans=0.125
+2024-08-30 14:51:58,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=219978.66666666666, ans=0.2
+2024-08-30 14:52:01,611 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.697e+02 1.910e+02 2.399e+02 4.058e+02, threshold=3.819e+02, percent-clipped=0.0
+2024-08-30 14:52:02,307 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=10.08 vs. limit=15.0
+2024-08-30 14:52:26,366 INFO [train.py:1114] (0/4) Epoch 17, batch 1450, loss[loss=0.228, simple_loss=0.2942, pruned_loss=0.05819, ctc_loss=0.1135, over 19666.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2753, pruned_loss=0.05211, ctc_loss=0.09817, over 3863106.64 frames. ], batch size: 63, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:52:26,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=220138.66666666666, ans=0.125
+2024-08-30 14:52:36,231 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.54 vs. limit=10.0
+2024-08-30 14:52:39,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=220138.66666666666, ans=0.07
+2024-08-30 14:52:43,097 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=220138.66666666666, ans=0.2
+2024-08-30 14:53:09,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=220245.33333333334, ans=0.025
+2024-08-30 14:53:19,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.19 vs. limit=15.0
+2024-08-30 14:53:48,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=220298.66666666666, ans=0.05
+2024-08-30 14:53:55,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=220298.66666666666, ans=0.0
+2024-08-30 14:54:01,887 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.44 vs. limit=22.5
+2024-08-30 14:54:12,187 INFO [train.py:1114] (0/4) Epoch 17, batch 1500, loss[loss=0.2059, simple_loss=0.2764, pruned_loss=0.04811, ctc_loss=0.09788, over 19599.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2754, pruned_loss=0.05208, ctc_loss=0.09829, over 3863722.97 frames. ], batch size: 57, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:54:17,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=220405.33333333334, ans=0.125
+2024-08-30 14:54:36,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=220458.66666666666, ans=0.125
+2024-08-30 14:54:54,678 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.719e+02 1.906e+02 2.293e+02 3.704e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-30 14:54:59,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=220565.33333333334, ans=0.0
+2024-08-30 14:54:59,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=220565.33333333334, ans=0.5
+2024-08-30 14:55:07,390 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.46 vs. limit=12.0
+2024-08-30 14:55:17,132 INFO [train.py:1114] (0/4) Epoch 17, batch 1550, loss[loss=0.2258, simple_loss=0.2889, pruned_loss=0.05916, ctc_loss=0.1111, over 19627.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2755, pruned_loss=0.0523, ctc_loss=0.09869, over 3848490.77 frames. ], batch size: 60, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:55:31,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=220672.0, ans=0.125
+2024-08-30 14:55:33,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=220672.0, ans=0.125
+2024-08-30 14:55:36,197 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:55:58,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=220778.66666666666, ans=0.125
+2024-08-30 14:56:01,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=220778.66666666666, ans=0.125
+2024-08-30 14:56:18,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=220885.33333333334, ans=0.125
+2024-08-30 14:56:19,992 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.25 vs. limit=6.0
+2024-08-30 14:56:25,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=220885.33333333334, ans=0.125
+2024-08-30 14:56:27,383 INFO [train.py:1114] (0/4) Epoch 17, batch 1600, loss[loss=0.1895, simple_loss=0.2625, pruned_loss=0.04284, ctc_loss=0.07719, over 19833.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2752, pruned_loss=0.05196, ctc_loss=0.09797, over 3837459.82 frames. ], batch size: 57, lr: 8.78e-03, grad_scale: 32.0
+2024-08-30 14:56:27,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=220938.66666666666, ans=0.025
+2024-08-30 14:56:31,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=220938.66666666666, ans=0.0
+2024-08-30 14:56:35,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=220938.66666666666, ans=0.0
+2024-08-30 14:57:30,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=220992.0, ans=0.125
+2024-08-30 14:57:38,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=221045.33333333334, ans=0.0
+2024-08-30 15:00:47,802 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.738e+02 2.160e+02 2.635e+02 3.870e+02, threshold=4.320e+02, percent-clipped=2.0
+2024-08-30 15:00:53,883 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=221098.66666666666, ans=0.5
+2024-08-30 15:02:45,997 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=221152.0, ans=0.0
+2024-08-30 15:02:56,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=221152.0, ans=0.125
+2024-08-30 15:03:53,983 INFO [train.py:1114] (0/4) Epoch 17, batch 1650, loss[loss=0.2153, simple_loss=0.2893, pruned_loss=0.05021, ctc_loss=0.1022, over 19655.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2747, pruned_loss=0.05179, ctc_loss=0.09778, over 3833286.80 frames. ], batch size: 59, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:04:00,666 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=221205.33333333334, ans=0.0
+2024-08-30 15:05:53,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=221312.0, ans=0.0
+2024-08-30 15:07:30,024 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.70 vs. limit=22.5
+2024-08-30 15:07:35,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=221365.33333333334, ans=0.125
+2024-08-30 15:07:51,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=221418.66666666666, ans=0.125
+2024-08-30 15:07:58,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=221418.66666666666, ans=0.125
+2024-08-30 15:08:00,827 INFO [train.py:1114] (0/4) Epoch 17, batch 1700, loss[loss=0.184, simple_loss=0.2449, pruned_loss=0.04389, ctc_loss=0.08818, over 19662.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2746, pruned_loss=0.05177, ctc_loss=0.09765, over 3846857.71 frames. ], batch size: 46, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:08:01,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=221472.0, ans=0.0
+2024-08-30 15:08:11,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=221525.33333333334, ans=10.0
+2024-08-30 15:08:23,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=221578.66666666666, ans=0.125
+2024-08-30 15:08:35,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=221632.0, ans=0.125
+2024-08-30 15:08:36,788 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.717e+02 1.998e+02 2.422e+02 4.059e+02, threshold=3.996e+02, percent-clipped=0.0
+2024-08-30 15:09:34,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=221632.0, ans=0.1
+2024-08-30 15:09:36,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=221632.0, ans=0.2
+2024-08-30 15:09:36,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=221632.0, ans=0.0
+2024-08-30 15:09:49,515 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.77 vs. limit=22.5
+2024-08-30 15:09:50,072 INFO [train.py:1114] (0/4) Epoch 17, batch 1750, loss[loss=0.2138, simple_loss=0.2661, pruned_loss=0.05908, ctc_loss=0.1084, over 19628.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2741, pruned_loss=0.05151, ctc_loss=0.09706, over 3850512.50 frames. ], batch size: 45, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:10:02,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=221792.0, ans=0.025
+2024-08-30 15:10:03,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=221792.0, ans=0.0
+2024-08-30 15:10:12,808 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.25 vs. limit=12.0
+2024-08-30 15:10:14,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.85 vs. limit=15.0
+2024-08-30 15:10:35,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=221952.0, ans=0.0
+2024-08-30 15:10:37,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=221952.0, ans=0.125
+2024-08-30 15:10:46,086 INFO [train.py:1114] (0/4) Epoch 17, batch 1800, loss[loss=0.1971, simple_loss=0.2673, pruned_loss=0.04599, ctc_loss=0.08755, over 19609.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2745, pruned_loss=0.05174, ctc_loss=0.09745, over 3852141.97 frames. ], batch size: 55, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:10:47,311 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=222005.33333333334, ans=0.1
+2024-08-30 15:10:47,691 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.70 vs. limit=15.0
+2024-08-30 15:11:23,234 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.773e+02 2.029e+02 2.607e+02 4.351e+02, threshold=4.057e+02, percent-clipped=1.0
+2024-08-30 15:11:32,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=222218.66666666666, ans=0.125
+2024-08-30 15:11:43,561 INFO [train.py:1114] (0/4) Epoch 17, batch 1850, loss[loss=0.2194, simple_loss=0.2945, pruned_loss=0.05209, ctc_loss=0.1002, over 19585.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.274, pruned_loss=0.05141, ctc_loss=0.09672, over 3855966.23 frames. ], batch size: 57, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:12:04,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=222378.66666666666, ans=0.125
+2024-08-30 15:12:31,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=222485.33333333334, ans=22.5
+2024-08-30 15:12:37,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=222485.33333333334, ans=0.125
+2024-08-30 15:12:40,701 INFO [train.py:1114] (0/4) Epoch 17, batch 1900, loss[loss=0.1972, simple_loss=0.2785, pruned_loss=0.04168, ctc_loss=0.08127, over 19659.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2746, pruned_loss=0.05143, ctc_loss=0.09666, over 3861031.91 frames. ], batch size: 59, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:12:52,467 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.50 vs. limit=12.0
+2024-08-30 15:13:11,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=222645.33333333334, ans=0.125
+2024-08-30 15:13:18,236 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.682e+02 1.950e+02 2.328e+02 4.923e+02, threshold=3.901e+02, percent-clipped=3.0
+2024-08-30 15:13:30,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.27 vs. limit=15.0
+2024-08-30 15:13:38,414 INFO [train.py:1114] (0/4) Epoch 17, batch 1950, loss[loss=0.2147, simple_loss=0.2811, pruned_loss=0.0535, ctc_loss=0.1031, over 19559.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.276, pruned_loss=0.05205, ctc_loss=0.09786, over 3869686.70 frames. ], batch size: 52, lr: 8.74e-03, grad_scale: 16.0
+2024-08-30 15:14:18,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=222805.33333333334, ans=0.125
+2024-08-30 15:14:36,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=222858.66666666666, ans=0.125
+2024-08-30 15:14:40,452 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.52 vs. limit=15.0
+2024-08-30 15:15:10,929 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.81 vs. limit=22.5
+2024-08-30 15:15:27,049 INFO [train.py:1114] (0/4) Epoch 17, batch 2000, loss[loss=0.1624, simple_loss=0.2342, pruned_loss=0.03194, ctc_loss=0.06686, over 19628.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2762, pruned_loss=0.05179, ctc_loss=0.09761, over 3854277.82 frames. ], batch size: 45, lr: 8.74e-03, grad_scale: 32.0
+2024-08-30 15:16:03,257 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.692e+02 2.099e+02 2.435e+02 3.373e+02, threshold=4.199e+02, percent-clipped=0.0
+2024-08-30 15:16:18,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=223285.33333333334, ans=0.125
+2024-08-30 15:16:38,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=223285.33333333334, ans=0.125
+2024-08-30 15:16:42,836 INFO [train.py:1114] (0/4) Epoch 17, batch 2050, loss[loss=0.1772, simple_loss=0.2454, pruned_loss=0.03987, ctc_loss=0.07327, over 19707.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2751, pruned_loss=0.05204, ctc_loss=0.09802, over 3850647.74 frames. ], batch size: 47, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:17:01,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=223338.66666666666, ans=0.125
+2024-08-30 15:18:19,221 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.21 vs. limit=15.0
+2024-08-30 15:18:40,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=223445.33333333334, ans=0.125
+2024-08-30 15:20:15,748 INFO [train.py:1114] (0/4) Epoch 17, batch 2100, loss[loss=0.2162, simple_loss=0.2854, pruned_loss=0.05242, ctc_loss=0.1055, over 19773.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2746, pruned_loss=0.0518, ctc_loss=0.09754, over 3857785.87 frames. ], batch size: 54, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:20:39,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=223605.33333333334, ans=0.125
+2024-08-30 15:20:44,447 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.27 vs. limit=15.0
+2024-08-30 15:20:46,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=223605.33333333334, ans=22.5
+2024-08-30 15:20:58,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=223605.33333333334, ans=0.0
+2024-08-30 15:21:03,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=223658.66666666666, ans=0.1
+2024-08-30 15:21:15,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=223712.0, ans=0.0
+2024-08-30 15:21:23,085 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.29 vs. limit=22.5
+2024-08-30 15:21:23,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=223765.33333333334, ans=0.0
+2024-08-30 15:21:41,987 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.235e+02 1.693e+02 2.019e+02 2.546e+02 6.032e+02, threshold=4.039e+02, percent-clipped=5.0
+2024-08-30 15:21:44,365 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:21:51,394 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.99 vs. limit=15.0
+2024-08-30 15:21:51,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=223818.66666666666, ans=0.0
+2024-08-30 15:21:55,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=223818.66666666666, ans=0.125
+2024-08-30 15:22:02,851 INFO [train.py:1114] (0/4) Epoch 17, batch 2150, loss[loss=0.2008, simple_loss=0.2674, pruned_loss=0.04893, ctc_loss=0.09069, over 19594.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.274, pruned_loss=0.05147, ctc_loss=0.09675, over 3868200.70 frames. ], batch size: 52, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:22:04,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=223872.0, ans=0.5
+2024-08-30 15:22:06,195 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=223872.0, ans=0.125
+2024-08-30 15:22:08,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=223872.0, ans=0.2
+2024-08-30 15:22:24,361 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.02 vs. limit=12.0
+2024-08-30 15:22:29,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=223978.66666666666, ans=0.125
+2024-08-30 15:22:34,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=223978.66666666666, ans=0.2
+2024-08-30 15:22:42,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=224032.0, ans=0.125
+2024-08-30 15:22:46,821 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.27 vs. limit=15.0
+2024-08-30 15:22:58,293 INFO [train.py:1114] (0/4) Epoch 17, batch 2200, loss[loss=0.2313, simple_loss=0.2981, pruned_loss=0.05927, ctc_loss=0.1151, over 19586.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2736, pruned_loss=0.05101, ctc_loss=0.09589, over 3867184.45 frames. ], batch size: 57, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:23:01,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=224138.66666666666, ans=0.125
+2024-08-30 15:23:34,323 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.21 vs. limit=22.5
+2024-08-30 15:23:39,324 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:23:42,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=224298.66666666666, ans=0.025
+2024-08-30 15:23:53,332 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.675e+02 1.986e+02 2.371e+02 4.244e+02, threshold=3.972e+02, percent-clipped=2.0
+2024-08-30 15:23:54,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=224298.66666666666, ans=0.125
+2024-08-30 15:24:13,617 INFO [train.py:1114] (0/4) Epoch 17, batch 2250, loss[loss=0.2106, simple_loss=0.2834, pruned_loss=0.0492, ctc_loss=0.09857, over 19615.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.274, pruned_loss=0.05122, ctc_loss=0.09627, over 3867576.77 frames. ], batch size: 55, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:24:28,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=224458.66666666666, ans=0.015
+2024-08-30 15:25:44,793 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:25:54,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=224512.0, ans=0.025
+2024-08-30 15:26:56,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=224565.33333333334, ans=0.2
+2024-08-30 15:27:11,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=224618.66666666666, ans=0.125
+2024-08-30 15:27:15,752 INFO [train.py:1114] (0/4) Epoch 17, batch 2300, loss[loss=0.1762, simple_loss=0.2496, pruned_loss=0.03707, ctc_loss=0.07161, over 19495.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2736, pruned_loss=0.05151, ctc_loss=0.09671, over 3861679.45 frames. ], batch size: 49, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:27:27,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=224672.0, ans=0.125
+2024-08-30 15:28:32,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=224778.66666666666, ans=0.125
+2024-08-30 15:28:45,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=224832.0, ans=0.09899494936611666
+2024-08-30 15:28:46,695 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.759e+02 2.126e+02 2.592e+02 4.068e+02, threshold=4.252e+02, percent-clipped=2.0
+2024-08-30 15:28:46,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=224832.0, ans=0.0
+2024-08-30 15:28:49,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=224832.0, ans=0.1
+2024-08-30 15:29:04,185 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:29:49,494 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.77 vs. limit=15.0
+2024-08-30 15:29:51,084 INFO [train.py:1114] (0/4) Epoch 17, batch 2350, loss[loss=0.2031, simple_loss=0.28, pruned_loss=0.04582, ctc_loss=0.08652, over 19653.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2734, pruned_loss=0.05166, ctc_loss=0.09696, over 3864539.45 frames. ], batch size: 63, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:30:02,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=224992.0, ans=0.1
+2024-08-30 15:30:43,798 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=225045.33333333334, ans=0.95
+2024-08-30 15:31:51,959 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=4.84 vs. limit=12.0
+2024-08-30 15:32:54,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=225152.0, ans=0.2
+2024-08-30 15:32:55,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=225152.0, ans=0.2
+2024-08-30 15:32:58,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=225152.0, ans=0.2
+2024-08-30 15:33:11,237 INFO [train.py:1114] (0/4) Epoch 17, batch 2400, loss[loss=0.2476, simple_loss=0.3034, pruned_loss=0.06972, ctc_loss=0.1307, over 19364.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2766, pruned_loss=0.05334, ctc_loss=0.09985, over 3858657.20 frames. ], batch size: 67, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:33:44,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=225312.0, ans=0.125
+2024-08-30 15:33:48,810 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.684e+02 1.880e+02 2.443e+02 3.780e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-30 15:34:08,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=225418.66666666666, ans=0.125
+2024-08-30 15:34:10,341 INFO [train.py:1114] (0/4) Epoch 17, batch 2450, loss[loss=0.3143, simple_loss=0.3359, pruned_loss=0.1035, ctc_loss=0.2142, over 13661.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2805, pruned_loss=0.05616, ctc_loss=0.1058, over 3734008.74 frames. ], batch size: 140, lr: 8.69e-03, grad_scale: 16.0
+2024-08-30 15:34:14,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=225472.0, ans=0.125
+2024-08-30 15:34:44,532 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.108e-01
+2024-08-30 15:34:45,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=225472.0, ans=0.0
+2024-08-30 15:34:52,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225525.33333333334, ans=0.1
+2024-08-30 15:35:00,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=225578.66666666666, ans=0.125
+2024-08-30 15:35:03,221 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.68 vs. limit=15.0
+2024-08-30 15:35:22,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=225632.0, ans=0.025
+2024-08-30 15:35:40,793 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 15:38:02,850 INFO [train.py:1114] (0/4) Epoch 18, batch 0, loss[loss=0.2041, simple_loss=0.2608, pruned_loss=0.05338, ctc_loss=0.1015, over 19813.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2608, pruned_loss=0.05338, ctc_loss=0.1015, over 19813.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:38:02,851 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-30 15:39:11,427 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.4945, 3.0577, 2.0697, 2.5956], device='cuda:0')
+2024-08-30 15:39:34,945 INFO [train.py:1146] (0/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.0364, ctc_loss=0.06401, over 944034.00 frames.
+2024-08-30 15:39:34,945 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13588MB
+2024-08-30 15:39:39,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=225680.0, ans=0.125
+2024-08-30 15:39:51,532 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.07 vs. limit=6.0
+2024-08-30 15:40:29,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 15:40:30,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 15:40:35,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.11 vs. limit=15.0
+2024-08-30 15:40:58,099 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.919e+02 2.092e+02 2.421e+02 5.568e+02, threshold=4.185e+02, percent-clipped=4.0
+2024-08-30 15:41:04,970 INFO [train.py:1114] (0/4) Epoch 18, batch 50, loss[loss=0.1841, simple_loss=0.253, pruned_loss=0.04164, ctc_loss=0.07987, over 19716.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2794, pruned_loss=0.0545, ctc_loss=0.103, over 845746.19 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:41:18,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225946.66666666666, ans=0.1
+2024-08-30 15:41:59,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=226000.0, ans=0.1
+2024-08-30 15:42:02,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=226053.33333333334, ans=0.0
+2024-08-30 15:42:12,885 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.84 vs. limit=15.0
+2024-08-30 15:42:28,596 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=226160.0, ans=0.0
+2024-08-30 15:44:07,510 INFO [train.py:1114] (0/4) Epoch 18, batch 100, loss[loss=0.211, simple_loss=0.2725, pruned_loss=0.05408, ctc_loss=0.1033, over 19718.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2792, pruned_loss=0.05356, ctc_loss=0.1006, over 1499054.83 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:44:09,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=226213.33333333334, ans=0.025
+2024-08-30 15:45:01,907 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.711e+02 1.973e+02 2.383e+02 4.146e+02, threshold=3.946e+02, percent-clipped=0.0
+2024-08-30 15:45:03,096 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=226426.66666666666, ans=0.015
+2024-08-30 15:45:10,569 INFO [train.py:1114] (0/4) Epoch 18, batch 150, loss[loss=0.1915, simple_loss=0.2504, pruned_loss=0.04914, ctc_loss=0.08582, over 19715.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2754, pruned_loss=0.05179, ctc_loss=0.09749, over 2028052.35 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:45:10,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=226480.0, ans=0.1
+2024-08-30 15:45:31,270 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-30 15:45:42,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=226586.66666666666, ans=10.0
+2024-08-30 15:46:00,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=226640.0, ans=0.125
+2024-08-30 15:46:10,998 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.45 vs. limit=22.5
+2024-08-30 15:46:16,526 INFO [train.py:1114] (0/4) Epoch 18, batch 200, loss[loss=0.2309, simple_loss=0.2918, pruned_loss=0.06218, ctc_loss=0.114, over 18248.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2737, pruned_loss=0.05066, ctc_loss=0.0954, over 2435754.48 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:46:33,285 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.65 vs. limit=15.0
+2024-08-30 15:46:45,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=226853.33333333334, ans=0.05
+2024-08-30 15:46:51,315 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:47:08,533 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.794e+02 2.164e+02 2.564e+02 4.131e+02, threshold=4.328e+02, percent-clipped=1.0
+2024-08-30 15:47:20,539 INFO [train.py:1114] (0/4) Epoch 18, batch 250, loss[loss=0.2179, simple_loss=0.2815, pruned_loss=0.05683, ctc_loss=0.1014, over 19404.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2742, pruned_loss=0.05106, ctc_loss=0.09602, over 2755802.13 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:47:32,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=227066.66666666666, ans=0.125
+2024-08-30 15:48:32,082 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.27 vs. limit=15.0
+2024-08-30 15:48:37,746 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.02 vs. limit=22.5
+2024-08-30 15:48:45,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=227120.0, ans=0.1
+2024-08-30 15:48:53,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=227120.0, ans=0.1
+2024-08-30 15:49:06,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=227173.33333333334, ans=0.125
+2024-08-30 15:49:06,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=227173.33333333334, ans=0.125
+2024-08-30 15:49:22,642 INFO [train.py:1114] (0/4) Epoch 18, batch 300, loss[loss=0.2088, simple_loss=0.2777, pruned_loss=0.05118, ctc_loss=0.09382, over 19499.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.274, pruned_loss=0.05103, ctc_loss=0.09617, over 3000417.31 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:50:55,056 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.06 vs. limit=12.0
+2024-08-30 15:51:03,703 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.94 vs. limit=15.0
+2024-08-30 15:51:40,144 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.730e+02 1.916e+02 2.273e+02 3.732e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-30 15:51:48,908 INFO [train.py:1114] (0/4) Epoch 18, batch 350, loss[loss=0.1864, simple_loss=0.2452, pruned_loss=0.04666, ctc_loss=0.08558, over 19741.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2737, pruned_loss=0.05089, ctc_loss=0.09588, over 3191008.27 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:52:08,103 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.59 vs. limit=22.5
+2024-08-30 15:52:08,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=227600.0, ans=0.125
+2024-08-30 15:52:13,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=227600.0, ans=0.0
+2024-08-30 15:52:34,937 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=227706.66666666666, ans=0.125
+2024-08-30 15:52:51,854 INFO [train.py:1114] (0/4) Epoch 18, batch 400, loss[loss=0.1975, simple_loss=0.2709, pruned_loss=0.04422, ctc_loss=0.08937, over 19506.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.274, pruned_loss=0.05109, ctc_loss=0.09619, over 3343005.44 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:52:59,317 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=227813.33333333334, ans=0.125
+2024-08-30 15:54:16,400 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.651e+02 1.862e+02 2.258e+02 4.636e+02, threshold=3.723e+02, percent-clipped=1.0
+2024-08-30 15:54:16,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=228026.66666666666, ans=0.125
+2024-08-30 15:54:17,687 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:54:25,953 INFO [train.py:1114] (0/4) Epoch 18, batch 450, loss[loss=0.1884, simple_loss=0.2645, pruned_loss=0.04072, ctc_loss=0.07721, over 19611.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2732, pruned_loss=0.05055, ctc_loss=0.09533, over 3451708.44 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:54:26,590 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.10 vs. limit=10.0
+2024-08-30 15:54:58,737 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228186.66666666666, ans=0.1
+2024-08-30 15:55:15,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=228240.0, ans=0.125
+2024-08-30 15:55:22,157 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.10 vs. limit=12.0
+2024-08-30 15:55:37,515 INFO [train.py:1114] (0/4) Epoch 18, batch 500, loss[loss=0.1938, simple_loss=0.2659, pruned_loss=0.04491, ctc_loss=0.07995, over 19634.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2727, pruned_loss=0.05056, ctc_loss=0.09544, over 3546079.07 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:55:41,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=228346.66666666666, ans=0.0
+2024-08-30 15:56:16,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=228400.0, ans=0.0
+2024-08-30 15:56:22,785 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=228453.33333333334, ans=0.125
+2024-08-30 15:56:25,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228453.33333333334, ans=0.1
+2024-08-30 15:56:38,293 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.86 vs. limit=15.0
+2024-08-30 15:56:40,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=228453.33333333334, ans=15.0
+2024-08-30 15:56:44,805 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=228506.66666666666, ans=0.0
+2024-08-30 15:57:00,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=228506.66666666666, ans=0.125
+2024-08-30 15:57:03,388 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.03 vs. limit=22.5
+2024-08-30 15:57:52,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=228560.0, ans=0.125
+2024-08-30 15:57:54,266 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.602e+02 1.832e+02 2.190e+02 3.877e+02, threshold=3.665e+02, percent-clipped=2.0
+2024-08-30 15:57:59,097 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=228560.0, ans=0.125
+2024-08-30 15:58:00,977 INFO [train.py:1114] (0/4) Epoch 18, batch 550, loss[loss=0.2026, simple_loss=0.2776, pruned_loss=0.04643, ctc_loss=0.08688, over 19273.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2729, pruned_loss=0.05051, ctc_loss=0.09524, over 3608445.94 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:58:44,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228613.33333333334, ans=0.1
+2024-08-30 15:58:58,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=228666.66666666666, ans=0.125
+2024-08-30 16:00:50,094 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.32 vs. limit=10.0
+2024-08-30 16:00:54,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=228720.0, ans=0.025
+2024-08-30 16:01:05,469 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=228720.0, ans=0.2
+2024-08-30 16:01:09,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=228720.0, ans=0.125
+2024-08-30 16:01:35,825 INFO [train.py:1114] (0/4) Epoch 18, batch 600, loss[loss=0.2306, simple_loss=0.2883, pruned_loss=0.06276, ctc_loss=0.1182, over 19393.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2727, pruned_loss=0.05029, ctc_loss=0.09454, over 3666186.54 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:01:39,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=228880.0, ans=0.125
+2024-08-30 16:02:21,003 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.45 vs. limit=22.5
+2024-08-30 16:03:04,755 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=228986.66666666666, ans=0.1
+2024-08-30 16:03:13,335 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.08 vs. limit=15.0
+2024-08-30 16:03:47,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=229040.0, ans=0.125
+2024-08-30 16:04:29,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=229040.0, ans=0.125
+2024-08-30 16:04:35,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=229040.0, ans=0.1
+2024-08-30 16:04:41,755 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 1.726e+02 2.045e+02 2.727e+02 4.181e+02, threshold=4.090e+02, percent-clipped=7.0
+2024-08-30 16:04:48,712 INFO [train.py:1114] (0/4) Epoch 18, batch 650, loss[loss=0.19, simple_loss=0.2615, pruned_loss=0.04304, ctc_loss=0.08085, over 19762.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2718, pruned_loss=0.04999, ctc_loss=0.09404, over 3716392.84 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:05:08,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=229146.66666666666, ans=0.125
+2024-08-30 16:06:24,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=229200.0, ans=0.125
+2024-08-30 16:06:36,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=229253.33333333334, ans=0.125
+2024-08-30 16:06:43,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229306.66666666666, ans=0.1
+2024-08-30 16:06:51,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-30 16:07:18,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=229360.0, ans=0.025
+2024-08-30 16:07:32,102 INFO [train.py:1114] (0/4) Epoch 18, batch 700, loss[loss=0.1917, simple_loss=0.2595, pruned_loss=0.04617, ctc_loss=0.07901, over 19714.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2721, pruned_loss=0.05001, ctc_loss=0.09406, over 3749192.59 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:07:37,248 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.09 vs. limit=15.0
+2024-08-30 16:07:39,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=229413.33333333334, ans=22.5
+2024-08-30 16:08:08,214 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=229573.33333333334, ans=0.125
+2024-08-30 16:08:27,249 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.709e+02 1.988e+02 2.480e+02 4.374e+02, threshold=3.975e+02, percent-clipped=1.0
+2024-08-30 16:08:33,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=229680.0, ans=0.2
+2024-08-30 16:08:34,086 INFO [train.py:1114] (0/4) Epoch 18, batch 750, loss[loss=0.2257, simple_loss=0.2905, pruned_loss=0.05788, ctc_loss=0.1128, over 19502.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2726, pruned_loss=0.05039, ctc_loss=0.09487, over 3774959.33 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:08:46,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=229680.0, ans=0.125
+2024-08-30 16:09:13,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=229840.0, ans=0.2
+2024-08-30 16:09:34,534 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=229893.33333333334, ans=0.125
+2024-08-30 16:09:38,056 INFO [train.py:1114] (0/4) Epoch 18, batch 800, loss[loss=0.2044, simple_loss=0.2605, pruned_loss=0.05406, ctc_loss=0.1006, over 19807.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2722, pruned_loss=0.05018, ctc_loss=0.09443, over 3796787.06 frames. ], batch size: 49, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:09:56,826 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.35 vs. limit=15.0
+2024-08-30 16:10:07,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=230000.0, ans=0.025
+2024-08-30 16:10:31,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=230106.66666666666, ans=0.2
+2024-08-30 16:11:30,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-30 16:11:34,979 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.744e+02 1.950e+02 2.451e+02 4.139e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-30 16:11:47,902 INFO [train.py:1114] (0/4) Epoch 18, batch 850, loss[loss=0.2136, simple_loss=0.2869, pruned_loss=0.05022, ctc_loss=0.09987, over 19633.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2722, pruned_loss=0.05019, ctc_loss=0.09441, over 3815142.45 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:11:57,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=230213.33333333334, ans=0.125
+2024-08-30 16:12:16,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=230320.0, ans=0.125
+2024-08-30 16:12:25,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=230320.0, ans=0.125
+2024-08-30 16:12:26,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=230373.33333333334, ans=0.2
+2024-08-30 16:12:48,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=230426.66666666666, ans=0.07
+2024-08-30 16:12:57,689 INFO [train.py:1114] (0/4) Epoch 18, batch 900, loss[loss=0.197, simple_loss=0.2602, pruned_loss=0.04913, ctc_loss=0.08885, over 19425.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2727, pruned_loss=0.05055, ctc_loss=0.09512, over 3819486.86 frames. ], batch size: 48, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:12:59,214 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.08 vs. limit=12.0
+2024-08-30 16:13:28,886 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.47 vs. limit=15.0
+2024-08-30 16:13:44,885 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=230693.33333333334, ans=0.0
+2024-08-30 16:13:49,524 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.771e+02 2.097e+02 2.541e+02 3.279e+02, threshold=4.195e+02, percent-clipped=1.0
+2024-08-30 16:13:56,603 INFO [train.py:1114] (0/4) Epoch 18, batch 950, loss[loss=0.1883, simple_loss=0.2564, pruned_loss=0.0437, ctc_loss=0.08186, over 19484.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2733, pruned_loss=0.05071, ctc_loss=0.09546, over 3820910.45 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:14:50,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=230746.66666666666, ans=0.0
+2024-08-30 16:15:13,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=230853.33333333334, ans=0.125
+2024-08-30 16:15:14,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=230853.33333333334, ans=0.2
+2024-08-30 16:15:34,029 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.04 vs. limit=10.0
+2024-08-30 16:15:39,666 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=230960.0, ans=0.125
+2024-08-30 16:15:41,762 INFO [train.py:1114] (0/4) Epoch 18, batch 1000, loss[loss=0.1873, simple_loss=0.2579, pruned_loss=0.04173, ctc_loss=0.08273, over 19851.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2741, pruned_loss=0.05112, ctc_loss=0.09635, over 3815875.19 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:15:51,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=231013.33333333334, ans=0.0
+2024-08-30 16:15:55,446 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.67 vs. limit=6.0
+2024-08-30 16:16:20,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231120.0, ans=0.1
+2024-08-30 16:16:46,157 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.681e+02 1.935e+02 2.141e+02 3.468e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-30 16:16:46,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=231226.66666666666, ans=0.125
+2024-08-30 16:16:53,208 INFO [train.py:1114] (0/4) Epoch 18, batch 1050, loss[loss=0.224, simple_loss=0.2943, pruned_loss=0.05624, ctc_loss=0.1031, over 19852.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2734, pruned_loss=0.05094, ctc_loss=0.0959, over 3822177.37 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:16:56,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=231280.0, ans=0.2
+2024-08-30 16:17:59,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=231386.66666666666, ans=0.125
+2024-08-30 16:18:00,843 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.01 vs. limit=15.0
+2024-08-30 16:18:01,884 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.20 vs. limit=15.0
+2024-08-30 16:18:29,881 INFO [train.py:1114] (0/4) Epoch 18, batch 1100, loss[loss=0.2234, simple_loss=0.2836, pruned_loss=0.05964, ctc_loss=0.11, over 19596.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2727, pruned_loss=0.05052, ctc_loss=0.0952, over 3829672.02 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:18:57,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=231653.33333333334, ans=0.125
+2024-08-30 16:19:03,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=231653.33333333334, ans=0.2
+2024-08-30 16:19:24,175 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.668e+02 1.884e+02 2.263e+02 3.606e+02, threshold=3.767e+02, percent-clipped=0.0
+2024-08-30 16:19:52,650 INFO [train.py:1114] (0/4) Epoch 18, batch 1150, loss[loss=0.1929, simple_loss=0.2616, pruned_loss=0.045, ctc_loss=0.0857, over 19608.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2731, pruned_loss=0.05087, ctc_loss=0.09608, over 3827298.87 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:19:55,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=231813.33333333334, ans=0.125
+2024-08-30 16:19:56,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231813.33333333334, ans=0.1
+2024-08-30 16:19:57,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=231813.33333333334, ans=0.125
+2024-08-30 16:20:00,083 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=231813.33333333334, ans=0.2
+2024-08-30 16:22:11,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=231866.66666666666, ans=0.0
+2024-08-30 16:22:28,879 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.40 vs. limit=15.0
+2024-08-30 16:22:56,529 INFO [train.py:1114] (0/4) Epoch 18, batch 1200, loss[loss=0.2079, simple_loss=0.2849, pruned_loss=0.04709, ctc_loss=0.09157, over 19837.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2746, pruned_loss=0.05148, ctc_loss=0.09725, over 3824043.11 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:22:58,875 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=232080.0, ans=0.035
+2024-08-30 16:23:43,454 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.05 vs. limit=15.0
+2024-08-30 16:23:45,955 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.656e+02 1.841e+02 2.164e+02 3.391e+02, threshold=3.682e+02, percent-clipped=0.0
+2024-08-30 16:23:48,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=232293.33333333334, ans=0.0
+2024-08-30 16:23:52,935 INFO [train.py:1114] (0/4) Epoch 18, batch 1250, loss[loss=0.2104, simple_loss=0.2778, pruned_loss=0.05201, ctc_loss=0.09746, over 19507.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2753, pruned_loss=0.0516, ctc_loss=0.09737, over 3842402.06 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:24:06,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=232400.0, ans=0.125
+2024-08-30 16:24:09,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=232400.0, ans=0.0
+2024-08-30 16:24:10,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=232400.0, ans=0.125
+2024-08-30 16:24:27,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=232453.33333333334, ans=0.2
+2024-08-30 16:24:34,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232506.66666666666, ans=0.1
+2024-08-30 16:25:36,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=232506.66666666666, ans=0.125
+2024-08-30 16:25:38,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232506.66666666666, ans=0.1
+2024-08-30 16:25:53,657 INFO [train.py:1114] (0/4) Epoch 18, batch 1300, loss[loss=0.2042, simple_loss=0.2777, pruned_loss=0.04738, ctc_loss=0.08971, over 18824.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2738, pruned_loss=0.05101, ctc_loss=0.09608, over 3845983.72 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:32:42,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=232773.33333333334, ans=0.125
+2024-08-30 16:35:48,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232826.66666666666, ans=0.1
+2024-08-30 16:45:23,080 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.785e+02 2.170e+02 2.759e+02 4.331e+02, threshold=4.339e+02, percent-clipped=5.0
+2024-08-30 17:02:45,885 INFO [train.py:1114] (0/4) Epoch 18, batch 1350, loss[loss=0.2027, simple_loss=0.2613, pruned_loss=0.05242, ctc_loss=0.09782, over 19761.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2736, pruned_loss=0.05108, ctc_loss=0.0961, over 3857349.73 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-30 17:12:54,858 INFO [train.py:1050] (0/4) Caught exception: [Rank 0] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=46170, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600000 milliseconds before timing out..
+2024-08-30 17:12:54,860 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-0.pt
+2024-08-30 17:12:57,589 INFO [train.py:1413] (0/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 17:12:57,636 INFO [train.py:1419] (0/4) features shape: torch.Size([56, 1420, 80])
+2024-08-30 17:12:57,638 INFO [train.py:1423] (0/4) num tokens: 4279
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-1
new file mode 100644
index 0000000000000000000000000000000000000000..3f183ee8a127d7c14aa40c3299de6856ab759c38
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-1
@@ -0,0 +1,577 @@
+2024-08-30 12:44:46,730 INFO [train.py:1182] (1/4) Training started
+2024-08-30 12:44:48,385 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-30 12:44:48,388 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 17, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 12:44:48,388 INFO [train.py:1212] (1/4) About to create model
+2024-08-30 12:44:49,100 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-30 12:44:49,100 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-16.pt
+2024-08-30 12:45:01,813 INFO [train.py:1231] (1/4) Using DDP
+2024-08-30 12:45:06,258 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-30 12:45:06,457 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-30 12:45:06,457 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-30 12:45:08,260 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-30 12:45:08,264 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-30 12:45:08,444 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-30 12:45:08,576 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-30 12:45:08,901 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-30 12:45:08,901 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 12:51:15,857 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.24 vs. limit=3.0
+2024-08-30 12:51:17,012 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-30 12:51:18,484 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-30 12:53:02,305 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-30 12:53:03,662 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-30 12:54:12,744 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-30 12:54:14,357 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-30 12:54:14,377 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-30 12:55:00,325 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.57 vs. limit=15.0
+2024-08-30 12:55:06,915 INFO [train.py:1114] (1/4) Epoch 17, batch 0, loss[loss=0.185, simple_loss=0.2497, pruned_loss=0.04405, ctc_loss=0.08062, over 19820.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2497, pruned_loss=0.04405, ctc_loss=0.08062, over 19820.00 frames. ], batch size: 49, lr: 8.95e-03, grad_scale: 32.0
+2024-08-30 12:55:06,915 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-30 12:55:26,654 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.7543, 2.1106, 1.5990, 1.9654, 2.1607, 2.2839, 2.1657, 1.7068],
+ device='cuda:1')
+2024-08-30 12:55:31,711 INFO [train.py:1146] (1/4) Epoch 17, validation: loss=0.185, simple_loss=0.2737, pruned_loss=0.03584, ctc_loss=0.06176, over 944034.00 frames.
+2024-08-30 12:55:31,712 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13467MB
+2024-08-30 12:55:59,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=212458.66666666666, ans=0.0
+2024-08-30 12:56:10,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-30 12:56:13,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=212458.66666666666, ans=0.05
+2024-08-30 12:56:42,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=212458.66666666666, ans=0.2
+2024-08-30 12:58:34,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-30 12:59:52,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212512.0, ans=0.1
+2024-08-30 13:01:17,476 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=212512.0, ans=0.125
+2024-08-30 13:06:19,314 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.860e+02 2.030e+02 2.233e+02 2.993e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-30 13:09:44,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=212672.0, ans=0.125
+2024-08-30 13:09:56,407 INFO [train.py:1114] (1/4) Epoch 17, batch 50, loss[loss=0.1964, simple_loss=0.2636, pruned_loss=0.04701, ctc_loss=0.08799, over 19696.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2794, pruned_loss=0.05445, ctc_loss=0.104, over 844645.99 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:09:59,629 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=212672.0, ans=0.125
+2024-08-30 13:15:56,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=212672.0, ans=0.0
+2024-08-30 13:16:10,042 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:18:30,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=212832.0, ans=0.125
+2024-08-30 13:18:39,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=212832.0, ans=0.2
+2024-08-30 13:19:01,137 INFO [train.py:1114] (1/4) Epoch 17, batch 100, loss[loss=0.2007, simple_loss=0.2719, pruned_loss=0.0464, ctc_loss=0.09168, over 19711.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2806, pruned_loss=0.05462, ctc_loss=0.103, over 1499050.01 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:19:07,920 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.52 vs. limit=15.0
+2024-08-30 13:19:25,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=212992.0, ans=0.125
+2024-08-30 13:22:04,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=213045.33333333334, ans=0.0
+2024-08-30 13:22:37,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=213045.33333333334, ans=0.125
+2024-08-30 13:23:11,084 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.706e+02 1.953e+02 2.287e+02 3.713e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-30 13:23:53,177 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.35 vs. limit=15.0
+2024-08-30 13:24:10,835 INFO [train.py:1114] (1/4) Epoch 17, batch 150, loss[loss=0.1898, simple_loss=0.2448, pruned_loss=0.04741, ctc_loss=0.09987, over 19703.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.278, pruned_loss=0.05362, ctc_loss=0.101, over 2028977.84 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:24:21,117 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=213205.33333333334, ans=0.125
+2024-08-30 13:24:35,648 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.15 vs. limit=12.0
+2024-08-30 13:25:09,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=213312.0, ans=0.0
+2024-08-30 13:27:36,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=213365.33333333334, ans=0.0
+2024-08-30 13:27:49,622 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.68 vs. limit=10.0
+2024-08-30 13:27:50,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=213365.33333333334, ans=0.125
+2024-08-30 13:27:59,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=213418.66666666666, ans=0.0
+2024-08-30 13:28:05,733 INFO [train.py:1114] (1/4) Epoch 17, batch 200, loss[loss=0.227, simple_loss=0.2885, pruned_loss=0.06039, ctc_loss=0.1118, over 18150.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2769, pruned_loss=0.05286, ctc_loss=0.09969, over 2437042.07 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:28:06,078 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=1.854e-02
+2024-08-30 13:28:09,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=213472.0, ans=0.0
+2024-08-30 13:28:30,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=213578.66666666666, ans=0.125
+2024-08-30 13:28:31,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=213578.66666666666, ans=0.125
+2024-08-30 13:28:32,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=213578.66666666666, ans=0.125
+2024-08-30 13:28:40,338 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.384e+02 1.731e+02 1.992e+02 2.666e+02 4.093e+02, threshold=3.983e+02, percent-clipped=1.0
+2024-08-30 13:28:58,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.03 vs. limit=22.5
+2024-08-30 13:29:05,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=213685.33333333334, ans=0.0
+2024-08-30 13:29:07,557 INFO [train.py:1114] (1/4) Epoch 17, batch 250, loss[loss=0.2289, simple_loss=0.296, pruned_loss=0.05953, ctc_loss=0.1072, over 19395.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2771, pruned_loss=0.05331, ctc_loss=0.1003, over 2757247.20 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:29:10,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=213738.66666666666, ans=0.0
+2024-08-30 13:29:39,009 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.32 vs. limit=12.0
+2024-08-30 13:29:54,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213898.66666666666, ans=0.1
+2024-08-30 13:30:13,962 INFO [train.py:1114] (1/4) Epoch 17, batch 300, loss[loss=0.2405, simple_loss=0.2992, pruned_loss=0.06715, ctc_loss=0.1188, over 19510.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.276, pruned_loss=0.05275, ctc_loss=0.0994, over 3001398.30 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:30:51,817 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.663e+02 1.872e+02 2.298e+02 3.693e+02, threshold=3.744e+02, percent-clipped=0.0
+2024-08-30 13:30:54,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=214165.33333333334, ans=0.2
+2024-08-30 13:31:01,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=214165.33333333334, ans=0.2
+2024-08-30 13:31:12,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=214218.66666666666, ans=10.0
+2024-08-30 13:31:21,419 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.52 vs. limit=22.5
+2024-08-30 13:31:25,598 INFO [train.py:1114] (1/4) Epoch 17, batch 350, loss[loss=0.1843, simple_loss=0.2542, pruned_loss=0.04182, ctc_loss=0.07697, over 19756.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2762, pruned_loss=0.05242, ctc_loss=0.09879, over 3191743.97 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:31:27,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=214272.0, ans=0.2
+2024-08-30 13:31:28,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=214272.0, ans=0.07
+2024-08-30 13:31:29,922 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.42 vs. limit=15.0
+2024-08-30 13:31:44,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=214325.33333333334, ans=0.5
+2024-08-30 13:32:04,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=214432.0, ans=0.125
+2024-08-30 13:32:07,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=214432.0, ans=0.125
+2024-08-30 13:32:12,978 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=214485.33333333334, ans=0.125
+2024-08-30 13:32:19,297 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.24 vs. limit=15.0
+2024-08-30 13:32:24,581 INFO [train.py:1114] (1/4) Epoch 17, batch 400, loss[loss=0.1905, simple_loss=0.2712, pruned_loss=0.03987, ctc_loss=0.07503, over 19487.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2754, pruned_loss=0.05185, ctc_loss=0.09784, over 3343628.60 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:32:30,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=214538.66666666666, ans=0.05
+2024-08-30 13:32:33,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214538.66666666666, ans=0.125
+2024-08-30 13:32:39,878 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.49 vs. limit=22.5
+2024-08-30 13:32:55,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=214645.33333333334, ans=0.0
+2024-08-30 13:32:55,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-30 13:33:01,010 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.640e+02 1.901e+02 2.325e+02 4.074e+02, threshold=3.801e+02, percent-clipped=1.0
+2024-08-30 13:33:07,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=214698.66666666666, ans=0.125
+2024-08-30 13:33:11,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=214698.66666666666, ans=0.2
+2024-08-30 13:33:20,770 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=214752.0, ans=0.125
+2024-08-30 13:33:24,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=214752.0, ans=0.125
+2024-08-30 13:33:26,239 INFO [train.py:1114] (1/4) Epoch 17, batch 450, loss[loss=0.2215, simple_loss=0.2941, pruned_loss=0.05462, ctc_loss=0.09914, over 19605.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2758, pruned_loss=0.05227, ctc_loss=0.09853, over 3450831.97 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:33:57,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.25 vs. limit=22.5
+2024-08-30 13:34:05,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=214858.66666666666, ans=0.2
+2024-08-30 13:43:51,933 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.66 vs. limit=5.0
+2024-08-30 13:44:01,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=215018.66666666666, ans=0.0
+2024-08-30 13:44:02,846 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.22 vs. limit=6.0
+2024-08-30 13:44:05,801 INFO [train.py:1114] (1/4) Epoch 17, batch 500, loss[loss=0.2163, simple_loss=0.29, pruned_loss=0.0526, ctc_loss=0.09377, over 19661.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.275, pruned_loss=0.05211, ctc_loss=0.09837, over 3546821.36 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:44:39,218 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=215072.0, ans=0.125
+2024-08-30 13:44:39,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=215072.0, ans=0.0
+2024-08-30 13:44:43,191 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=215125.33333333334, ans=0.2
+2024-08-30 13:44:50,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=215125.33333333334, ans=0.125
+2024-08-30 13:44:50,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=215125.33333333334, ans=0.0
+2024-08-30 13:44:59,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=215178.66666666666, ans=0.125
+2024-08-30 13:45:06,448 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.780e+02 2.026e+02 2.589e+02 4.105e+02, threshold=4.052e+02, percent-clipped=2.0
+2024-08-30 13:45:10,297 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=215232.0, ans=0.2
+2024-08-30 13:45:16,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=215232.0, ans=0.0
+2024-08-30 13:45:31,485 INFO [train.py:1114] (1/4) Epoch 17, batch 550, loss[loss=0.2281, simple_loss=0.2875, pruned_loss=0.06192, ctc_loss=0.1121, over 19299.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2745, pruned_loss=0.05198, ctc_loss=0.09804, over 3608064.98 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-30 13:45:42,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=215392.0, ans=10.0
+2024-08-30 13:45:59,614 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.00 vs. limit=12.0
+2024-08-30 13:46:00,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=215445.33333333334, ans=0.125
+2024-08-30 13:46:22,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215552.0, ans=0.1
+2024-08-30 13:47:18,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=215552.0, ans=0.1
+2024-08-30 13:47:20,209 INFO [train.py:1114] (1/4) Epoch 17, batch 600, loss[loss=0.2238, simple_loss=0.2943, pruned_loss=0.05595, ctc_loss=0.1035, over 19430.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2744, pruned_loss=0.05178, ctc_loss=0.09741, over 3665994.82 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-30 13:47:23,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=215605.33333333334, ans=0.0
+2024-08-30 13:47:24,234 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.14 vs. limit=15.0
+2024-08-30 13:47:27,906 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.23 vs. limit=22.5
+2024-08-30 13:47:32,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=215658.66666666666, ans=0.0
+2024-08-30 13:47:33,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=215658.66666666666, ans=0.125
+2024-08-30 13:47:49,969 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.17 vs. limit=15.0
+2024-08-30 13:47:51,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=215712.0, ans=0.2
+2024-08-30 13:47:53,788 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.647e+02 1.940e+02 2.383e+02 4.124e+02, threshold=3.879e+02, percent-clipped=1.0
+2024-08-30 13:48:15,477 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=215818.66666666666, ans=0.125
+2024-08-30 13:48:27,126 INFO [train.py:1114] (1/4) Epoch 17, batch 650, loss[loss=0.2036, simple_loss=0.2676, pruned_loss=0.05028, ctc_loss=0.09769, over 19764.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2737, pruned_loss=0.05152, ctc_loss=0.09708, over 3716311.99 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 32.0
+2024-08-30 13:52:37,308 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:00:35,373 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.13 vs. limit=15.0
+2024-08-30 14:06:19,773 INFO [train.py:1114] (1/4) Epoch 17, batch 700, loss[loss=0.2039, simple_loss=0.2687, pruned_loss=0.05024, ctc_loss=0.09628, over 19726.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2748, pruned_loss=0.05202, ctc_loss=0.09805, over 3749338.22 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:10:26,198 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.27 vs. limit=6.0
+2024-08-30 14:11:44,084 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=216298.66666666666, ans=0.0
+2024-08-30 14:12:13,779 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 1.667e+02 2.137e+02 2.601e+02 4.284e+02, threshold=4.274e+02, percent-clipped=4.0
+2024-08-30 14:16:29,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=216298.66666666666, ans=0.1
+2024-08-30 14:17:07,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216352.0, ans=0.1
+2024-08-30 14:17:35,073 INFO [train.py:1114] (1/4) Epoch 17, batch 750, loss[loss=0.2177, simple_loss=0.2865, pruned_loss=0.05373, ctc_loss=0.1037, over 19479.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2741, pruned_loss=0.05169, ctc_loss=0.09742, over 3775726.61 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:17:41,668 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=216405.33333333334, ans=0.0
+2024-08-30 14:17:42,173 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.98 vs. limit=6.0
+2024-08-30 14:17:59,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=216458.66666666666, ans=0.125
+2024-08-30 14:18:42,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=216458.66666666666, ans=0.125
+2024-08-30 14:19:07,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=216512.0, ans=0.07
+2024-08-30 14:20:37,994 INFO [train.py:1114] (1/4) Epoch 17, batch 800, loss[loss=0.2131, simple_loss=0.2588, pruned_loss=0.06203, ctc_loss=0.1084, over 19834.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2741, pruned_loss=0.05178, ctc_loss=0.09759, over 3795773.92 frames. ], batch size: 49, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:29:21,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=216725.33333333334, ans=0.2
+2024-08-30 14:29:36,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=216725.33333333334, ans=0.125
+2024-08-30 14:31:11,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=216725.33333333334, ans=0.2
+2024-08-30 14:31:26,587 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=216778.66666666666, ans=0.0
+2024-08-30 14:31:32,316 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.715e+02 2.071e+02 2.537e+02 3.967e+02, threshold=4.143e+02, percent-clipped=0.0
+2024-08-30 14:31:48,354 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:32:34,214 INFO [train.py:1114] (1/4) Epoch 17, batch 850, loss[loss=0.2318, simple_loss=0.2969, pruned_loss=0.06046, ctc_loss=0.1142, over 19643.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2743, pruned_loss=0.05207, ctc_loss=0.09812, over 3815745.90 frames. ], batch size: 59, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:32:37,372 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.70 vs. limit=12.0
+2024-08-30 14:32:39,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=216938.66666666666, ans=0.025
+2024-08-30 14:32:46,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=216938.66666666666, ans=0.2
+2024-08-30 14:32:55,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=216992.0, ans=0.1
+2024-08-30 14:34:26,341 INFO [train.py:1114] (1/4) Epoch 17, batch 900, loss[loss=0.193, simple_loss=0.2537, pruned_loss=0.04824, ctc_loss=0.08981, over 19805.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2747, pruned_loss=0.0523, ctc_loss=0.09855, over 3819499.53 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:36:01,846 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=217205.33333333334, ans=0.025
+2024-08-30 14:36:28,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=217258.66666666666, ans=0.125
+2024-08-30 14:36:39,130 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.81 vs. limit=22.5
+2024-08-30 14:36:59,138 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.623e+02 1.810e+02 2.233e+02 4.039e+02, threshold=3.621e+02, percent-clipped=0.0
+2024-08-30 14:37:10,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=217365.33333333334, ans=0.125
+2024-08-30 14:37:24,549 INFO [train.py:1114] (1/4) Epoch 17, batch 950, loss[loss=0.1812, simple_loss=0.2448, pruned_loss=0.04265, ctc_loss=0.08089, over 19497.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.275, pruned_loss=0.05222, ctc_loss=0.09853, over 3821559.39 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:38:43,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=217578.66666666666, ans=0.125
+2024-08-30 14:38:45,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=217578.66666666666, ans=0.2
+2024-08-30 14:38:45,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=217578.66666666666, ans=0.07
+2024-08-30 14:38:57,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=217632.0, ans=0.2
+2024-08-30 14:39:07,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=217685.33333333334, ans=0.1
+2024-08-30 14:39:10,780 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.43 vs. limit=22.5
+2024-08-30 14:39:16,493 INFO [train.py:1114] (1/4) Epoch 17, batch 1000, loss[loss=0.2122, simple_loss=0.2749, pruned_loss=0.05384, ctc_loss=0.1047, over 19883.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2758, pruned_loss=0.0526, ctc_loss=0.09924, over 3817867.61 frames. ], batch size: 52, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:39:18,658 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.22 vs. limit=22.5
+2024-08-30 14:39:22,940 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:39:42,397 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=217845.33333333334, ans=0.125
+2024-08-30 14:39:52,694 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.648e+02 1.905e+02 2.181e+02 3.196e+02, threshold=3.810e+02, percent-clipped=0.0
+2024-08-30 14:40:16,951 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.72 vs. limit=15.0
+2024-08-30 14:40:20,752 INFO [train.py:1114] (1/4) Epoch 17, batch 1050, loss[loss=0.2112, simple_loss=0.286, pruned_loss=0.0494, ctc_loss=0.0941, over 19821.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2751, pruned_loss=0.05251, ctc_loss=0.09907, over 3824747.58 frames. ], batch size: 57, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:40:29,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=218005.33333333334, ans=0.125
+2024-08-30 14:40:40,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=218058.66666666666, ans=0.1
+2024-08-30 14:40:46,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=218112.0, ans=0.0
+2024-08-30 14:40:58,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=218165.33333333334, ans=0.125
+2024-08-30 14:41:13,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=218218.66666666666, ans=0.125
+2024-08-30 14:41:24,736 INFO [train.py:1114] (1/4) Epoch 17, batch 1100, loss[loss=0.2051, simple_loss=0.2768, pruned_loss=0.04946, ctc_loss=0.08593, over 19584.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2752, pruned_loss=0.05223, ctc_loss=0.09854, over 3831537.03 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:41:25,359 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.21 vs. limit=15.0
+2024-08-30 14:41:43,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=218325.33333333334, ans=0.125
+2024-08-30 14:42:15,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=218378.66666666666, ans=0.0
+2024-08-30 14:42:23,474 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.634e+02 1.909e+02 2.238e+02 3.833e+02, threshold=3.817e+02, percent-clipped=1.0
+2024-08-30 14:43:09,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=218485.33333333334, ans=0.05
+2024-08-30 14:43:11,407 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.53 vs. limit=15.0
+2024-08-30 14:43:15,280 INFO [train.py:1114] (1/4) Epoch 17, batch 1150, loss[loss=0.1994, simple_loss=0.2652, pruned_loss=0.04819, ctc_loss=0.09286, over 19583.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2751, pruned_loss=0.05231, ctc_loss=0.09855, over 3831079.30 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:43:23,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=218538.66666666666, ans=0.0
+2024-08-30 14:43:28,265 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.98 vs. limit=10.0
+2024-08-30 14:43:29,804 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.82 vs. limit=15.0
+2024-08-30 14:43:32,950 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.06 vs. limit=15.0
+2024-08-30 14:43:34,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=218592.0, ans=0.0
+2024-08-30 14:43:37,056 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.99 vs. limit=12.0
+2024-08-30 14:43:44,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=218645.33333333334, ans=0.0
+2024-08-30 14:43:44,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=218645.33333333334, ans=0.025
+2024-08-30 14:43:47,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=218645.33333333334, ans=10.0
+2024-08-30 14:43:51,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=218645.33333333334, ans=0.025
+2024-08-30 14:43:57,646 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.15 vs. limit=6.0
+2024-08-30 14:44:00,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=218698.66666666666, ans=0.025
+2024-08-30 14:44:03,732 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=6.43 vs. limit=15.0
+2024-08-30 14:44:20,110 INFO [train.py:1114] (1/4) Epoch 17, batch 1200, loss[loss=0.2096, simple_loss=0.2797, pruned_loss=0.04973, ctc_loss=0.1003, over 19841.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2757, pruned_loss=0.05252, ctc_loss=0.09901, over 3825453.22 frames. ], batch size: 57, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:45:47,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=218858.66666666666, ans=0.1
+2024-08-30 14:45:57,132 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.53 vs. limit=15.0
+2024-08-30 14:46:08,768 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 1.734e+02 1.937e+02 2.235e+02 3.279e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-30 14:46:11,397 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=218965.33333333334, ans=0.0
+2024-08-30 14:46:13,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=218965.33333333334, ans=0.0
+2024-08-30 14:46:24,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=219018.66666666666, ans=0.2
+2024-08-30 14:46:25,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=219018.66666666666, ans=0.1
+2024-08-30 14:46:31,316 INFO [train.py:1114] (1/4) Epoch 17, batch 1250, loss[loss=0.2208, simple_loss=0.2855, pruned_loss=0.05735, ctc_loss=0.1037, over 19520.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2761, pruned_loss=0.05266, ctc_loss=0.09929, over 3843634.46 frames. ], batch size: 61, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:46:49,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=219125.33333333334, ans=0.0
+2024-08-30 14:46:50,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=219125.33333333334, ans=0.2
+2024-08-30 14:46:59,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=219178.66666666666, ans=0.125
+2024-08-30 14:48:33,525 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=219232.0, ans=0.025
+2024-08-30 14:48:46,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=219285.33333333334, ans=0.0
+2024-08-30 14:48:52,374 INFO [train.py:1114] (1/4) Epoch 17, batch 1300, loss[loss=0.212, simple_loss=0.2801, pruned_loss=0.05187, ctc_loss=0.1003, over 18867.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2752, pruned_loss=0.05242, ctc_loss=0.09884, over 3847381.66 frames. ], batch size: 76, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:49:00,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=219338.66666666666, ans=0.025
+2024-08-30 14:49:01,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=219338.66666666666, ans=0.125
+2024-08-30 14:49:29,493 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.750e+02 2.054e+02 2.564e+02 3.826e+02, threshold=4.108e+02, percent-clipped=0.0
+2024-08-30 14:49:40,679 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=219498.66666666666, ans=0.125
+2024-08-30 14:49:42,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=219552.0, ans=0.0
+2024-08-30 14:50:08,917 INFO [train.py:1114] (1/4) Epoch 17, batch 1350, loss[loss=0.2047, simple_loss=0.2833, pruned_loss=0.0465, ctc_loss=0.08258, over 19769.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2742, pruned_loss=0.05164, ctc_loss=0.09729, over 3857863.66 frames. ], batch size: 54, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:50:14,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=219605.33333333334, ans=0.125
+2024-08-30 14:50:39,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=219712.0, ans=0.2
+2024-08-30 14:50:55,627 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.20 vs. limit=22.5
+2024-08-30 14:51:00,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=219818.66666666666, ans=0.125
+2024-08-30 14:51:03,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=219818.66666666666, ans=0.125
+2024-08-30 14:51:03,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=219818.66666666666, ans=0.125
+2024-08-30 14:51:09,327 INFO [train.py:1114] (1/4) Epoch 17, batch 1400, loss[loss=0.2064, simple_loss=0.2657, pruned_loss=0.05256, ctc_loss=0.1046, over 19679.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.274, pruned_loss=0.05189, ctc_loss=0.09756, over 3864231.98 frames. ], batch size: 46, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:51:28,450 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:52:01,616 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.697e+02 1.910e+02 2.399e+02 4.058e+02, threshold=3.819e+02, percent-clipped=0.0
+2024-08-30 14:52:06,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=220032.0, ans=0.0
+2024-08-30 14:52:23,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.42 vs. limit=10.0
+2024-08-30 14:52:26,401 INFO [train.py:1114] (1/4) Epoch 17, batch 1450, loss[loss=0.2025, simple_loss=0.2831, pruned_loss=0.04493, ctc_loss=0.07981, over 19695.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2746, pruned_loss=0.05186, ctc_loss=0.09746, over 3862167.13 frames. ], batch size: 63, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:52:43,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=220138.66666666666, ans=0.125
+2024-08-30 14:53:17,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=220245.33333333334, ans=0.1
+2024-08-30 14:53:54,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=220298.66666666666, ans=0.2
+2024-08-30 14:53:56,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=220298.66666666666, ans=0.025
+2024-08-30 14:54:08,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=220352.0, ans=0.09899494936611666
+2024-08-30 14:54:12,192 INFO [train.py:1114] (1/4) Epoch 17, batch 1500, loss[loss=0.2069, simple_loss=0.2782, pruned_loss=0.04934, ctc_loss=0.09232, over 19565.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2754, pruned_loss=0.05218, ctc_loss=0.09801, over 3862445.51 frames. ], batch size: 57, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:54:12,995 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.82 vs. limit=15.0
+2024-08-30 14:54:36,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=220458.66666666666, ans=0.125
+2024-08-30 14:54:54,687 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.719e+02 1.906e+02 2.293e+02 3.704e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-30 14:55:17,160 INFO [train.py:1114] (1/4) Epoch 17, batch 1550, loss[loss=0.2397, simple_loss=0.2994, pruned_loss=0.0646, ctc_loss=0.127, over 19604.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2758, pruned_loss=0.0526, ctc_loss=0.09884, over 3847588.66 frames. ], batch size: 60, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:55:29,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=220672.0, ans=0.2
+2024-08-30 14:55:48,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=220725.33333333334, ans=0.0
+2024-08-30 14:55:59,841 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.26 vs. limit=15.0
+2024-08-30 14:56:01,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=220778.66666666666, ans=0.0
+2024-08-30 14:56:07,627 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:56:20,101 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.57 vs. limit=15.0
+2024-08-30 14:56:22,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=220885.33333333334, ans=0.1
+2024-08-30 14:56:27,380 INFO [train.py:1114] (1/4) Epoch 17, batch 1600, loss[loss=0.2205, simple_loss=0.2876, pruned_loss=0.05587, ctc_loss=0.1042, over 19845.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2756, pruned_loss=0.05225, ctc_loss=0.09831, over 3836188.10 frames. ], batch size: 57, lr: 8.78e-03, grad_scale: 32.0
+2024-08-30 14:57:21,817 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.31 vs. limit=22.5
+2024-08-30 14:57:23,113 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.60 vs. limit=15.0
+2024-08-30 14:57:30,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=220992.0, ans=0.95
+2024-08-30 14:57:34,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=220992.0, ans=0.025
+2024-08-30 14:57:36,292 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.54 vs. limit=10.0
+2024-08-30 15:00:47,791 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.738e+02 2.160e+02 2.635e+02 3.870e+02, threshold=4.320e+02, percent-clipped=2.0
+2024-08-30 15:03:54,012 INFO [train.py:1114] (1/4) Epoch 17, batch 1650, loss[loss=0.1968, simple_loss=0.2729, pruned_loss=0.04115, ctc_loss=0.09577, over 19679.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2755, pruned_loss=0.05229, ctc_loss=0.09861, over 3831818.43 frames. ], batch size: 59, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:05:32,949 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-30 15:05:48,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=221258.66666666666, ans=0.2
+2024-08-30 15:05:48,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=221258.66666666666, ans=0.2
+2024-08-30 15:07:23,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=221312.0, ans=0.125
+2024-08-30 15:07:44,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=221365.33333333334, ans=0.04949747468305833
+2024-08-30 15:08:00,849 INFO [train.py:1114] (1/4) Epoch 17, batch 1700, loss[loss=0.1992, simple_loss=0.2579, pruned_loss=0.05036, ctc_loss=0.09976, over 19675.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2752, pruned_loss=0.05198, ctc_loss=0.09817, over 3845734.92 frames. ], batch size: 46, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:08:23,496 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.34 vs. limit=15.0
+2024-08-30 15:08:24,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=221578.66666666666, ans=0.1
+2024-08-30 15:08:30,260 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=221578.66666666666, ans=0.1
+2024-08-30 15:08:36,780 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.717e+02 1.998e+02 2.422e+02 4.059e+02, threshold=3.996e+02, percent-clipped=0.0
+2024-08-30 15:09:29,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=221632.0, ans=0.0
+2024-08-30 15:09:43,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=221685.33333333334, ans=0.0
+2024-08-30 15:09:50,037 INFO [train.py:1114] (1/4) Epoch 17, batch 1750, loss[loss=0.1984, simple_loss=0.251, pruned_loss=0.05337, ctc_loss=0.09733, over 19647.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2744, pruned_loss=0.05174, ctc_loss=0.09771, over 3850469.71 frames. ], batch size: 45, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:10:08,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=221792.0, ans=0.1
+2024-08-30 15:10:13,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=221845.33333333334, ans=0.2
+2024-08-30 15:10:36,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=221952.0, ans=0.2
+2024-08-30 15:10:46,112 INFO [train.py:1114] (1/4) Epoch 17, batch 1800, loss[loss=0.2167, simple_loss=0.285, pruned_loss=0.05222, ctc_loss=0.1097, over 19604.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2749, pruned_loss=0.05193, ctc_loss=0.09808, over 3851798.91 frames. ], batch size: 55, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:11:22,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=222165.33333333334, ans=0.2
+2024-08-30 15:11:23,242 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.773e+02 2.029e+02 2.607e+02 4.351e+02, threshold=4.057e+02, percent-clipped=1.0
+2024-08-30 15:11:30,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=222165.33333333334, ans=0.1
+2024-08-30 15:11:41,628 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:11:43,553 INFO [train.py:1114] (1/4) Epoch 17, batch 1850, loss[loss=0.1908, simple_loss=0.2773, pruned_loss=0.03862, ctc_loss=0.06776, over 19592.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2747, pruned_loss=0.05166, ctc_loss=0.09743, over 3854949.06 frames. ], batch size: 57, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:11:59,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=222325.33333333334, ans=0.0
+2024-08-30 15:12:12,153 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.91 vs. limit=6.0
+2024-08-30 15:12:29,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=222485.33333333334, ans=0.0
+2024-08-30 15:12:32,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=222485.33333333334, ans=0.2
+2024-08-30 15:12:39,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=222538.66666666666, ans=0.125
+2024-08-30 15:12:40,701 INFO [train.py:1114] (1/4) Epoch 17, batch 1900, loss[loss=0.2275, simple_loss=0.3001, pruned_loss=0.0563, ctc_loss=0.1055, over 19647.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2751, pruned_loss=0.05167, ctc_loss=0.09751, over 3860703.66 frames. ], batch size: 59, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:12:40,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=222538.66666666666, ans=0.0
+2024-08-30 15:13:06,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=222645.33333333334, ans=0.1
+2024-08-30 15:13:12,557 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=222645.33333333334, ans=0.025
+2024-08-30 15:13:18,246 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.682e+02 1.950e+02 2.328e+02 4.923e+02, threshold=3.901e+02, percent-clipped=3.0
+2024-08-30 15:13:26,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=222752.0, ans=0.125
+2024-08-30 15:13:38,419 INFO [train.py:1114] (1/4) Epoch 17, batch 1950, loss[loss=0.2349, simple_loss=0.2872, pruned_loss=0.06664, ctc_loss=0.1231, over 19595.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2763, pruned_loss=0.05212, ctc_loss=0.09822, over 3870458.50 frames. ], batch size: 52, lr: 8.74e-03, grad_scale: 16.0
+2024-08-30 15:14:36,390 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=222858.66666666666, ans=0.125
+2024-08-30 15:14:53,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=222965.33333333334, ans=0.125
+2024-08-30 15:15:06,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=222965.33333333334, ans=10.0
+2024-08-30 15:15:27,067 INFO [train.py:1114] (1/4) Epoch 17, batch 2000, loss[loss=0.1733, simple_loss=0.2377, pruned_loss=0.03989, ctc_loss=0.07271, over 19625.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2768, pruned_loss=0.05239, ctc_loss=0.0986, over 3856646.93 frames. ], batch size: 45, lr: 8.74e-03, grad_scale: 32.0
+2024-08-30 15:15:51,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=223178.66666666666, ans=0.125
+2024-08-30 15:15:58,022 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=223178.66666666666, ans=0.0
+2024-08-30 15:16:03,265 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.692e+02 2.099e+02 2.435e+02 3.373e+02, threshold=4.199e+02, percent-clipped=0.0
+2024-08-30 15:16:05,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=223232.0, ans=0.125
+2024-08-30 15:16:06,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=223232.0, ans=0.125
+2024-08-30 15:16:14,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=223285.33333333334, ans=0.1
+2024-08-30 15:16:18,381 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=223285.33333333334, ans=0.2
+2024-08-30 15:16:38,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=223285.33333333334, ans=0.2
+2024-08-30 15:16:42,844 INFO [train.py:1114] (1/4) Epoch 17, batch 2050, loss[loss=0.176, simple_loss=0.242, pruned_loss=0.04027, ctc_loss=0.07354, over 19729.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2759, pruned_loss=0.05248, ctc_loss=0.09883, over 3852531.93 frames. ], batch size: 47, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:16:55,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=223338.66666666666, ans=0.1
+2024-08-30 15:17:01,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=223338.66666666666, ans=0.1
+2024-08-30 15:18:23,842 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.46 vs. limit=22.5
+2024-08-30 15:18:33,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=223445.33333333334, ans=0.125
+2024-08-30 15:20:01,265 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=223498.66666666666, ans=0.0
+2024-08-30 15:20:11,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=223552.0, ans=0.125
+2024-08-30 15:20:13,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=223552.0, ans=0.125
+2024-08-30 15:20:15,771 INFO [train.py:1114] (1/4) Epoch 17, batch 2100, loss[loss=0.2026, simple_loss=0.2685, pruned_loss=0.04907, ctc_loss=0.09674, over 19766.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2748, pruned_loss=0.05183, ctc_loss=0.09776, over 3859529.90 frames. ], batch size: 54, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:21:01,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=223658.66666666666, ans=0.0
+2024-08-30 15:21:18,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=223712.0, ans=0.125
+2024-08-30 15:21:41,984 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.235e+02 1.693e+02 2.019e+02 2.546e+02 6.032e+02, threshold=4.039e+02, percent-clipped=5.0
+2024-08-30 15:21:50,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=223818.66666666666, ans=0.125
+2024-08-30 15:21:57,530 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=2.789e-02
+2024-08-30 15:22:02,825 INFO [train.py:1114] (1/4) Epoch 17, batch 2150, loss[loss=0.184, simple_loss=0.2558, pruned_loss=0.04112, ctc_loss=0.07483, over 19570.00 frames. ], tot_loss[loss=0.208, simple_loss=0.274, pruned_loss=0.05159, ctc_loss=0.09701, over 3870502.16 frames. ], batch size: 52, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:22:11,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=223872.0, ans=0.95
+2024-08-30 15:22:47,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=224085.33333333334, ans=0.125
+2024-08-30 15:22:54,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=224085.33333333334, ans=0.0
+2024-08-30 15:22:58,283 INFO [train.py:1114] (1/4) Epoch 17, batch 2200, loss[loss=0.2049, simple_loss=0.2794, pruned_loss=0.04698, ctc_loss=0.09133, over 19579.00 frames. ], tot_loss[loss=0.208, simple_loss=0.274, pruned_loss=0.05159, ctc_loss=0.09688, over 3867850.87 frames. ], batch size: 57, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:23:08,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=224138.66666666666, ans=0.125
+2024-08-30 15:23:31,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=224245.33333333334, ans=0.125
+2024-08-30 15:23:41,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=224298.66666666666, ans=0.125
+2024-08-30 15:23:53,347 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.675e+02 1.986e+02 2.371e+02 4.244e+02, threshold=3.972e+02, percent-clipped=2.0
+2024-08-30 15:23:55,403 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.54 vs. limit=6.0
+2024-08-30 15:24:13,614 INFO [train.py:1114] (1/4) Epoch 17, batch 2250, loss[loss=0.208, simple_loss=0.2793, pruned_loss=0.04969, ctc_loss=0.09318, over 19616.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2746, pruned_loss=0.05193, ctc_loss=0.09755, over 3867771.57 frames. ], batch size: 55, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:24:19,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=224405.33333333334, ans=0.125
+2024-08-30 15:24:26,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=224458.66666666666, ans=0.0
+2024-08-30 15:24:28,798 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.03 vs. limit=15.0
+2024-08-30 15:25:42,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=224458.66666666666, ans=0.0
+2024-08-30 15:27:01,336 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.13 vs. limit=22.5
+2024-08-30 15:27:08,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=224618.66666666666, ans=0.125
+2024-08-30 15:27:09,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=224618.66666666666, ans=0.07
+2024-08-30 15:27:11,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=224618.66666666666, ans=0.0
+2024-08-30 15:27:15,732 INFO [train.py:1114] (1/4) Epoch 17, batch 2300, loss[loss=0.1708, simple_loss=0.2383, pruned_loss=0.03778, ctc_loss=0.06935, over 19506.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2741, pruned_loss=0.05212, ctc_loss=0.09806, over 3861446.76 frames. ], batch size: 49, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:28:29,823 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=224725.33333333334, ans=0.125
+2024-08-30 15:28:43,557 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=224832.0, ans=0.09899494936611666
+2024-08-30 15:28:44,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=224832.0, ans=0.0
+2024-08-30 15:28:46,693 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.759e+02 2.126e+02 2.592e+02 4.068e+02, threshold=4.252e+02, percent-clipped=2.0
+2024-08-30 15:29:50,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=224938.66666666666, ans=0.1
+2024-08-30 15:29:51,108 INFO [train.py:1114] (1/4) Epoch 17, batch 2350, loss[loss=0.2271, simple_loss=0.2928, pruned_loss=0.05941, ctc_loss=0.1067, over 19670.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2746, pruned_loss=0.05237, ctc_loss=0.09846, over 3863977.51 frames. ], batch size: 63, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:29:53,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=224938.66666666666, ans=0.125
+2024-08-30 15:30:00,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=224938.66666666666, ans=0.2
+2024-08-30 15:30:36,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=224992.0, ans=0.07
+2024-08-30 15:31:54,162 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.83 vs. limit=15.0
+2024-08-30 15:32:00,448 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=225098.66666666666, ans=0.0
+2024-08-30 15:32:52,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=225098.66666666666, ans=0.09899494936611666
+2024-08-30 15:33:11,256 INFO [train.py:1114] (1/4) Epoch 17, batch 2400, loss[loss=0.2376, simple_loss=0.3021, pruned_loss=0.0617, ctc_loss=0.1243, over 19262.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.276, pruned_loss=0.0527, ctc_loss=0.09901, over 3858306.35 frames. ], batch size: 71, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:33:33,581 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.86 vs. limit=15.0
+2024-08-30 15:33:34,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=225312.0, ans=15.0
+2024-08-30 15:33:48,275 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.38 vs. limit=6.0
+2024-08-30 15:33:48,809 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.684e+02 1.880e+02 2.443e+02 3.780e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-30 15:34:08,661 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.57 vs. limit=15.0
+2024-08-30 15:34:10,336 INFO [train.py:1114] (1/4) Epoch 17, batch 2450, loss[loss=0.2668, simple_loss=0.3082, pruned_loss=0.08333, ctc_loss=0.1469, over 13116.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2799, pruned_loss=0.05543, ctc_loss=0.1045, over 3731084.46 frames. ], batch size: 140, lr: 8.69e-03, grad_scale: 16.0
+2024-08-30 15:34:11,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=225472.0, ans=0.125
+2024-08-30 15:34:14,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=225472.0, ans=0.025
+2024-08-30 15:34:18,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=225472.0, ans=10.0
+2024-08-30 15:34:50,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=225525.33333333334, ans=0.09899494936611666
+2024-08-30 15:34:51,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=225525.33333333334, ans=0.125
+2024-08-30 15:34:59,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=225578.66666666666, ans=0.125
+2024-08-30 15:35:02,985 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.48 vs. limit=22.5
+2024-08-30 15:35:03,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=225578.66666666666, ans=0.125
+2024-08-30 15:35:09,807 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.11 vs. limit=15.0
+2024-08-30 15:38:02,874 INFO [train.py:1114] (1/4) Epoch 18, batch 0, loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04852, ctc_loss=0.09187, over 19410.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04852, ctc_loss=0.09187, over 19410.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:38:02,874 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-30 15:38:16,595 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([3.9873, 3.3705, 2.6267, 3.1732], device='cuda:1')
+2024-08-30 15:39:34,943 INFO [train.py:1146] (1/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.0364, ctc_loss=0.06401, over 944034.00 frames.
+2024-08-30 15:39:34,944 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13639MB
+2024-08-30 15:39:39,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=225680.0, ans=0.125
+2024-08-30 15:40:05,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225733.33333333334, ans=0.1
+2024-08-30 15:40:20,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-30 15:40:25,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 15:40:30,539 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=225786.66666666666, ans=0.1
+2024-08-30 15:40:47,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=225840.0, ans=0.125
+2024-08-30 15:40:58,103 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.919e+02 2.092e+02 2.421e+02 5.568e+02, threshold=4.185e+02, percent-clipped=4.0
+2024-08-30 15:41:03,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225893.33333333334, ans=0.1
+2024-08-30 15:41:04,966 INFO [train.py:1114] (1/4) Epoch 18, batch 50, loss[loss=0.1582, simple_loss=0.2327, pruned_loss=0.0302, ctc_loss=0.05849, over 19735.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2757, pruned_loss=0.05252, ctc_loss=0.1004, over 843693.80 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:41:05,554 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.70 vs. limit=15.0
+2024-08-30 15:41:16,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=225946.66666666666, ans=0.09899494936611666
+2024-08-30 15:41:46,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=225946.66666666666, ans=0.0
+2024-08-30 15:42:01,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=226053.33333333334, ans=0.025
+2024-08-30 15:42:27,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=226160.0, ans=0.0
+2024-08-30 15:42:28,860 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.79 vs. limit=15.0
+2024-08-30 15:44:07,530 INFO [train.py:1114] (1/4) Epoch 18, batch 100, loss[loss=0.199, simple_loss=0.2665, pruned_loss=0.04874, ctc_loss=0.0852, over 19719.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2771, pruned_loss=0.05266, ctc_loss=0.1004, over 1498452.85 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:44:17,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=226213.33333333334, ans=15.0
+2024-08-30 15:44:34,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=226320.0, ans=0.125
+2024-08-30 15:44:43,558 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.42 vs. limit=15.0
+2024-08-30 15:44:58,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=226426.66666666666, ans=0.07
+2024-08-30 15:45:01,910 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.711e+02 1.973e+02 2.383e+02 4.146e+02, threshold=3.946e+02, percent-clipped=0.0
+2024-08-30 15:45:10,543 INFO [train.py:1114] (1/4) Epoch 18, batch 150, loss[loss=0.1954, simple_loss=0.2555, pruned_loss=0.04927, ctc_loss=0.09182, over 19694.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2751, pruned_loss=0.05225, ctc_loss=0.09937, over 2027899.17 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:45:11,998 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:45:24,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-30 15:45:26,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-30 15:45:27,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-30 15:45:28,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=226533.33333333334, ans=0.1
+2024-08-30 15:45:28,898 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:45:35,102 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.23 vs. limit=15.0
+2024-08-30 15:45:39,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=226586.66666666666, ans=0.035
+2024-08-30 15:45:43,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.54 vs. limit=12.0
+2024-08-30 15:46:10,742 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=226693.33333333334, ans=0.125
+2024-08-30 15:46:16,548 INFO [train.py:1114] (1/4) Epoch 18, batch 200, loss[loss=0.2091, simple_loss=0.2801, pruned_loss=0.0504, ctc_loss=0.09337, over 18231.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2739, pruned_loss=0.05144, ctc_loss=0.09761, over 2435641.98 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:46:23,911 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.95 vs. limit=15.0
+2024-08-30 15:46:45,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=226853.33333333334, ans=0.0
+2024-08-30 15:47:08,529 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.794e+02 2.164e+02 2.564e+02 4.131e+02, threshold=4.328e+02, percent-clipped=1.0
+2024-08-30 15:47:20,541 INFO [train.py:1114] (1/4) Epoch 18, batch 250, loss[loss=0.2314, simple_loss=0.2971, pruned_loss=0.05931, ctc_loss=0.1175, over 19375.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2738, pruned_loss=0.05121, ctc_loss=0.097, over 2757155.58 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:47:25,612 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=227013.33333333334, ans=0.125
+2024-08-30 15:47:32,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.39 vs. limit=12.0
+2024-08-30 15:48:36,401 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.34 vs. limit=22.5
+2024-08-30 15:49:09,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=227226.66666666666, ans=0.1
+2024-08-30 15:49:21,133 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.44 vs. limit=15.0
+2024-08-30 15:49:22,654 INFO [train.py:1114] (1/4) Epoch 18, batch 300, loss[loss=0.224, simple_loss=0.2859, pruned_loss=0.05819, ctc_loss=0.1146, over 19505.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2728, pruned_loss=0.05064, ctc_loss=0.09594, over 3002465.67 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:51:20,878 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:51:27,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=227440.0, ans=0.125
+2024-08-30 15:51:27,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=227440.0, ans=0.025
+2024-08-30 15:51:40,146 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.730e+02 1.916e+02 2.273e+02 3.732e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-30 15:51:41,789 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=227493.33333333334, ans=0.2
+2024-08-30 15:51:48,895 INFO [train.py:1114] (1/4) Epoch 18, batch 350, loss[loss=0.1884, simple_loss=0.2494, pruned_loss=0.04613, ctc_loss=0.08774, over 19745.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2731, pruned_loss=0.05065, ctc_loss=0.09572, over 3192448.38 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:52:05,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=227600.0, ans=0.0
+2024-08-30 15:52:32,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=227706.66666666666, ans=0.2
+2024-08-30 15:52:38,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=227760.0, ans=0.125
+2024-08-30 15:52:51,832 INFO [train.py:1114] (1/4) Epoch 18, batch 400, loss[loss=0.202, simple_loss=0.2743, pruned_loss=0.04733, ctc_loss=0.08765, over 19499.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2731, pruned_loss=0.0506, ctc_loss=0.09547, over 3342545.79 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:52:54,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=227813.33333333334, ans=0.125
+2024-08-30 15:53:01,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=227813.33333333334, ans=0.2
+2024-08-30 15:53:08,946 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:53:10,568 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.88 vs. limit=15.0
+2024-08-30 15:53:15,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=227920.0, ans=0.125
+2024-08-30 15:53:17,285 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=227920.0, ans=0.125
+2024-08-30 15:54:08,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=227973.33333333334, ans=6.0
+2024-08-30 15:54:09,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=227973.33333333334, ans=0.125
+2024-08-30 15:54:09,555 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=227973.33333333334, ans=0.1
+2024-08-30 15:54:12,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.66 vs. limit=15.0
+2024-08-30 15:54:16,393 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.651e+02 1.862e+02 2.258e+02 4.636e+02, threshold=3.723e+02, percent-clipped=1.0
+2024-08-30 15:54:17,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=228026.66666666666, ans=0.125
+2024-08-30 15:54:24,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=228080.0, ans=0.125
+2024-08-30 15:54:24,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=228080.0, ans=0.025
+2024-08-30 15:54:25,942 INFO [train.py:1114] (1/4) Epoch 18, batch 450, loss[loss=0.1989, simple_loss=0.2781, pruned_loss=0.04369, ctc_loss=0.08104, over 19601.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2735, pruned_loss=0.05112, ctc_loss=0.09624, over 3451509.51 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:54:58,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=228186.66666666666, ans=0.125
+2024-08-30 15:55:11,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=228240.0, ans=0.0
+2024-08-30 15:55:37,536 INFO [train.py:1114] (1/4) Epoch 18, batch 500, loss[loss=0.2274, simple_loss=0.2951, pruned_loss=0.05904, ctc_loss=0.1042, over 19696.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2723, pruned_loss=0.05039, ctc_loss=0.09506, over 3546801.94 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:55:40,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=228346.66666666666, ans=0.125
+2024-08-30 15:55:47,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=228346.66666666666, ans=0.5
+2024-08-30 15:56:25,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=228453.33333333334, ans=0.5
+2024-08-30 15:56:37,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=228453.33333333334, ans=0.125
+2024-08-30 15:57:50,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=228560.0, ans=0.2
+2024-08-30 15:57:54,273 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.602e+02 1.832e+02 2.190e+02 3.877e+02, threshold=3.665e+02, percent-clipped=2.0
+2024-08-30 15:58:00,971 INFO [train.py:1114] (1/4) Epoch 18, batch 550, loss[loss=0.2147, simple_loss=0.2868, pruned_loss=0.05194, ctc_loss=0.09692, over 19259.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2723, pruned_loss=0.05053, ctc_loss=0.09513, over 3609220.25 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:58:38,527 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=228613.33333333334, ans=0.0
+2024-08-30 15:58:55,992 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:00:51,926 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:00:53,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=228720.0, ans=0.125
+2024-08-30 16:01:35,843 INFO [train.py:1114] (1/4) Epoch 18, batch 600, loss[loss=0.229, simple_loss=0.3018, pruned_loss=0.05653, ctc_loss=0.108, over 19439.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2727, pruned_loss=0.05071, ctc_loss=0.09544, over 3667203.93 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:01:38,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=228880.0, ans=0.125
+2024-08-30 16:02:34,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=228933.33333333334, ans=0.125
+2024-08-30 16:03:08,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228986.66666666666, ans=0.1
+2024-08-30 16:04:28,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=229040.0, ans=0.125
+2024-08-30 16:04:31,380 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=229040.0, ans=0.025
+2024-08-30 16:04:31,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=229040.0, ans=0.0
+2024-08-30 16:04:41,751 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 1.726e+02 2.045e+02 2.727e+02 4.181e+02, threshold=4.090e+02, percent-clipped=7.0
+2024-08-30 16:04:46,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=229093.33333333334, ans=0.125
+2024-08-30 16:04:48,706 INFO [train.py:1114] (1/4) Epoch 18, batch 650, loss[loss=0.1937, simple_loss=0.2684, pruned_loss=0.04397, ctc_loss=0.07789, over 19779.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2721, pruned_loss=0.05064, ctc_loss=0.09521, over 3716775.23 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:04:48,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=229146.66666666666, ans=0.125
+2024-08-30 16:05:08,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=229146.66666666666, ans=0.1
+2024-08-30 16:06:17,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=229200.0, ans=0.125
+2024-08-30 16:06:30,664 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=229200.0, ans=0.125
+2024-08-30 16:06:37,752 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.97 vs. limit=15.0
+2024-08-30 16:06:39,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=229253.33333333334, ans=0.125
+2024-08-30 16:06:47,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-30 16:07:14,533 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.87 vs. limit=22.5
+2024-08-30 16:07:24,403 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.30 vs. limit=6.0
+2024-08-30 16:07:32,101 INFO [train.py:1114] (1/4) Epoch 18, batch 700, loss[loss=0.2059, simple_loss=0.2678, pruned_loss=0.05285, ctc_loss=0.09561, over 19707.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2727, pruned_loss=0.05077, ctc_loss=0.09536, over 3748465.26 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:07:32,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=229413.33333333334, ans=0.09899494936611666
+2024-08-30 16:07:34,047 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.50 vs. limit=22.5
+2024-08-30 16:07:34,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229413.33333333334, ans=0.1
+2024-08-30 16:07:38,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=229413.33333333334, ans=0.125
+2024-08-30 16:07:45,620 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.20 vs. limit=22.5
+2024-08-30 16:07:55,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=229520.0, ans=0.125
+2024-08-30 16:08:13,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=229573.33333333334, ans=0.125
+2024-08-30 16:08:27,237 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.709e+02 1.988e+02 2.480e+02 4.374e+02, threshold=3.975e+02, percent-clipped=1.0
+2024-08-30 16:08:33,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=229680.0, ans=0.125
+2024-08-30 16:08:34,067 INFO [train.py:1114] (1/4) Epoch 18, batch 750, loss[loss=0.2341, simple_loss=0.2947, pruned_loss=0.0637, ctc_loss=0.1153, over 19498.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2723, pruned_loss=0.05051, ctc_loss=0.09496, over 3775010.57 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:08:49,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=229733.33333333334, ans=0.0
+2024-08-30 16:09:17,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=229840.0, ans=0.125
+2024-08-30 16:09:23,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=229840.0, ans=0.125
+2024-08-30 16:09:34,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=229893.33333333334, ans=0.125
+2024-08-30 16:09:35,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=229893.33333333334, ans=0.125
+2024-08-30 16:09:36,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=229893.33333333334, ans=22.5
+2024-08-30 16:09:38,078 INFO [train.py:1114] (1/4) Epoch 18, batch 800, loss[loss=0.1909, simple_loss=0.2614, pruned_loss=0.04367, ctc_loss=0.08254, over 19409.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2718, pruned_loss=0.04994, ctc_loss=0.0941, over 3795416.46 frames. ], batch size: 48, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:09:52,000 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=229946.66666666666, ans=0.0
+2024-08-30 16:09:52,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=229946.66666666666, ans=0.125
+2024-08-30 16:09:55,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=229946.66666666666, ans=0.0
+2024-08-30 16:10:08,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=230000.0, ans=0.125
+2024-08-30 16:10:12,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=230053.33333333334, ans=0.0
+2024-08-30 16:10:37,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-30 16:11:34,980 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.744e+02 1.950e+02 2.451e+02 4.139e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-30 16:11:44,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-30 16:11:47,905 INFO [train.py:1114] (1/4) Epoch 18, batch 850, loss[loss=0.2182, simple_loss=0.2857, pruned_loss=0.05439, ctc_loss=0.105, over 19648.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2719, pruned_loss=0.05002, ctc_loss=0.09424, over 3814604.01 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:11:58,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=230266.66666666666, ans=0.07
+2024-08-30 16:12:14,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=230320.0, ans=0.0
+2024-08-30 16:12:18,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=230320.0, ans=0.2
+2024-08-30 16:12:38,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=230373.33333333334, ans=0.1
+2024-08-30 16:12:48,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=230426.66666666666, ans=0.125
+2024-08-30 16:12:57,693 INFO [train.py:1114] (1/4) Epoch 18, batch 900, loss[loss=0.1984, simple_loss=0.2542, pruned_loss=0.05146, ctc_loss=0.09935, over 19809.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2722, pruned_loss=0.05028, ctc_loss=0.09481, over 3819625.95 frames. ], batch size: 49, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:13:11,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=230533.33333333334, ans=0.07
+2024-08-30 16:13:21,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=230586.66666666666, ans=0.0
+2024-08-30 16:13:22,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=230586.66666666666, ans=0.0
+2024-08-30 16:13:49,531 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.771e+02 2.097e+02 2.541e+02 3.279e+02, threshold=4.195e+02, percent-clipped=1.0
+2024-08-30 16:13:56,603 INFO [train.py:1114] (1/4) Epoch 18, batch 950, loss[loss=0.1923, simple_loss=0.258, pruned_loss=0.04653, ctc_loss=0.08353, over 19518.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2727, pruned_loss=0.05089, ctc_loss=0.09603, over 3819326.72 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:14:51,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=230746.66666666666, ans=0.1
+2024-08-30 16:15:07,715 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.18 vs. limit=15.0
+2024-08-30 16:15:16,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=230853.33333333334, ans=0.125
+2024-08-30 16:15:16,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=230853.33333333334, ans=0.1
+2024-08-30 16:15:38,514 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=2.745e-03
+2024-08-30 16:15:40,063 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=3.94 vs. limit=15.0
+2024-08-30 16:15:41,780 INFO [train.py:1114] (1/4) Epoch 18, batch 1000, loss[loss=0.19, simple_loss=0.2568, pruned_loss=0.04508, ctc_loss=0.08242, over 19849.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2733, pruned_loss=0.05106, ctc_loss=0.09637, over 3815683.01 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:16:07,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=231120.0, ans=0.5
+2024-08-30 16:16:27,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=231120.0, ans=0.125
+2024-08-30 16:16:46,163 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.681e+02 1.935e+02 2.141e+02 3.468e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-30 16:16:51,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=231226.66666666666, ans=0.1
+2024-08-30 16:16:53,181 INFO [train.py:1114] (1/4) Epoch 18, batch 1050, loss[loss=0.1955, simple_loss=0.2699, pruned_loss=0.04371, ctc_loss=0.08398, over 19852.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2731, pruned_loss=0.05131, ctc_loss=0.09676, over 3823270.16 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:16:53,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231280.0, ans=0.1
+2024-08-30 16:16:57,510 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.66 vs. limit=15.0
+2024-08-30 16:17:08,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=231333.33333333334, ans=0.125
+2024-08-30 16:17:10,127 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=231333.33333333334, ans=0.125
+2024-08-30 16:17:17,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=231386.66666666666, ans=0.125
+2024-08-30 16:18:04,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=231440.0, ans=0.125
+2024-08-30 16:18:11,592 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.64 vs. limit=22.5
+2024-08-30 16:18:13,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=231440.0, ans=0.125
+2024-08-30 16:18:29,888 INFO [train.py:1114] (1/4) Epoch 18, batch 1100, loss[loss=0.2012, simple_loss=0.2682, pruned_loss=0.04853, ctc_loss=0.09288, over 19594.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2725, pruned_loss=0.05078, ctc_loss=0.0959, over 3830526.83 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:18:44,891 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.44 vs. limit=22.5
+2024-08-30 16:18:53,751 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.37 vs. limit=15.0
+2024-08-30 16:19:24,183 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.668e+02 1.884e+02 2.263e+02 3.606e+02, threshold=3.767e+02, percent-clipped=0.0
+2024-08-30 16:19:48,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=231760.0, ans=0.1
+2024-08-30 16:19:52,628 INFO [train.py:1114] (1/4) Epoch 18, batch 1150, loss[loss=0.2151, simple_loss=0.2788, pruned_loss=0.05544, ctc_loss=0.1015, over 19588.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2724, pruned_loss=0.05071, ctc_loss=0.0958, over 3830167.85 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:19:55,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=231813.33333333334, ans=0.125
+2024-08-30 16:20:00,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.72 vs. limit=10.0
+2024-08-30 16:20:06,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=231866.66666666666, ans=0.0
+2024-08-30 16:22:20,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=231920.0, ans=0.0
+2024-08-30 16:22:56,526 INFO [train.py:1114] (1/4) Epoch 18, batch 1200, loss[loss=0.1942, simple_loss=0.2771, pruned_loss=0.03994, ctc_loss=0.07877, over 19831.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2739, pruned_loss=0.051, ctc_loss=0.0965, over 3825669.59 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:23:05,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=232080.0, ans=0.0
+2024-08-30 16:23:10,030 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=232133.33333333334, ans=0.0
+2024-08-30 16:23:24,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=232186.66666666666, ans=0.125
+2024-08-30 16:23:26,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.29 vs. limit=10.0
+2024-08-30 16:23:30,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=232240.0, ans=0.125
+2024-08-30 16:23:33,059 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.67 vs. limit=15.0
+2024-08-30 16:23:34,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=232240.0, ans=0.125
+2024-08-30 16:23:38,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=232240.0, ans=0.0
+2024-08-30 16:23:38,481 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=232240.0, ans=0.125
+2024-08-30 16:23:39,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=232240.0, ans=0.125
+2024-08-30 16:23:39,549 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=232240.0, ans=0.1
+2024-08-30 16:23:45,954 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.656e+02 1.841e+02 2.164e+02 3.391e+02, threshold=3.682e+02, percent-clipped=0.0
+2024-08-30 16:23:52,933 INFO [train.py:1114] (1/4) Epoch 18, batch 1250, loss[loss=0.2267, simple_loss=0.2931, pruned_loss=0.05839, ctc_loss=0.1086, over 19525.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2741, pruned_loss=0.0511, ctc_loss=0.09647, over 3842781.75 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:23:57,412 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.67 vs. limit=15.0
+2024-08-30 16:24:16,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=232453.33333333334, ans=0.0
+2024-08-30 16:24:18,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=232453.33333333334, ans=10.0
+2024-08-30 16:25:41,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=232560.0, ans=0.125
+2024-08-30 16:25:42,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=232560.0, ans=0.0
+2024-08-30 16:25:45,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=232560.0, ans=0.1
+2024-08-30 16:25:53,676 INFO [train.py:1114] (1/4) Epoch 18, batch 1300, loss[loss=0.2079, simple_loss=0.2827, pruned_loss=0.04905, ctc_loss=0.08728, over 18867.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2728, pruned_loss=0.05038, ctc_loss=0.09502, over 3846959.32 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:32:25,719 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=232666.66666666666, ans=0.1
+2024-08-30 16:45:22,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=232826.66666666666, ans=0.125
+2024-08-30 16:45:23,079 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.785e+02 2.170e+02 2.759e+02 4.331e+02, threshold=4.339e+02, percent-clipped=5.0
+2024-08-30 16:54:23,612 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=232826.66666666666, ans=0.2
+2024-08-30 17:02:45,881 INFO [train.py:1114] (1/4) Epoch 18, batch 1350, loss[loss=0.1762, simple_loss=0.2595, pruned_loss=0.03357, ctc_loss=0.06426, over 19771.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2726, pruned_loss=0.05023, ctc_loss=0.09447, over 3856082.84 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-2
new file mode 100644
index 0000000000000000000000000000000000000000..3aa1ec07c94fb274b65677b9f760ba02eb0dbdd6
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-2
@@ -0,0 +1,570 @@
+2024-08-30 12:44:46,732 INFO [train.py:1182] (2/4) Training started
+2024-08-30 12:44:53,509 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-30 12:44:53,511 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 17, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 12:44:53,512 INFO [train.py:1212] (2/4) About to create model
+2024-08-30 12:44:54,217 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-30 12:44:54,218 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-16.pt
+2024-08-30 12:45:01,882 INFO [train.py:1231] (2/4) Using DDP
+2024-08-30 12:45:06,262 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-30 12:45:06,460 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-30 12:45:06,461 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-30 12:45:06,701 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-30 12:45:06,701 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-30 12:45:06,701 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-30 12:45:06,701 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-30 12:45:06,701 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-30 12:45:08,264 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-30 12:45:08,265 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-30 12:45:08,444 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-30 12:45:08,576 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-30 12:45:08,902 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-30 12:45:08,902 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 12:51:17,009 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-30 12:51:18,486 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-30 12:53:02,306 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-30 12:53:03,454 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=192, metric=8.24 vs. limit=7.5
+2024-08-30 12:53:03,661 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-30 12:54:12,747 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-30 12:54:14,357 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-30 12:54:14,376 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-30 12:54:59,377 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.34 vs. limit=15.0
+2024-08-30 12:55:06,919 INFO [train.py:1114] (2/4) Epoch 17, batch 0, loss[loss=0.1952, simple_loss=0.2558, pruned_loss=0.04945, ctc_loss=0.08906, over 19416.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2558, pruned_loss=0.04945, ctc_loss=0.08906, over 19416.00 frames. ], batch size: 48, lr: 8.95e-03, grad_scale: 32.0
+2024-08-30 12:55:06,920 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-30 12:55:27,184 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([6.0834, 5.5112, 5.0666, 5.1729], device='cuda:2')
+2024-08-30 12:55:27,653 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.8662, 2.0752, 3.2524, 3.4011], device='cuda:2')
+2024-08-30 12:55:31,712 INFO [train.py:1146] (2/4) Epoch 17, validation: loss=0.185, simple_loss=0.2737, pruned_loss=0.03584, ctc_loss=0.06176, over 944034.00 frames.
+2024-08-30 12:55:31,713 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12967MB
+2024-08-30 12:55:33,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=212405.33333333334, ans=0.125
+2024-08-30 12:56:02,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-30 12:56:20,279 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=212458.66666666666, ans=0.025
+2024-08-30 13:06:19,314 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.860e+02 2.030e+02 2.233e+02 2.993e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-30 13:06:19,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=212565.33333333334, ans=0.125
+2024-08-30 13:07:20,627 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.05 vs. limit=6.0
+2024-08-30 13:09:56,387 INFO [train.py:1114] (2/4) Epoch 17, batch 50, loss[loss=0.1763, simple_loss=0.2446, pruned_loss=0.03925, ctc_loss=0.07347, over 19722.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2782, pruned_loss=0.05445, ctc_loss=0.1029, over 845315.44 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:10:01,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=212672.0, ans=0.125
+2024-08-30 13:11:45,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=212672.0, ans=0.0
+2024-08-30 13:16:42,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=212725.33333333334, ans=0.1
+2024-08-30 13:18:12,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=212778.66666666666, ans=0.125
+2024-08-30 13:18:12,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=212778.66666666666, ans=0.05
+2024-08-30 13:18:13,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=212778.66666666666, ans=0.125
+2024-08-30 13:18:31,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=212832.0, ans=0.125
+2024-08-30 13:19:01,146 INFO [train.py:1114] (2/4) Epoch 17, batch 100, loss[loss=0.1981, simple_loss=0.2692, pruned_loss=0.04602, ctc_loss=0.08747, over 19753.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2772, pruned_loss=0.05326, ctc_loss=0.1003, over 1498877.26 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:19:17,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=212992.0, ans=0.1
+2024-08-30 13:21:59,155 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.59 vs. limit=22.5
+2024-08-30 13:23:00,811 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.80 vs. limit=12.0
+2024-08-30 13:23:11,084 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.706e+02 1.953e+02 2.287e+02 3.713e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-30 13:24:07,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.06 vs. limit=15.0
+2024-08-30 13:24:10,866 INFO [train.py:1114] (2/4) Epoch 17, batch 150, loss[loss=0.1857, simple_loss=0.2527, pruned_loss=0.04277, ctc_loss=0.08313, over 19722.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2756, pruned_loss=0.0522, ctc_loss=0.0986, over 2027544.43 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:24:14,151 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.90 vs. limit=6.0
+2024-08-30 13:27:35,250 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=213365.33333333334, ans=0.125
+2024-08-30 13:27:51,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=213365.33333333334, ans=0.125
+2024-08-30 13:27:55,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-30 13:28:05,733 INFO [train.py:1114] (2/4) Epoch 17, batch 200, loss[loss=0.2337, simple_loss=0.2901, pruned_loss=0.06478, ctc_loss=0.1196, over 18397.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.275, pruned_loss=0.05219, ctc_loss=0.09867, over 2435139.54 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:28:16,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=213525.33333333334, ans=0.09899494936611666
+2024-08-30 13:28:40,334 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.384e+02 1.731e+02 1.992e+02 2.666e+02 4.093e+02, threshold=3.983e+02, percent-clipped=1.0
+2024-08-30 13:28:55,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=213685.33333333334, ans=0.125
+2024-08-30 13:29:01,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=213685.33333333334, ans=0.0
+2024-08-30 13:29:04,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=213685.33333333334, ans=0.1
+2024-08-30 13:29:07,536 INFO [train.py:1114] (2/4) Epoch 17, batch 250, loss[loss=0.2099, simple_loss=0.2793, pruned_loss=0.05083, ctc_loss=0.09702, over 19307.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2744, pruned_loss=0.05153, ctc_loss=0.09732, over 2755756.17 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:29:51,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=213898.66666666666, ans=0.025
+2024-08-30 13:29:52,080 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.21 vs. limit=10.0
+2024-08-30 13:30:02,068 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=213952.0, ans=0.125
+2024-08-30 13:30:13,963 INFO [train.py:1114] (2/4) Epoch 17, batch 300, loss[loss=0.2338, simple_loss=0.294, pruned_loss=0.06369, ctc_loss=0.1155, over 19521.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2748, pruned_loss=0.05154, ctc_loss=0.09736, over 3000078.39 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:30:16,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=214005.33333333334, ans=0.125
+2024-08-30 13:30:35,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=214058.66666666666, ans=0.0
+2024-08-30 13:30:45,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214112.0, ans=0.125
+2024-08-30 13:30:51,813 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.663e+02 1.872e+02 2.298e+02 3.693e+02, threshold=3.744e+02, percent-clipped=0.0
+2024-08-30 13:31:25,595 INFO [train.py:1114] (2/4) Epoch 17, batch 350, loss[loss=0.2074, simple_loss=0.2638, pruned_loss=0.05408, ctc_loss=0.1073, over 19751.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2751, pruned_loss=0.05169, ctc_loss=0.09759, over 3189055.41 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:31:25,742 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214272.0, ans=0.125
+2024-08-30 13:31:28,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=214272.0, ans=0.125
+2024-08-30 13:31:45,146 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.97 vs. limit=15.0
+2024-08-30 13:32:03,918 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214432.0, ans=0.125
+2024-08-30 13:32:08,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=214432.0, ans=0.125
+2024-08-30 13:32:24,581 INFO [train.py:1114] (2/4) Epoch 17, batch 400, loss[loss=0.2113, simple_loss=0.2814, pruned_loss=0.05133, ctc_loss=0.0964, over 19496.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2742, pruned_loss=0.05122, ctc_loss=0.09685, over 3340401.63 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:32:30,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=214538.66666666666, ans=0.0
+2024-08-30 13:33:01,009 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.640e+02 1.901e+02 2.325e+02 4.074e+02, threshold=3.801e+02, percent-clipped=1.0
+2024-08-30 13:33:20,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=214752.0, ans=0.0
+2024-08-30 13:33:26,242 INFO [train.py:1114] (2/4) Epoch 17, batch 450, loss[loss=0.1993, simple_loss=0.2716, pruned_loss=0.04533, ctc_loss=0.09091, over 19617.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2749, pruned_loss=0.05159, ctc_loss=0.09742, over 3449884.10 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:34:00,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=214858.66666666666, ans=0.0
+2024-08-30 13:34:00,621 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.17 vs. limit=15.0
+2024-08-30 13:34:02,177 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.38 vs. limit=6.0
+2024-08-30 13:34:07,455 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214858.66666666666, ans=0.1
+2024-08-30 13:38:13,354 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.37 vs. limit=12.0
+2024-08-30 13:38:29,784 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=214912.0, ans=0.0
+2024-08-30 13:38:30,840 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=214912.0, ans=0.2
+2024-08-30 13:43:53,809 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=215018.66666666666, ans=0.025
+2024-08-30 13:44:00,147 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215018.66666666666, ans=0.1
+2024-08-30 13:44:05,807 INFO [train.py:1114] (2/4) Epoch 17, batch 500, loss[loss=0.234, simple_loss=0.2974, pruned_loss=0.06128, ctc_loss=0.1202, over 19708.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2744, pruned_loss=0.05148, ctc_loss=0.09717, over 3545931.45 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:44:35,794 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=215072.0, ans=0.125
+2024-08-30 13:44:57,579 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.17 vs. limit=10.0
+2024-08-30 13:45:04,962 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.19 vs. limit=22.5
+2024-08-30 13:45:06,443 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.780e+02 2.026e+02 2.589e+02 4.105e+02, threshold=4.052e+02, percent-clipped=2.0
+2024-08-30 13:45:11,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=215232.0, ans=0.2
+2024-08-30 13:45:31,464 INFO [train.py:1114] (2/4) Epoch 17, batch 550, loss[loss=0.2212, simple_loss=0.2817, pruned_loss=0.05796, ctc_loss=0.112, over 19279.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2739, pruned_loss=0.05121, ctc_loss=0.09666, over 3608240.69 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-30 13:45:36,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=215338.66666666666, ans=0.5
+2024-08-30 13:45:52,209 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:45:52,622 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.06 vs. limit=15.0
+2024-08-30 13:45:55,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=215445.33333333334, ans=0.0
+2024-08-30 13:46:10,467 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.whiten.whitening_limit, batch_count=215498.66666666666, ans=12.0
+2024-08-30 13:46:12,889 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.66 vs. limit=15.0
+2024-08-30 13:46:16,716 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215498.66666666666, ans=0.1
+2024-08-30 13:46:26,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=215552.0, ans=0.1
+2024-08-30 13:46:26,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=215552.0, ans=0.0
+2024-08-30 13:46:27,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=215552.0, ans=0.125
+2024-08-30 13:47:20,208 INFO [train.py:1114] (2/4) Epoch 17, batch 600, loss[loss=0.2404, simple_loss=0.3028, pruned_loss=0.06502, ctc_loss=0.1201, over 19456.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2743, pruned_loss=0.05135, ctc_loss=0.09664, over 3666029.34 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-30 13:47:29,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=215605.33333333334, ans=0.125
+2024-08-30 13:47:32,371 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.64 vs. limit=22.5
+2024-08-30 13:47:45,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=215712.0, ans=0.0
+2024-08-30 13:47:53,791 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.647e+02 1.940e+02 2.383e+02 4.124e+02, threshold=3.879e+02, percent-clipped=1.0
+2024-08-30 13:47:54,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=215765.33333333334, ans=0.125
+2024-08-30 13:48:05,596 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=215765.33333333334, ans=0.0
+2024-08-30 13:48:17,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=215818.66666666666, ans=0.0
+2024-08-30 13:48:27,128 INFO [train.py:1114] (2/4) Epoch 17, batch 650, loss[loss=0.2122, simple_loss=0.2806, pruned_loss=0.05187, ctc_loss=0.1003, over 19776.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2739, pruned_loss=0.05119, ctc_loss=0.09626, over 3716225.09 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 32.0
+2024-08-30 13:50:17,385 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.15 vs. limit=6.0
+2024-08-30 13:51:24,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=215925.33333333334, ans=0.0
+2024-08-30 13:51:25,486 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215925.33333333334, ans=0.1
+2024-08-30 13:52:31,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=216032.0, ans=0.2
+2024-08-30 13:52:35,751 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=216032.0, ans=0.0
+2024-08-30 13:53:20,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216085.33333333334, ans=0.1
+2024-08-30 14:04:24,249 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216085.33333333334, ans=0.1
+2024-08-30 14:06:19,758 INFO [train.py:1114] (2/4) Epoch 17, batch 700, loss[loss=0.1879, simple_loss=0.2623, pruned_loss=0.04087, ctc_loss=0.07939, over 19707.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2748, pruned_loss=0.05184, ctc_loss=0.09754, over 3747768.85 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:06:27,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=216138.66666666666, ans=0.125
+2024-08-30 14:07:09,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=216192.0, ans=0.025
+2024-08-30 14:12:13,791 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 1.667e+02 2.137e+02 2.601e+02 4.284e+02, threshold=4.274e+02, percent-clipped=4.0
+2024-08-30 14:16:31,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=216298.66666666666, ans=0.125
+2024-08-30 14:17:08,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=216352.0, ans=0.125
+2024-08-30 14:17:27,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=216352.0, ans=0.125
+2024-08-30 14:17:35,080 INFO [train.py:1114] (2/4) Epoch 17, batch 750, loss[loss=0.1921, simple_loss=0.2638, pruned_loss=0.04346, ctc_loss=0.08339, over 19516.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2745, pruned_loss=0.05188, ctc_loss=0.09772, over 3774513.43 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:17:39,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=216405.33333333334, ans=0.125
+2024-08-30 14:18:08,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=216458.66666666666, ans=0.125
+2024-08-30 14:18:59,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=216512.0, ans=0.125
+2024-08-30 14:19:30,934 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.63 vs. limit=10.0
+2024-08-30 14:19:31,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=216565.33333333334, ans=0.1
+2024-08-30 14:19:34,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=216565.33333333334, ans=0.0
+2024-08-30 14:19:48,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=216618.66666666666, ans=0.0
+2024-08-30 14:20:37,995 INFO [train.py:1114] (2/4) Epoch 17, batch 800, loss[loss=0.1639, simple_loss=0.2384, pruned_loss=0.03221, ctc_loss=0.06252, over 19431.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2746, pruned_loss=0.05177, ctc_loss=0.09773, over 3795456.66 frames. ], batch size: 48, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:22:01,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=216672.0, ans=10.0
+2024-08-30 14:31:25,884 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.25 vs. limit=15.0
+2024-08-30 14:31:32,320 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.715e+02 2.071e+02 2.537e+02 3.967e+02, threshold=4.143e+02, percent-clipped=0.0
+2024-08-30 14:32:33,426 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=216938.66666666666, ans=0.125
+2024-08-30 14:32:34,223 INFO [train.py:1114] (2/4) Epoch 17, batch 850, loss[loss=0.2342, simple_loss=0.2938, pruned_loss=0.06327, ctc_loss=0.1199, over 19651.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.274, pruned_loss=0.05137, ctc_loss=0.09688, over 3815335.46 frames. ], batch size: 59, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:32:37,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=216938.66666666666, ans=0.1
+2024-08-30 14:32:39,341 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=216938.66666666666, ans=0.025
+2024-08-30 14:32:53,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=216992.0, ans=0.0
+2024-08-30 14:33:08,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=217045.33333333334, ans=0.0
+2024-08-30 14:33:25,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=217098.66666666666, ans=0.0
+2024-08-30 14:34:06,367 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.75 vs. limit=15.0
+2024-08-30 14:34:26,344 INFO [train.py:1114] (2/4) Epoch 17, batch 900, loss[loss=0.2044, simple_loss=0.2682, pruned_loss=0.05046, ctc_loss=0.09912, over 19394.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2748, pruned_loss=0.05196, ctc_loss=0.0977, over 3819723.78 frames. ], batch size: 48, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:35:58,504 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=217205.33333333334, ans=0.125
+2024-08-30 14:36:00,629 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:36:28,170 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=217258.66666666666, ans=0.2
+2024-08-30 14:36:46,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=217312.0, ans=0.125
+2024-08-30 14:36:59,138 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.623e+02 1.810e+02 2.233e+02 4.039e+02, threshold=3.621e+02, percent-clipped=0.0
+2024-08-30 14:37:03,367 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=217365.33333333334, ans=0.125
+2024-08-30 14:37:09,450 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=217365.33333333334, ans=0.125
+2024-08-30 14:37:24,537 INFO [train.py:1114] (2/4) Epoch 17, batch 950, loss[loss=0.2009, simple_loss=0.2648, pruned_loss=0.05043, ctc_loss=0.09046, over 19494.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2755, pruned_loss=0.05246, ctc_loss=0.09879, over 3820675.06 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:38:26,052 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.31 vs. limit=22.5
+2024-08-30 14:38:37,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=217578.66666666666, ans=0.125
+2024-08-30 14:38:41,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=217578.66666666666, ans=0.125
+2024-08-30 14:38:43,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=217578.66666666666, ans=0.125
+2024-08-30 14:38:43,164 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:38:47,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=217578.66666666666, ans=0.1
+2024-08-30 14:38:51,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=217632.0, ans=0.1
+2024-08-30 14:39:04,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=217685.33333333334, ans=0.125
+2024-08-30 14:39:16,468 INFO [train.py:1114] (2/4) Epoch 17, batch 1000, loss[loss=0.2093, simple_loss=0.2747, pruned_loss=0.05225, ctc_loss=0.09862, over 19852.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2761, pruned_loss=0.0527, ctc_loss=0.09925, over 3817731.00 frames. ], batch size: 52, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:39:33,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=217792.0, ans=0.1
+2024-08-30 14:39:36,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=217792.0, ans=0.025
+2024-08-30 14:39:38,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=217792.0, ans=0.125
+2024-08-30 14:39:48,385 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.04 vs. limit=6.0
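+
+The Whitening entries compare a per-module statistic against a limit. The
+metric measures how far the activation covariance is from a multiple of the
+identity: 1.0 when the channels are perfectly white, larger as a few
+directions dominate. A plausible reconstruction for a single whitening
+group, not the exact code in scaling.py:
+
+import torch
+
+def whitening_metric(x: torch.Tensor) -> float:
+    # x: (num_frames, num_channels) activations for one group.
+    c = x.t() @ x / x.shape[0]  # uncentered feature covariance
+    n = c.shape[0]
+    # n * sum(eig**2) / sum(eig)**2: exactly 1.0 for c = a * I, growing with
+    # the eigenvalue spread; training pushes it back under the limit when a
+    # line like "metric=9.75 vs. limit=15.0" fires.
+    return float(n * (c * c).sum() / torch.diag(c).sum() ** 2)
+
+print(whitening_metric(torch.randn(1000, 256)))  # close to 1.0
+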
+2024-08-30 14:39:52,695 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.648e+02 1.905e+02 2.181e+02 3.196e+02, threshold=3.810e+02, percent-clipped=0.0
+2024-08-30 14:39:58,792 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=217898.66666666666, ans=0.0
+2024-08-30 14:40:13,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=217952.0, ans=0.07
+2024-08-30 14:40:17,832 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.81 vs. limit=22.5
+2024-08-30 14:40:18,083 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.69 vs. limit=15.0
+2024-08-30 14:40:20,739 INFO [train.py:1114] (2/4) Epoch 17, batch 1050, loss[loss=0.2084, simple_loss=0.2766, pruned_loss=0.05076, ctc_loss=0.09691, over 19848.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.275, pruned_loss=0.05221, ctc_loss=0.09831, over 3825551.01 frames. ], batch size: 57, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:40:27,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=218005.33333333334, ans=0.1
+2024-08-30 14:40:28,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=218005.33333333334, ans=0.0
+2024-08-30 14:40:36,884 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=218058.66666666666, ans=0.07
+2024-08-30 14:40:46,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=218112.0, ans=0.125
+2024-08-30 14:40:57,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=218165.33333333334, ans=0.125
+2024-08-30 14:41:15,404 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.97 vs. limit=12.0
+2024-08-30 14:41:16,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=218218.66666666666, ans=0.125
+2024-08-30 14:41:22,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=218218.66666666666, ans=0.0
+2024-08-30 14:41:23,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=218272.0, ans=0.125
+2024-08-30 14:41:24,731 INFO [train.py:1114] (2/4) Epoch 17, batch 1100, loss[loss=0.1895, simple_loss=0.2676, pruned_loss=0.04011, ctc_loss=0.07793, over 19592.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2749, pruned_loss=0.05209, ctc_loss=0.09814, over 3832810.09 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:41:28,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=218272.0, ans=0.125
+2024-08-30 14:41:30,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=218272.0, ans=0.1
+2024-08-30 14:41:44,975 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=218325.33333333334, ans=0.125
+2024-08-30 14:42:16,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=218378.66666666666, ans=0.1
+2024-08-30 14:42:23,465 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.634e+02 1.909e+02 2.238e+02 3.833e+02, threshold=3.817e+02, percent-clipped=1.0
+2024-08-30 14:43:15,267 INFO [train.py:1114] (2/4) Epoch 17, batch 1150, loss[loss=0.1868, simple_loss=0.2555, pruned_loss=0.04262, ctc_loss=0.08214, over 19591.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.275, pruned_loss=0.05222, ctc_loss=0.09841, over 3830356.08 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:43:23,248 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=218538.66666666666, ans=0.0
+2024-08-30 14:43:42,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=218592.0, ans=0.125
+2024-08-30 14:43:44,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=218645.33333333334, ans=0.2
+2024-08-30 14:43:44,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=218645.33333333334, ans=0.1
+2024-08-30 14:43:50,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=218645.33333333334, ans=0.0
+2024-08-30 14:43:56,231 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.01 vs. limit=22.5
+2024-08-30 14:44:04,885 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.20 vs. limit=6.0
+2024-08-30 14:44:12,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff3.min_abs, batch_count=218752.0, ans=0.2
+2024-08-30 14:44:16,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.59 vs. limit=15.0
+2024-08-30 14:44:20,101 INFO [train.py:1114] (2/4) Epoch 17, batch 1200, loss[loss=0.2177, simple_loss=0.2849, pruned_loss=0.05574, ctc_loss=0.09774, over 19842.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2762, pruned_loss=0.05268, ctc_loss=0.09929, over 3825167.37 frames. ], batch size: 57, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:44:20,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=218805.33333333334, ans=0.2
+2024-08-30 14:46:08,195 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.28 vs. limit=15.0
+2024-08-30 14:46:08,763 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 1.734e+02 1.937e+02 2.235e+02 3.279e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-30 14:46:26,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=219018.66666666666, ans=0.04949747468305833
+2024-08-30 14:46:31,313 INFO [train.py:1114] (2/4) Epoch 17, batch 1250, loss[loss=0.2169, simple_loss=0.2782, pruned_loss=0.05727, ctc_loss=0.1026, over 19515.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2767, pruned_loss=0.05278, ctc_loss=0.09932, over 3843189.79 frames. ], batch size: 61, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:46:39,340 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.21 vs. limit=12.0
+2024-08-30 14:48:24,474 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.86 vs. limit=15.0
+2024-08-30 14:48:52,392 INFO [train.py:1114] (2/4) Epoch 17, batch 1300, loss[loss=0.2187, simple_loss=0.2868, pruned_loss=0.05462, ctc_loss=0.1034, over 18942.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2758, pruned_loss=0.0523, ctc_loss=0.09846, over 3847215.40 frames. ], batch size: 76, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:49:29,490 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.750e+02 2.054e+02 2.564e+02 3.826e+02, threshold=4.108e+02, percent-clipped=0.0
+2024-08-30 14:49:32,140 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=219498.66666666666, ans=0.125
+2024-08-30 14:49:44,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=219552.0, ans=0.07
+2024-08-30 14:50:00,460 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.30 vs. limit=12.0
+2024-08-30 14:50:08,914 INFO [train.py:1114] (2/4) Epoch 17, batch 1350, loss[loss=0.2038, simple_loss=0.2692, pruned_loss=0.05046, ctc_loss=0.09351, over 19785.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2753, pruned_loss=0.05218, ctc_loss=0.09819, over 3857406.84 frames. ], batch size: 54, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:50:16,263 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.00 vs. limit=15.0
+2024-08-30 14:50:17,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=219605.33333333334, ans=0.0
+2024-08-30 14:50:17,546 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.10 vs. limit=12.0
+2024-08-30 14:50:19,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=219658.66666666666, ans=0.0
+2024-08-30 14:50:24,794 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:50:34,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=219712.0, ans=0.125
+2024-08-30 14:51:04,674 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.70 vs. limit=6.0
+2024-08-30 14:51:09,317 INFO [train.py:1114] (2/4) Epoch 17, batch 1400, loss[loss=0.1717, simple_loss=0.2298, pruned_loss=0.04066, ctc_loss=0.08087, over 19655.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2745, pruned_loss=0.05173, ctc_loss=0.09733, over 3863333.21 frames. ], batch size: 46, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:51:27,409 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.85 vs. limit=22.5
+2024-08-30 14:52:01,609 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.697e+02 1.910e+02 2.399e+02 4.058e+02, threshold=3.819e+02, percent-clipped=0.0
+2024-08-30 14:52:08,059 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.64 vs. limit=15.0
+2024-08-30 14:52:24,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=220085.33333333334, ans=0.025
+2024-08-30 14:52:25,662 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.44 vs. limit=15.0
+2024-08-30 14:52:26,360 INFO [train.py:1114] (2/4) Epoch 17, batch 1450, loss[loss=0.2146, simple_loss=0.2918, pruned_loss=0.05032, ctc_loss=0.09184, over 19634.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.275, pruned_loss=0.05188, ctc_loss=0.09757, over 3862191.42 frames. ], batch size: 63, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:52:42,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=220138.66666666666, ans=0.125
+2024-08-30 14:52:59,470 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.32 vs. limit=10.0
+2024-08-30 14:54:12,185 INFO [train.py:1114] (2/4) Epoch 17, batch 1500, loss[loss=0.208, simple_loss=0.2727, pruned_loss=0.05123, ctc_loss=0.102, over 19586.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2754, pruned_loss=0.05208, ctc_loss=0.09809, over 3862121.78 frames. ], batch size: 57, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:54:33,523 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=220458.66666666666, ans=0.125
+2024-08-30 14:54:40,381 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.36 vs. limit=10.0
+2024-08-30 14:54:43,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=220512.0, ans=0.0
+2024-08-30 14:54:47,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=220512.0, ans=0.125
+2024-08-30 14:54:49,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=220512.0, ans=0.125
+2024-08-30 14:54:54,682 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.719e+02 1.906e+02 2.293e+02 3.704e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-30 14:55:02,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=220565.33333333334, ans=0.125
+2024-08-30 14:55:03,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=220565.33333333334, ans=0.07
+2024-08-30 14:55:17,122 INFO [train.py:1114] (2/4) Epoch 17, batch 1550, loss[loss=0.2365, simple_loss=0.3022, pruned_loss=0.06304, ctc_loss=0.112, over 19616.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2756, pruned_loss=0.05241, ctc_loss=0.09874, over 3845614.46 frames. ], batch size: 60, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:55:47,493 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.28 vs. limit=22.5
+2024-08-30 14:55:56,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=220778.66666666666, ans=0.125
+2024-08-30 14:55:56,951 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.34 vs. limit=12.0
+2024-08-30 14:56:01,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=220778.66666666666, ans=0.025
+2024-08-30 14:56:27,382 INFO [train.py:1114] (2/4) Epoch 17, batch 1600, loss[loss=0.2328, simple_loss=0.2953, pruned_loss=0.06139, ctc_loss=0.1187, over 19832.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2752, pruned_loss=0.05242, ctc_loss=0.09873, over 3835786.34 frames. ], batch size: 57, lr: 8.78e-03, grad_scale: 32.0
+2024-08-30 14:57:18,986 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=220938.66666666666, ans=0.1
+2024-08-30 14:57:21,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=220992.0, ans=15.0
+2024-08-30 14:58:34,663 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.55 vs. limit=22.5
+2024-08-30 15:00:47,791 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.738e+02 2.160e+02 2.635e+02 3.870e+02, threshold=4.320e+02, percent-clipped=2.0
+2024-08-30 15:00:50,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=221098.66666666666, ans=0.0
+2024-08-30 15:00:51,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=221098.66666666666, ans=0.125
+2024-08-30 15:02:48,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=221152.0, ans=0.025
+2024-08-30 15:03:53,982 INFO [train.py:1114] (2/4) Epoch 17, batch 1650, loss[loss=0.2128, simple_loss=0.2812, pruned_loss=0.05343, ctc_loss=0.09384, over 19655.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2746, pruned_loss=0.05213, ctc_loss=0.09827, over 3833058.26 frames. ], batch size: 59, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:03:57,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=221205.33333333334, ans=0.2
+2024-08-30 15:03:59,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=221205.33333333334, ans=0.05
+2024-08-30 15:05:39,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=221258.66666666666, ans=0.0
+2024-08-30 15:05:51,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=221258.66666666666, ans=0.0
+2024-08-30 15:07:26,267 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.70 vs. limit=15.0
+2024-08-30 15:07:44,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=221365.33333333334, ans=22.5
+2024-08-30 15:07:47,345 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.66 vs. limit=8.0
+2024-08-30 15:08:00,829 INFO [train.py:1114] (2/4) Epoch 17, batch 1700, loss[loss=0.1937, simple_loss=0.2501, pruned_loss=0.05023, ctc_loss=0.0918, over 19677.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2745, pruned_loss=0.05179, ctc_loss=0.09761, over 3847660.59 frames. ], batch size: 46, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:08:06,819 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.25 vs. limit=22.5
+2024-08-30 15:08:18,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=221525.33333333334, ans=0.125
+2024-08-30 15:08:36,782 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.717e+02 1.998e+02 2.422e+02 4.059e+02, threshold=3.996e+02, percent-clipped=0.0
+2024-08-30 15:09:50,038 INFO [train.py:1114] (2/4) Epoch 17, batch 1750, loss[loss=0.1998, simple_loss=0.2555, pruned_loss=0.05229, ctc_loss=0.09908, over 19643.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2738, pruned_loss=0.05143, ctc_loss=0.09694, over 3852036.08 frames. ], batch size: 45, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:09:57,897 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.63 vs. limit=12.0
+2024-08-30 15:10:03,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=221792.0, ans=0.125
+2024-08-30 15:10:11,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=221845.33333333334, ans=0.125
+2024-08-30 15:10:46,082 INFO [train.py:1114] (2/4) Epoch 17, batch 1800, loss[loss=0.1995, simple_loss=0.2735, pruned_loss=0.04593, ctc_loss=0.08381, over 19605.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.274, pruned_loss=0.05157, ctc_loss=0.09693, over 3853124.07 frames. ], batch size: 55, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:11:21,273 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=222165.33333333334, ans=0.125
+2024-08-30 15:11:23,232 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.773e+02 2.029e+02 2.607e+02 4.351e+02, threshold=4.057e+02, percent-clipped=1.0
+2024-08-30 15:11:25,865 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=222165.33333333334, ans=0.125
+2024-08-30 15:11:36,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=222218.66666666666, ans=0.125
+2024-08-30 15:11:37,311 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.28 vs. limit=12.0
+2024-08-30 15:11:38,212 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.42 vs. limit=22.5
+2024-08-30 15:11:43,556 INFO [train.py:1114] (2/4) Epoch 17, batch 1850, loss[loss=0.2005, simple_loss=0.2762, pruned_loss=0.04428, ctc_loss=0.09059, over 19604.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2739, pruned_loss=0.05151, ctc_loss=0.0968, over 3856458.31 frames. ], batch size: 57, lr: 8.75e-03, grad_scale: 16.0
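+
+The grad_scale field is the dynamic loss-scaling factor used with mixed
+precision: it halves when a step overflows, as in the drop from 32.0 at
+batch 1800 to 16.0 here, and grows back after a run of stable steps (32.0
+again by batch 2000 below). A generic torch.cuda.amp sketch of that
+mechanism; the function and batch keys are illustrative, and the recipe's
+train.py wiring may differ:
+
+import torch
+
+scaler = torch.cuda.amp.GradScaler(init_scale=32.0, growth_interval=2000)
+
+def training_step(model, batch, optimizer, criterion):
+    optimizer.zero_grad()
+    with torch.cuda.amp.autocast():
+        loss = criterion(model(batch["inputs"]), batch["targets"])
+    scaler.scale(loss).backward()
+    scaler.step(optimizer)  # skipped when inf/nan gradients are detected
+    scaler.update()         # halves the scale on overflow, regrows it later
+    return scaler.get_scale()
+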
+2024-08-30 15:12:17,802 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.30 vs. limit=15.0
+2024-08-30 15:12:19,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=222432.0, ans=0.125
+2024-08-30 15:12:23,064 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=222432.0, ans=0.0
+2024-08-30 15:12:24,279 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=222432.0, ans=0.125
+2024-08-30 15:12:37,429 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.02 vs. limit=22.5
+2024-08-30 15:12:40,691 INFO [train.py:1114] (2/4) Epoch 17, batch 1900, loss[loss=0.1881, simple_loss=0.2748, pruned_loss=0.03728, ctc_loss=0.06736, over 19617.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2741, pruned_loss=0.05122, ctc_loss=0.09622, over 3861433.48 frames. ], batch size: 59, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:12:41,474 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.11 vs. limit=12.0
+2024-08-30 15:13:03,427 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=222645.33333333334, ans=0.125
+2024-08-30 15:13:18,238 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.682e+02 1.950e+02 2.328e+02 4.923e+02, threshold=3.901e+02, percent-clipped=3.0
+2024-08-30 15:13:34,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=222752.0, ans=0.0
+2024-08-30 15:13:38,420 INFO [train.py:1114] (2/4) Epoch 17, batch 1950, loss[loss=0.1948, simple_loss=0.2617, pruned_loss=0.04641, ctc_loss=0.08783, over 19600.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2749, pruned_loss=0.05127, ctc_loss=0.09621, over 3870786.46 frames. ], batch size: 52, lr: 8.74e-03, grad_scale: 16.0
+2024-08-30 15:14:32,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=222858.66666666666, ans=0.04949747468305833
+2024-08-30 15:15:27,050 INFO [train.py:1114] (2/4) Epoch 17, batch 2000, loss[loss=0.185, simple_loss=0.2425, pruned_loss=0.04637, ctc_loss=0.08667, over 19661.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.275, pruned_loss=0.05139, ctc_loss=0.09656, over 3856145.28 frames. ], batch size: 45, lr: 8.74e-03, grad_scale: 32.0
+2024-08-30 15:15:29,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.85 vs. limit=22.5
+2024-08-30 15:15:38,804 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.27 vs. limit=22.5
+2024-08-30 15:15:44,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=223125.33333333334, ans=0.0
+2024-08-30 15:15:50,525 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.06 vs. limit=6.0
+2024-08-30 15:15:51,141 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=223178.66666666666, ans=0.125
+2024-08-30 15:15:51,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=223178.66666666666, ans=0.2
+2024-08-30 15:15:54,643 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=223178.66666666666, ans=0.0
+2024-08-30 15:15:56,725 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=223178.66666666666, ans=0.125
+2024-08-30 15:16:03,258 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.692e+02 2.099e+02 2.435e+02 3.373e+02, threshold=4.199e+02, percent-clipped=0.0
+2024-08-30 15:16:05,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=223232.0, ans=0.07
+2024-08-30 15:16:08,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=223232.0, ans=0.125
+2024-08-30 15:16:11,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=223285.33333333334, ans=0.125
+2024-08-30 15:16:12,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=223285.33333333334, ans=0.125
+2024-08-30 15:16:16,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=223285.33333333334, ans=0.025
+2024-08-30 15:16:42,837 INFO [train.py:1114] (2/4) Epoch 17, batch 2050, loss[loss=0.1693, simple_loss=0.2368, pruned_loss=0.03745, ctc_loss=0.06731, over 19708.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2747, pruned_loss=0.05181, ctc_loss=0.09733, over 3852292.04 frames. ], batch size: 47, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:18:19,214 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.13 vs. limit=6.0
+2024-08-30 15:18:29,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=223392.0, ans=0.2
+2024-08-30 15:20:15,747 INFO [train.py:1114] (2/4) Epoch 17, batch 2100, loss[loss=0.2146, simple_loss=0.2782, pruned_loss=0.05455, ctc_loss=0.1048, over 19765.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2742, pruned_loss=0.05155, ctc_loss=0.09705, over 3859849.16 frames. ], batch size: 54, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:21:08,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=223658.66666666666, ans=0.125
+2024-08-30 15:21:41,987 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.235e+02 1.693e+02 2.019e+02 2.546e+02 6.032e+02, threshold=4.039e+02, percent-clipped=5.0
+2024-08-30 15:21:43,399 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=223765.33333333334, ans=0.1
+2024-08-30 15:21:51,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=223818.66666666666, ans=0.125
+2024-08-30 15:21:58,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=223818.66666666666, ans=0.1
+2024-08-30 15:22:02,823 INFO [train.py:1114] (2/4) Epoch 17, batch 2150, loss[loss=0.2174, simple_loss=0.2713, pruned_loss=0.05951, ctc_loss=0.1113, over 19847.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2741, pruned_loss=0.0516, ctc_loss=0.09707, over 3870273.52 frames. ], batch size: 52, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:22:05,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=223872.0, ans=0.125
+2024-08-30 15:22:22,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=223925.33333333334, ans=0.125
+2024-08-30 15:22:26,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=223978.66666666666, ans=0.0
+2024-08-30 15:22:58,284 INFO [train.py:1114] (2/4) Epoch 17, batch 2200, loss[loss=0.2448, simple_loss=0.3009, pruned_loss=0.06857, ctc_loss=0.1291, over 19591.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2744, pruned_loss=0.05176, ctc_loss=0.09743, over 3869561.05 frames. ], batch size: 57, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:23:03,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=224138.66666666666, ans=0.1
+2024-08-30 15:23:21,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=224192.0, ans=0.07
+2024-08-30 15:23:53,333 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.675e+02 1.986e+02 2.371e+02 4.244e+02, threshold=3.972e+02, percent-clipped=2.0
+2024-08-30 15:24:07,139 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=224352.0, ans=0.125
+2024-08-30 15:24:13,611 INFO [train.py:1114] (2/4) Epoch 17, batch 2250, loss[loss=0.217, simple_loss=0.2906, pruned_loss=0.05139, ctc_loss=0.1014, over 19606.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2744, pruned_loss=0.05174, ctc_loss=0.09739, over 3868300.94 frames. ], batch size: 55, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:24:18,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=224405.33333333334, ans=0.125
+2024-08-30 15:25:46,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=224512.0, ans=0.2
+2024-08-30 15:26:47,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=224512.0, ans=0.125
+2024-08-30 15:27:03,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=224618.66666666666, ans=0.125
+2024-08-30 15:27:14,884 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=224672.0, ans=0.125
+2024-08-30 15:27:15,735 INFO [train.py:1114] (2/4) Epoch 17, batch 2300, loss[loss=0.1872, simple_loss=0.2581, pruned_loss=0.04325, ctc_loss=0.07445, over 19502.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2739, pruned_loss=0.05198, ctc_loss=0.09767, over 3862443.18 frames. ], batch size: 49, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:27:20,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=224672.0, ans=0.125
+2024-08-30 15:27:23,974 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=224672.0, ans=0.0
+2024-08-30 15:28:25,247 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=224725.33333333334, ans=0.2
+2024-08-30 15:28:29,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=224725.33333333334, ans=0.025
+2024-08-30 15:28:46,688 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.759e+02 2.126e+02 2.592e+02 4.068e+02, threshold=4.252e+02, percent-clipped=2.0
+2024-08-30 15:28:59,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=224885.33333333334, ans=0.125
+2024-08-30 15:29:48,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=224885.33333333334, ans=0.125
+2024-08-30 15:29:51,085 INFO [train.py:1114] (2/4) Epoch 17, batch 2350, loss[loss=0.242, simple_loss=0.3064, pruned_loss=0.06454, ctc_loss=0.1212, over 19675.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2741, pruned_loss=0.05214, ctc_loss=0.09784, over 3864783.52 frames. ], batch size: 63, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:29:53,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=224938.66666666666, ans=0.125
+2024-08-30 15:30:02,805 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.05 vs. limit=15.0
+2024-08-30 15:30:28,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=224992.0, ans=0.0
+2024-08-30 15:30:28,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=224992.0, ans=0.05
+2024-08-30 15:30:32,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=224992.0, ans=0.125
+2024-08-30 15:31:55,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=225045.33333333334, ans=0.0
+2024-08-30 15:33:11,237 INFO [train.py:1114] (2/4) Epoch 17, batch 2400, loss[loss=0.2186, simple_loss=0.2835, pruned_loss=0.05496, ctc_loss=0.1093, over 19315.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2765, pruned_loss=0.05308, ctc_loss=0.09946, over 3858493.92 frames. ], batch size: 71, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:33:30,979 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=225258.66666666666, ans=0.125
+2024-08-30 15:33:34,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=225312.0, ans=0.125
+2024-08-30 15:33:35,952 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.15 vs. limit=12.0
+2024-08-30 15:33:48,812 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.684e+02 1.880e+02 2.443e+02 3.780e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-30 15:34:10,328 INFO [train.py:1114] (2/4) Epoch 17, batch 2450, loss[loss=0.2978, simple_loss=0.3236, pruned_loss=0.09849, ctc_loss=0.1875, over 13434.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2808, pruned_loss=0.05599, ctc_loss=0.1055, over 3729261.65 frames. ], batch size: 140, lr: 8.69e-03, grad_scale: 16.0
+2024-08-30 15:34:10,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=225472.0, ans=0.125
+2024-08-30 15:34:12,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=225472.0, ans=0.0
+2024-08-30 15:34:45,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=225472.0, ans=0.125
+2024-08-30 15:34:54,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=225525.33333333334, ans=0.1
+2024-08-30 15:34:55,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=225525.33333333334, ans=0.0
+2024-08-30 15:34:59,304 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=225578.66666666666, ans=10.0
+2024-08-30 15:38:02,847 INFO [train.py:1114] (2/4) Epoch 18, batch 0, loss[loss=0.192, simple_loss=0.2512, pruned_loss=0.04765, ctc_loss=0.09374, over 19432.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2512, pruned_loss=0.04765, ctc_loss=0.09374, over 19432.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:38:02,848 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-30 15:38:39,840 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.4936, 3.0624, 2.1043, 2.6462], device='cuda:2')
+2024-08-30 15:39:34,946 INFO [train.py:1146] (2/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.0364, ctc_loss=0.06401, over 944034.00 frames.
+2024-08-30 15:39:34,947 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13562MB
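+
+At each epoch boundary the script pauses training, averages the per-frame
+losses over a fixed validation set (944034 frames here), and reports the
+CUDA high-water mark. A sketch of that loop; compute_loss stands in for the
+recipe's own loss function:
+
+import torch
+
+def run_validation(model, valid_loader, compute_loss):
+    model.eval()
+    tot, frames = 0.0, 0
+    with torch.no_grad():
+        for batch in valid_loader:
+            loss, num_frames = compute_loss(model, batch)  # per-frame loss
+            tot += float(loss) * num_frames
+            frames += num_frames
+    print(f"validation: loss={tot / frames:.4f}, over {frames} frames.")
+    print(f"Maximum memory allocated so far is "
+          f"{torch.cuda.max_memory_allocated() // 2**20}MB")
+    model.train()
+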
+2024-08-30 15:39:39,562 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=225680.0, ans=0.125
+2024-08-30 15:40:20,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-30 15:40:29,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-30 15:40:33,361 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.00 vs. limit=15.0
+2024-08-30 15:40:47,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=225840.0, ans=0.125
+2024-08-30 15:40:58,092 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.919e+02 2.092e+02 2.421e+02 5.568e+02, threshold=4.185e+02, percent-clipped=4.0
+2024-08-30 15:40:59,162 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.76 vs. limit=15.0
+2024-08-30 15:41:02,013 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=225893.33333333334, ans=0.07
+2024-08-30 15:41:04,965 INFO [train.py:1114] (2/4) Epoch 18, batch 50, loss[loss=0.1731, simple_loss=0.2371, pruned_loss=0.0395, ctc_loss=0.07531, over 19736.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2761, pruned_loss=0.05232, ctc_loss=0.09881, over 844774.48 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:42:00,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=226053.33333333334, ans=0.0
+2024-08-30 15:44:07,505 INFO [train.py:1114] (2/4) Epoch 18, batch 100, loss[loss=0.187, simple_loss=0.2598, pruned_loss=0.04168, ctc_loss=0.07718, over 19708.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2785, pruned_loss=0.05331, ctc_loss=0.09997, over 1499238.30 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:44:10,170 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=226213.33333333334, ans=0.2
+2024-08-30 15:44:14,034 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.43 vs. limit=22.5
+2024-08-30 15:44:28,954 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.34 vs. limit=15.0
+2024-08-30 15:44:40,902 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=226320.0, ans=0.04949747468305833
+2024-08-30 15:44:59,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=226426.66666666666, ans=0.125
+2024-08-30 15:45:01,911 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.711e+02 1.973e+02 2.383e+02 4.146e+02, threshold=3.946e+02, percent-clipped=0.0
+2024-08-30 15:45:10,549 INFO [train.py:1114] (2/4) Epoch 18, batch 150, loss[loss=0.1889, simple_loss=0.2549, pruned_loss=0.04414, ctc_loss=0.08667, over 19719.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2747, pruned_loss=0.05107, ctc_loss=0.09587, over 2028180.04 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:45:19,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=226480.0, ans=0.1
+2024-08-30 15:45:26,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-30 15:45:27,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-30 15:45:30,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-30 15:45:40,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=226586.66666666666, ans=0.1
+2024-08-30 15:45:43,293 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226586.66666666666, ans=0.125
+2024-08-30 15:46:03,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=226693.33333333334, ans=0.0
+2024-08-30 15:46:16,528 INFO [train.py:1114] (2/4) Epoch 18, batch 200, loss[loss=0.2072, simple_loss=0.2755, pruned_loss=0.04951, ctc_loss=0.09987, over 18272.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2737, pruned_loss=0.05095, ctc_loss=0.09567, over 2435566.49 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:46:17,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=226746.66666666666, ans=0.0
+2024-08-30 15:46:27,542 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.79 vs. limit=15.0
+2024-08-30 15:46:29,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=226800.0, ans=0.125
+2024-08-30 15:46:54,485 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-30 15:47:08,526 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.794e+02 2.164e+02 2.564e+02 4.131e+02, threshold=4.328e+02, percent-clipped=1.0
+2024-08-30 15:47:20,540 INFO [train.py:1114] (2/4) Epoch 18, batch 250, loss[loss=0.2632, simple_loss=0.3191, pruned_loss=0.07563, ctc_loss=0.1402, over 19347.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2742, pruned_loss=0.05149, ctc_loss=0.09685, over 2754676.87 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:47:23,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=227013.33333333334, ans=0.025
+2024-08-30 15:47:23,648 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=18.07 vs. limit=22.5
+2024-08-30 15:47:27,808 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=227013.33333333334, ans=0.0
+2024-08-30 15:47:32,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=227066.66666666666, ans=0.0
+2024-08-30 15:48:55,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=227173.33333333334, ans=0.0
+2024-08-30 15:49:22,637 INFO [train.py:1114] (2/4) Epoch 18, batch 300, loss[loss=0.2147, simple_loss=0.2799, pruned_loss=0.05502, ctc_loss=0.09868, over 19550.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2735, pruned_loss=0.05103, ctc_loss=0.09594, over 3000148.78 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:51:23,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=227440.0, ans=0.2
+2024-08-30 15:51:27,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=227440.0, ans=0.0
+2024-08-30 15:51:30,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=227440.0, ans=0.95
+2024-08-30 15:51:31,312 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.20 vs. limit=22.5
+2024-08-30 15:51:34,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=227493.33333333334, ans=0.2
+2024-08-30 15:51:39,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=227493.33333333334, ans=0.125
+2024-08-30 15:51:40,143 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.730e+02 1.916e+02 2.273e+02 3.732e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-30 15:51:45,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=227493.33333333334, ans=0.125
+2024-08-30 15:51:48,899 INFO [train.py:1114] (2/4) Epoch 18, batch 350, loss[loss=0.1741, simple_loss=0.2424, pruned_loss=0.03833, ctc_loss=0.0727, over 19783.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2739, pruned_loss=0.05112, ctc_loss=0.09602, over 3189797.23 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:51:50,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=227546.66666666666, ans=0.025
+2024-08-30 15:52:23,399 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.12 vs. limit=15.0
+2024-08-30 15:52:29,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-30 15:52:40,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=227760.0, ans=0.125
+2024-08-30 15:52:44,871 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=227760.0, ans=0.0
+2024-08-30 15:52:51,839 INFO [train.py:1114] (2/4) Epoch 18, batch 400, loss[loss=0.2107, simple_loss=0.288, pruned_loss=0.04792, ctc_loss=0.09374, over 19482.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2738, pruned_loss=0.05116, ctc_loss=0.0962, over 3341232.34 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:52:53,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=227813.33333333334, ans=0.0
+2024-08-30 15:52:55,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=227813.33333333334, ans=0.125
+2024-08-30 15:54:16,394 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.651e+02 1.862e+02 2.258e+02 4.636e+02, threshold=3.723e+02, percent-clipped=1.0
+2024-08-30 15:54:20,147 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=228026.66666666666, ans=0.125
+2024-08-30 15:54:20,499 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.29 vs. limit=22.5
+2024-08-30 15:54:25,942 INFO [train.py:1114] (2/4) Epoch 18, batch 450, loss[loss=0.2245, simple_loss=0.2914, pruned_loss=0.05591, ctc_loss=0.1144, over 19625.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2743, pruned_loss=0.05133, ctc_loss=0.09659, over 3450729.30 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:54:26,941 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=16.30 vs. limit=22.5
+2024-08-30 15:54:50,648 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=228133.33333333334, ans=0.125
+2024-08-30 15:55:04,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=228186.66666666666, ans=0.125
+2024-08-30 15:55:17,168 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.25 vs. limit=15.0
+2024-08-30 15:55:18,043 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=228240.0, ans=0.025
+2024-08-30 15:55:21,652 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=228240.0, ans=0.1
+2024-08-30 15:55:22,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=228293.33333333334, ans=0.125
+2024-08-30 15:55:37,509 INFO [train.py:1114] (2/4) Epoch 18, batch 500, loss[loss=0.2214, simple_loss=0.2927, pruned_loss=0.05513, ctc_loss=0.09975, over 19681.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2735, pruned_loss=0.05099, ctc_loss=0.09611, over 3546019.83 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:55:45,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=228346.66666666666, ans=0.125
+2024-08-30 15:55:46,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.38 vs. limit=10.0
+2024-08-30 15:55:50,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=228400.0, ans=0.025
+2024-08-30 15:57:00,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=228506.66666666666, ans=0.0
+2024-08-30 15:57:50,007 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=228560.0, ans=0.125
+2024-08-30 15:57:54,269 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.602e+02 1.832e+02 2.190e+02 3.877e+02, threshold=3.665e+02, percent-clipped=2.0
+2024-08-30 15:57:58,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=228560.0, ans=0.0
+2024-08-30 15:58:00,971 INFO [train.py:1114] (2/4) Epoch 18, batch 550, loss[loss=0.2293, simple_loss=0.2885, pruned_loss=0.06216, ctc_loss=0.1147, over 19242.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2735, pruned_loss=0.05102, ctc_loss=0.09619, over 3608988.72 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:58:50,081 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.55 vs. limit=6.0
+2024-08-30 16:01:05,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=228720.0, ans=0.125
+2024-08-30 16:01:07,909 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=228720.0, ans=0.125
+2024-08-30 16:01:35,822 INFO [train.py:1114] (2/4) Epoch 18, batch 600, loss[loss=0.2275, simple_loss=0.2938, pruned_loss=0.05909, ctc_loss=0.1075, over 19360.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2731, pruned_loss=0.05068, ctc_loss=0.09546, over 3665746.22 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:02:23,185 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=228933.33333333334, ans=0.125
+2024-08-30 16:02:33,404 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.34 vs. limit=15.0
+2024-08-30 16:03:04,798 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=228986.66666666666, ans=0.125
+2024-08-30 16:04:37,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=229093.33333333334, ans=0.125
+2024-08-30 16:04:39,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=229093.33333333334, ans=0.025
+2024-08-30 16:04:39,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=229093.33333333334, ans=0.0
+2024-08-30 16:04:41,752 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 1.726e+02 2.045e+02 2.727e+02 4.181e+02, threshold=4.090e+02, percent-clipped=7.0
+2024-08-30 16:04:48,705 INFO [train.py:1114] (2/4) Epoch 18, batch 650, loss[loss=0.2118, simple_loss=0.2823, pruned_loss=0.0511, ctc_loss=0.0976, over 19756.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.273, pruned_loss=0.05103, ctc_loss=0.09612, over 3716208.52 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:04:59,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=229146.66666666666, ans=0.125
+2024-08-30 16:05:06,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=229146.66666666666, ans=0.125
+2024-08-30 16:06:18,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=229200.0, ans=0.0
+2024-08-30 16:06:23,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=229200.0, ans=0.125
+2024-08-30 16:06:31,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=229253.33333333334, ans=0.0
+2024-08-30 16:06:35,350 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.42 vs. limit=12.0
+2024-08-30 16:06:36,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=229253.33333333334, ans=0.0
+2024-08-30 16:06:36,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=229253.33333333334, ans=0.09899494936611666
+2024-08-30 16:06:44,720 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-30 16:06:47,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229306.66666666666, ans=0.1
+2024-08-30 16:07:14,095 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-30 16:07:25,685 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.79 vs. limit=22.5
+2024-08-30 16:07:32,106 INFO [train.py:1114] (2/4) Epoch 18, batch 700, loss[loss=0.1846, simple_loss=0.2526, pruned_loss=0.04275, ctc_loss=0.07763, over 19722.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2732, pruned_loss=0.05105, ctc_loss=0.09619, over 3749014.97 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:07:37,643 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.47 vs. limit=22.5
+2024-08-30 16:07:41,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=229413.33333333334, ans=0.04949747468305833
+2024-08-30 16:07:44,217 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:07:45,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=229466.66666666666, ans=0.0
+2024-08-30 16:07:51,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=229466.66666666666, ans=0.1
+2024-08-30 16:08:15,813 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:08:27,242 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.709e+02 1.988e+02 2.480e+02 4.374e+02, threshold=3.975e+02, percent-clipped=1.0
+2024-08-30 16:08:34,072 INFO [train.py:1114] (2/4) Epoch 18, batch 750, loss[loss=0.2142, simple_loss=0.2806, pruned_loss=0.0534, ctc_loss=0.1026, over 19859.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2726, pruned_loss=0.05056, ctc_loss=0.09512, over 3775543.53 frames. ], batch size: 55, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:08:44,711 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=229680.0, ans=0.0
+2024-08-30 16:08:56,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229733.33333333334, ans=0.1
+2024-08-30 16:08:58,855 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=229733.33333333334, ans=0.125
+2024-08-30 16:09:38,058 INFO [train.py:1114] (2/4) Epoch 18, batch 800, loss[loss=0.1779, simple_loss=0.2457, pruned_loss=0.03961, ctc_loss=0.07702, over 19430.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.273, pruned_loss=0.05076, ctc_loss=0.09554, over 3796776.82 frames. ], batch size: 48, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:09:55,277 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=229946.66666666666, ans=0.125
+2024-08-30 16:09:56,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=229946.66666666666, ans=0.125
+2024-08-30 16:09:57,596 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=229946.66666666666, ans=0.125
+2024-08-30 16:10:01,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=230000.0, ans=0.2
+2024-08-30 16:10:19,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=230053.33333333334, ans=0.125
+2024-08-30 16:10:24,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=230106.66666666666, ans=0.125
+2024-08-30 16:11:34,977 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.744e+02 1.950e+02 2.451e+02 4.139e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-30 16:11:43,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-30 16:11:47,909 INFO [train.py:1114] (2/4) Epoch 18, batch 850, loss[loss=0.2309, simple_loss=0.3038, pruned_loss=0.05721, ctc_loss=0.1087, over 19630.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2727, pruned_loss=0.05071, ctc_loss=0.09548, over 3815628.64 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:11:53,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=230213.33333333334, ans=0.0
+2024-08-30 16:12:09,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=230266.66666666666, ans=0.125
+2024-08-30 16:12:09,798 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=230266.66666666666, ans=15.0
+2024-08-30 16:12:12,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=230266.66666666666, ans=0.0
+2024-08-30 16:12:44,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=230373.33333333334, ans=0.07
+2024-08-30 16:12:57,696 INFO [train.py:1114] (2/4) Epoch 18, batch 900, loss[loss=0.1953, simple_loss=0.2542, pruned_loss=0.0494, ctc_loss=0.09432, over 19406.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2731, pruned_loss=0.05114, ctc_loss=0.09621, over 3819241.07 frames. ], batch size: 48, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:12:58,912 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=230480.0, ans=0.5
+2024-08-30 16:13:02,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=230480.0, ans=0.125
+2024-08-30 16:13:38,084 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:13:40,338 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=230640.0, ans=0.0
+2024-08-30 16:13:47,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=230693.33333333334, ans=0.09899494936611666
+2024-08-30 16:13:49,528 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.771e+02 2.097e+02 2.541e+02 3.279e+02, threshold=4.195e+02, percent-clipped=1.0
+2024-08-30 16:13:56,603 INFO [train.py:1114] (2/4) Epoch 18, batch 950, loss[loss=0.1845, simple_loss=0.2518, pruned_loss=0.04331, ctc_loss=0.07649, over 19504.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2729, pruned_loss=0.05101, ctc_loss=0.09592, over 3821614.48 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:15:41,762 INFO [train.py:1114] (2/4) Epoch 18, batch 1000, loss[loss=0.2103, simple_loss=0.2829, pruned_loss=0.04947, ctc_loss=0.09696, over 19859.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.274, pruned_loss=0.05153, ctc_loss=0.09694, over 3816720.59 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:15:50,434 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:15:51,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=231013.33333333334, ans=0.125
+2024-08-30 16:15:53,772 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=231066.66666666666, ans=0.125
+2024-08-30 16:16:03,784 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.64 vs. limit=15.0
+2024-08-30 16:16:06,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=231120.0, ans=0.125
+2024-08-30 16:16:19,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=231120.0, ans=0.125
+2024-08-30 16:16:30,059 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.68 vs. limit=15.0
+2024-08-30 16:16:34,538 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.17 vs. limit=15.0
+2024-08-30 16:16:46,153 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.681e+02 1.935e+02 2.141e+02 3.468e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-30 16:16:53,184 INFO [train.py:1114] (2/4) Epoch 18, batch 1050, loss[loss=0.22, simple_loss=0.2942, pruned_loss=0.05356, ctc_loss=0.09691, over 19821.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2734, pruned_loss=0.0513, ctc_loss=0.09659, over 3823860.40 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:16:54,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=231280.0, ans=0.0
+2024-08-30 16:16:54,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=231280.0, ans=0.125
+2024-08-30 16:17:01,370 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=231280.0, ans=0.2
+2024-08-30 16:17:04,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=231280.0, ans=10.0
+2024-08-30 16:17:12,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=231333.33333333334, ans=0.125
+2024-08-30 16:18:00,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=231386.66666666666, ans=0.125
+2024-08-30 16:18:12,367 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=231440.0, ans=0.2
+2024-08-30 16:18:29,881 INFO [train.py:1114] (2/4) Epoch 18, batch 1100, loss[loss=0.2055, simple_loss=0.2764, pruned_loss=0.04901, ctc_loss=0.09142, over 19597.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2732, pruned_loss=0.05097, ctc_loss=0.0958, over 3831830.36 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:18:43,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=231600.0, ans=0.125
+2024-08-30 16:18:54,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=231653.33333333334, ans=0.125
+2024-08-30 16:18:57,072 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.16 vs. limit=22.5
+2024-08-30 16:19:24,182 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.668e+02 1.884e+02 2.263e+02 3.606e+02, threshold=3.767e+02, percent-clipped=0.0
+2024-08-30 16:19:52,622 INFO [train.py:1114] (2/4) Epoch 18, batch 1150, loss[loss=0.1914, simple_loss=0.2623, pruned_loss=0.04286, ctc_loss=0.08684, over 19594.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2727, pruned_loss=0.05081, ctc_loss=0.09569, over 3830669.25 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:19:53,026 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.31 vs. limit=15.0
+2024-08-30 16:20:02,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=231813.33333333334, ans=0.125
+2024-08-30 16:20:04,979 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.66 vs. limit=15.0
+2024-08-30 16:22:09,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=231866.66666666666, ans=0.125
+2024-08-30 16:22:12,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=231866.66666666666, ans=0.09899494936611666
+2024-08-30 16:22:13,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=231866.66666666666, ans=0.125
+2024-08-30 16:22:19,378 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.96 vs. limit=15.0
+2024-08-30 16:22:31,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=231973.33333333334, ans=0.0
+2024-08-30 16:22:34,151 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=8.129e-03
+2024-08-30 16:22:41,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=231973.33333333334, ans=0.125
+2024-08-30 16:22:43,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=232026.66666666666, ans=0.0
+2024-08-30 16:22:56,523 INFO [train.py:1114] (2/4) Epoch 18, batch 1200, loss[loss=0.2189, simple_loss=0.2844, pruned_loss=0.05578, ctc_loss=0.1045, over 19844.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2738, pruned_loss=0.05148, ctc_loss=0.09682, over 3825491.91 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:23:00,085 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:23:10,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=232133.33333333334, ans=0.0
+2024-08-30 16:23:17,624 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=232186.66666666666, ans=0.125
+2024-08-30 16:23:21,451 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.35 vs. limit=6.0
+2024-08-30 16:23:31,472 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=232240.0, ans=0.1
+2024-08-30 16:23:33,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232240.0, ans=0.1
+2024-08-30 16:23:43,338 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=4.62 vs. limit=12.0
+2024-08-30 16:23:45,952 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.656e+02 1.841e+02 2.164e+02 3.391e+02, threshold=3.682e+02, percent-clipped=0.0
+2024-08-30 16:23:48,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=232293.33333333334, ans=0.125
+2024-08-30 16:23:52,937 INFO [train.py:1114] (2/4) Epoch 18, batch 1250, loss[loss=0.219, simple_loss=0.2868, pruned_loss=0.0558, ctc_loss=0.09885, over 19524.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2745, pruned_loss=0.05141, ctc_loss=0.09652, over 3842929.75 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:24:04,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=232400.0, ans=0.025
+2024-08-30 16:24:04,367 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.51 vs. limit=22.5
+2024-08-30 16:25:43,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=232560.0, ans=0.2
+2024-08-30 16:25:44,493 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232560.0, ans=0.1
+2024-08-30 16:25:45,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=232560.0, ans=0.0
+2024-08-30 16:25:53,661 INFO [train.py:1114] (2/4) Epoch 18, batch 1300, loss[loss=0.2509, simple_loss=0.3043, pruned_loss=0.07282, ctc_loss=0.1296, over 18811.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2741, pruned_loss=0.05135, ctc_loss=0.09634, over 3845733.83 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:32:11,333 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.86 vs. limit=12.0
+2024-08-30 16:32:19,319 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:32:29,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=232720.0, ans=0.0
+2024-08-30 16:32:29,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=232720.0, ans=0.2
+2024-08-30 16:32:37,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=232720.0, ans=0.125
+2024-08-30 16:45:23,074 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.785e+02 2.170e+02 2.759e+02 4.331e+02, threshold=4.339e+02, percent-clipped=5.0
+2024-08-30 16:54:23,696 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232826.66666666666, ans=0.1
+2024-08-30 16:57:13,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=232826.66666666666, ans=0.0
+2024-08-30 17:02:45,880 INFO [train.py:1114] (2/4) Epoch 18, batch 1350, loss[loss=0.215, simple_loss=0.2813, pruned_loss=0.05396, ctc_loss=0.1019, over 19781.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2734, pruned_loss=0.05107, ctc_loss=0.09581, over 3856639.95 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-30 17:02:46,431 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.68 vs. limit=15.0
+2024-08-30 17:12:54,858 INFO [train.py:1050] (2/4) Caught exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=46170, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600008 milliseconds before timing out..
+2024-08-30 17:12:54,860 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-2.pt
+2024-08-30 17:12:57,794 INFO [train.py:1413] (2/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 17:12:57,842 INFO [train.py:1419] (2/4) features shape: torch.Size([56, 1419, 80])
+2024-08-30 17:12:57,845 INFO [train.py:1423] (2/4) num tokens: 4237
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-3
new file mode 100644
index 0000000000000000000000000000000000000000..ee732e72cd8a5afc915a389862e54b1503a59d04
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-12-44-46-3
@@ -0,0 +1,544 @@
+2024-08-30 12:44:46,727 INFO [train.py:1182] (3/4) Training started
+2024-08-30 12:44:48,619 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-30 12:44:48,621 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2651.int.cedar.computecanada.ca', 'IP address': '172.16.146.88'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 17, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 12:44:48,622 INFO [train.py:1212] (3/4) About to create model
+2024-08-30 12:44:49,314 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-30 12:44:49,315 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-16.pt
+2024-08-30 12:45:01,822 INFO [train.py:1231] (3/4) Using DDP
+2024-08-30 12:45:06,262 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-30 12:45:06,460 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-30 12:45:06,460 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-30 12:45:06,664 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-30 12:45:06,665 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-30 12:45:08,257 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-30 12:45:08,264 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-30 12:45:08,444 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-30 12:45:08,576 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-30 12:45:08,900 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-30 12:45:08,901 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 12:51:17,011 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-30 12:51:18,481 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 12:53:02,313 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 12:53:03,663 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 12:54:12,743 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 12:54:13,487 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=256, metric=14.79 vs. limit=7.5
+2024-08-30 12:54:14,359 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 12:54:14,401 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-30 12:55:04,828 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.02 vs. limit=15.0
+2024-08-30 12:55:06,918 INFO [train.py:1114] (3/4) Epoch 17, batch 0, loss[loss=0.2025, simple_loss=0.2612, pruned_loss=0.05238, ctc_loss=0.09774, over 19407.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2612, pruned_loss=0.05238, ctc_loss=0.09774, over 19407.00 frames. ], batch size: 48, lr: 8.95e-03, grad_scale: 32.0
+2024-08-30 12:55:06,919 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-30 12:55:27,039 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.7218, 2.1221, 1.5530, 1.9445, 2.1680, 2.2712, 2.1419, 1.6912],
+ device='cuda:3')
+2024-08-30 12:55:31,700 INFO [train.py:1146] (3/4) Epoch 17, validation: loss=0.185, simple_loss=0.2737, pruned_loss=0.03584, ctc_loss=0.06176, over 944034.00 frames.
+2024-08-30 12:55:31,701 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 12:55:33,425 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.85 vs. limit=22.5
+2024-08-30 13:01:17,379 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:06:19,316 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.860e+02 2.030e+02 2.233e+02 2.993e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-30 13:09:34,236 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=212618.66666666666, ans=0.0
+2024-08-30 13:09:34,642 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.94 vs. limit=22.5
+2024-08-30 13:09:43,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=212618.66666666666, ans=0.2
+2024-08-30 13:09:56,388 INFO [train.py:1114] (3/4) Epoch 17, batch 50, loss[loss=0.1951, simple_loss=0.2543, pruned_loss=0.0494, ctc_loss=0.09256, over 19722.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.279, pruned_loss=0.05541, ctc_loss=0.1047, over 844761.44 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:15:57,930 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=212725.33333333334, ans=0.0
+2024-08-30 13:16:03,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=212725.33333333334, ans=0.0
+2024-08-30 13:16:42,953 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=212725.33333333334, ans=0.1
+2024-08-30 13:18:57,646 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212885.33333333334, ans=0.1
+2024-08-30 13:18:58,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=212885.33333333334, ans=0.2
+2024-08-30 13:18:58,995 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.98 vs. limit=15.0
+2024-08-30 13:19:01,151 INFO [train.py:1114] (3/4) Epoch 17, batch 100, loss[loss=0.2147, simple_loss=0.2758, pruned_loss=0.05536, ctc_loss=0.1072, over 19722.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2798, pruned_loss=0.05494, ctc_loss=0.1043, over 1498730.66 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 32.0
+2024-08-30 13:19:03,906 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=212938.66666666666, ans=0.2
+2024-08-30 13:19:06,716 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.79 vs. limit=15.0
+2024-08-30 13:19:23,514 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=212992.0, ans=0.09899494936611666
+2024-08-30 13:20:53,595 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:22:02,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=213045.33333333334, ans=0.125
+2024-08-30 13:23:01,092 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.02 vs. limit=15.0
+2024-08-30 13:23:11,081 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 1.706e+02 1.953e+02 2.287e+02 3.713e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-30 13:23:30,457 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=213098.66666666666, ans=0.5
+2024-08-30 13:24:10,834 INFO [train.py:1114] (3/4) Epoch 17, batch 150, loss[loss=0.1641, simple_loss=0.2363, pruned_loss=0.03382, ctc_loss=0.0607, over 19720.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2778, pruned_loss=0.05387, ctc_loss=0.1024, over 2026289.94 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:27:25,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213365.33333333334, ans=0.1
+2024-08-30 13:27:53,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=213418.66666666666, ans=0.0
+2024-08-30 13:27:56,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-30 13:28:03,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=213418.66666666666, ans=0.0
+2024-08-30 13:28:05,727 INFO [train.py:1114] (3/4) Epoch 17, batch 200, loss[loss=0.2245, simple_loss=0.2875, pruned_loss=0.05906, ctc_loss=0.1086, over 18235.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2767, pruned_loss=0.05327, ctc_loss=0.1007, over 2433635.33 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 32.0
+2024-08-30 13:28:07,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=213472.0, ans=0.125
+2024-08-30 13:28:15,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=213472.0, ans=0.2
+2024-08-30 13:28:24,261 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=213525.33333333334, ans=0.07
+2024-08-30 13:28:30,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=213578.66666666666, ans=0.125
+2024-08-30 13:28:40,338 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.384e+02 1.731e+02 1.992e+02 2.666e+02 4.093e+02, threshold=3.983e+02, percent-clipped=1.0
+2024-08-30 13:28:57,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213685.33333333334, ans=0.1
+2024-08-30 13:29:07,540 INFO [train.py:1114] (3/4) Epoch 17, batch 250, loss[loss=0.2155, simple_loss=0.2846, pruned_loss=0.05364, ctc_loss=0.0977, over 19400.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2761, pruned_loss=0.0525, ctc_loss=0.09926, over 2755065.21 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:29:14,200 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.95 vs. limit=6.0
+2024-08-30 13:29:19,888 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=213792.0, ans=0.125
+2024-08-30 13:29:39,925 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=213845.33333333334, ans=0.125
+2024-08-30 13:29:52,906 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 13:30:13,962 INFO [train.py:1114] (3/4) Epoch 17, batch 300, loss[loss=0.2492, simple_loss=0.3034, pruned_loss=0.07158, ctc_loss=0.1296, over 19534.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2753, pruned_loss=0.05207, ctc_loss=0.09817, over 2999463.88 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 32.0
+2024-08-30 13:30:24,948 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=214058.66666666666, ans=0.0
+2024-08-30 13:30:28,705 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=214058.66666666666, ans=0.025
+2024-08-30 13:30:50,928 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=214112.0, ans=0.035
+2024-08-30 13:30:51,811 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.663e+02 1.872e+02 2.298e+02 3.693e+02, threshold=3.744e+02, percent-clipped=0.0
+2024-08-30 13:31:02,066 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.78 vs. limit=15.0
+2024-08-30 13:31:25,600 INFO [train.py:1114] (3/4) Epoch 17, batch 350, loss[loss=0.1712, simple_loss=0.2415, pruned_loss=0.03629, ctc_loss=0.07087, over 19779.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2761, pruned_loss=0.05256, ctc_loss=0.09883, over 3190999.16 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:31:29,429 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214272.0, ans=0.1
+2024-08-30 13:31:38,699 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-30 13:31:44,785 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=214325.33333333334, ans=0.0
+2024-08-30 13:32:07,376 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=214432.0, ans=0.125
+2024-08-30 13:32:18,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=214485.33333333334, ans=0.025
+2024-08-30 13:32:24,586 INFO [train.py:1114] (3/4) Epoch 17, batch 400, loss[loss=0.2203, simple_loss=0.2825, pruned_loss=0.05707, ctc_loss=0.1099, over 19500.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2754, pruned_loss=0.05218, ctc_loss=0.09806, over 3343531.34 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-30 13:32:35,653 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=214592.0, ans=0.1
+2024-08-30 13:32:56,407 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214645.33333333334, ans=0.1
+2024-08-30 13:32:56,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-30 13:33:01,000 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.238e+02 1.640e+02 1.901e+02 2.325e+02 4.074e+02, threshold=3.801e+02, percent-clipped=1.0
+2024-08-30 13:33:08,457 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=214698.66666666666, ans=0.125
+2024-08-30 13:33:12,349 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=214698.66666666666, ans=0.0
+2024-08-30 13:33:26,237 INFO [train.py:1114] (3/4) Epoch 17, batch 450, loss[loss=0.2071, simple_loss=0.2826, pruned_loss=0.04749, ctc_loss=0.09185, over 19614.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2758, pruned_loss=0.05212, ctc_loss=0.09815, over 3451227.36 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:33:30,437 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.95 vs. limit=22.5
+2024-08-30 13:33:36,536 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=214805.33333333334, ans=0.125
+2024-08-30 13:38:31,119 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.40 vs. limit=12.0
+2024-08-30 13:38:32,397 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.82 vs. limit=15.0
+2024-08-30 13:38:33,786 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=214912.0, ans=0.125
+2024-08-30 13:38:33,803 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214912.0, ans=0.1
+2024-08-30 13:43:52,440 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=215018.66666666666, ans=0.125
+2024-08-30 13:44:02,832 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.56 vs. limit=10.0
+2024-08-30 13:44:05,799 INFO [train.py:1114] (3/4) Epoch 17, batch 500, loss[loss=0.2222, simple_loss=0.2917, pruned_loss=0.05627, ctc_loss=0.1004, over 19684.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2753, pruned_loss=0.05193, ctc_loss=0.09801, over 3546818.49 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-30 13:44:53,035 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.09 vs. limit=15.0
+2024-08-30 13:44:58,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=215178.66666666666, ans=0.1
+2024-08-30 13:45:06,443 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.780e+02 2.026e+02 2.589e+02 4.105e+02, threshold=4.052e+02, percent-clipped=2.0
+2024-08-30 13:45:31,472 INFO [train.py:1114] (3/4) Epoch 17, batch 550, loss[loss=0.2148, simple_loss=0.2835, pruned_loss=0.05417, ctc_loss=0.09472, over 19302.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2751, pruned_loss=0.05189, ctc_loss=0.09777, over 3608612.57 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 32.0
+2024-08-30 13:45:32,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=215338.66666666666, ans=0.025
+2024-08-30 13:45:40,257 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215338.66666666666, ans=0.1
+2024-08-30 13:45:50,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=215392.0, ans=0.0
+2024-08-30 13:46:28,907 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.21 vs. limit=15.0
+2024-08-30 13:46:30,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=215552.0, ans=0.125
+2024-08-30 13:47:20,209 INFO [train.py:1114] (3/4) Epoch 17, batch 600, loss[loss=0.2494, simple_loss=0.2986, pruned_loss=0.07367, ctc_loss=0.132, over 19313.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2753, pruned_loss=0.05217, ctc_loss=0.09837, over 3666177.61 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 64.0
+2024-08-30 13:47:26,850 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.54 vs. limit=15.0
+2024-08-30 13:47:31,979 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215658.66666666666, ans=0.1
+2024-08-30 13:47:36,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=215658.66666666666, ans=0.0
+2024-08-30 13:47:40,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.46 vs. limit=15.0
+2024-08-30 13:47:49,538 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=215712.0, ans=0.125
+2024-08-30 13:47:53,783 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.647e+02 1.940e+02 2.383e+02 4.124e+02, threshold=3.879e+02, percent-clipped=1.0
+2024-08-30 13:48:17,035 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.21 vs. limit=22.5
+2024-08-30 13:48:27,129 INFO [train.py:1114] (3/4) Epoch 17, batch 650, loss[loss=0.2217, simple_loss=0.2785, pruned_loss=0.05982, ctc_loss=0.1132, over 19764.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2748, pruned_loss=0.05205, ctc_loss=0.09814, over 3716226.04 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 32.0
+2024-08-30 13:50:13,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=215872.0, ans=0.125
+2024-08-30 13:51:32,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=215925.33333333334, ans=0.125
+2024-08-30 13:51:48,700 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.18 vs. limit=10.0
+2024-08-30 13:51:58,389 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.45 vs. limit=12.0
+2024-08-30 13:52:27,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=216032.0, ans=0.2
+2024-08-30 13:52:37,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=216032.0, ans=0.0
+2024-08-30 14:06:19,755 INFO [train.py:1114] (3/4) Epoch 17, batch 700, loss[loss=0.1851, simple_loss=0.2597, pruned_loss=0.04017, ctc_loss=0.07546, over 19711.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2752, pruned_loss=0.05213, ctc_loss=0.09815, over 3749092.86 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:06:38,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=216138.66666666666, ans=0.125
+2024-08-30 14:12:13,783 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 1.667e+02 2.137e+02 2.601e+02 4.284e+02, threshold=4.274e+02, percent-clipped=4.0
+2024-08-30 14:17:08,651 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=216352.0, ans=0.125
+2024-08-30 14:17:08,960 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.58 vs. limit=15.0
+2024-08-30 14:17:09,906 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:17:35,071 INFO [train.py:1114] (3/4) Epoch 17, batch 750, loss[loss=0.2041, simple_loss=0.2805, pruned_loss=0.04528, ctc_loss=0.09269, over 19502.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2748, pruned_loss=0.05193, ctc_loss=0.09786, over 3775151.58 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 32.0
+2024-08-30 14:17:39,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=216405.33333333334, ans=0.125
+2024-08-30 14:17:58,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=216458.66666666666, ans=0.125
+2024-08-30 14:18:08,795 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=216458.66666666666, ans=0.07
+2024-08-30 14:19:12,062 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=216565.33333333334, ans=0.2
+2024-08-30 14:19:42,179 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=216565.33333333334, ans=0.125
+2024-08-30 14:19:46,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216565.33333333334, ans=0.1
+2024-08-30 14:20:37,989 INFO [train.py:1114] (3/4) Epoch 17, batch 800, loss[loss=0.1907, simple_loss=0.256, pruned_loss=0.04604, ctc_loss=0.08348, over 19408.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.274, pruned_loss=0.05161, ctc_loss=0.09728, over 3796424.97 frames. ], batch size: 48, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:20:44,040 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.02 vs. limit=6.0
+2024-08-30 14:22:02,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=216672.0, ans=0.0
+2024-08-30 14:27:06,935 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216672.0, ans=0.1
+2024-08-30 14:29:36,204 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216725.33333333334, ans=0.1
+2024-08-30 14:31:17,694 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=216778.66666666666, ans=0.125
+2024-08-30 14:31:25,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=216778.66666666666, ans=0.125
+2024-08-30 14:31:32,315 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.715e+02 2.071e+02 2.537e+02 3.967e+02, threshold=4.143e+02, percent-clipped=0.0
+2024-08-30 14:32:34,219 INFO [train.py:1114] (3/4) Epoch 17, batch 850, loss[loss=0.1969, simple_loss=0.2707, pruned_loss=0.04466, ctc_loss=0.08437, over 19644.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2742, pruned_loss=0.05176, ctc_loss=0.09755, over 3815832.83 frames. ], batch size: 59, lr: 8.86e-03, grad_scale: 32.0
+2024-08-30 14:32:38,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=216938.66666666666, ans=0.0
+2024-08-30 14:32:39,263 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=216938.66666666666, ans=0.1
+2024-08-30 14:32:54,410 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216992.0, ans=0.1
+2024-08-30 14:32:58,699 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.32 vs. limit=15.0
+2024-08-30 14:33:06,941 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.28 vs. limit=15.0
+2024-08-30 14:33:13,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=217045.33333333334, ans=0.1
+2024-08-30 14:33:20,903 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-30 14:33:27,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=217152.0, ans=0.025
+2024-08-30 14:34:09,065 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.07 vs. limit=15.0
+2024-08-30 14:34:26,337 INFO [train.py:1114] (3/4) Epoch 17, batch 900, loss[loss=0.2032, simple_loss=0.2626, pruned_loss=0.05264, ctc_loss=0.09609, over 19787.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2741, pruned_loss=0.05181, ctc_loss=0.09747, over 3820435.20 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:36:02,134 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=3.77 vs. limit=12.0
+2024-08-30 14:36:51,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=217312.0, ans=0.125
+2024-08-30 14:36:59,136 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.623e+02 1.810e+02 2.233e+02 4.039e+02, threshold=3.621e+02, percent-clipped=0.0
+2024-08-30 14:37:24,535 INFO [train.py:1114] (3/4) Epoch 17, batch 950, loss[loss=0.1661, simple_loss=0.24, pruned_loss=0.03312, ctc_loss=0.06503, over 19513.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2747, pruned_loss=0.05215, ctc_loss=0.09795, over 3821540.41 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-30 14:39:04,466 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=217685.33333333334, ans=0.1
+2024-08-30 14:39:05,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=217685.33333333334, ans=0.1
+2024-08-30 14:39:16,467 INFO [train.py:1114] (3/4) Epoch 17, batch 1000, loss[loss=0.2193, simple_loss=0.2832, pruned_loss=0.05562, ctc_loss=0.1107, over 19848.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2752, pruned_loss=0.05241, ctc_loss=0.09846, over 3816977.63 frames. ], batch size: 52, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:39:25,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=217738.66666666666, ans=0.125
+2024-08-30 14:39:26,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=217738.66666666666, ans=0.2
+2024-08-30 14:39:43,465 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=217845.33333333334, ans=0.035
+2024-08-30 14:39:52,694 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.648e+02 1.905e+02 2.181e+02 3.196e+02, threshold=3.810e+02, percent-clipped=0.0
+2024-08-30 14:40:20,745 INFO [train.py:1114] (3/4) Epoch 17, batch 1050, loss[loss=0.2335, simple_loss=0.2936, pruned_loss=0.06267, ctc_loss=0.1199, over 19835.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2753, pruned_loss=0.05267, ctc_loss=0.09897, over 3823093.81 frames. ], batch size: 57, lr: 8.84e-03, grad_scale: 32.0
+2024-08-30 14:40:26,527 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:41:14,896 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=218218.66666666666, ans=0.0
+2024-08-30 14:41:24,726 INFO [train.py:1114] (3/4) Epoch 17, batch 1100, loss[loss=0.1822, simple_loss=0.2539, pruned_loss=0.03999, ctc_loss=0.07604, over 19602.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2746, pruned_loss=0.05221, ctc_loss=0.0981, over 3829569.24 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:41:33,221 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=218272.0, ans=0.125
+2024-08-30 14:41:35,582 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=218325.33333333334, ans=0.2
+2024-08-30 14:41:36,707 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:41:41,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=218325.33333333334, ans=0.125
+2024-08-30 14:41:41,550 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=218325.33333333334, ans=0.0
+2024-08-30 14:41:44,176 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.17 vs. limit=15.0
+2024-08-30 14:42:15,387 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=218378.66666666666, ans=0.125
+2024-08-30 14:42:23,464 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.634e+02 1.909e+02 2.238e+02 3.833e+02, threshold=3.817e+02, percent-clipped=1.0
+2024-08-30 14:42:58,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=218432.0, ans=0.125
+2024-08-30 14:43:15,268 INFO [train.py:1114] (3/4) Epoch 17, batch 1150, loss[loss=0.1917, simple_loss=0.2642, pruned_loss=0.04377, ctc_loss=0.07898, over 19592.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2752, pruned_loss=0.05268, ctc_loss=0.09906, over 3829957.78 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-30 14:43:30,347 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=218592.0, ans=0.035
+2024-08-30 14:43:33,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=218592.0, ans=0.0
+2024-08-30 14:43:46,545 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=218645.33333333334, ans=0.0
+2024-08-30 14:43:51,475 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=218645.33333333334, ans=0.025
+2024-08-30 14:43:57,325 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.70 vs. limit=6.0
+2024-08-30 14:44:01,853 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=218698.66666666666, ans=0.0
+2024-08-30 14:44:20,106 INFO [train.py:1114] (3/4) Epoch 17, batch 1200, loss[loss=0.2099, simple_loss=0.2831, pruned_loss=0.04996, ctc_loss=0.09175, over 19837.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2757, pruned_loss=0.05293, ctc_loss=0.09946, over 3825542.18 frames. ], batch size: 57, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:44:30,797 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=218805.33333333334, ans=0.1
+2024-08-30 14:44:32,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=218858.66666666666, ans=0.1
+2024-08-30 14:45:49,738 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=218858.66666666666, ans=0.0
+2024-08-30 14:46:08,767 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 1.734e+02 1.937e+02 2.235e+02 3.279e+02, threshold=3.874e+02, percent-clipped=0.0
+2024-08-30 14:46:18,522 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=219018.66666666666, ans=0.0
+2024-08-30 14:46:24,548 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.40 vs. limit=10.0
+2024-08-30 14:46:31,312 INFO [train.py:1114] (3/4) Epoch 17, batch 1250, loss[loss=0.2528, simple_loss=0.3054, pruned_loss=0.07248, ctc_loss=0.138, over 19542.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2759, pruned_loss=0.05268, ctc_loss=0.0989, over 3844081.90 frames. ], batch size: 61, lr: 8.82e-03, grad_scale: 32.0
+2024-08-30 14:48:32,145 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:48:50,533 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=219338.66666666666, ans=0.1
+2024-08-30 14:48:52,362 INFO [train.py:1114] (3/4) Epoch 17, batch 1300, loss[loss=0.2484, simple_loss=0.3045, pruned_loss=0.07053, ctc_loss=0.1283, over 18817.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2752, pruned_loss=0.05241, ctc_loss=0.09848, over 3846339.73 frames. ], batch size: 76, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:48:57,695 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=219338.66666666666, ans=0.1
+2024-08-30 14:49:19,643 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.59 vs. limit=12.0
+2024-08-30 14:49:29,483 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 1.750e+02 2.054e+02 2.564e+02 3.826e+02, threshold=4.108e+02, percent-clipped=0.0
+2024-08-30 14:49:45,255 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:50:02,345 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=219552.0, ans=0.125
+2024-08-30 14:50:04,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=219552.0, ans=0.0
+2024-08-30 14:50:08,920 INFO [train.py:1114] (3/4) Epoch 17, batch 1350, loss[loss=0.2055, simple_loss=0.2754, pruned_loss=0.04974, ctc_loss=0.09015, over 19783.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.275, pruned_loss=0.05228, ctc_loss=0.09824, over 3856219.38 frames. ], batch size: 54, lr: 8.81e-03, grad_scale: 32.0
+2024-08-30 14:50:27,184 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=219658.66666666666, ans=0.125
+2024-08-30 14:50:34,985 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.95 vs. limit=15.0
+2024-08-30 14:50:59,939 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:51:00,929 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=219818.66666666666, ans=0.1
+2024-08-30 14:51:09,317 INFO [train.py:1114] (3/4) Epoch 17, batch 1400, loss[loss=0.1673, simple_loss=0.2346, pruned_loss=0.03613, ctc_loss=0.06948, over 19664.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2745, pruned_loss=0.05209, ctc_loss=0.09786, over 3863442.84 frames. ], batch size: 46, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:51:27,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=219872.0, ans=0.125
+2024-08-30 14:51:30,652 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=219872.0, ans=0.0
+2024-08-30 14:52:01,611 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.697e+02 1.910e+02 2.399e+02 4.058e+02, threshold=3.819e+02, percent-clipped=0.0
+2024-08-30 14:52:14,350 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 14:52:15,530 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=220085.33333333334, ans=0.0
+2024-08-30 14:52:26,358 INFO [train.py:1114] (3/4) Epoch 17, batch 1450, loss[loss=0.2129, simple_loss=0.2813, pruned_loss=0.05308, ctc_loss=0.09589, over 19651.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2755, pruned_loss=0.05268, ctc_loss=0.09899, over 3860840.47 frames. ], batch size: 63, lr: 8.80e-03, grad_scale: 32.0
+2024-08-30 14:52:43,127 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=220138.66666666666, ans=0.04949747468305833
+2024-08-30 14:52:54,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=220192.0, ans=0.125
+2024-08-30 14:53:05,741 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=220192.0, ans=0.125
+2024-08-30 14:54:12,184 INFO [train.py:1114] (3/4) Epoch 17, batch 1500, loss[loss=0.2, simple_loss=0.2789, pruned_loss=0.0434, ctc_loss=0.08561, over 19580.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2754, pruned_loss=0.05228, ctc_loss=0.09825, over 3861253.07 frames. ], batch size: 57, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:54:17,970 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=220405.33333333334, ans=0.1
+2024-08-30 14:54:22,825 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=220405.33333333334, ans=0.2
+2024-08-30 14:54:23,214 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.24 vs. limit=15.0
+2024-08-30 14:54:47,382 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=220512.0, ans=0.2
+2024-08-30 14:54:51,619 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.29 vs. limit=15.0
+2024-08-30 14:54:54,682 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.719e+02 1.906e+02 2.293e+02 3.704e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-30 14:55:00,901 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=220565.33333333334, ans=0.125
+2024-08-30 14:55:03,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=220565.33333333334, ans=0.125
+2024-08-30 14:55:09,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=220618.66666666666, ans=0.025
+2024-08-30 14:55:11,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=220618.66666666666, ans=0.2
+2024-08-30 14:55:17,131 INFO [train.py:1114] (3/4) Epoch 17, batch 1550, loss[loss=0.2152, simple_loss=0.2827, pruned_loss=0.0535, ctc_loss=0.1017, over 19608.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.275, pruned_loss=0.0523, ctc_loss=0.09848, over 3845897.95 frames. ], batch size: 60, lr: 8.79e-03, grad_scale: 32.0
+2024-08-30 14:55:31,627 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.25 vs. limit=15.0
+2024-08-30 14:55:36,483 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.90 vs. limit=22.5
+2024-08-30 14:56:04,955 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=220832.0, ans=0.07
+2024-08-30 14:56:10,534 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.61 vs. limit=15.0
+2024-08-30 14:56:26,696 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.57 vs. limit=15.0
+2024-08-30 14:56:27,379 INFO [train.py:1114] (3/4) Epoch 17, batch 1600, loss[loss=0.226, simple_loss=0.2852, pruned_loss=0.05961, ctc_loss=0.119, over 19834.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2749, pruned_loss=0.05217, ctc_loss=0.09838, over 3834852.77 frames. ], batch size: 57, lr: 8.78e-03, grad_scale: 32.0
+2024-08-30 14:56:35,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=220938.66666666666, ans=0.1
+2024-08-30 14:57:23,780 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=220992.0, ans=0.025
+2024-08-30 14:58:32,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=221045.33333333334, ans=0.07
+2024-08-30 14:58:43,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=221045.33333333334, ans=0.0
+2024-08-30 15:00:47,795 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 1.738e+02 2.160e+02 2.635e+02 3.870e+02, threshold=4.320e+02, percent-clipped=2.0
+2024-08-30 15:00:54,225 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.91 vs. limit=15.0
+2024-08-30 15:02:48,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=221152.0, ans=0.125
+2024-08-30 15:03:53,977 INFO [train.py:1114] (3/4) Epoch 17, batch 1650, loss[loss=0.1953, simple_loss=0.2701, pruned_loss=0.04329, ctc_loss=0.08511, over 19642.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2749, pruned_loss=0.05225, ctc_loss=0.09832, over 3830114.97 frames. ], batch size: 59, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:05:45,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=221258.66666666666, ans=0.1
+2024-08-30 15:05:48,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=221258.66666666666, ans=0.05
+2024-08-30 15:07:23,291 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=221312.0, ans=0.05
+2024-08-30 15:07:35,871 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=221365.33333333334, ans=0.125
+2024-08-30 15:07:39,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=221365.33333333334, ans=0.2
+2024-08-30 15:07:43,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=221365.33333333334, ans=0.125
+2024-08-30 15:07:45,421 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=221365.33333333334, ans=0.1
+2024-08-30 15:07:50,908 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.49 vs. limit=15.0
+2024-08-30 15:08:00,827 INFO [train.py:1114] (3/4) Epoch 17, batch 1700, loss[loss=0.1962, simple_loss=0.2516, pruned_loss=0.05096, ctc_loss=0.09716, over 19691.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2753, pruned_loss=0.05224, ctc_loss=0.09822, over 3844046.33 frames. ], batch size: 46, lr: 8.77e-03, grad_scale: 32.0
+2024-08-30 15:08:16,924 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.18 vs. limit=10.0
+2024-08-30 15:08:31,452 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=221578.66666666666, ans=0.125
+2024-08-30 15:08:33,903 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.91 vs. limit=22.5
+2024-08-30 15:08:36,778 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.717e+02 1.998e+02 2.422e+02 4.059e+02, threshold=3.996e+02, percent-clipped=0.0
+2024-08-30 15:09:33,400 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=221632.0, ans=0.0
+2024-08-30 15:09:35,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=221632.0, ans=0.1
+2024-08-30 15:09:43,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=221685.33333333334, ans=0.125
+2024-08-30 15:09:50,037 INFO [train.py:1114] (3/4) Epoch 17, batch 1750, loss[loss=0.1956, simple_loss=0.2525, pruned_loss=0.04989, ctc_loss=0.09737, over 19662.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2748, pruned_loss=0.05211, ctc_loss=0.09791, over 3849771.43 frames. ], batch size: 45, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:09:56,053 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.45 vs. limit=15.0
+2024-08-30 15:09:56,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=221738.66666666666, ans=0.125
+2024-08-30 15:10:11,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=221845.33333333334, ans=0.0
+2024-08-30 15:10:21,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=221845.33333333334, ans=0.1
+2024-08-30 15:10:43,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=221952.0, ans=0.1
+2024-08-30 15:10:44,122 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=221952.0, ans=0.125
+2024-08-30 15:10:46,091 INFO [train.py:1114] (3/4) Epoch 17, batch 1800, loss[loss=0.229, simple_loss=0.2949, pruned_loss=0.0578, ctc_loss=0.1185, over 19626.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2748, pruned_loss=0.05219, ctc_loss=0.09795, over 3852200.51 frames. ], batch size: 55, lr: 8.76e-03, grad_scale: 32.0
+2024-08-30 15:11:06,625 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=222058.66666666666, ans=0.1
+2024-08-30 15:11:11,237 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=222112.0, ans=0.025
+2024-08-30 15:11:23,235 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.773e+02 2.029e+02 2.607e+02 4.351e+02, threshold=4.057e+02, percent-clipped=1.0
+2024-08-30 15:11:29,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=222165.33333333334, ans=0.125
+2024-08-30 15:11:43,557 INFO [train.py:1114] (3/4) Epoch 17, batch 1850, loss[loss=0.2157, simple_loss=0.2859, pruned_loss=0.05364, ctc_loss=0.09557, over 19583.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2746, pruned_loss=0.05201, ctc_loss=0.09777, over 3856199.12 frames. ], batch size: 57, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:11:50,863 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.34 vs. limit=15.0
+2024-08-30 15:12:06,017 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=222378.66666666666, ans=0.1
+2024-08-30 15:12:09,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=222378.66666666666, ans=0.0
+2024-08-30 15:12:10,964 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.38 vs. limit=15.0
+2024-08-30 15:12:18,593 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=222432.0, ans=0.125
+2024-08-30 15:12:39,808 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=222538.66666666666, ans=0.125
+2024-08-30 15:12:40,699 INFO [train.py:1114] (3/4) Epoch 17, batch 1900, loss[loss=0.2126, simple_loss=0.2907, pruned_loss=0.04913, ctc_loss=0.09088, over 19666.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.275, pruned_loss=0.05206, ctc_loss=0.0979, over 3861529.04 frames. ], batch size: 59, lr: 8.75e-03, grad_scale: 16.0
+2024-08-30 15:12:41,019 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=222538.66666666666, ans=0.125
+2024-08-30 15:12:47,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=222538.66666666666, ans=0.125
+2024-08-30 15:13:07,792 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=222645.33333333334, ans=0.125
+2024-08-30 15:13:18,233 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.682e+02 1.950e+02 2.328e+02 4.923e+02, threshold=3.901e+02, percent-clipped=3.0
+2024-08-30 15:13:20,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=222698.66666666666, ans=15.0
+2024-08-30 15:13:25,239 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=222698.66666666666, ans=0.025
+2024-08-30 15:13:36,339 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=222752.0, ans=0.1
+2024-08-30 15:13:38,420 INFO [train.py:1114] (3/4) Epoch 17, batch 1950, loss[loss=0.1951, simple_loss=0.2646, pruned_loss=0.04541, ctc_loss=0.08691, over 19590.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2763, pruned_loss=0.0524, ctc_loss=0.09864, over 3870508.85 frames. ], batch size: 52, lr: 8.74e-03, grad_scale: 16.0
+2024-08-30 15:14:29,698 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=222858.66666666666, ans=0.1
+2024-08-30 15:14:38,791 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=222912.0, ans=0.1
+2024-08-30 15:14:45,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=222912.0, ans=0.125
+2024-08-30 15:14:49,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=222912.0, ans=0.04949747468305833
+2024-08-30 15:14:53,090 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.47 vs. limit=22.5
+2024-08-30 15:15:27,049 INFO [train.py:1114] (3/4) Epoch 17, batch 2000, loss[loss=0.1798, simple_loss=0.2432, pruned_loss=0.0425, ctc_loss=0.07824, over 19678.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2769, pruned_loss=0.05275, ctc_loss=0.09954, over 3855534.91 frames. ], batch size: 45, lr: 8.74e-03, grad_scale: 32.0
+2024-08-30 15:15:29,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=223072.0, ans=0.0
+2024-08-30 15:16:03,254 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.692e+02 2.099e+02 2.435e+02 3.373e+02, threshold=4.199e+02, percent-clipped=0.0
+2024-08-30 15:16:08,204 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=223232.0, ans=0.0
+2024-08-30 15:16:12,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=223285.33333333334, ans=0.2
+2024-08-30 15:16:13,641 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=223285.33333333334, ans=0.125
+2024-08-30 15:16:42,838 INFO [train.py:1114] (3/4) Epoch 17, batch 2050, loss[loss=0.1936, simple_loss=0.2544, pruned_loss=0.0485, ctc_loss=0.08957, over 19740.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2762, pruned_loss=0.05274, ctc_loss=0.09979, over 3852671.33 frames. ], batch size: 47, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:18:41,016 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=223445.33333333334, ans=0.125
+2024-08-30 15:19:49,009 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=223445.33333333334, ans=0.125
+2024-08-30 15:19:54,808 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=223498.66666666666, ans=0.125
+2024-08-30 15:20:15,747 INFO [train.py:1114] (3/4) Epoch 17, batch 2100, loss[loss=0.1886, simple_loss=0.2633, pruned_loss=0.04156, ctc_loss=0.07706, over 19778.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2756, pruned_loss=0.05228, ctc_loss=0.09891, over 3859898.41 frames. ], batch size: 54, lr: 8.73e-03, grad_scale: 32.0
+2024-08-30 15:20:46,786 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=223605.33333333334, ans=0.0
+2024-08-30 15:20:59,227 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=223658.66666666666, ans=0.125
+2024-08-30 15:21:23,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=223765.33333333334, ans=0.125
+2024-08-30 15:21:41,979 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.235e+02 1.693e+02 2.019e+02 2.546e+02 6.032e+02, threshold=4.039e+02, percent-clipped=5.0
+2024-08-30 15:21:46,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=223765.33333333334, ans=0.0
+2024-08-30 15:21:54,090 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=223818.66666666666, ans=0.2
+2024-08-30 15:21:54,198 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.33 vs. limit=15.0
+2024-08-30 15:22:02,825 INFO [train.py:1114] (3/4) Epoch 17, batch 2150, loss[loss=0.1748, simple_loss=0.2511, pruned_loss=0.03574, ctc_loss=0.06774, over 19843.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2748, pruned_loss=0.05186, ctc_loss=0.09781, over 3871519.46 frames. ], batch size: 52, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:22:24,237 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.12 vs. limit=10.0
+2024-08-30 15:22:24,483 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.99 vs. limit=15.0
+2024-08-30 15:22:24,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=223978.66666666666, ans=0.125
+2024-08-30 15:22:58,283 INFO [train.py:1114] (3/4) Epoch 17, batch 2200, loss[loss=0.1982, simple_loss=0.2789, pruned_loss=0.04246, ctc_loss=0.08161, over 19578.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2744, pruned_loss=0.05161, ctc_loss=0.09744, over 3870160.92 frames. ], batch size: 57, lr: 8.72e-03, grad_scale: 32.0
+2024-08-30 15:23:07,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.35 vs. limit=12.0
+2024-08-30 15:23:08,446 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=224138.66666666666, ans=0.0
+2024-08-30 15:23:36,093 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=224245.33333333334, ans=0.035
+2024-08-30 15:23:39,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=224245.33333333334, ans=0.025
+2024-08-30 15:23:53,333 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.675e+02 1.986e+02 2.371e+02 4.244e+02, threshold=3.972e+02, percent-clipped=2.0
+2024-08-30 15:24:13,607 INFO [train.py:1114] (3/4) Epoch 17, batch 2250, loss[loss=0.2248, simple_loss=0.2938, pruned_loss=0.05596, ctc_loss=0.1098, over 19621.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2745, pruned_loss=0.05146, ctc_loss=0.09708, over 3869960.68 frames. ], batch size: 55, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:25:41,969 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.97 vs. limit=15.0
+2024-08-30 15:25:45,206 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.22 vs. limit=12.0
+2024-08-30 15:25:48,964 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=224512.0, ans=0.5
+2024-08-30 15:25:51,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=224512.0, ans=0.0
+2024-08-30 15:27:05,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=224618.66666666666, ans=0.0
+2024-08-30 15:27:15,737 INFO [train.py:1114] (3/4) Epoch 17, batch 2300, loss[loss=0.1765, simple_loss=0.2453, pruned_loss=0.0381, ctc_loss=0.07875, over 19502.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2736, pruned_loss=0.05137, ctc_loss=0.09688, over 3863663.04 frames. ], batch size: 49, lr: 8.71e-03, grad_scale: 32.0
+2024-08-30 15:28:39,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=224778.66666666666, ans=0.125
+2024-08-30 15:28:40,097 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=224778.66666666666, ans=0.1
+2024-08-30 15:28:43,569 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=224832.0, ans=0.5
+2024-08-30 15:28:46,693 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.759e+02 2.126e+02 2.592e+02 4.068e+02, threshold=4.252e+02, percent-clipped=2.0
+2024-08-30 15:28:58,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=224885.33333333334, ans=0.125
+2024-08-30 15:29:48,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=224885.33333333334, ans=0.2
+2024-08-30 15:29:51,077 INFO [train.py:1114] (3/4) Epoch 17, batch 2350, loss[loss=0.2353, simple_loss=0.2964, pruned_loss=0.06359, ctc_loss=0.1175, over 19646.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2737, pruned_loss=0.05161, ctc_loss=0.09724, over 3866110.76 frames. ], batch size: 63, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:29:51,273 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=224938.66666666666, ans=0.2
+2024-08-30 15:29:54,680 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=224938.66666666666, ans=0.125
+2024-08-30 15:30:01,985 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.95 vs. limit=15.0
+2024-08-30 15:30:42,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=225045.33333333334, ans=0.05
+2024-08-30 15:30:42,337 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=225045.33333333334, ans=0.125
+2024-08-30 15:32:49,543 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.18 vs. limit=15.0
+2024-08-30 15:33:11,234 INFO [train.py:1114] (3/4) Epoch 17, batch 2400, loss[loss=0.2491, simple_loss=0.3107, pruned_loss=0.0681, ctc_loss=0.1284, over 19290.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2764, pruned_loss=0.05273, ctc_loss=0.0991, over 3860060.04 frames. ], batch size: 71, lr: 8.70e-03, grad_scale: 32.0
+2024-08-30 15:33:43,411 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=225312.0, ans=0.125
+2024-08-30 15:33:48,810 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.684e+02 1.880e+02 2.443e+02 3.780e+02, threshold=3.760e+02, percent-clipped=0.0
+2024-08-30 15:33:51,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=225365.33333333334, ans=0.2
+2024-08-30 15:33:51,599 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=225365.33333333334, ans=0.2
+2024-08-30 15:34:10,333 INFO [train.py:1114] (3/4) Epoch 17, batch 2450, loss[loss=0.2847, simple_loss=0.3147, pruned_loss=0.09183, ctc_loss=0.1777, over 13396.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2798, pruned_loss=0.05528, ctc_loss=0.1041, over 3730534.66 frames. ], batch size: 140, lr: 8.69e-03, grad_scale: 16.0
+2024-08-30 15:34:50,537 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=225525.33333333334, ans=0.2
+2024-08-30 15:34:59,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=225578.66666666666, ans=0.07
+2024-08-30 15:35:06,151 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=225578.66666666666, ans=0.125
+2024-08-30 15:35:21,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=225632.0, ans=0.125
+2024-08-30 15:35:22,685 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225632.0, ans=0.1
+2024-08-30 15:38:02,848 INFO [train.py:1114] (3/4) Epoch 18, batch 0, loss[loss=0.2024, simple_loss=0.2593, pruned_loss=0.05157, ctc_loss=0.1056, over 19798.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2593, pruned_loss=0.05157, ctc_loss=0.1056, over 19798.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:38:02,848 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-30 15:38:18,420 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.2.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.7964, 2.0165, 1.7763, 1.9403], device='cuda:3')
+2024-08-30 15:39:31,452 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.4950, 3.0552, 2.0864, 2.6519], device='cuda:3')
+2024-08-30 15:39:34,936 INFO [train.py:1146] (3/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.0364, ctc_loss=0.06401, over 944034.00 frames.
+2024-08-30 15:39:34,938 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13606MB
+2024-08-30 15:39:39,555 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=225680.0, ans=0.125
+2024-08-30 15:39:51,494 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.81 vs. limit=15.0
+2024-08-30 15:40:21,197 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.72 vs. limit=15.0
+2024-08-30 15:40:26,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 15:40:35,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=225840.0, ans=0.04949747468305833
+2024-08-30 15:40:56,081 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=225893.33333333334, ans=0.125
+2024-08-30 15:40:58,093 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.919e+02 2.092e+02 2.421e+02 5.568e+02, threshold=4.185e+02, percent-clipped=4.0
+2024-08-30 15:41:04,970 INFO [train.py:1114] (3/4) Epoch 18, batch 50, loss[loss=0.1739, simple_loss=0.2429, pruned_loss=0.0384, ctc_loss=0.07026, over 19711.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2767, pruned_loss=0.05275, ctc_loss=0.1009, over 844773.76 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 15:42:05,261 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=226053.33333333334, ans=0.125
+2024-08-30 15:42:27,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=226160.0, ans=0.1
+2024-08-30 15:42:28,849 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.09 vs. limit=15.0
+2024-08-30 15:44:01,397 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=226160.0, ans=0.125
+2024-08-30 15:44:07,543 INFO [train.py:1114] (3/4) Epoch 18, batch 100, loss[loss=0.186, simple_loss=0.2589, pruned_loss=0.04085, ctc_loss=0.07841, over 19730.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2782, pruned_loss=0.05259, ctc_loss=0.1005, over 1499130.94 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:44:10,105 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=226213.33333333334, ans=0.125
+2024-08-30 15:44:16,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=226213.33333333334, ans=0.2
+2024-08-30 15:44:33,466 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=226320.0, ans=0.125
+2024-08-30 15:44:40,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=226320.0, ans=0.125
+2024-08-30 15:44:43,263 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=226320.0, ans=0.07
+2024-08-30 15:45:01,912 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.711e+02 1.973e+02 2.383e+02 4.146e+02, threshold=3.946e+02, percent-clipped=0.0
+2024-08-30 15:45:03,501 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.32 vs. limit=15.0
+2024-08-30 15:45:10,544 INFO [train.py:1114] (3/4) Epoch 18, batch 150, loss[loss=0.1807, simple_loss=0.2466, pruned_loss=0.04193, ctc_loss=0.07737, over 19710.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2751, pruned_loss=0.05134, ctc_loss=0.09743, over 2028556.29 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-30 15:45:10,682 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=226480.0, ans=0.025
+2024-08-30 15:45:20,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=226480.0, ans=0.0
+2024-08-30 15:45:23,021 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-30 15:45:25,173 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=226533.33333333334, ans=0.015
+2024-08-30 15:45:40,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=226586.66666666666, ans=0.07
+2024-08-30 15:45:58,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=226640.0, ans=0.0
+2024-08-30 15:46:16,532 INFO [train.py:1114] (3/4) Epoch 18, batch 200, loss[loss=0.26, simple_loss=0.3089, pruned_loss=0.07483, ctc_loss=0.1535, over 18383.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.274, pruned_loss=0.05092, ctc_loss=0.09645, over 2435908.49 frames. ], batch size: 86, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:46:17,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=226746.66666666666, ans=0.125
+2024-08-30 15:46:29,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=226800.0, ans=0.125
+2024-08-30 15:46:58,080 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-30 15:46:58,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=226906.66666666666, ans=0.0
+2024-08-30 15:46:59,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=226906.66666666666, ans=0.0
+2024-08-30 15:47:08,529 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.794e+02 2.164e+02 2.564e+02 4.131e+02, threshold=4.328e+02, percent-clipped=1.0
+2024-08-30 15:47:14,889 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=227013.33333333334, ans=10.0
+2024-08-30 15:47:20,534 INFO [train.py:1114] (3/4) Epoch 18, batch 250, loss[loss=0.2425, simple_loss=0.3013, pruned_loss=0.06676, ctc_loss=0.1254, over 19385.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2741, pruned_loss=0.05131, ctc_loss=0.09694, over 2755239.67 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-30 15:47:22,232 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.38 vs. limit=15.0
+2024-08-30 15:47:30,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=227013.33333333334, ans=0.09899494936611666
+2024-08-30 15:48:37,518 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.25 vs. limit=15.0
+2024-08-30 15:48:44,129 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:49:17,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=227226.66666666666, ans=0.125
+2024-08-30 15:49:22,642 INFO [train.py:1114] (3/4) Epoch 18, batch 300, loss[loss=0.2247, simple_loss=0.287, pruned_loss=0.06062, ctc_loss=0.103, over 19510.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.273, pruned_loss=0.05066, ctc_loss=0.09536, over 3000722.02 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:50:47,654 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.84 vs. limit=12.0
+2024-08-30 15:50:59,796 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=227333.33333333334, ans=0.1
+2024-08-30 15:51:39,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=227493.33333333334, ans=0.2
+2024-08-30 15:51:40,146 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.292e+02 1.730e+02 1.916e+02 2.273e+02 3.732e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-30 15:51:48,900 INFO [train.py:1114] (3/4) Epoch 18, batch 350, loss[loss=0.1863, simple_loss=0.2453, pruned_loss=0.04536, ctc_loss=0.09167, over 19798.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2737, pruned_loss=0.05084, ctc_loss=0.09563, over 3190837.21 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-30 15:52:00,631 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.84 vs. limit=22.5
+2024-08-30 15:52:12,197 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=227600.0, ans=0.0
+2024-08-30 15:52:32,532 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-30 15:52:41,346 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=227760.0, ans=0.0
+2024-08-30 15:52:51,836 INFO [train.py:1114] (3/4) Epoch 18, batch 400, loss[loss=0.1932, simple_loss=0.2775, pruned_loss=0.03909, ctc_loss=0.07679, over 19478.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2738, pruned_loss=0.05098, ctc_loss=0.09601, over 3342300.72 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:53:05,417 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 15:53:14,948 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227920.0, ans=0.1
+2024-08-30 15:54:16,395 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.651e+02 1.862e+02 2.258e+02 4.636e+02, threshold=3.723e+02, percent-clipped=1.0
+2024-08-30 15:54:24,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=228080.0, ans=0.0
+2024-08-30 15:54:25,943 INFO [train.py:1114] (3/4) Epoch 18, batch 450, loss[loss=0.2074, simple_loss=0.2776, pruned_loss=0.04995, ctc_loss=0.09312, over 19604.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2741, pruned_loss=0.05118, ctc_loss=0.09618, over 3450789.67 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-30 15:54:38,251 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.24 vs. limit=15.0
+2024-08-30 15:54:59,086 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.22 vs. limit=22.5
+2024-08-30 15:55:13,183 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=228240.0, ans=0.2
+2024-08-30 15:55:14,398 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=228240.0, ans=0.125
+2024-08-30 15:55:33,019 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.76 vs. limit=22.5
+2024-08-30 15:55:37,514 INFO [train.py:1114] (3/4) Epoch 18, batch 500, loss[loss=0.2338, simple_loss=0.299, pruned_loss=0.06119, ctc_loss=0.1154, over 19664.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2732, pruned_loss=0.05075, ctc_loss=0.09534, over 3545366.65 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:55:37,722 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=228346.66666666666, ans=0.07
+2024-08-30 15:55:49,708 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=228400.0, ans=0.07
+2024-08-30 15:56:36,779 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=228453.33333333334, ans=0.125
+2024-08-30 15:56:43,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=228506.66666666666, ans=0.125
+2024-08-30 15:57:47,517 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=228506.66666666666, ans=0.0
+2024-08-30 15:57:54,272 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.602e+02 1.832e+02 2.190e+02 3.877e+02, threshold=3.665e+02, percent-clipped=2.0
+2024-08-30 15:58:00,982 INFO [train.py:1114] (3/4) Epoch 18, batch 550, loss[loss=0.2323, simple_loss=0.2952, pruned_loss=0.06194, ctc_loss=0.1139, over 19350.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2734, pruned_loss=0.05091, ctc_loss=0.09564, over 3608466.68 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-30 15:58:58,432 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=228666.66666666666, ans=0.0
+2024-08-30 16:00:54,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=228720.0, ans=0.125
+2024-08-30 16:01:22,882 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=228826.66666666666, ans=0.125
+2024-08-30 16:01:33,966 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.73 vs. limit=15.0
+2024-08-30 16:01:35,826 INFO [train.py:1114] (3/4) Epoch 18, batch 600, loss[loss=0.2209, simple_loss=0.277, pruned_loss=0.05927, ctc_loss=0.1157, over 19320.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2734, pruned_loss=0.05106, ctc_loss=0.09603, over 3665320.01 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:02:29,470 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.11 vs. limit=22.5
+2024-08-30 16:02:34,800 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.59 vs. limit=6.0
+2024-08-30 16:03:46,431 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.26 vs. limit=12.0
+2024-08-30 16:04:41,753 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 1.726e+02 2.045e+02 2.727e+02 4.181e+02, threshold=4.090e+02, percent-clipped=7.0
+2024-08-30 16:04:43,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=229093.33333333334, ans=0.09899494936611666
+2024-08-30 16:04:44,365 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=229093.33333333334, ans=0.1
+2024-08-30 16:04:45,468 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229093.33333333334, ans=0.1
+2024-08-30 16:04:48,708 INFO [train.py:1114] (3/4) Epoch 18, batch 650, loss[loss=0.176, simple_loss=0.2595, pruned_loss=0.03386, ctc_loss=0.06195, over 19775.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2727, pruned_loss=0.05051, ctc_loss=0.09503, over 3715876.17 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-30 16:06:22,468 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.15 vs. limit=15.0
+2024-08-30 16:06:23,189 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229200.0, ans=0.1
+2024-08-30 16:06:38,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=229253.33333333334, ans=0.2
+2024-08-30 16:06:51,813 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=229306.66666666666, ans=0.025
+2024-08-30 16:07:16,999 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=229360.0, ans=0.025
+2024-08-30 16:07:27,778 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=229360.0, ans=0.09899494936611666
+2024-08-30 16:07:29,450 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.40 vs. limit=22.5
+2024-08-30 16:07:30,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229360.0, ans=0.1
+2024-08-30 16:07:32,102 INFO [train.py:1114] (3/4) Epoch 18, batch 700, loss[loss=0.1875, simple_loss=0.2572, pruned_loss=0.04219, ctc_loss=0.08339, over 19735.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2729, pruned_loss=0.05048, ctc_loss=0.09495, over 3746552.88 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:07:35,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=229413.33333333334, ans=0.2
+2024-08-30 16:07:36,194 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.65 vs. limit=12.0
+2024-08-30 16:07:51,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=229466.66666666666, ans=0.2
+2024-08-30 16:08:04,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=229520.0, ans=0.0
+2024-08-30 16:08:17,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=229573.33333333334, ans=0.0
+2024-08-30 16:08:27,240 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.709e+02 1.988e+02 2.480e+02 4.374e+02, threshold=3.975e+02, percent-clipped=1.0
+2024-08-30 16:08:34,067 INFO [train.py:1114] (3/4) Epoch 18, batch 750, loss[loss=0.2178, simple_loss=0.2783, pruned_loss=0.05683, ctc_loss=0.1091, over 19502.00 frames. ], tot_loss[loss=0.205, simple_loss=0.272, pruned_loss=0.05008, ctc_loss=0.09439, over 3773339.17 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:08:40,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=229680.0, ans=0.2
+2024-08-30 16:08:43,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=229680.0, ans=0.125
+2024-08-30 16:08:48,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=229733.33333333334, ans=0.125
+2024-08-30 16:09:13,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=229840.0, ans=0.125
+2024-08-30 16:09:14,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=229840.0, ans=0.125
+2024-08-30 16:09:16,662 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=229840.0, ans=0.0
+2024-08-30 16:09:31,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=229893.33333333334, ans=0.125
+2024-08-30 16:09:35,836 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229893.33333333334, ans=0.1
+2024-08-30 16:09:38,054 INFO [train.py:1114] (3/4) Epoch 18, batch 800, loss[loss=0.1901, simple_loss=0.253, pruned_loss=0.04663, ctc_loss=0.08497, over 19800.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2725, pruned_loss=0.05039, ctc_loss=0.09493, over 3794534.88 frames. ], batch size: 49, lr: 8.37e-03, grad_scale: 32.0
+2024-08-30 16:10:01,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=230000.0, ans=0.125
+2024-08-30 16:10:22,011 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.08 vs. limit=12.0
+2024-08-30 16:10:28,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=230106.66666666666, ans=0.125
+2024-08-30 16:10:30,995 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=230106.66666666666, ans=0.1
+2024-08-30 16:11:34,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-30 16:11:34,981 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.744e+02 1.950e+02 2.451e+02 4.139e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-30 16:11:47,902 INFO [train.py:1114] (3/4) Epoch 18, batch 850, loss[loss=0.19, simple_loss=0.2696, pruned_loss=0.04035, ctc_loss=0.07439, over 19618.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2716, pruned_loss=0.05012, ctc_loss=0.09422, over 3812874.07 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:11:54,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=230213.33333333334, ans=0.0
+2024-08-30 16:11:58,599 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=230266.66666666666, ans=0.95
+2024-08-30 16:12:02,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=230266.66666666666, ans=0.0
+2024-08-30 16:12:09,617 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:12:21,727 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=230320.0, ans=0.125
+2024-08-30 16:12:30,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=230373.33333333334, ans=0.1
+2024-08-30 16:12:48,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=230426.66666666666, ans=0.0
+2024-08-30 16:12:57,694 INFO [train.py:1114] (3/4) Epoch 18, batch 900, loss[loss=0.1832, simple_loss=0.2457, pruned_loss=0.04403, ctc_loss=0.0816, over 19824.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2721, pruned_loss=0.05064, ctc_loss=0.09513, over 3817990.98 frames. ], batch size: 49, lr: 8.36e-03, grad_scale: 32.0
+2024-08-30 16:12:58,950 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=230480.0, ans=0.125
+2024-08-30 16:13:17,885 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.63 vs. limit=15.0
+2024-08-30 16:13:33,101 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=230640.0, ans=0.125
+2024-08-30 16:13:41,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=230640.0, ans=0.0
+2024-08-30 16:13:49,529 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.771e+02 2.097e+02 2.541e+02 3.279e+02, threshold=4.195e+02, percent-clipped=1.0
+2024-08-30 16:13:51,985 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=230693.33333333334, ans=0.0
+2024-08-30 16:13:56,598 INFO [train.py:1114] (3/4) Epoch 18, batch 950, loss[loss=0.2131, simple_loss=0.2687, pruned_loss=0.0571, ctc_loss=0.1084, over 19489.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2729, pruned_loss=0.05133, ctc_loss=0.09635, over 3820036.99 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:14:49,557 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.39 vs. limit=12.0
+2024-08-30 16:15:24,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=230906.66666666666, ans=0.125
+2024-08-30 16:15:24,566 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.30 vs. limit=15.0
+2024-08-30 16:15:41,760 INFO [train.py:1114] (3/4) Epoch 18, batch 1000, loss[loss=0.2131, simple_loss=0.2786, pruned_loss=0.05255, ctc_loss=0.1063, over 19864.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2744, pruned_loss=0.05206, ctc_loss=0.09773, over 3816317.46 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-30 16:15:56,507 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.16 vs. limit=15.0
+2024-08-30 16:16:04,173 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=231066.66666666666, ans=0.025
+2024-08-30 16:16:34,260 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=231173.33333333334, ans=0.2
+2024-08-30 16:16:40,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231226.66666666666, ans=0.1
+2024-08-30 16:16:44,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=231226.66666666666, ans=0.125
+2024-08-30 16:16:46,157 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.681e+02 1.935e+02 2.141e+02 3.468e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-30 16:16:53,180 INFO [train.py:1114] (3/4) Epoch 18, batch 1050, loss[loss=0.1995, simple_loss=0.279, pruned_loss=0.04329, ctc_loss=0.0834, over 19827.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2736, pruned_loss=0.05162, ctc_loss=0.09701, over 3823782.84 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:18:04,458 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-30 16:18:29,885 INFO [train.py:1114] (3/4) Epoch 18, batch 1100, loss[loss=0.1948, simple_loss=0.2684, pruned_loss=0.04425, ctc_loss=0.08193, over 19595.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2733, pruned_loss=0.05115, ctc_loss=0.09609, over 3831868.12 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-30 16:18:35,279 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.41 vs. limit=15.0
+2024-08-30 16:18:39,750 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.24 vs. limit=15.0
+2024-08-30 16:19:01,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=231653.33333333334, ans=0.0
+2024-08-30 16:19:01,621 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.26 vs. limit=15.0
+2024-08-30 16:19:19,965 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=231760.0, ans=0.1
+2024-08-30 16:19:24,181 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.668e+02 1.884e+02 2.263e+02 3.606e+02, threshold=3.767e+02, percent-clipped=0.0
+2024-08-30 16:19:52,625 INFO [train.py:1114] (3/4) Epoch 18, batch 1150, loss[loss=0.2027, simple_loss=0.2703, pruned_loss=0.04845, ctc_loss=0.09539, over 19585.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2735, pruned_loss=0.05139, ctc_loss=0.09647, over 3828941.83 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:19:58,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=231813.33333333334, ans=0.09899494936611666
+2024-08-30 16:20:05,674 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=231866.66666666666, ans=0.125
+2024-08-30 16:22:23,866 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 16:22:51,706 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.17 vs. limit=10.0
+2024-08-30 16:22:56,527 INFO [train.py:1114] (3/4) Epoch 18, batch 1200, loss[loss=0.2036, simple_loss=0.2796, pruned_loss=0.04706, ctc_loss=0.08376, over 19846.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.274, pruned_loss=0.05131, ctc_loss=0.09649, over 3824642.10 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-30 16:23:02,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=232080.0, ans=0.125
+2024-08-30 16:23:05,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=232080.0, ans=0.2
+2024-08-30 16:23:17,467 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=232186.66666666666, ans=0.2
+2024-08-30 16:23:29,403 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.16 vs. limit=15.0
+2024-08-30 16:23:31,518 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=232240.0, ans=0.125
+2024-08-30 16:23:45,952 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.656e+02 1.841e+02 2.164e+02 3.391e+02, threshold=3.682e+02, percent-clipped=0.0
+2024-08-30 16:23:52,939 INFO [train.py:1114] (3/4) Epoch 18, batch 1250, loss[loss=0.212, simple_loss=0.2879, pruned_loss=0.04999, ctc_loss=0.09056, over 19519.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2742, pruned_loss=0.05105, ctc_loss=0.0958, over 3842414.52 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:24:01,648 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=232346.66666666666, ans=10.0
+2024-08-30 16:24:25,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=232453.33333333334, ans=0.0
+2024-08-30 16:24:33,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=232506.66666666666, ans=0.0
+2024-08-30 16:25:38,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=232506.66666666666, ans=0.0
+2024-08-30 16:25:53,661 INFO [train.py:1114] (3/4) Epoch 18, batch 1300, loss[loss=0.2071, simple_loss=0.2706, pruned_loss=0.05204, ctc_loss=0.09905, over 18885.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2739, pruned_loss=0.05086, ctc_loss=0.0956, over 3845881.93 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-30 16:32:09,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=232613.33333333334, ans=0.0
+2024-08-30 16:32:25,510 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=232666.66666666666, ans=0.125
+2024-08-30 16:32:32,932 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=232720.0, ans=0.125
+2024-08-30 16:32:40,075 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=232773.33333333334, ans=0.2
+2024-08-30 16:35:45,911 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=232773.33333333334, ans=0.125
+2024-08-30 16:45:23,076 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.785e+02 2.170e+02 2.759e+02 4.331e+02, threshold=4.339e+02, percent-clipped=5.0
+2024-08-30 16:57:19,369 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.98 vs. limit=15.0
+2024-08-30 17:02:45,886 INFO [train.py:1114] (3/4) Epoch 18, batch 1350, loss[loss=0.2046, simple_loss=0.2722, pruned_loss=0.05017, ctc_loss=0.09185, over 19779.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2736, pruned_loss=0.05092, ctc_loss=0.09559, over 3857636.80 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-30 17:12:54,858 INFO [train.py:1050] (3/4) Caught exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=46170, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600006 milliseconds before timing out..
+2024-08-30 17:12:54,859 INFO [checkpoint.py:75] (3/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-3.pt
+2024-08-30 17:12:56,257 INFO [train.py:1413] (3/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 17:12:56,300 INFO [train.py:1419] (3/4) features shape: torch.Size([56, 1420, 80])
+2024-08-30 17:12:56,303 INFO [train.py:1423] (3/4) num tokens: 4370
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-0
new file mode 100644
index 0000000000000000000000000000000000000000..d89c8c71ebbd7c4f6fade0fb7f3961fe8849fa95
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-0
@@ -0,0 +1,43 @@
+2024-08-30 20:06:52,423 INFO [train.py:1182] (0/4) Training started
+2024-08-30 20:06:52,427 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-30 20:06:52,566 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 20:06:52,566 INFO [train.py:1212] (0/4) About to create model
+2024-08-30 20:06:54,344 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-30 20:06:54,894 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 20:09:02,216 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-30 20:09:02,658 INFO [train.py:1231] (0/4) Using DDP
+2024-08-30 20:09:07,155 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-30 20:17:08,517 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-30 20:17:08,518 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 20:20:46,621 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-30 20:20:48,445 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-30 20:20:48,451 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-30 20:21:06,542 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-30 20:22:04,978 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-30 20:22:05,299 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-30 20:22:05,299 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:26:29,960 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-30 20:26:34,938 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-30 20:29:01,578 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-30 20:29:02,551 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.29 vs. limit=5.0
+2024-08-30 20:29:03,107 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 20:29:43,853 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 20:29:45,409 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 20:29:45,427 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-30 20:31:27,154 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.48 vs. limit=6.0
+2024-08-30 20:31:36,630 INFO [train.py:1114] (0/4) Epoch 18, batch 0, loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.04175, ctc_loss=0.0762, over 19813.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.04175, ctc_loss=0.0762, over 19813.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 20:31:38,746 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-30 20:32:36,423 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3326, 1.0374, 1.5079, 0.7235, 1.4933, 1.5508, 1.6642, 1.3541],
+ device='cuda:0')
+2024-08-30 20:32:45,078 INFO [train.py:1146] (0/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 20:32:45,078 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 20:34:50,689 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=225680.0, ans=0.0
+2024-08-30 20:55:42,260 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.70 vs. limit=15.0
+2024-08-30 20:56:22,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=225786.66666666666, ans=0.2
+2024-08-30 20:56:22,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 20:57:18,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=225786.66666666666, ans=0.2
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-1
new file mode 100644
index 0000000000000000000000000000000000000000..3d407219226f57fb85626f7e050edd2381434198
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-1
@@ -0,0 +1,45 @@
+2024-08-30 20:06:52,808 INFO [train.py:1182] (1/4) Training started
+2024-08-30 20:06:52,809 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-30 20:06:52,812 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 20:06:52,812 INFO [train.py:1212] (1/4) About to create model
+2024-08-30 20:06:54,273 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-30 20:06:54,312 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 20:09:02,596 INFO [train.py:1231] (1/4) Using DDP
+2024-08-30 20:09:07,158 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-30 20:17:08,539 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-30 20:17:08,539 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 20:20:46,622 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-30 20:20:48,457 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-30 20:20:48,458 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-30 20:21:06,543 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-30 20:22:04,978 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-30 20:22:05,300 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-30 20:22:05,300 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:26:28,265 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.35 vs. limit=3.0
+2024-08-30 20:26:29,961 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-30 20:26:34,937 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 20:29:01,588 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 20:29:03,112 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 20:29:43,852 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 20:29:44,978 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=9.29 vs. limit=5.0
+2024-08-30 20:29:45,417 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 20:29:45,440 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-30 20:31:36,630 INFO [train.py:1114] (1/4) Epoch 18, batch 0, loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04853, ctc_loss=0.09187, over 19410.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04853, ctc_loss=0.09187, over 19410.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 20:31:38,746 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-30 20:32:45,076 INFO [train.py:1146] (1/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 20:32:45,077 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 20:32:51,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.51 vs. limit=15.0
+2024-08-30 20:34:49,772 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.71 vs. limit=6.0
+2024-08-30 20:47:12,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=225733.33333333334, ans=0.0
+2024-08-30 20:52:48,949 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.61 vs. limit=10.0
+2024-08-30 20:58:14,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=225840.0, ans=0.125
+2024-08-30 21:10:11,094 INFO [train.py:1050] (1/4) Caught exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=593, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600007 milliseconds before timing out..
+2024-08-30 21:10:11,097 INFO [checkpoint.py:75] (1/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-1.pt
+2024-08-30 21:10:16,373 INFO [train.py:1413] (1/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 21:10:16,429 INFO [train.py:1419] (1/4) features shape: torch.Size([85, 933, 80])
+2024-08-30 21:10:16,432 INFO [train.py:1423] (1/4) num tokens: 4004
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-2
new file mode 100644
index 0000000000000000000000000000000000000000..f00bc8bc15e628c563ae312a8ef3a7ce971ef9da
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-2
@@ -0,0 +1,44 @@
+2024-08-30 20:06:52,811 INFO [train.py:1182] (2/4) Training started
+2024-08-30 20:06:52,812 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-30 20:06:52,814 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 20:06:52,814 INFO [train.py:1212] (2/4) About to create model
+2024-08-30 20:06:54,260 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-30 20:06:54,312 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 20:09:02,612 INFO [train.py:1231] (2/4) Using DDP
+2024-08-30 20:09:07,155 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-30 20:17:08,543 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-30 20:17:08,544 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 20:20:46,622 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-30 20:20:48,466 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-30 20:20:48,467 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-30 20:21:06,542 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-30 20:22:04,978 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-30 20:22:05,301 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-30 20:22:05,302 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:26:29,960 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-30 20:26:34,942 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-30 20:29:01,588 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 20:29:02,828 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=256, metric=8.54 vs. limit=7.5
+2024-08-30 20:29:03,110 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 20:29:43,852 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 20:29:45,410 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 20:29:45,429 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-30 20:31:36,626 INFO [train.py:1114] (2/4) Epoch 18, batch 0, loss[loss=0.1826, simple_loss=0.2456, pruned_loss=0.04306, ctc_loss=0.08405, over 19432.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2456, pruned_loss=0.04306, ctc_loss=0.08405, over 19432.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 20:31:38,746 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-30 20:32:33,196 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3674, 1.1105, 1.6340, 0.7188, 1.6513, 1.7595, 1.8532, 1.6009],
+ device='cuda:2')
+2024-08-30 20:32:45,076 INFO [train.py:1146] (2/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 20:32:45,077 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 20:32:53,619 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.87 vs. limit=22.5
+2024-08-30 20:34:49,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=225680.0, ans=0.0
+2024-08-30 20:46:48,454 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.39 vs. limit=15.0
+2024-08-30 21:10:11,093 INFO [train.py:1050] (2/4) Caught exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=593, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600006 milliseconds before timing out..
+2024-08-30 21:10:11,094 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-2.pt
+2024-08-30 21:10:12,589 INFO [train.py:1413] (2/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 21:10:12,755 INFO [train.py:1419] (2/4) features shape: torch.Size([85, 936, 80])
+2024-08-30 21:10:12,757 INFO [train.py:1423] (2/4) num tokens: 4032
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-3
new file mode 100644
index 0000000000000000000000000000000000000000..b346abeaa39538dcb74762e147ef636942aea75f
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-20-06-52-3
@@ -0,0 +1,41 @@
+2024-08-30 20:06:52,810 INFO [train.py:1182] (3/4) Training started
+2024-08-30 20:06:52,811 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-30 20:06:52,813 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2647.int.cedar.computecanada.ca', 'IP address': '172.16.146.84'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 20:06:52,814 INFO [train.py:1212] (3/4) About to create model
+2024-08-30 20:06:54,261 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-30 20:06:54,312 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 20:09:02,570 INFO [train.py:1231] (3/4) Using DDP
+2024-08-30 20:09:07,155 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-30 20:17:08,501 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-30 20:17:08,502 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 20:20:46,622 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-30 20:20:46,868 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-30 20:20:46,869 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-30 20:20:48,445 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-30 20:20:48,451 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-30 20:21:06,542 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-30 20:22:04,978 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-30 20:22:05,306 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-30 20:22:05,307 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 20:26:29,960 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-30 20:26:34,941 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 20:29:01,585 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 20:29:02,557 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.30 vs. limit=5.0
+2024-08-30 20:29:03,107 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 20:29:43,858 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 20:29:45,405 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 20:29:45,428 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-30 20:31:23,012 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-30 20:31:36,630 INFO [train.py:1114] (3/4) Epoch 18, batch 0, loss[loss=0.1891, simple_loss=0.2515, pruned_loss=0.04522, ctc_loss=0.09044, over 19798.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2515, pruned_loss=0.04522, ctc_loss=0.09044, over 19798.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 20:31:38,746 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-30 20:32:38,508 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3005, 0.9812, 1.4801, 0.7409, 1.4295, 1.5490, 1.6372, 1.3385],
+ device='cuda:3')
+2024-08-30 20:32:45,068 INFO [train.py:1146] (3/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 20:32:45,069 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 20:34:50,703 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=225680.0, ans=0.1
+2024-08-30 20:54:09,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-30 20:55:52,356 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 20:59:09,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=225840.0, ans=0.125
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-0
new file mode 100644
index 0000000000000000000000000000000000000000..c096d42f8a5c092b015964f6e936f36bb51e61ec
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-0
@@ -0,0 +1,47 @@
+2024-08-30 21:35:28,104 INFO [train.py:1182] (0/4) Training started
+2024-08-30 21:35:28,112 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-30 21:35:28,717 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2539.int.cedar.computecanada.ca', 'IP address': '172.16.145.232'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 21:35:28,718 INFO [train.py:1212] (0/4) About to create model
+2024-08-30 21:35:30,104 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-30 21:35:30,652 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 21:39:40,123 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-30 21:39:40,535 INFO [train.py:1231] (0/4) Using DDP
+2024-08-30 21:39:47,388 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-30 21:39:54,492 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-30 21:39:54,492 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-30 21:39:56,913 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-30 21:39:56,921 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-30 21:39:57,110 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-30 21:39:57,483 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-30 21:39:57,807 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-30 21:39:57,807 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 21:45:27,165 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-30 21:45:28,623 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-30 21:46:07,693 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-30 21:46:18,191 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.29 vs. limit=5.0
+2024-08-30 21:46:18,706 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 21:47:44,993 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 21:47:46,565 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 21:47:46,582 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-30 21:50:09,083 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.48 vs. limit=6.0
+2024-08-30 21:50:11,573 INFO [train.py:1114] (0/4) Epoch 18, batch 0, loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.04175, ctc_loss=0.0762, over 19813.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.04175, ctc_loss=0.0762, over 19813.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 21:50:11,574 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-30 21:50:40,978 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3326, 1.0374, 1.5079, 0.7235, 1.4933, 1.5508, 1.6642, 1.3541],
+ device='cuda:0')
+2024-08-30 21:51:06,633 INFO [train.py:1146] (0/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 21:51:06,633 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-30 21:52:24,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=225680.0, ans=0.0
+2024-08-30 21:59:02,272 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.66 vs. limit=15.0
+2024-08-30 21:59:14,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=225786.66666666666, ans=0.2
+2024-08-30 21:59:14,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 21:59:15,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=225786.66666666666, ans=0.2
+2024-08-30 22:03:46,337 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.919e+02 2.092e+02 2.381e+02 5.934e+02, threshold=4.185e+02, percent-clipped=5.0
+2024-08-30 22:04:57,926 INFO [train.py:1114] (0/4) Epoch 18, batch 50, loss[loss=0.1874, simple_loss=0.2541, pruned_loss=0.04354, ctc_loss=0.08426, over 19716.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2783, pruned_loss=0.05351, ctc_loss=0.1012, over 845746.19 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 22:07:11,370 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.73 vs. limit=15.0
+2024-08-30 22:24:47,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=226160.0, ans=0.0
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-1
new file mode 100644
index 0000000000000000000000000000000000000000..a7e38d976c9073a6f296239850a4278e0c880b79
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-1
@@ -0,0 +1,56 @@
+2024-08-30 21:35:28,366 INFO [train.py:1182] (1/4) Training started
+2024-08-30 21:35:28,366 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-30 21:35:28,717 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2539.int.cedar.computecanada.ca', 'IP address': '172.16.145.232'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 21:35:28,717 INFO [train.py:1212] (1/4) About to create model
+2024-08-30 21:35:30,102 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-30 21:35:30,216 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 21:39:40,401 INFO [train.py:1231] (1/4) Using DDP
+2024-08-30 21:39:47,395 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-30 21:39:54,485 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-30 21:39:54,485 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-30 21:39:56,914 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-30 21:39:56,921 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-30 21:39:57,110 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-30 21:39:57,483 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-30 21:39:57,813 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-30 21:39:57,814 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 21:45:25,347 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.35 vs. limit=3.0
+2024-08-30 21:45:27,163 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-30 21:45:28,618 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 21:46:07,692 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 21:46:18,716 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 21:47:44,986 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 21:47:46,092 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=9.29 vs. limit=5.0
+2024-08-30 21:47:46,562 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 21:47:46,583 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-30 21:50:11,562 INFO [train.py:1114] (1/4) Epoch 18, batch 0, loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04853, ctc_loss=0.09187, over 19410.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04853, ctc_loss=0.09187, over 19410.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 21:50:11,563 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-30 21:51:06,631 INFO [train.py:1146] (1/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 21:51:06,631 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-30 21:51:13,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.51 vs. limit=15.0
+2024-08-30 21:52:06,788 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.71 vs. limit=6.0
+2024-08-30 21:55:54,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=225733.33333333334, ans=0.0
+2024-08-30 21:57:28,046 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.64 vs. limit=10.0
+2024-08-30 21:59:39,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=225840.0, ans=0.125
+2024-08-30 22:03:09,068 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.35 vs. limit=12.0
+2024-08-30 22:03:46,337 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.919e+02 2.092e+02 2.381e+02 5.934e+02, threshold=4.185e+02, percent-clipped=5.0
+2024-08-30 22:04:57,923 INFO [train.py:1114] (1/4) Epoch 18, batch 50, loss[loss=0.1577, simple_loss=0.2323, pruned_loss=0.03009, ctc_loss=0.05751, over 19735.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2747, pruned_loss=0.05175, ctc_loss=0.09898, over 843693.80 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 22:06:03,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.87 vs. limit=6.0
+2024-08-30 22:06:35,044 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-30 22:06:35,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=225946.66666666666, ans=0.125
+2024-08-30 22:11:29,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226000.0, ans=0.125
+2024-08-30 22:21:10,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=226106.66666666666, ans=0.95
+2024-08-30 22:23:47,470 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.79 vs. limit=15.0
+2024-08-30 22:26:49,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=226160.0, ans=0.125
+2024-08-30 22:26:51,129 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.70 vs. limit=15.0
+2024-08-30 22:44:30,258 INFO [train.py:1050] (1/4) Caught exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1277, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600005 milliseconds before timing out..
+2024-08-30 22:44:30,295 INFO [checkpoint.py:75] (1/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-1.pt
+2024-08-30 22:45:51,840 INFO [train.py:1413] (1/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 22:45:51,961 INFO [train.py:1419] (1/4) features shape: torch.Size([61, 1293, 80])
+2024-08-30 22:45:51,964 INFO [train.py:1423] (1/4) num tokens: 4246
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-2
new file mode 100644
index 0000000000000000000000000000000000000000..55e292856b9413e16533585436669d5d9677cf5c
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-2
@@ -0,0 +1,53 @@
+2024-08-30 21:35:28,367 INFO [train.py:1182] (2/4) Training started
+2024-08-30 21:35:28,369 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-30 21:35:28,716 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2539.int.cedar.computecanada.ca', 'IP address': '172.16.145.232'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 21:35:28,717 INFO [train.py:1212] (2/4) About to create model
+2024-08-30 21:35:30,089 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-30 21:35:30,216 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 21:39:40,389 INFO [train.py:1231] (2/4) Using DDP
+2024-08-30 21:39:47,387 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-30 21:39:54,477 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-30 21:39:54,477 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-30 21:39:56,919 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-30 21:39:56,921 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-30 21:39:57,110 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-30 21:39:57,483 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-30 21:39:57,809 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-30 21:39:57,809 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 21:45:27,164 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-30 21:45:28,616 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-30 21:46:07,701 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 21:46:18,426 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=256, metric=8.54 vs. limit=7.5
+2024-08-30 21:46:18,718 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 21:47:44,988 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 21:47:46,563 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 21:47:46,583 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-30 21:50:11,564 INFO [train.py:1114] (2/4) Epoch 18, batch 0, loss[loss=0.1826, simple_loss=0.2456, pruned_loss=0.04306, ctc_loss=0.08405, over 19432.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2456, pruned_loss=0.04306, ctc_loss=0.08405, over 19432.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 21:50:11,564 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-30 21:50:23,214 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3674, 1.1105, 1.6340, 0.7188, 1.6513, 1.7595, 1.8532, 1.6009],
+ device='cuda:2')
+2024-08-30 21:51:06,624 INFO [train.py:1146] (2/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 21:51:06,625 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-30 21:51:37,598 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.87 vs. limit=22.5
+2024-08-30 21:52:07,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=225680.0, ans=0.0
+2024-08-30 21:56:26,027 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.37 vs. limit=15.0
+2024-08-30 22:03:46,337 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.919e+02 2.092e+02 2.381e+02 5.934e+02, threshold=4.185e+02, percent-clipped=5.0
+2024-08-30 22:04:55,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225946.66666666666, ans=0.1
+2024-08-30 22:04:57,945 INFO [train.py:1114] (2/4) Epoch 18, batch 50, loss[loss=0.1957, simple_loss=0.2513, pruned_loss=0.05004, ctc_loss=0.1001, over 19736.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2768, pruned_loss=0.05278, ctc_loss=0.09989, over 844774.48 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 22:06:15,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=225946.66666666666, ans=0.025
+2024-08-30 22:06:27,586 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.92 vs. limit=15.0
+2024-08-30 22:09:47,317 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.18 vs. limit=15.0
+2024-08-30 22:12:12,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=226053.33333333334, ans=0.0
+2024-08-30 22:24:23,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=226160.0, ans=0.125
+2024-08-30 22:25:46,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=226160.0, ans=0.2
+2024-08-30 22:44:30,258 INFO [train.py:1050] (2/4) Caught exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1277, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600002 milliseconds before timing out..
+2024-08-30 22:44:30,684 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-2.pt
+2024-08-30 22:44:32,230 INFO [train.py:1413] (2/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 22:44:41,079 INFO [train.py:1419] (2/4) features shape: torch.Size([61, 1295, 80])
+2024-08-30 22:44:41,082 INFO [train.py:1423] (2/4) num tokens: 4260
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-3
new file mode 100644
index 0000000000000000000000000000000000000000..7b713734f93e24863ec3b77900924c450d9c92b0
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-30-21-35-28-3
@@ -0,0 +1,52 @@
+2024-08-30 21:35:28,367 INFO [train.py:1182] (3/4) Training started
+2024-08-30 21:35:28,370 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-30 21:35:28,717 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2539.int.cedar.computecanada.ca', 'IP address': '172.16.145.232'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-30 21:35:28,717 INFO [train.py:1212] (3/4) About to create model
+2024-08-30 21:35:30,110 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-30 21:35:30,217 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-30 21:39:40,402 INFO [train.py:1231] (3/4) Using DDP
+2024-08-30 21:39:47,405 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-30 21:39:54,490 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-30 21:39:54,490 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-30 21:39:55,313 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-30 21:39:55,314 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-30 21:39:56,914 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-30 21:39:56,921 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-30 21:39:57,110 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-30 21:39:57,484 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-30 21:39:57,810 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-30 21:39:57,810 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-30 21:45:27,163 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-30 21:45:28,618 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 21:46:07,688 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-30 21:46:18,207 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.30 vs. limit=5.0
+2024-08-30 21:46:18,706 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 21:47:44,986 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 21:47:46,564 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 21:47:46,575 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-30 21:50:06,648 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-30 21:50:11,560 INFO [train.py:1114] (3/4) Epoch 18, batch 0, loss[loss=0.1891, simple_loss=0.2515, pruned_loss=0.04522, ctc_loss=0.09044, over 19798.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2515, pruned_loss=0.04522, ctc_loss=0.09044, over 19798.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 21:50:11,561 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-30 21:50:59,757 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3005, 0.9812, 1.4801, 0.7409, 1.4295, 1.5490, 1.6372, 1.3385],
+ device='cuda:3')
+2024-08-30 21:51:06,624 INFO [train.py:1146] (3/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-30 21:51:06,625 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-30 21:52:30,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=225680.0, ans=0.1
+2024-08-30 21:58:05,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-30 21:59:03,035 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-30 22:00:59,910 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=225840.0, ans=0.125
+2024-08-30 22:03:46,338 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.919e+02 2.092e+02 2.381e+02 5.934e+02, threshold=4.185e+02, percent-clipped=5.0
+2024-08-30 22:04:52,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=225946.66666666666, ans=0.0
+2024-08-30 22:04:57,926 INFO [train.py:1114] (3/4) Epoch 18, batch 50, loss[loss=0.1828, simple_loss=0.2487, pruned_loss=0.04244, ctc_loss=0.0803, over 19711.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2763, pruned_loss=0.05243, ctc_loss=0.09984, over 844773.76 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-30 22:11:24,706 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.98 vs. limit=15.0
+2024-08-30 22:12:57,269 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.33 vs. limit=15.0
+2024-08-30 22:20:51,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=226106.66666666666, ans=0.025
+2024-08-30 22:44:30,258 INFO [train.py:1050] (3/4) Caught exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1277, OpType=ALLREDUCE, NumelIn=841, NumelOut=841, Timeout(ms)=600000) ran for 600000 milliseconds before timing out..
+2024-08-30 22:44:30,291 INFO [checkpoint.py:75] (3/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/bad-model-3.pt
+2024-08-30 22:44:31,758 INFO [train.py:1413] (3/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/batch-a689ee27-eec1-83b6-15a8-f48f39643825.pt
+2024-08-30 22:44:40,498 INFO [train.py:1419] (3/4) features shape: torch.Size([61, 1294, 80])
+2024-08-30 22:44:40,500 INFO [train.py:1423] (3/4) num tokens: 4370
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-0 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-0
new file mode 100644
index 0000000000000000000000000000000000000000..166821eb5ceeb50c7d69db7b5ee5ca118ae98d7d
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-0
@@ -0,0 +1,1057 @@
+2024-08-31 13:15:01,118 INFO [train.py:1182] (0/4) Training started
+2024-08-31 13:15:03,782 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-31 13:15:03,785 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-31 13:15:03,785 INFO [train.py:1212] (0/4) About to create model
+2024-08-31 13:15:10,365 INFO [train.py:1216] (0/4) Number of model parameters: 66367431
+2024-08-31 13:15:45,928 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-31 13:16:01,508 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-31 13:16:01,896 INFO [train.py:1231] (0/4) Using DDP
+2024-08-31 13:16:07,013 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-31 13:16:46,064 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-31 13:16:46,065 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-31 13:16:46,068 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-31 13:16:46,068 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-31 13:16:46,068 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-31 13:16:46,068 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-31 13:16:46,068 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-31 13:16:46,069 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-31 13:16:47,640 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-31 13:16:50,429 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-31 13:16:50,430 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-31 13:16:50,431 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-31 13:16:50,754 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-31 13:16:50,755 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:22:43,895 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12840MB
+2024-08-31 13:22:45,376 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-31 13:23:02,014 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12916MB
+2024-08-31 13:23:02,981 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.29 vs. limit=5.0
+2024-08-31 13:23:03,527 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-31 13:24:12,102 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-31 13:24:13,682 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-31 13:24:13,696 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-31 13:25:02,922 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.48 vs. limit=6.0
+2024-08-31 13:25:06,944 INFO [train.py:1114] (0/4) Epoch 18, batch 0, loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.04175, ctc_loss=0.0762, over 19813.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2487, pruned_loss=0.04175, ctc_loss=0.0762, over 19813.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 13:25:06,945 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-31 13:25:28,551 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3326, 1.0374, 1.5079, 0.7235, 1.4933, 1.5508, 1.6642, 1.3541],
+ device='cuda:0')
+2024-08-31 13:25:49,907 INFO [train.py:1146] (0/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-31 13:25:49,908 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13093MB
+2024-08-31 13:27:20,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=225680.0, ans=0.0
+2024-08-31 13:39:08,521 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.70 vs. limit=15.0
+2024-08-31 13:41:45,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=225786.66666666666, ans=0.2
+2024-08-31 13:41:45,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-31 13:42:04,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=225786.66666666666, ans=0.2
+2024-08-31 13:48:13,676 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.934e+02 2.118e+02 2.433e+02 6.228e+02, threshold=4.237e+02, percent-clipped=5.0
+2024-08-31 13:56:46,282 INFO [train.py:1114] (0/4) Epoch 18, batch 50, loss[loss=0.1862, simple_loss=0.2537, pruned_loss=0.0428, ctc_loss=0.0826, over 19716.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2786, pruned_loss=0.05368, ctc_loss=0.1016, over 845746.19 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 14:00:20,725 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.80 vs. limit=15.0
+2024-08-31 14:11:27,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=226160.0, ans=0.0
+2024-08-31 14:15:00,235 INFO [train.py:1114] (0/4) Epoch 18, batch 100, loss[loss=0.1941, simple_loss=0.2618, pruned_loss=0.04594, ctc_loss=0.0861, over 19718.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2789, pruned_loss=0.05323, ctc_loss=0.1002, over 1499054.83 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:32:19,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=226426.66666666666, ans=0.2
+2024-08-31 14:32:51,600 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.685e+02 1.949e+02 2.332e+02 3.525e+02, threshold=3.898e+02, percent-clipped=0.0
+2024-08-31 14:34:04,415 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.99 vs. limit=10.0
+2024-08-31 14:34:38,789 INFO [train.py:1114] (0/4) Epoch 18, batch 150, loss[loss=0.1976, simple_loss=0.254, pruned_loss=0.05219, ctc_loss=0.09204, over 19715.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2751, pruned_loss=0.05165, ctc_loss=0.09722, over 2028052.35 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:45:09,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-31 14:45:16,325 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-31 14:47:41,105 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.32 vs. limit=22.5
+2024-08-31 14:49:47,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=226586.66666666666, ans=0.2
+2024-08-31 14:50:22,934 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=226640.0, ans=0.125
+2024-08-31 15:05:15,394 INFO [train.py:1114] (0/4) Epoch 18, batch 200, loss[loss=0.2387, simple_loss=0.2965, pruned_loss=0.06626, ctc_loss=0.121, over 18248.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2736, pruned_loss=0.05063, ctc_loss=0.09535, over 2435754.48 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:08:17,632 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.78 vs. limit=15.0
+2024-08-31 15:15:21,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=226906.66666666666, ans=0.0
+2024-08-31 15:16:06,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=226906.66666666666, ans=0.1
+2024-08-31 15:17:44,778 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.761e+02 2.086e+02 2.524e+02 4.159e+02, threshold=4.172e+02, percent-clipped=2.0
+2024-08-31 15:17:59,750 INFO [train.py:1114] (0/4) Epoch 18, batch 250, loss[loss=0.2088, simple_loss=0.2768, pruned_loss=0.05176, ctc_loss=0.09344, over 19404.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2743, pruned_loss=0.05121, ctc_loss=0.09645, over 2755802.13 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:21:18,070 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=2.544e-03
+2024-08-31 15:22:32,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227120.0, ans=0.1
+2024-08-31 15:22:59,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=227173.33333333334, ans=0.0
+2024-08-31 15:24:04,300 INFO [train.py:1114] (0/4) Epoch 18, batch 300, loss[loss=0.232, simple_loss=0.291, pruned_loss=0.06337, ctc_loss=0.1157, over 19499.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2735, pruned_loss=0.05064, ctc_loss=0.09544, over 3000417.31 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:24:58,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=227333.33333333334, ans=0.2
+2024-08-31 15:25:05,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=227333.33333333334, ans=0.125
+2024-08-31 15:25:05,818 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=227333.33333333334, ans=0.2
+2024-08-31 15:29:14,286 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=227440.0, ans=0.0
+2024-08-31 15:29:15,823 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.57 vs. limit=15.0
+2024-08-31 15:30:29,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=227493.33333333334, ans=0.2
+2024-08-31 15:30:47,329 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.680e+02 1.932e+02 2.386e+02 3.920e+02, threshold=3.864e+02, percent-clipped=0.0
+2024-08-31 15:31:47,642 INFO [train.py:1114] (0/4) Epoch 18, batch 350, loss[loss=0.1769, simple_loss=0.2411, pruned_loss=0.04046, ctc_loss=0.07922, over 19741.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2735, pruned_loss=0.05075, ctc_loss=0.09563, over 3191008.27 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:33:40,244 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.16 vs. limit=15.0
+2024-08-31 15:33:47,376 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.40 vs. limit=15.0
+2024-08-31 15:33:50,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=227653.33333333334, ans=0.125
+2024-08-31 15:33:56,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-31 15:34:38,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=227760.0, ans=0.125
+2024-08-31 15:34:38,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=227760.0, ans=0.2
+2024-08-31 15:34:57,667 INFO [train.py:1114] (0/4) Epoch 18, batch 400, loss[loss=0.2183, simple_loss=0.282, pruned_loss=0.05565, ctc_loss=0.1082, over 19506.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2738, pruned_loss=0.05095, ctc_loss=0.09579, over 3343005.44 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:35:07,718 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=227813.33333333334, ans=0.2
+2024-08-31 15:35:16,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=227866.66666666666, ans=0.0
+2024-08-31 15:35:47,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=227866.66666666666, ans=0.125
+2024-08-31 15:36:46,136 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=227920.0, ans=0.2
+2024-08-31 15:36:46,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.23 vs. limit=22.5
+2024-08-31 15:36:48,778 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.52 vs. limit=10.0
+2024-08-31 15:37:06,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=228026.66666666666, ans=0.0
+2024-08-31 15:37:11,052 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.718e+02 1.967e+02 2.336e+02 3.401e+02, threshold=3.934e+02, percent-clipped=0.0
+2024-08-31 15:37:37,963 INFO [train.py:1114] (0/4) Epoch 18, batch 450, loss[loss=0.2167, simple_loss=0.2828, pruned_loss=0.05421, ctc_loss=0.1054, over 19611.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2735, pruned_loss=0.05084, ctc_loss=0.09578, over 3451708.44 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:39:42,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=228186.66666666666, ans=0.1
+2024-08-31 15:39:47,846 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.54 vs. limit=15.0
+2024-08-31 15:39:53,512 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=228240.0, ans=0.125
+2024-08-31 15:39:53,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228240.0, ans=0.1
+2024-08-31 15:40:18,487 INFO [train.py:1114] (0/4) Epoch 18, batch 500, loss[loss=0.1928, simple_loss=0.2659, pruned_loss=0.04374, ctc_loss=0.08065, over 19634.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2727, pruned_loss=0.0505, ctc_loss=0.09535, over 3546079.07 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:40:44,578 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=228453.33333333334, ans=0.2
+2024-08-31 15:41:10,341 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.618e+02 1.812e+02 2.329e+02 3.946e+02, threshold=3.624e+02, percent-clipped=1.0
+2024-08-31 15:41:12,865 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=228560.0, ans=0.125
+2024-08-31 15:41:15,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=228560.0, ans=0.1
+2024-08-31 15:41:17,482 INFO [train.py:1114] (0/4) Epoch 18, batch 550, loss[loss=0.2071, simple_loss=0.2797, pruned_loss=0.04888, ctc_loss=0.09156, over 19273.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2729, pruned_loss=0.05035, ctc_loss=0.09503, over 3608445.94 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:41:19,072 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:42:34,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys.whitening_limit, batch_count=228666.66666666666, ans=6.0
+2024-08-31 15:43:27,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=228720.0, ans=0.0
+2024-08-31 15:43:57,926 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=228826.66666666666, ans=0.95
+2024-08-31 15:44:18,843 INFO [train.py:1114] (0/4) Epoch 18, batch 600, loss[loss=0.2282, simple_loss=0.2872, pruned_loss=0.06148, ctc_loss=0.1154, over 19393.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.273, pruned_loss=0.05046, ctc_loss=0.09506, over 3666186.54 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:44:26,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=228880.0, ans=0.125
+2024-08-31 15:44:48,016 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.76 vs. limit=10.0
+2024-08-31 15:44:52,838 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.48 vs. limit=6.0
+2024-08-31 15:44:54,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228986.66666666666, ans=0.1
+2024-08-31 15:45:09,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=229040.0, ans=0.125
+2024-08-31 15:45:13,304 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff2.min_abs, batch_count=229040.0, ans=0.1
+2024-08-31 15:45:16,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=229040.0, ans=0.125
+2024-08-31 15:45:16,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=229040.0, ans=0.125
+2024-08-31 15:45:20,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=229040.0, ans=0.2
+2024-08-31 15:45:28,759 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.735e+02 2.092e+02 3.203e+02 5.009e+02, threshold=4.184e+02, percent-clipped=13.0
+2024-08-31 15:45:38,279 INFO [train.py:1114] (0/4) Epoch 18, batch 650, loss[loss=0.1955, simple_loss=0.2651, pruned_loss=0.04564, ctc_loss=0.08656, over 19762.00 frames. ], tot_loss[loss=0.205, simple_loss=0.272, pruned_loss=0.05012, ctc_loss=0.09435, over 3716392.84 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:46:15,092 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.62 vs. limit=15.0
+2024-08-31 15:46:18,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=229146.66666666666, ans=0.025
+2024-08-31 15:46:20,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229146.66666666666, ans=0.1
+2024-08-31 15:46:30,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=229200.0, ans=0.0
+2024-08-31 15:46:46,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=229253.33333333334, ans=0.125
+2024-08-31 15:47:04,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=229360.0, ans=0.125
+2024-08-31 15:47:16,563 INFO [train.py:1114] (0/4) Epoch 18, batch 700, loss[loss=0.2004, simple_loss=0.2642, pruned_loss=0.05098, ctc_loss=0.08681, over 19714.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2725, pruned_loss=0.0503, ctc_loss=0.09465, over 3749192.59 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:47:21,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=229413.33333333334, ans=0.125
+2024-08-31 15:47:25,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=229413.33333333334, ans=0.125
+2024-08-31 15:47:25,901 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.12 vs. limit=15.0
+2024-08-31 15:47:31,884 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.55 vs. limit=15.0
+2024-08-31 15:47:54,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229573.33333333334, ans=0.1
+2024-08-31 15:48:10,575 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.672e+02 1.935e+02 2.401e+02 4.868e+02, threshold=3.870e+02, percent-clipped=1.0
+2024-08-31 15:48:16,523 INFO [train.py:1114] (0/4) Epoch 18, batch 750, loss[loss=0.2233, simple_loss=0.2905, pruned_loss=0.05572, ctc_loss=0.112, over 19502.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2726, pruned_loss=0.05037, ctc_loss=0.09477, over 3774959.33 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:48:22,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=229680.0, ans=0.2
+2024-08-31 15:48:27,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=229733.33333333334, ans=0.125
+2024-08-31 15:48:31,085 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
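+
+The `WithLoss` lines attach an auxiliary penalty directly to a tensor (here attention weights) and report its current value; `loss-sum=0.000e+00` means the penalty is inactive on this batch. A hedged sketch of the underlying trick, an autograd function that is the identity in the forward pass but injects an extra gradient in the backward pass; the actual penalty and bookkeeping in the logged code differ:
+
+```python
+import torch
+
+class WithLoss(torch.autograd.Function):
+    """Identity in forward; adds a precomputed penalty gradient in backward."""
+
+    @staticmethod
+    def forward(ctx, x, aux_grad):
+        ctx.save_for_backward(aux_grad)
+        return x.view_as(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        (aux_grad,) = ctx.saved_tensors
+        return grad_output + aux_grad, None
+
+def penalize_above(x: torch.Tensor, limit: float, scale: float = 1.0):
+    """Penalize |x| > limit without changing the forward value of x."""
+    with torch.enable_grad():
+        xd = x.detach().requires_grad_(True)
+        aux_loss = scale * (xd.abs() - limit).clamp(min=0.0).sum()
+        (aux_grad,) = torch.autograd.grad(aux_loss, xd)
+    return WithLoss.apply(x, aux_grad), aux_loss.item()  # item() ~ loss-sum
+
+x = (torch.randn(4, 4) * 3).requires_grad_(True)
+y, loss_sum = penalize_above(x, limit=1.0)
+print("loss-sum =", loss_sum)  # 0.0 only if every |x| <= limit
+y.sum().backward()             # x.grad now includes the penalty term
+```
+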
+2024-08-31 15:49:02,461 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.67 vs. limit=15.0
+2024-08-31 15:49:28,024 INFO [train.py:1114] (0/4) Epoch 18, batch 800, loss[loss=0.1859, simple_loss=0.2494, pruned_loss=0.04468, ctc_loss=0.08261, over 19807.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2725, pruned_loss=0.05037, ctc_loss=0.09481, over 3796787.06 frames. ], batch size: 49, lr: 8.37e-03, grad_scale: 32.0
+2024-08-31 15:49:32,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=229946.66666666666, ans=0.0
+2024-08-31 15:49:49,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=230000.0, ans=0.125
+2024-08-31 15:50:27,782 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.682e+02 1.957e+02 2.333e+02 3.697e+02, threshold=3.913e+02, percent-clipped=0.0
+2024-08-31 15:50:33,684 INFO [train.py:1114] (0/4) Epoch 18, batch 850, loss[loss=0.2076, simple_loss=0.2823, pruned_loss=0.04736, ctc_loss=0.09556, over 19633.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2727, pruned_loss=0.05065, ctc_loss=0.09541, over 3815142.45 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:51:21,846 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.80 vs. limit=10.0
+2024-08-31 15:51:30,193 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=230266.66666666666, ans=0.2
+2024-08-31 15:51:59,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=230373.33333333334, ans=0.125
+2024-08-31 15:52:12,855 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.91 vs. limit=6.0
+2024-08-31 15:52:15,924 INFO [train.py:1114] (0/4) Epoch 18, batch 900, loss[loss=0.204, simple_loss=0.2645, pruned_loss=0.05281, ctc_loss=0.09467, over 19425.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2728, pruned_loss=0.05062, ctc_loss=0.0954, over 3819486.86 frames. ], batch size: 48, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:52:24,589 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.51 vs. limit=15.0
+2024-08-31 15:52:25,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=230480.0, ans=0.125
+2024-08-31 15:52:46,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=230586.66666666666, ans=0.125
+2024-08-31 15:53:00,241 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=230640.0, ans=0.0
+2024-08-31 15:53:01,304 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=230640.0, ans=0.0
+2024-08-31 15:53:02,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=230640.0, ans=0.2
+2024-08-31 15:53:02,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=230640.0, ans=0.125
+2024-08-31 15:53:12,022 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.645e+02 1.872e+02 2.411e+02 3.930e+02, threshold=3.745e+02, percent-clipped=1.0
+2024-08-31 15:53:46,102 INFO [train.py:1114] (0/4) Epoch 18, batch 950, loss[loss=0.2076, simple_loss=0.2671, pruned_loss=0.05377, ctc_loss=0.1015, over 19484.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2732, pruned_loss=0.05056, ctc_loss=0.09528, over 3820910.45 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:53:47,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.86 vs. limit=15.0
+2024-08-31 15:53:56,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=230746.66666666666, ans=0.2
+2024-08-31 15:54:22,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=230906.66666666666, ans=0.125
+2024-08-31 15:54:28,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=230906.66666666666, ans=0.07
+2024-08-31 15:54:38,867 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=230960.0, ans=0.5
+2024-08-31 15:54:43,829 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=230960.0, ans=0.2
+2024-08-31 15:54:48,308 INFO [train.py:1114] (0/4) Epoch 18, batch 1000, loss[loss=0.1981, simple_loss=0.2647, pruned_loss=0.04651, ctc_loss=0.09609, over 19851.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2739, pruned_loss=0.05078, ctc_loss=0.09595, over 3815875.19 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:54:56,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=231013.33333333334, ans=0.0
+2024-08-31 15:54:59,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=231013.33333333334, ans=0.125
+2024-08-31 15:55:15,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=231120.0, ans=0.125
+2024-08-31 15:55:18,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=231120.0, ans=0.0
+2024-08-31 15:55:19,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=231120.0, ans=0.0
+2024-08-31 15:55:33,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231120.0, ans=0.1
+2024-08-31 15:55:54,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=231226.66666666666, ans=0.025
+2024-08-31 15:55:55,133 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.660e+02 1.836e+02 2.172e+02 3.389e+02, threshold=3.673e+02, percent-clipped=0.0
+2024-08-31 15:56:01,084 INFO [train.py:1114] (0/4) Epoch 18, batch 1050, loss[loss=0.2183, simple_loss=0.2904, pruned_loss=0.0534, ctc_loss=0.09846, over 19852.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2733, pruned_loss=0.0507, ctc_loss=0.09563, over 3822177.37 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:56:05,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=231280.0, ans=0.5
+2024-08-31 15:56:37,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=231440.0, ans=0.0
+2024-08-31 15:56:49,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=231493.33333333334, ans=0.125
+2024-08-31 15:56:59,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231493.33333333334, ans=0.1
+2024-08-31 15:57:01,157 INFO [train.py:1114] (0/4) Epoch 18, batch 1100, loss[loss=0.2016, simple_loss=0.2716, pruned_loss=0.04796, ctc_loss=0.08936, over 19596.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2727, pruned_loss=0.05044, ctc_loss=0.0952, over 3829672.02 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:57:05,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=231546.66666666666, ans=0.0
+2024-08-31 15:57:13,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=231546.66666666666, ans=0.125
+2024-08-31 15:57:22,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=231600.0, ans=0.125
+2024-08-31 15:57:36,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=231653.33333333334, ans=0.125
+2024-08-31 15:57:56,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=231760.0, ans=0.125
+2024-08-31 15:57:58,311 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.608e+02 1.860e+02 2.284e+02 4.941e+02, threshold=3.719e+02, percent-clipped=1.0
+2024-08-31 15:58:04,201 INFO [train.py:1114] (0/4) Epoch 18, batch 1150, loss[loss=0.195, simple_loss=0.2622, pruned_loss=0.0462, ctc_loss=0.08858, over 19608.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2731, pruned_loss=0.05076, ctc_loss=0.09602, over 3827298.87 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:58:10,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231813.33333333334, ans=0.1
+2024-08-31 15:58:10,938 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.96 vs. limit=15.0
+2024-08-31 15:58:17,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231813.33333333334, ans=0.1
+2024-08-31 15:58:23,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231866.66666666666, ans=0.1
+2024-08-31 15:58:31,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=231866.66666666666, ans=0.2
+2024-08-31 15:58:32,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=231866.66666666666, ans=0.2
+2024-08-31 15:58:41,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=231920.0, ans=0.125
+2024-08-31 15:58:50,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=231920.0, ans=0.125
+2024-08-31 15:59:17,228 INFO [train.py:1114] (0/4) Epoch 18, batch 1200, loss[loss=0.2195, simple_loss=0.2913, pruned_loss=0.05367, ctc_loss=0.1011, over 19837.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2745, pruned_loss=0.05132, ctc_loss=0.09701, over 3824043.11 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:59:24,296 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.33 vs. limit=10.0
+2024-08-31 15:59:58,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=232240.0, ans=15.0
+2024-08-31 16:00:02,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=232240.0, ans=0.125
+2024-08-31 16:00:07,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232293.33333333334, ans=0.1
+2024-08-31 16:00:07,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=232293.33333333334, ans=0.07
+2024-08-31 16:00:12,193 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.681e+02 1.869e+02 2.236e+02 3.755e+02, threshold=3.738e+02, percent-clipped=1.0
+2024-08-31 16:00:18,281 INFO [train.py:1114] (0/4) Epoch 18, batch 1250, loss[loss=0.2426, simple_loss=0.296, pruned_loss=0.06858, ctc_loss=0.1301, over 19507.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2754, pruned_loss=0.05167, ctc_loss=0.0976, over 3842402.06 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:00:25,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=232346.66666666666, ans=0.2
+2024-08-31 16:00:40,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=232400.0, ans=0.025
+2024-08-31 16:01:12,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=232560.0, ans=0.025
+2024-08-31 16:01:16,119 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=232560.0, ans=0.125
+2024-08-31 16:01:22,409 INFO [train.py:1114] (0/4) Epoch 18, batch 1300, loss[loss=0.2198, simple_loss=0.2854, pruned_loss=0.05603, ctc_loss=0.1054, over 18824.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2739, pruned_loss=0.05103, ctc_loss=0.09614, over 3845983.72 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:01:34,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=232666.66666666666, ans=0.2
+2024-08-31 16:01:41,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=232666.66666666666, ans=0.125
+2024-08-31 16:01:50,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=232720.0, ans=10.0
+2024-08-31 16:01:51,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=232720.0, ans=0.125
+2024-08-31 16:02:01,861 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:02:20,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=232826.66666666666, ans=0.125
+2024-08-31 16:02:21,663 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 1.758e+02 2.176e+02 2.645e+02 4.342e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-31 16:02:27,589 INFO [train.py:1114] (0/4) Epoch 18, batch 1350, loss[loss=0.2034, simple_loss=0.2618, pruned_loss=0.05271, ctc_loss=0.09872, over 19761.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2732, pruned_loss=0.05071, ctc_loss=0.09531, over 3857349.73 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:02:47,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=232933.33333333334, ans=0.125
+2024-08-31 16:03:11,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=233040.0, ans=0.2
+2024-08-31 16:03:27,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=233093.33333333334, ans=0.0
+2024-08-31 16:03:29,589 INFO [train.py:1114] (0/4) Epoch 18, batch 1400, loss[loss=0.1968, simple_loss=0.2528, pruned_loss=0.05174, ctc_loss=0.09347, over 19657.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2735, pruned_loss=0.05097, ctc_loss=0.09583, over 3864164.22 frames. ], batch size: 46, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:03:40,440 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.22 vs. limit=22.5
+2024-08-31 16:04:10,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=233253.33333333334, ans=0.05
+2024-08-31 16:04:12,260 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.06 vs. limit=6.0
+2024-08-31 16:04:18,679 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.30 vs. limit=15.0
+2024-08-31 16:04:36,286 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.655e+02 1.916e+02 2.338e+02 3.956e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-31 16:04:42,278 INFO [train.py:1114] (0/4) Epoch 18, batch 1450, loss[loss=0.2014, simple_loss=0.2796, pruned_loss=0.04547, ctc_loss=0.08067, over 19668.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2736, pruned_loss=0.05086, ctc_loss=0.09551, over 3861721.28 frames. ], batch size: 63, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:05:13,169 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.45 vs. limit=15.0
+2024-08-31 16:05:25,173 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.32 vs. limit=15.0
+2024-08-31 16:05:42,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=233626.66666666666, ans=0.125
+2024-08-31 16:05:48,801 INFO [train.py:1114] (0/4) Epoch 18, batch 1500, loss[loss=0.2056, simple_loss=0.2747, pruned_loss=0.04873, ctc_loss=0.09765, over 19584.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2742, pruned_loss=0.05114, ctc_loss=0.09614, over 3861790.55 frames. ], batch size: 57, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:05:49,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=233680.0, ans=0.0
+2024-08-31 16:05:54,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=233680.0, ans=0.0
+2024-08-31 16:06:04,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=233733.33333333334, ans=0.1
+2024-08-31 16:06:05,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=233733.33333333334, ans=0.1
+2024-08-31 16:06:35,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=233840.0, ans=0.125
+2024-08-31 16:06:50,189 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.669e+02 1.866e+02 2.355e+02 3.552e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-31 16:07:06,039 INFO [train.py:1114] (0/4) Epoch 18, batch 1550, loss[loss=0.229, simple_loss=0.294, pruned_loss=0.06025, ctc_loss=0.1087, over 19603.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2741, pruned_loss=0.05141, ctc_loss=0.09686, over 3845633.94 frames. ], batch size: 60, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:07:10,022 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=233946.66666666666, ans=0.125
+2024-08-31 16:07:21,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=234000.0, ans=0.2
+2024-08-31 16:07:27,608 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.06 vs. limit=15.0
+2024-08-31 16:07:36,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=234053.33333333334, ans=0.1
+2024-08-31 16:08:07,316 INFO [train.py:1114] (0/4) Epoch 18, batch 1600, loss[loss=0.2593, simple_loss=0.3127, pruned_loss=0.07495, ctc_loss=0.1402, over 19848.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.274, pruned_loss=0.05127, ctc_loss=0.09655, over 3835104.16 frames. ], batch size: 57, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:08:09,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=234213.33333333334, ans=0.125
+2024-08-31 16:08:19,432 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.28 vs. limit=22.5
+2024-08-31 16:08:27,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=234266.66666666666, ans=0.0
+2024-08-31 16:08:39,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=234320.0, ans=0.2
+2024-08-31 16:09:20,624 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.787e+02 2.153e+02 2.672e+02 5.491e+02, threshold=4.305e+02, percent-clipped=8.0
+2024-08-31 16:09:23,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=234426.66666666666, ans=0.025
+2024-08-31 16:09:25,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=234480.0, ans=0.125
+2024-08-31 16:09:26,612 INFO [train.py:1114] (0/4) Epoch 18, batch 1650, loss[loss=0.1976, simple_loss=0.2797, pruned_loss=0.04086, ctc_loss=0.08446, over 19673.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2733, pruned_loss=0.05092, ctc_loss=0.09615, over 3832693.46 frames. ], batch size: 59, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:09:29,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=234480.0, ans=0.0
+2024-08-31 16:12:47,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=234533.33333333334, ans=0.1
+2024-08-31 16:13:05,579 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=234533.33333333334, ans=0.125
+2024-08-31 16:13:42,415 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-44000.pt
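+
+`checkpoint.py:75` periodically saves `checkpoint-<batch>.pt` into the experiment directory (and `epoch-<n>.pt` at epoch boundaries, as later in this log). A minimal sketch of what such a checkpoint typically bundles so training can resume; the dictionary keys are illustrative, not the logged code's schema:
+
+```python
+import torch
+
+def save_checkpoint(path, model, optimizer, scheduler=None, scaler=None,
+                    epoch=0, batch_idx_train=0):
+    """Bundle the state needed to resume training (illustrative keys)."""
+    torch.save(
+        {
+            "model": model.state_dict(),
+            "optimizer": optimizer.state_dict(),
+            "scheduler": scheduler.state_dict() if scheduler else None,
+            "grad_scaler": scaler.state_dict() if scaler else None,
+            "epoch": epoch,
+            "batch_idx_train": batch_idx_train,
+        },
+        path,
+    )
+
+# e.g. save_checkpoint("exp/checkpoint-44000.pt", model, opt,
+#                      epoch=18, batch_idx_train=44000)
+```
+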
+2024-08-31 16:14:08,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=234693.33333333334, ans=0.125
+2024-08-31 16:14:13,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=234693.33333333334, ans=0.1
+2024-08-31 16:14:15,749 INFO [train.py:1114] (0/4) Epoch 18, batch 1700, loss[loss=0.1767, simple_loss=0.2427, pruned_loss=0.04058, ctc_loss=0.0741, over 19685.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2732, pruned_loss=0.05075, ctc_loss=0.09573, over 3846869.25 frames. ], batch size: 46, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:14:17,324 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=234746.66666666666, ans=0.04949747468305833
+2024-08-31 16:14:40,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=234853.33333333334, ans=0.125
+2024-08-31 16:14:41,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=234853.33333333334, ans=0.125
+2024-08-31 16:14:48,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=234853.33333333334, ans=0.125
+2024-08-31 16:14:51,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=234906.66666666666, ans=0.05
+2024-08-31 16:14:52,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=234906.66666666666, ans=0.09899494936611666
+2024-08-31 16:14:54,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=234906.66666666666, ans=0.125
+2024-08-31 16:15:07,767 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.694e+02 2.038e+02 2.484e+02 5.869e+02, threshold=4.076e+02, percent-clipped=3.0
+2024-08-31 16:15:13,540 INFO [train.py:1114] (0/4) Epoch 18, batch 1750, loss[loss=0.194, simple_loss=0.2542, pruned_loss=0.04847, ctc_loss=0.09223, over 19659.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2727, pruned_loss=0.05061, ctc_loss=0.09556, over 3850963.92 frames. ], batch size: 45, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:15:40,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=235066.66666666666, ans=0.0
+2024-08-31 16:16:11,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=235226.66666666666, ans=0.125
+2024-08-31 16:16:18,926 INFO [train.py:1114] (0/4) Epoch 18, batch 1800, loss[loss=0.2069, simple_loss=0.279, pruned_loss=0.0482, ctc_loss=0.0961, over 19610.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.273, pruned_loss=0.05071, ctc_loss=0.09551, over 3852616.49 frames. ], batch size: 55, lr: 8.27e-03, grad_scale: 32.0
+2024-08-31 16:16:43,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=235386.66666666666, ans=0.125
+2024-08-31 16:16:45,032 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.27 vs. limit=10.0
+2024-08-31 16:16:52,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=235440.0, ans=0.0
+2024-08-31 16:16:58,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=235440.0, ans=0.2
+2024-08-31 16:17:12,080 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.739e+02 2.099e+02 2.606e+02 4.220e+02, threshold=4.197e+02, percent-clipped=1.0
+2024-08-31 16:17:16,671 INFO [train.py:1114] (0/4) Epoch 18, batch 1850, loss[loss=0.2314, simple_loss=0.296, pruned_loss=0.06048, ctc_loss=0.1147, over 19590.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2735, pruned_loss=0.05093, ctc_loss=0.09598, over 3855800.83 frames. ], batch size: 57, lr: 8.27e-03, grad_scale: 16.0
+2024-08-31 16:17:16,796 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=235546.66666666666, ans=0.025
+2024-08-31 16:17:37,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=235600.0, ans=0.125
+2024-08-31 16:17:42,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=235600.0, ans=0.0
+2024-08-31 16:17:45,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=235653.33333333334, ans=0.125
+2024-08-31 16:17:51,489 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.41 vs. limit=15.0
+2024-08-31 16:17:58,197 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=235706.66666666666, ans=0.0
+2024-08-31 16:18:12,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=235760.0, ans=0.0
+2024-08-31 16:18:21,114 INFO [train.py:1114] (0/4) Epoch 18, batch 1900, loss[loss=0.197, simple_loss=0.2778, pruned_loss=0.04149, ctc_loss=0.08285, over 19649.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2738, pruned_loss=0.05091, ctc_loss=0.09572, over 3860635.93 frames. ], batch size: 59, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:18:39,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=235866.66666666666, ans=0.05
+2024-08-31 16:18:47,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=235920.0, ans=0.125
+2024-08-31 16:19:14,240 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.296e+02 1.623e+02 1.837e+02 2.195e+02 5.135e+02, threshold=3.673e+02, percent-clipped=2.0
+2024-08-31 16:19:18,758 INFO [train.py:1114] (0/4) Epoch 18, batch 1950, loss[loss=0.2206, simple_loss=0.2807, pruned_loss=0.05795, ctc_loss=0.1115, over 19579.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2747, pruned_loss=0.05108, ctc_loss=0.09583, over 3870189.79 frames. ], batch size: 52, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:20:22,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=236133.33333333334, ans=0.0
+2024-08-31 16:20:22,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=236133.33333333334, ans=0.125
+2024-08-31 16:20:41,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=236186.66666666666, ans=0.0
+2024-08-31 16:21:12,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=236293.33333333334, ans=0.125
+2024-08-31 16:21:21,680 INFO [train.py:1114] (0/4) Epoch 18, batch 2000, loss[loss=0.1879, simple_loss=0.2471, pruned_loss=0.04608, ctc_loss=0.09141, over 19655.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2751, pruned_loss=0.05115, ctc_loss=0.0962, over 3855253.43 frames. ], batch size: 45, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:21:32,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=236400.0, ans=0.125
+2024-08-31 16:21:54,730 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.39 vs. limit=15.0
+2024-08-31 16:21:58,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=236506.66666666666, ans=0.125
+2024-08-31 16:22:01,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=236506.66666666666, ans=0.125
+2024-08-31 16:22:06,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=236560.0, ans=0.1
+2024-08-31 16:22:09,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=236560.0, ans=0.0
+2024-08-31 16:22:14,726 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.400e+02 1.704e+02 2.096e+02 2.751e+02 4.638e+02, threshold=4.193e+02, percent-clipped=6.0
+2024-08-31 16:22:16,324 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.12 vs. limit=12.0
+2024-08-31 16:22:19,160 INFO [train.py:1114] (0/4) Epoch 18, batch 2050, loss[loss=0.1735, simple_loss=0.2419, pruned_loss=0.03802, ctc_loss=0.07251, over 19732.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2741, pruned_loss=0.05116, ctc_loss=0.09626, over 3850587.51 frames. ], batch size: 47, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:22:19,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=236613.33333333334, ans=0.0
+2024-08-31 16:22:48,820 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.94 vs. limit=22.5
+2024-08-31 16:23:16,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=236826.66666666666, ans=0.125
+2024-08-31 16:23:16,930 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=236826.66666666666, ans=0.0
+2024-08-31 16:23:21,344 INFO [train.py:1114] (0/4) Epoch 18, batch 2100, loss[loss=0.2001, simple_loss=0.2706, pruned_loss=0.04705, ctc_loss=0.08882, over 19774.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2741, pruned_loss=0.05122, ctc_loss=0.09644, over 3858150.81 frames. ], batch size: 54, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:23:27,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=236880.0, ans=0.125
+2024-08-31 16:23:30,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=236880.0, ans=0.1
+2024-08-31 16:23:43,007 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.37 vs. limit=10.0
+2024-08-31 16:23:57,513 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.79 vs. limit=6.0
+2024-08-31 16:24:07,426 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.31 vs. limit=15.0
+2024-08-31 16:24:08,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=237040.0, ans=0.125
+2024-08-31 16:24:13,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=237093.33333333334, ans=0.125
+2024-08-31 16:24:16,392 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.88 vs. limit=6.0
+2024-08-31 16:24:25,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=237093.33333333334, ans=0.125
+2024-08-31 16:24:27,117 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.628e+02 1.802e+02 2.351e+02 4.404e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 16:24:27,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=237093.33333333334, ans=0.0
+2024-08-31 16:24:31,675 INFO [train.py:1114] (0/4) Epoch 18, batch 2150, loss[loss=0.1928, simple_loss=0.2653, pruned_loss=0.0439, ctc_loss=0.08122, over 19594.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2728, pruned_loss=0.05048, ctc_loss=0.09506, over 3868534.95 frames. ], batch size: 52, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:24:41,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=237146.66666666666, ans=0.125
+2024-08-31 16:24:42,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=237146.66666666666, ans=0.1
+2024-08-31 16:24:46,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.02 vs. limit=15.0
+2024-08-31 16:24:49,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=237200.0, ans=0.07
+2024-08-31 16:25:03,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=237253.33333333334, ans=0.125
+2024-08-31 16:25:31,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=237360.0, ans=0.125
+2024-08-31 16:25:37,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=237360.0, ans=0.025
+2024-08-31 16:25:37,239 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.76 vs. limit=6.0
+2024-08-31 16:25:40,257 INFO [train.py:1114] (0/4) Epoch 18, batch 2200, loss[loss=0.212, simple_loss=0.2745, pruned_loss=0.05391, ctc_loss=0.1042, over 19586.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2727, pruned_loss=0.05028, ctc_loss=0.09476, over 3867468.35 frames. ], batch size: 57, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:25:49,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=237413.33333333334, ans=0.125
+2024-08-31 16:25:50,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=237413.33333333334, ans=0.125
+2024-08-31 16:26:33,721 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.652e+02 1.938e+02 2.493e+02 4.901e+02, threshold=3.877e+02, percent-clipped=6.0
+2024-08-31 16:26:33,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=237626.66666666666, ans=0.0
+2024-08-31 16:26:38,344 INFO [train.py:1114] (0/4) Epoch 18, batch 2250, loss[loss=0.205, simple_loss=0.2827, pruned_loss=0.04618, ctc_loss=0.08718, over 19603.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2735, pruned_loss=0.05069, ctc_loss=0.09536, over 3867383.66 frames. ], batch size: 55, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:27:19,095 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:27:48,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.94 vs. limit=6.0
+2024-08-31 16:27:50,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.04 vs. limit=15.0
+2024-08-31 16:27:51,886 INFO [train.py:1114] (0/4) Epoch 18, batch 2300, loss[loss=0.1914, simple_loss=0.2602, pruned_loss=0.04518, ctc_loss=0.08068, over 19496.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2724, pruned_loss=0.05051, ctc_loss=0.09497, over 3860900.49 frames. ], batch size: 49, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:27:52,046 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:28:05,935 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.06 vs. limit=15.0
+2024-08-31 16:28:13,638 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:28:47,463 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.696e+02 1.848e+02 2.393e+02 3.836e+02, threshold=3.696e+02, percent-clipped=0.0
+2024-08-31 16:29:07,662 INFO [train.py:1114] (0/4) Epoch 18, batch 2350, loss[loss=0.2186, simple_loss=0.2904, pruned_loss=0.05367, ctc_loss=0.09878, over 19647.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2723, pruned_loss=0.0504, ctc_loss=0.09478, over 3863844.14 frames. ], batch size: 63, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:29:23,195 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.97 vs. limit=15.0
+2024-08-31 16:29:27,500 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=3.89 vs. limit=12.0
+2024-08-31 16:29:31,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=238320.0, ans=0.2
+2024-08-31 16:29:42,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=238373.33333333334, ans=0.1
+2024-08-31 16:30:24,981 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.89 vs. limit=12.0
+2024-08-31 16:30:28,643 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.21 vs. limit=10.0
+2024-08-31 16:30:38,501 INFO [train.py:1114] (0/4) Epoch 18, batch 2400, loss[loss=0.2092, simple_loss=0.2786, pruned_loss=0.05087, ctc_loss=0.09501, over 19382.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2748, pruned_loss=0.05163, ctc_loss=0.09683, over 3858008.55 frames. ], batch size: 67, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:30:59,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=238533.33333333334, ans=0.125
+2024-08-31 16:31:06,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=238586.66666666666, ans=0.125
+2024-08-31 16:31:20,275 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.33 vs. limit=22.5
+2024-08-31 16:31:23,649 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.57 vs. limit=10.0
+2024-08-31 16:31:24,429 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=238640.0, ans=0.125
+2024-08-31 16:31:25,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=238640.0, ans=0.125
+2024-08-31 16:31:39,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=238693.33333333334, ans=0.125
+2024-08-31 16:31:40,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=238693.33333333334, ans=0.2
+2024-08-31 16:31:47,476 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 1.682e+02 1.835e+02 2.125e+02 4.662e+02, threshold=3.671e+02, percent-clipped=5.0
+2024-08-31 16:31:52,089 INFO [train.py:1114] (0/4) Epoch 18, batch 2450, loss[loss=0.2604, simple_loss=0.3038, pruned_loss=0.07831, ctc_loss=0.1507, over 13211.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2782, pruned_loss=0.05401, ctc_loss=0.1017, over 3733992.69 frames. ], batch size: 140, lr: 8.21e-03, grad_scale: 32.0
+2024-08-31 16:32:29,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=238906.66666666666, ans=0.0
+2024-08-31 16:32:31,927 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.48 vs. limit=6.0
+2024-08-31 16:32:40,065 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-18.pt
+2024-08-31 16:33:43,925 INFO [train.py:1114] (0/4) Epoch 19, batch 0, loss[loss=0.2257, simple_loss=0.2816, pruned_loss=0.06286, ctc_loss=0.1104, over 19421.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2816, pruned_loss=0.06286, ctc_loss=0.1104, over 19421.00 frames. ], batch size: 48, lr: 7.99e-03, grad_scale: 32.0
+2024-08-31 16:33:43,926 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-31 16:33:51,182 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.0565, 2.2308, 2.9810, 3.4345], device='cuda:0')
+2024-08-31 16:34:00,545 INFO [train.py:1146] (0/4) Epoch 19, validation: loss=0.1846, simple_loss=0.2728, pruned_loss=0.03584, ctc_loss=0.06159, over 944034.00 frames.
+2024-08-31 16:34:01,380 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13675MB
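+
+At each epoch boundary the trainer runs a validation pass: it computes frame-weighted losses over the held-out set (944034 frames here), logs diagnostics such as the entropy of attention weights, and reports peak CUDA memory. A compact sketch of such a pass; the model is assumed to return a dict of scalar losses, and the batch field names are placeholders:
+
+```python
+import torch
+
+@torch.no_grad()
+def compute_validation_loss(model, valid_loader, device="cuda"):
+    """Frame-weighted validation losses plus a peak-memory report (sketch)."""
+    model.eval()
+    tot = {"loss": 0.0, "ctc_loss": 0.0}
+    tot_frames = 0.0
+    for batch in valid_loader:
+        feats = batch["features"].to(device)   # placeholder field names
+        num_frames = batch["num_frames"]
+        losses = model(feats)                  # assumed: dict of scalars
+        for k in tot:
+            tot[k] += losses[k].item() * num_frames
+        tot_frames += num_frames
+    model.train()
+    avg = {k: v / tot_frames for k, v in tot.items()}
+    mem_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
+    print(f"validation: {avg}, over {tot_frames:.2f} frames.")
+    print(f"Maximum memory allocated so far is {mem_mb}MB")
+    return avg
+```
+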
+2024-08-31 16:34:02,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=238954.66666666666, ans=0.125
+2024-08-31 16:34:17,502 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.69 vs. limit=15.0
+2024-08-31 16:34:28,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=239061.33333333334, ans=0.0
+2024-08-31 16:34:43,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=239114.66666666666, ans=0.125
+2024-08-31 16:34:48,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=239114.66666666666, ans=0.0
+2024-08-31 16:35:00,300 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.98 vs. limit=10.0
+2024-08-31 16:35:04,411 INFO [train.py:1114] (0/4) Epoch 19, batch 50, loss[loss=0.1963, simple_loss=0.2596, pruned_loss=0.04818, ctc_loss=0.09174, over 19679.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2756, pruned_loss=0.05164, ctc_loss=0.09704, over 845033.20 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
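+
+Note the learning-rate trajectory: it decays gently within epoch 18 (8.38e-03 down to 8.21e-03) and then steps down to 7.99e-03 as epoch 19 begins, so the schedule depends on both the global batch index and the epoch. A sketch in the spirit of an Eden-style scheduler; `base_lr`, `lr_batches`, and `lr_epochs` below are illustrative values, not this run's settings:
+
+```python
+def eden_lr(base_lr: float, batch: int, epoch: float,
+            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
+    """Eden-style schedule: smooth power-law decay in both the global
+    batch index and the epoch (constants here are illustrative)."""
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+
+# Later batches shrink the LR a little with every step; stepping the
+# epoch shrinks it again, hence 8.38e-03 early in epoch 18 but
+# 7.99e-03 at epoch 19, batch 0.
+for batch, epoch in [(40000, 18), (44000, 18), (44100, 19)]:
+    print(f"epoch {epoch}, batch {batch}: lr = "
+          f"{eden_lr(0.045, batch, epoch):.2e}")
+```
+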
+2024-08-31 16:35:12,503 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.795e+02 2.006e+02 2.342e+02 4.821e+02, threshold=4.012e+02, percent-clipped=4.0
+2024-08-31 16:35:13,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=239221.33333333334, ans=0.0
+2024-08-31 16:35:32,064 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.40 vs. limit=10.0
+2024-08-31 16:35:38,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=239381.33333333334, ans=0.025
+2024-08-31 16:35:41,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-31 16:35:45,404 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.66 vs. limit=6.0
+2024-08-31 16:35:48,460 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-31 16:35:53,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=239434.66666666666, ans=0.1
+2024-08-31 16:35:54,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=239434.66666666666, ans=0.125
+2024-08-31 16:36:02,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=239488.0, ans=0.025
+2024-08-31 16:36:03,810 INFO [train.py:1114] (0/4) Epoch 19, batch 100, loss[loss=0.1759, simple_loss=0.2497, pruned_loss=0.03689, ctc_loss=0.07089, over 19728.00 frames. ], tot_loss[loss=0.21, simple_loss=0.277, pruned_loss=0.05188, ctc_loss=0.09826, over 1498716.65 frames. ], batch size: 51, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:36:14,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=239488.0, ans=0.0
+2024-08-31 16:36:46,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=239648.0, ans=0.0
+2024-08-31 16:36:51,154 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.12 vs. limit=10.0
+2024-08-31 16:37:06,676 INFO [train.py:1114] (0/4) Epoch 19, batch 150, loss[loss=0.1766, simple_loss=0.2442, pruned_loss=0.03942, ctc_loss=0.07549, over 19754.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2736, pruned_loss=0.05025, ctc_loss=0.09537, over 2027546.02 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:37:10,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=239754.66666666666, ans=0.125
+2024-08-31 16:37:10,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=239754.66666666666, ans=0.09899494936611666
+2024-08-31 16:37:15,235 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.762e+02 1.953e+02 2.445e+02 3.524e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-31 16:37:17,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=239808.0, ans=0.125
+2024-08-31 16:37:25,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=239808.0, ans=0.1
+2024-08-31 16:37:29,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=239808.0, ans=0.125
+2024-08-31 16:37:43,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=239914.66666666666, ans=0.0
+2024-08-31 16:37:56,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=239968.0, ans=0.025
+2024-08-31 16:38:07,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=239968.0, ans=0.025
+2024-08-31 16:38:09,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=239968.0, ans=0.125
+2024-08-31 16:38:14,087 INFO [train.py:1114] (0/4) Epoch 19, batch 200, loss[loss=0.2108, simple_loss=0.2765, pruned_loss=0.05182, ctc_loss=0.1036, over 18444.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.271, pruned_loss=0.04897, ctc_loss=0.09241, over 2435779.19 frames. ], batch size: 85, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:38:19,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=240021.33333333334, ans=0.125
+2024-08-31 16:38:19,498 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.96 vs. limit=22.5
+2024-08-31 16:38:20,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240021.33333333334, ans=0.1
+2024-08-31 16:38:27,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=240074.66666666666, ans=0.07
+2024-08-31 16:38:36,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=240128.0, ans=0.2
+2024-08-31 16:38:39,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=240128.0, ans=0.0
+2024-08-31 16:38:44,924 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer_ff2.min_abs, batch_count=240128.0, ans=0.1
+2024-08-31 16:38:53,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=240181.33333333334, ans=0.125
+2024-08-31 16:39:01,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=240234.66666666666, ans=0.125
+2024-08-31 16:39:13,535 INFO [train.py:1114] (0/4) Epoch 19, batch 250, loss[loss=0.217, simple_loss=0.2828, pruned_loss=0.0553, ctc_loss=0.1015, over 19356.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2706, pruned_loss=0.04899, ctc_loss=0.0924, over 2756817.04 frames. ], batch size: 67, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:39:27,155 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 1.733e+02 2.186e+02 2.853e+02 4.755e+02, threshold=4.372e+02, percent-clipped=7.0
+2024-08-31 16:40:01,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=240448.0, ans=0.125
+2024-08-31 16:40:06,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=240448.0, ans=0.125
+2024-08-31 16:40:09,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=240501.33333333334, ans=0.125
+2024-08-31 16:40:11,590 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.00 vs. limit=15.0
+2024-08-31 16:40:12,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=240501.33333333334, ans=0.125
+2024-08-31 16:40:19,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=240554.66666666666, ans=0.125
+2024-08-31 16:40:20,379 INFO [train.py:1114] (0/4) Epoch 19, batch 300, loss[loss=0.2278, simple_loss=0.2955, pruned_loss=0.05849, ctc_loss=0.1078, over 19482.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2703, pruned_loss=0.04882, ctc_loss=0.0919, over 3000995.93 frames. ], batch size: 61, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:40:30,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=240554.66666666666, ans=0.0
+2024-08-31 16:40:41,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=240608.0, ans=0.1
+2024-08-31 16:40:42,572 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.22 vs. limit=6.0
+2024-08-31 16:40:52,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=240661.33333333334, ans=0.0
+2024-08-31 16:40:58,757 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.37 vs. limit=15.0
+2024-08-31 16:41:04,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=240714.66666666666, ans=0.125
+2024-08-31 16:41:06,892 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=240714.66666666666, ans=0.0
+2024-08-31 16:41:17,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240768.0, ans=0.1
+2024-08-31 16:41:21,966 INFO [train.py:1114] (0/4) Epoch 19, batch 350, loss[loss=0.1908, simple_loss=0.2551, pruned_loss=0.04546, ctc_loss=0.08886, over 19748.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2711, pruned_loss=0.04889, ctc_loss=0.09209, over 3190586.01 frames. ], batch size: 48, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:41:22,121 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=240821.33333333334, ans=0.0
+2024-08-31 16:41:30,308 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.653e+02 1.904e+02 2.349e+02 4.016e+02, threshold=3.809e+02, percent-clipped=0.0
+2024-08-31 16:42:25,376 INFO [train.py:1114] (0/4) Epoch 19, batch 400, loss[loss=0.2128, simple_loss=0.2805, pruned_loss=0.0534, ctc_loss=0.09571, over 19494.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2711, pruned_loss=0.04919, ctc_loss=0.0926, over 3342052.19 frames. ], batch size: 54, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:42:36,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=241088.0, ans=0.125
+2024-08-31 16:43:02,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=241194.66666666666, ans=0.0
+2024-08-31 16:43:15,976 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.99 vs. limit=15.0
+2024-08-31 16:43:21,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=241301.33333333334, ans=0.025
+2024-08-31 16:43:32,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=241301.33333333334, ans=0.2
+2024-08-31 16:43:34,368 INFO [train.py:1114] (0/4) Epoch 19, batch 450, loss[loss=0.199, simple_loss=0.2793, pruned_loss=0.0417, ctc_loss=0.08852, over 19626.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2713, pruned_loss=0.04932, ctc_loss=0.0928, over 3450414.33 frames. ], batch size: 55, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:43:42,750 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.686e+02 1.896e+02 2.370e+02 4.152e+02, threshold=3.792e+02, percent-clipped=1.0
+2024-08-31 16:43:47,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=241408.0, ans=0.0
+2024-08-31 16:43:48,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=241408.0, ans=0.125
+2024-08-31 16:43:49,929 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=241408.0, ans=0.025
+2024-08-31 16:43:54,240 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.18 vs. limit=12.0
+2024-08-31 16:43:57,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=241461.33333333334, ans=0.0
+2024-08-31 16:43:58,575 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.35 vs. limit=22.5
+2024-08-31 16:44:03,058 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.76 vs. limit=12.0
+2024-08-31 16:44:06,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=241461.33333333334, ans=0.2
+2024-08-31 16:44:35,440 INFO [train.py:1114] (0/4) Epoch 19, batch 500, loss[loss=0.2209, simple_loss=0.2861, pruned_loss=0.05703, ctc_loss=0.1038, over 19656.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2705, pruned_loss=0.04898, ctc_loss=0.09221, over 3545263.34 frames. ], batch size: 63, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:44:36,819 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=241621.33333333334, ans=0.125
+2024-08-31 16:45:17,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=241781.33333333334, ans=0.2
+2024-08-31 16:45:30,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=241834.66666666666, ans=0.025
+2024-08-31 16:45:49,259 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=241834.66666666666, ans=0.0
+2024-08-31 16:45:50,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-31 16:46:04,258 INFO [train.py:1114] (0/4) Epoch 19, batch 550, loss[loss=0.1871, simple_loss=0.2622, pruned_loss=0.04076, ctc_loss=0.07639, over 19368.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2711, pruned_loss=0.04922, ctc_loss=0.0927, over 3607835.04 frames. ], batch size: 71, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:46:04,798 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.44 vs. limit=15.0
+2024-08-31 16:46:12,723 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.697e+02 1.983e+02 2.191e+02 3.507e+02, threshold=3.966e+02, percent-clipped=0.0
+2024-08-31 16:46:29,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=241941.33333333334, ans=0.125
+2024-08-31 16:46:48,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=241994.66666666666, ans=0.2
+2024-08-31 16:46:48,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=241994.66666666666, ans=0.0
+2024-08-31 16:47:13,009 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=1.89 vs. limit=6.0
+2024-08-31 16:47:16,272 INFO [train.py:1114] (0/4) Epoch 19, batch 600, loss[loss=0.2246, simple_loss=0.2889, pruned_loss=0.05831, ctc_loss=0.1095, over 19400.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2713, pruned_loss=0.04937, ctc_loss=0.093, over 3666587.04 frames. ], batch size: 67, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:47:16,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=242154.66666666666, ans=0.1
+2024-08-31 16:47:22,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=242154.66666666666, ans=0.125
+2024-08-31 16:47:26,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.40 vs. limit=22.5
+2024-08-31 16:48:15,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=242314.66666666666, ans=0.1
+2024-08-31 16:48:17,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=242314.66666666666, ans=0.125
+2024-08-31 16:48:21,242 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:48:27,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=242368.0, ans=0.125
+2024-08-31 16:48:32,840 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.14 vs. limit=10.0
+2024-08-31 16:48:36,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=242368.0, ans=0.1
+2024-08-31 16:48:39,673 INFO [train.py:1114] (0/4) Epoch 19, batch 650, loss[loss=0.1616, simple_loss=0.2391, pruned_loss=0.03006, ctc_loss=0.06003, over 19773.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2702, pruned_loss=0.04888, ctc_loss=0.092, over 3716546.96 frames. ], batch size: 54, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:48:41,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=242421.33333333334, ans=0.09899494936611666
+2024-08-31 16:48:48,397 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.784e+02 2.044e+02 2.793e+02 4.792e+02, threshold=4.088e+02, percent-clipped=6.0
+2024-08-31 16:48:59,892 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=242474.66666666666, ans=0.125
+2024-08-31 16:49:31,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=242634.66666666666, ans=0.125
+2024-08-31 16:49:38,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=242634.66666666666, ans=0.0
+2024-08-31 16:49:41,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=242634.66666666666, ans=0.0
+2024-08-31 16:50:02,075 INFO [train.py:1114] (0/4) Epoch 19, batch 700, loss[loss=0.1919, simple_loss=0.2632, pruned_loss=0.04391, ctc_loss=0.08173, over 19733.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2704, pruned_loss=0.04907, ctc_loss=0.0922, over 3748806.62 frames. ], batch size: 51, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:50:06,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=242688.0, ans=0.0
+2024-08-31 16:50:07,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff3.min_abs, batch_count=242688.0, ans=0.2
+2024-08-31 16:50:48,781 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:52:16,517 INFO [train.py:1114] (0/4) Epoch 19, batch 750, loss[loss=0.2168, simple_loss=0.2923, pruned_loss=0.05183, ctc_loss=0.09421, over 19496.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2703, pruned_loss=0.04896, ctc_loss=0.09206, over 3774571.12 frames. ], batch size: 54, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:52:40,601 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.707e+02 2.012e+02 2.576e+02 4.596e+02, threshold=4.024e+02, percent-clipped=2.0
+2024-08-31 16:52:42,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=242954.66666666666, ans=0.125
+2024-08-31 16:52:48,721 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=243008.0, ans=0.025
+2024-08-31 16:52:50,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=243008.0, ans=0.125
+2024-08-31 16:53:02,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=243061.33333333334, ans=0.025
+2024-08-31 16:53:36,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=243168.0, ans=0.04949747468305833
+2024-08-31 16:53:40,980 INFO [train.py:1114] (0/4) Epoch 19, batch 800, loss[loss=0.1921, simple_loss=0.2552, pruned_loss=0.0471, ctc_loss=0.08684, over 19802.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2707, pruned_loss=0.04917, ctc_loss=0.0922, over 3795407.26 frames. ], batch size: 49, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:53:42,208 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=243221.33333333334, ans=0.1
+2024-08-31 16:53:57,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=243274.66666666666, ans=0.125
+2024-08-31 16:54:36,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=243434.66666666666, ans=0.125
+2024-08-31 16:54:52,035 INFO [train.py:1114] (0/4) Epoch 19, batch 850, loss[loss=0.2035, simple_loss=0.2792, pruned_loss=0.04676, ctc_loss=0.08545, over 19670.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2704, pruned_loss=0.04899, ctc_loss=0.0921, over 3815217.60 frames. ], batch size: 59, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:55:00,085 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.677e+02 1.837e+02 2.316e+02 3.927e+02, threshold=3.675e+02, percent-clipped=0.0
+2024-08-31 16:55:05,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=243541.33333333334, ans=0.125
+2024-08-31 16:55:15,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=243594.66666666666, ans=0.0
+2024-08-31 16:55:18,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=243594.66666666666, ans=0.5
+2024-08-31 16:55:24,814 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.45 vs. limit=15.0
+2024-08-31 16:55:33,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=243648.0, ans=0.125
+2024-08-31 16:55:40,611 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=243648.0, ans=0.0
+2024-08-31 16:55:46,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=243701.33333333334, ans=0.125
+2024-08-31 16:55:46,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=243701.33333333334, ans=0.2
+2024-08-31 16:55:47,094 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.50 vs. limit=22.5
+2024-08-31 16:55:50,194 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:55:55,903 INFO [train.py:1114] (0/4) Epoch 19, batch 900, loss[loss=0.2093, simple_loss=0.2662, pruned_loss=0.05476, ctc_loss=0.1071, over 19806.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2708, pruned_loss=0.04945, ctc_loss=0.09282, over 3819233.74 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:56:01,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=243754.66666666666, ans=0.05
+2024-08-31 16:56:05,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=243754.66666666666, ans=0.125
+2024-08-31 16:56:23,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=243861.33333333334, ans=0.125
+2024-08-31 16:56:45,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=243914.66666666666, ans=0.125
+2024-08-31 16:56:46,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=243914.66666666666, ans=0.0
+2024-08-31 16:56:47,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=243914.66666666666, ans=0.0
+2024-08-31 16:57:05,844 INFO [train.py:1114] (0/4) Epoch 19, batch 950, loss[loss=0.2005, simple_loss=0.2598, pruned_loss=0.0505, ctc_loss=0.1005, over 19501.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2715, pruned_loss=0.04979, ctc_loss=0.0937, over 3820559.24 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:57:14,286 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.751e+02 2.034e+02 2.400e+02 3.857e+02, threshold=4.067e+02, percent-clipped=1.0
+2024-08-31 16:57:20,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=244074.66666666666, ans=0.0
+2024-08-31 16:57:26,548 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=244074.66666666666, ans=0.0
+2024-08-31 16:57:27,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=244074.66666666666, ans=0.125
+2024-08-31 16:57:56,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=244234.66666666666, ans=0.2
+2024-08-31 16:58:03,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=244234.66666666666, ans=0.2
+2024-08-31 16:58:06,278 INFO [train.py:1114] (0/4) Epoch 19, batch 1000, loss[loss=0.1849, simple_loss=0.2643, pruned_loss=0.03882, ctc_loss=0.06929, over 19856.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2728, pruned_loss=0.05043, ctc_loss=0.09494, over 3815653.69 frames. ], batch size: 52, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 16:58:52,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=244341.33333333334, ans=0.015
+2024-08-31 16:58:54,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=244341.33333333334, ans=0.2
+2024-08-31 16:59:10,100 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.84 vs. limit=15.0
+2024-08-31 16:59:12,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=244448.0, ans=0.125
+2024-08-31 16:59:49,199 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.91 vs. limit=22.5
+2024-08-31 16:59:52,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=244501.33333333334, ans=0.5
+2024-08-31 17:00:09,426 INFO [train.py:1114] (0/4) Epoch 19, batch 1050, loss[loss=0.2263, simple_loss=0.2924, pruned_loss=0.05835, ctc_loss=0.1086, over 19834.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2718, pruned_loss=0.05005, ctc_loss=0.09419, over 3822306.47 frames. ], batch size: 57, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 17:00:13,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=244554.66666666666, ans=0.1
+2024-08-31 17:00:17,641 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.651e+02 1.935e+02 2.361e+02 3.363e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-31 17:00:20,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=244608.0, ans=0.125
+2024-08-31 17:00:21,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=244608.0, ans=0.0
+2024-08-31 17:00:22,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=244608.0, ans=0.2
+2024-08-31 17:01:12,068 INFO [train.py:1114] (0/4) Epoch 19, batch 1100, loss[loss=0.2089, simple_loss=0.2694, pruned_loss=0.05361, ctc_loss=0.103, over 19574.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2715, pruned_loss=0.04974, ctc_loss=0.09362, over 3830280.28 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:01:16,284 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.50 vs. limit=15.0
+2024-08-31 17:01:58,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=244981.33333333334, ans=0.125
+2024-08-31 17:02:30,950 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=245034.66666666666, ans=0.125
+2024-08-31 17:02:43,473 INFO [train.py:1114] (0/4) Epoch 19, batch 1150, loss[loss=0.1662, simple_loss=0.2384, pruned_loss=0.0345, ctc_loss=0.06259, over 19596.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2712, pruned_loss=0.04946, ctc_loss=0.09313, over 3828089.38 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:03:11,402 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.693e+02 1.899e+02 2.295e+02 3.327e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-31 17:03:14,379 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.87 vs. limit=15.0
+2024-08-31 17:03:16,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=245141.33333333334, ans=0.125
+2024-08-31 17:03:18,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=245141.33333333334, ans=0.05
+2024-08-31 17:03:22,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=245141.33333333334, ans=0.95
+2024-08-31 17:03:54,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=245301.33333333334, ans=0.1
+2024-08-31 17:03:56,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=245301.33333333334, ans=0.125
+2024-08-31 17:03:59,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1.whitening_limit, batch_count=245301.33333333334, ans=10.0
+2024-08-31 17:04:00,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=245301.33333333334, ans=0.0
+2024-08-31 17:04:04,676 INFO [train.py:1114] (0/4) Epoch 19, batch 1200, loss[loss=0.1832, simple_loss=0.2596, pruned_loss=0.03881, ctc_loss=0.07307, over 19828.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2718, pruned_loss=0.04969, ctc_loss=0.09361, over 3823389.80 frames. ], batch size: 57, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:04:29,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=245408.0, ans=0.0
+2024-08-31 17:04:33,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=245461.33333333334, ans=0.125
+2024-08-31 17:04:44,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=245514.66666666666, ans=0.2
+2024-08-31 17:04:46,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=245514.66666666666, ans=0.125
+2024-08-31 17:04:51,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=245514.66666666666, ans=0.125
+2024-08-31 17:05:08,561 INFO [train.py:1114] (0/4) Epoch 19, batch 1250, loss[loss=0.2005, simple_loss=0.27, pruned_loss=0.04723, ctc_loss=0.09154, over 19532.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2723, pruned_loss=0.04944, ctc_loss=0.09306, over 3841958.57 frames. ], batch size: 61, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:05:12,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=245621.33333333334, ans=0.125
+2024-08-31 17:05:14,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=245621.33333333334, ans=0.125
+2024-08-31 17:05:16,756 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.647e+02 1.911e+02 2.205e+02 3.499e+02, threshold=3.822e+02, percent-clipped=0.0
+2024-08-31 17:05:50,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=245728.0, ans=0.025
+2024-08-31 17:05:58,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=245781.33333333334, ans=0.125
+2024-08-31 17:05:58,820 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.71 vs. limit=15.0
+2024-08-31 17:06:14,119 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=245834.66666666666, ans=0.025
+2024-08-31 17:06:19,683 INFO [train.py:1114] (0/4) Epoch 19, batch 1300, loss[loss=0.2038, simple_loss=0.2746, pruned_loss=0.04908, ctc_loss=0.08737, over 18889.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.272, pruned_loss=0.04965, ctc_loss=0.09353, over 3846145.41 frames. ], batch size: 76, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:06:23,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=245888.0, ans=0.125
+2024-08-31 17:06:32,006 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.27 vs. limit=22.5
+2024-08-31 17:07:02,146 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.74 vs. limit=10.0
+2024-08-31 17:07:24,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=246154.66666666666, ans=0.1
+2024-08-31 17:07:25,628 INFO [train.py:1114] (0/4) Epoch 19, batch 1350, loss[loss=0.1918, simple_loss=0.2649, pruned_loss=0.04307, ctc_loss=0.08142, over 19769.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2714, pruned_loss=0.04925, ctc_loss=0.09269, over 3856948.23 frames. ], batch size: 54, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:07:39,276 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.765e+02 2.070e+02 2.720e+02 4.418e+02, threshold=4.141e+02, percent-clipped=1.0
+2024-08-31 17:07:42,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=246208.0, ans=0.125
+2024-08-31 17:07:51,298 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.66 vs. limit=22.5
+2024-08-31 17:07:53,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=246208.0, ans=0.1
+2024-08-31 17:08:21,820 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:08:35,875 INFO [train.py:1114] (0/4) Epoch 19, batch 1400, loss[loss=0.1698, simple_loss=0.2367, pruned_loss=0.03697, ctc_loss=0.07205, over 19657.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2709, pruned_loss=0.04903, ctc_loss=0.0921, over 3863914.06 frames. ], batch size: 46, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:09:14,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=246528.0, ans=0.035
+2024-08-31 17:09:21,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=246528.0, ans=0.125
+2024-08-31 17:09:27,672 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=246581.33333333334, ans=0.125
+2024-08-31 17:09:41,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=246634.66666666666, ans=0.2
+2024-08-31 17:09:44,885 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.45 vs. limit=22.5
+2024-08-31 17:09:45,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=246634.66666666666, ans=0.0
+2024-08-31 17:09:47,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.66 vs. limit=6.0
+2024-08-31 17:09:50,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=246634.66666666666, ans=0.125
+2024-08-31 17:09:53,651 INFO [train.py:1114] (0/4) Epoch 19, batch 1450, loss[loss=0.2218, simple_loss=0.2845, pruned_loss=0.05707, ctc_loss=0.1126, over 19667.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2713, pruned_loss=0.04943, ctc_loss=0.09277, over 3862670.75 frames. ], batch size: 63, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:09:53,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=246688.0, ans=0.0
+2024-08-31 17:10:02,069 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 1.691e+02 1.919e+02 2.362e+02 3.353e+02, threshold=3.838e+02, percent-clipped=0.0
+2024-08-31 17:10:06,372 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.88 vs. limit=12.0
+2024-08-31 17:11:36,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=246794.66666666666, ans=0.125
+2024-08-31 17:11:50,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=246848.0, ans=0.125
+2024-08-31 17:12:08,927 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=246901.33333333334, ans=0.0
+2024-08-31 17:12:09,648 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.70 vs. limit=15.0
+2024-08-31 17:12:12,391 INFO [train.py:1114] (0/4) Epoch 19, batch 1500, loss[loss=0.2129, simple_loss=0.2834, pruned_loss=0.051, ctc_loss=0.1009, over 19578.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2718, pruned_loss=0.0495, ctc_loss=0.09303, over 3862431.58 frames. ], batch size: 57, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:12:13,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=246954.66666666666, ans=0.125
+2024-08-31 17:13:04,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=247008.0, ans=0.0
+2024-08-31 17:13:58,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=247114.66666666666, ans=0.125
+2024-08-31 17:14:15,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=247114.66666666666, ans=0.1
+2024-08-31 17:14:33,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=247168.0, ans=0.2
+2024-08-31 17:14:37,594 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=247221.33333333334, ans=0.1
+2024-08-31 17:14:38,391 INFO [train.py:1114] (0/4) Epoch 19, batch 1550, loss[loss=0.2528, simple_loss=0.3064, pruned_loss=0.073, ctc_loss=0.133, over 19613.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2719, pruned_loss=0.04993, ctc_loss=0.09384, over 3847641.78 frames. ], batch size: 60, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:14:46,783 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.654e+02 1.883e+02 2.328e+02 3.879e+02, threshold=3.765e+02, percent-clipped=1.0
+2024-08-31 17:14:46,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=247221.33333333334, ans=0.0
+2024-08-31 17:15:12,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=247274.66666666666, ans=0.0
+2024-08-31 17:15:21,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=247328.0, ans=0.2
+2024-08-31 17:15:24,813 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.40 vs. limit=12.0
+2024-08-31 17:15:29,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=247328.0, ans=0.125
+2024-08-31 17:16:19,969 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2.whitening_limit, batch_count=247381.33333333334, ans=15.0
+2024-08-31 17:16:21,826 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=247381.33333333334, ans=0.125
+2024-08-31 17:16:25,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=247381.33333333334, ans=0.125
+2024-08-31 17:16:32,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=247434.66666666666, ans=0.0
+2024-08-31 17:16:37,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=247434.66666666666, ans=0.125
+2024-08-31 17:16:40,622 INFO [train.py:1114] (0/4) Epoch 19, batch 1600, loss[loss=0.1953, simple_loss=0.2725, pruned_loss=0.04341, ctc_loss=0.0782, over 19846.00 frames. ], tot_loss[loss=0.205, simple_loss=0.272, pruned_loss=0.05012, ctc_loss=0.09442, over 3836418.62 frames. ], batch size: 57, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:16:59,193 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=247541.33333333334, ans=0.2
+2024-08-31 17:17:07,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=247594.66666666666, ans=0.0
+2024-08-31 17:17:23,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=247648.0, ans=0.2
+2024-08-31 17:17:42,002 INFO [train.py:1114] (0/4) Epoch 19, batch 1650, loss[loss=0.2182, simple_loss=0.2889, pruned_loss=0.05319, ctc_loss=0.1028, over 19628.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2719, pruned_loss=0.05015, ctc_loss=0.09429, over 3833472.39 frames. ], batch size: 59, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:17:43,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=247754.66666666666, ans=0.125
+2024-08-31 17:17:50,564 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.753e+02 1.927e+02 2.360e+02 4.500e+02, threshold=3.853e+02, percent-clipped=4.0
+2024-08-31 17:17:55,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=247808.0, ans=0.0
+2024-08-31 17:18:04,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=247861.33333333334, ans=0.1
+2024-08-31 17:18:07,708 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.63 vs. limit=12.0
+2024-08-31 17:18:20,285 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.68 vs. limit=15.0
+2024-08-31 17:18:42,688 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=247968.0, ans=0.125
+2024-08-31 17:18:44,906 INFO [train.py:1114] (0/4) Epoch 19, batch 1700, loss[loss=0.1786, simple_loss=0.2457, pruned_loss=0.04108, ctc_loss=0.07345, over 19657.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2712, pruned_loss=0.04946, ctc_loss=0.09286, over 3847350.29 frames. ], batch size: 46, lr: 7.84e-03, grad_scale: 64.0
+2024-08-31 17:18:50,381 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.55 vs. limit=12.0
+2024-08-31 17:18:55,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=248021.33333333334, ans=0.025
+2024-08-31 17:19:11,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=248128.0, ans=0.2
+2024-08-31 17:19:52,910 INFO [train.py:1114] (0/4) Epoch 19, batch 1750, loss[loss=0.18, simple_loss=0.245, pruned_loss=0.04152, ctc_loss=0.07996, over 19654.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2705, pruned_loss=0.04902, ctc_loss=0.09214, over 3852186.62 frames. ], batch size: 45, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:19:54,840 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.24 vs. limit=6.0
+2024-08-31 17:20:02,153 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.715e+02 1.941e+02 2.441e+02 4.524e+02, threshold=3.882e+02, percent-clipped=3.0
+2024-08-31 17:20:12,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=248341.33333333334, ans=0.1
+2024-08-31 17:20:16,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=248394.66666666666, ans=0.0
+2024-08-31 17:20:20,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=248394.66666666666, ans=0.0
+2024-08-31 17:20:25,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten.whitening_limit, batch_count=248394.66666666666, ans=15.0
+2024-08-31 17:20:40,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=248501.33333333334, ans=0.1
+2024-08-31 17:20:44,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=248501.33333333334, ans=0.0
+2024-08-31 17:20:46,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=248501.33333333334, ans=0.0
+2024-08-31 17:20:49,896 INFO [train.py:1114] (0/4) Epoch 19, batch 1800, loss[loss=0.2011, simple_loss=0.2755, pruned_loss=0.04618, ctc_loss=0.08578, over 19604.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2709, pruned_loss=0.04896, ctc_loss=0.09206, over 3854361.58 frames. ], batch size: 55, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:20:50,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=248554.66666666666, ans=0.07
+2024-08-31 17:21:19,603 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:21:27,970 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.99 vs. limit=22.5
+2024-08-31 17:21:45,305 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.40 vs. limit=12.0
+2024-08-31 17:21:45,429 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.54 vs. limit=6.0
+2024-08-31 17:21:46,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=248821.33333333334, ans=0.2
+2024-08-31 17:21:47,148 INFO [train.py:1114] (0/4) Epoch 19, batch 1850, loss[loss=0.2268, simple_loss=0.2923, pruned_loss=0.05818, ctc_loss=0.1122, over 19595.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2709, pruned_loss=0.04887, ctc_loss=0.09184, over 3857567.59 frames. ], batch size: 57, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:21:52,320 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.12 vs. limit=22.5
+2024-08-31 17:21:56,039 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.825e+02 2.203e+02 3.044e+02 4.782e+02, threshold=4.406e+02, percent-clipped=6.0
+2024-08-31 17:22:15,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=248874.66666666666, ans=0.125
+2024-08-31 17:22:19,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=248928.0, ans=0.1
+2024-08-31 17:22:34,351 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.17 vs. limit=15.0
+2024-08-31 17:22:52,464 INFO [train.py:1114] (0/4) Epoch 19, batch 1900, loss[loss=0.2045, simple_loss=0.2877, pruned_loss=0.04483, ctc_loss=0.07912, over 19668.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2717, pruned_loss=0.04913, ctc_loss=0.09226, over 3862216.50 frames. ], batch size: 59, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:22:55,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=249088.0, ans=0.0
+2024-08-31 17:22:59,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=249088.0, ans=0.0
+2024-08-31 17:23:08,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=249141.33333333334, ans=10.0
+2024-08-31 17:23:13,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=249194.66666666666, ans=0.1
+2024-08-31 17:23:22,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=249194.66666666666, ans=0.1
+2024-08-31 17:23:23,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=249194.66666666666, ans=0.125
+2024-08-31 17:23:34,118 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=249248.0, ans=0.05
+2024-08-31 17:23:35,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=249248.0, ans=0.125
+2024-08-31 17:23:48,980 INFO [train.py:1114] (0/4) Epoch 19, batch 1950, loss[loss=0.1738, simple_loss=0.2493, pruned_loss=0.03633, ctc_loss=0.0643, over 19586.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2722, pruned_loss=0.04906, ctc_loss=0.09216, over 3871303.88 frames. ], batch size: 52, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:23:58,748 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.608e+02 1.802e+02 2.157e+02 4.545e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 17:24:00,131 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:24:06,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=249408.0, ans=0.1
+2024-08-31 17:24:13,469 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.08 vs. limit=15.0
+2024-08-31 17:24:22,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=249514.66666666666, ans=0.0
+2024-08-31 17:24:25,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=249514.66666666666, ans=0.0
+2024-08-31 17:24:50,835 INFO [train.py:1114] (0/4) Epoch 19, batch 2000, loss[loss=0.1675, simple_loss=0.2328, pruned_loss=0.03686, ctc_loss=0.0712, over 19614.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2728, pruned_loss=0.04943, ctc_loss=0.09284, over 3856103.74 frames. ], batch size: 45, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:24:51,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=249621.33333333334, ans=0.015
+2024-08-31 17:24:57,661 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=249621.33333333334, ans=0.125
+2024-08-31 17:25:05,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.whiten.whitening_limit, batch_count=249674.66666666666, ans=12.0
+2024-08-31 17:25:06,913 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=249674.66666666666, ans=0.2
+2024-08-31 17:25:08,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=249674.66666666666, ans=10.0
+2024-08-31 17:25:25,349 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=249781.33333333334, ans=0.2
+2024-08-31 17:25:41,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=249834.66666666666, ans=0.0
+2024-08-31 17:25:41,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=249834.66666666666, ans=0.05
+2024-08-31 17:25:46,744 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=249888.0, ans=0.0
+2024-08-31 17:25:47,787 INFO [train.py:1114] (0/4) Epoch 19, batch 2050, loss[loss=0.1773, simple_loss=0.2406, pruned_loss=0.04211, ctc_loss=0.07482, over 19697.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2717, pruned_loss=0.04945, ctc_loss=0.09289, over 3852752.59 frames. ], batch size: 47, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:25:52,866 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=249888.0, ans=0.125
+2024-08-31 17:25:57,144 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.719e+02 2.018e+02 2.402e+02 3.677e+02, threshold=4.037e+02, percent-clipped=1.0
+2024-08-31 17:25:57,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_na.min_abs, batch_count=249888.0, ans=0.02
+2024-08-31 17:25:59,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=249941.33333333334, ans=0.0
+2024-08-31 17:26:19,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=249994.66666666666, ans=0.1
+2024-08-31 17:26:36,248 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.34 vs. limit=15.0
+2024-08-31 17:26:38,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=250101.33333333334, ans=0.05
+2024-08-31 17:26:44,682 INFO [train.py:1114] (0/4) Epoch 19, batch 2100, loss[loss=0.1905, simple_loss=0.269, pruned_loss=0.04051, ctc_loss=0.07741, over 19782.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2712, pruned_loss=0.0493, ctc_loss=0.09268, over 3859487.84 frames. ], batch size: 54, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:26:49,077 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.65 vs. limit=15.0
+2024-08-31 17:26:50,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=250154.66666666666, ans=0.2
+2024-08-31 17:27:13,443 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.32 vs. limit=15.0
+2024-08-31 17:27:40,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:41,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=250421.33333333334, ans=0.0
+2024-08-31 17:27:42,494 INFO [train.py:1114] (0/4) Epoch 19, batch 2150, loss[loss=0.1875, simple_loss=0.2625, pruned_loss=0.04045, ctc_loss=0.07902, over 19568.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2705, pruned_loss=0.0489, ctc_loss=0.09186, over 3870372.76 frames. ], batch size: 52, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:27:48,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=250421.33333333334, ans=0.125
+2024-08-31 17:27:51,496 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.672e+02 1.975e+02 2.523e+02 4.782e+02, threshold=3.951e+02, percent-clipped=2.0
+2024-08-31 17:27:57,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=250474.66666666666, ans=0.125
+2024-08-31 17:28:08,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=250528.0, ans=0.0
+2024-08-31 17:28:22,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=250581.33333333334, ans=0.025
+2024-08-31 17:28:39,684 INFO [train.py:1114] (0/4) Epoch 19, batch 2200, loss[loss=0.202, simple_loss=0.2777, pruned_loss=0.04651, ctc_loss=0.08319, over 19587.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2709, pruned_loss=0.04919, ctc_loss=0.09236, over 3869227.07 frames. ], batch size: 57, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:29:14,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=250848.0, ans=0.0
+2024-08-31 17:29:30,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=250901.33333333334, ans=0.1
+2024-08-31 17:29:38,737 INFO [train.py:1114] (0/4) Epoch 19, batch 2250, loss[loss=0.2313, simple_loss=0.2941, pruned_loss=0.06104, ctc_loss=0.1163, over 19623.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2713, pruned_loss=0.04911, ctc_loss=0.09233, over 3869499.94 frames. ], batch size: 55, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:29:47,357 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.680e+02 1.896e+02 2.375e+02 5.292e+02, threshold=3.791e+02, percent-clipped=4.0
+2024-08-31 17:30:00,929 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.74 vs. limit=15.0
+2024-08-31 17:30:12,232 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=251061.33333333334, ans=6.0
+2024-08-31 17:30:20,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=251114.66666666666, ans=0.1
+2024-08-31 17:30:37,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=251168.0, ans=0.125
+2024-08-31 17:30:40,038 INFO [train.py:1114] (0/4) Epoch 19, batch 2300, loss[loss=0.1763, simple_loss=0.2474, pruned_loss=0.03839, ctc_loss=0.07109, over 19501.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2706, pruned_loss=0.04925, ctc_loss=0.09267, over 3863587.28 frames. ], batch size: 49, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:30:47,900 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.13 vs. limit=6.0
+2024-08-31 17:31:05,087 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=3.251e-02
+2024-08-31 17:31:26,534 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=251434.66666666666, ans=0.125
+2024-08-31 17:31:27,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=251434.66666666666, ans=0.125
+2024-08-31 17:31:36,397 INFO [train.py:1114] (0/4) Epoch 19, batch 2350, loss[loss=0.2031, simple_loss=0.2766, pruned_loss=0.04711, ctc_loss=0.0883, over 19649.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2705, pruned_loss=0.04936, ctc_loss=0.09277, over 3865582.44 frames. ], batch size: 63, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:31:45,227 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.718e+02 2.013e+02 2.563e+02 3.706e+02, threshold=4.026e+02, percent-clipped=0.0
+2024-08-31 17:31:50,880 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:32:04,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=251594.66666666666, ans=0.0
+2024-08-31 17:32:05,525 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.34 vs. limit=12.0
+2024-08-31 17:32:13,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=251648.0, ans=0.125
+2024-08-31 17:32:17,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=251648.0, ans=0.125
+2024-08-31 17:32:23,830 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.43 vs. limit=15.0
+2024-08-31 17:32:27,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=251701.33333333334, ans=0.125
+2024-08-31 17:32:36,572 INFO [train.py:1114] (0/4) Epoch 19, batch 2400, loss[loss=0.2285, simple_loss=0.2898, pruned_loss=0.05982, ctc_loss=0.1186, over 19372.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2726, pruned_loss=0.05013, ctc_loss=0.09401, over 3861154.57 frames. ], batch size: 67, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:32:48,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=251808.0, ans=0.2
+2024-08-31 17:32:48,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=251808.0, ans=0.125
+2024-08-31 17:32:50,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=251808.0, ans=0.04949747468305833
+2024-08-31 17:33:39,847 INFO [train.py:1114] (0/4) Epoch 19, batch 2450, loss[loss=0.25, simple_loss=0.2952, pruned_loss=0.0744, ctc_loss=0.1397, over 12910.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.276, pruned_loss=0.0522, ctc_loss=0.09809, over 3737846.95 frames. ], batch size: 141, lr: 7.78e-03, grad_scale: 32.0
+2024-08-31 17:33:43,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=252021.33333333334, ans=0.125
+2024-08-31 17:33:48,950 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.610e+02 1.856e+02 2.081e+02 3.075e+02, threshold=3.711e+02, percent-clipped=0.0
+2024-08-31 17:34:01,821 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=252128.0, ans=0.0
+2024-08-31 17:34:07,217 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=252128.0, ans=0.0
+2024-08-31 17:34:09,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=252128.0, ans=0.125
+2024-08-31 17:34:24,890 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-19.pt
+2024-08-31 17:36:18,526 INFO [train.py:1114] (0/4) Epoch 20, batch 0, loss[loss=0.2057, simple_loss=0.26, pruned_loss=0.05526, ctc_loss=0.1024, over 19809.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.26, pruned_loss=0.05526, ctc_loss=0.1024, over 19809.00 frames. ], batch size: 49, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:36:18,527 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-31 17:36:23,388 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.4.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.1181, 2.3030, 2.5674, 2.2271], device='cuda:0')
+2024-08-31 17:36:28,430 INFO [train.py:1146] (0/4) Epoch 20, validation: loss=0.1834, simple_loss=0.2715, pruned_loss=0.03542, ctc_loss=0.061, over 944034.00 frames.
+2024-08-31 17:36:28,431 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13675MB
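+
+At each epoch boundary the trainer saves `epoch-N.pt` (as at 17:34:24 above, with step-interval `checkpoint-*.pt` saves elsewhere in the log), then opens the next epoch by computing a validation loss over the full validation set (944034 frames here) before resuming training batches. The `tot_loss[... over N frames]` fields in the batch lines are frame-weighted running averages; the frame totals drift up and down rather than growing monotonically, which is consistent with a decayed accumulator rather than a plain cumulative mean. A small illustrative sketch, with an assumed decay factor and hypothetical names (not icefall's):
+
+```python
+# Decayed frame-weighted running averages behind "tot_loss[... over N frames]".
+class RunningFrameAverage:
+    def __init__(self, decay: float = 0.995):  # per-batch decay (assumed)
+        self.decay = decay
+        self.frames = 0.0   # decayed total frame count
+        self.sums = {}      # loss name -> decayed frame-weighted sum
+
+    def update(self, frames: float, **losses: float):
+        self.frames = self.frames * self.decay + frames
+        for name, value in losses.items():
+            self.sums[name] = self.sums.get(name, 0.0) * self.decay + value * frames
+
+    def averages(self) -> dict:
+        return {name: s / self.frames for name, s in self.sums.items()}
+
+tot = RunningFrameAverage()
+tot.update(19501.0, loss=0.1763, ctc_loss=0.07109)  # cf. batch 2300 above
+tot.update(19649.0, loss=0.2031, ctc_loss=0.0883)   # cf. batch 2350 above
+print(tot.averages())  # running frame-weighted means, as in the log
+```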
+2024-08-31 17:36:41,934 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=252288.0, ans=0.025
+2024-08-31 17:36:50,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=252288.0, ans=0.125
+2024-08-31 17:36:50,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=252288.0, ans=0.2
+2024-08-31 17:36:53,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=252341.33333333334, ans=0.125
+2024-08-31 17:37:06,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=252394.66666666666, ans=0.0
+2024-08-31 17:37:07,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=252394.66666666666, ans=0.025
+2024-08-31 17:37:27,964 INFO [train.py:1114] (0/4) Epoch 20, batch 50, loss[loss=0.1701, simple_loss=0.2445, pruned_loss=0.03522, ctc_loss=0.0633, over 19720.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2734, pruned_loss=0.05082, ctc_loss=0.09645, over 843606.59 frames. ], batch size: 47, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:37:51,156 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.696e+02 1.962e+02 2.261e+02 4.473e+02, threshold=3.923e+02, percent-clipped=2.0
+2024-08-31 17:40:42,351 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.39 vs. limit=12.0
+2024-08-31 17:40:42,651 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.48 vs. limit=22.5
+2024-08-31 17:41:08,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=252714.66666666666, ans=0.125
+2024-08-31 17:41:27,195 INFO [train.py:1114] (0/4) Epoch 20, batch 100, loss[loss=0.1731, simple_loss=0.2453, pruned_loss=0.03703, ctc_loss=0.06724, over 19733.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2745, pruned_loss=0.05095, ctc_loss=0.09572, over 1498532.54 frames. ], batch size: 51, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:41:50,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=252821.33333333334, ans=0.125
+2024-08-31 17:42:39,608 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.77 vs. limit=22.5
+2024-08-31 17:42:51,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=252874.66666666666, ans=0.2
+2024-08-31 17:44:06,478 INFO [train.py:1114] (0/4) Epoch 20, batch 150, loss[loss=0.1966, simple_loss=0.2546, pruned_loss=0.05114, ctc_loss=0.09064, over 19685.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2705, pruned_loss=0.04878, ctc_loss=0.09152, over 2026778.29 frames. ], batch size: 47, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:44:06,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=253034.66666666666, ans=0.0
+2024-08-31 17:44:59,735 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.634e+02 1.821e+02 2.194e+02 3.683e+02, threshold=3.641e+02, percent-clipped=0.0
+2024-08-31 17:45:01,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=253088.0, ans=0.125
+2024-08-31 17:45:21,251 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.54 vs. limit=15.0
+2024-08-31 17:45:32,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=253194.66666666666, ans=0.05
+2024-08-31 17:45:58,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=253301.33333333334, ans=0.2
+2024-08-31 17:45:59,902 INFO [train.py:1114] (0/4) Epoch 20, batch 200, loss[loss=0.2194, simple_loss=0.2903, pruned_loss=0.0537, ctc_loss=0.1028, over 18195.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2689, pruned_loss=0.04802, ctc_loss=0.09044, over 2434933.67 frames. ], batch size: 85, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:46:14,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=253354.66666666666, ans=0.1
+2024-08-31 17:46:33,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=253408.0, ans=0.125
+2024-08-31 17:46:48,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=253408.0, ans=0.125
+2024-08-31 17:46:51,317 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=253408.0, ans=0.0
+2024-08-31 17:47:15,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=253514.66666666666, ans=0.125
+2024-08-31 17:47:18,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=253514.66666666666, ans=0.125
+2024-08-31 17:47:20,549 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.43 vs. limit=15.0
+2024-08-31 17:47:28,658 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.31 vs. limit=15.0
+2024-08-31 17:47:33,339 INFO [train.py:1114] (0/4) Epoch 20, batch 250, loss[loss=0.2131, simple_loss=0.2827, pruned_loss=0.0524, ctc_loss=0.09707, over 19420.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.269, pruned_loss=0.04776, ctc_loss=0.0901, over 2755716.60 frames. ], batch size: 67, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:47:43,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=253568.0, ans=0.0
+2024-08-31 17:47:49,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=253621.33333333334, ans=0.025
+2024-08-31 17:47:56,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=253621.33333333334, ans=0.2
+2024-08-31 17:47:59,368 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.745e+02 2.044e+02 2.602e+02 4.259e+02, threshold=4.089e+02, percent-clipped=6.0
+2024-08-31 17:47:59,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=253621.33333333334, ans=0.2
+2024-08-31 17:48:59,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=253674.66666666666, ans=0.125
+2024-08-31 17:49:36,898 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=253728.0, ans=0.125
+2024-08-31 17:49:37,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=253728.0, ans=0.1
+2024-08-31 17:50:00,343 INFO [train.py:1114] (0/4) Epoch 20, batch 300, loss[loss=0.2118, simple_loss=0.2859, pruned_loss=0.05027, ctc_loss=0.09284, over 19549.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2691, pruned_loss=0.04801, ctc_loss=0.09042, over 3001088.90 frames. ], batch size: 61, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:50:08,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=253834.66666666666, ans=0.125
+2024-08-31 17:50:16,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=253888.0, ans=0.125
+2024-08-31 17:50:31,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=253941.33333333334, ans=0.0
+2024-08-31 17:50:44,329 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.60 vs. limit=22.5
+2024-08-31 17:50:45,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=253994.66666666666, ans=0.2
+2024-08-31 17:51:05,485 INFO [train.py:1114] (0/4) Epoch 20, batch 350, loss[loss=0.1813, simple_loss=0.2485, pruned_loss=0.04141, ctc_loss=0.07822, over 19726.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2697, pruned_loss=0.04815, ctc_loss=0.09048, over 3190959.39 frames. ], batch size: 48, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:51:07,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=254101.33333333334, ans=0.0
+2024-08-31 17:51:18,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.38 vs. limit=15.0
+2024-08-31 17:51:26,955 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.703e+02 1.946e+02 2.321e+02 4.034e+02, threshold=3.891e+02, percent-clipped=0.0
+2024-08-31 17:51:28,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=254208.0, ans=0.125
+2024-08-31 17:51:30,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=254208.0, ans=0.125
+2024-08-31 17:52:02,475 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.32 vs. limit=22.5
+2024-08-31 17:52:04,333 INFO [train.py:1114] (0/4) Epoch 20, batch 400, loss[loss=0.1946, simple_loss=0.2634, pruned_loss=0.04572, ctc_loss=0.08578, over 19510.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2694, pruned_loss=0.04806, ctc_loss=0.09006, over 3342257.31 frames. ], batch size: 54, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:52:37,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=254474.66666666666, ans=0.125
+2024-08-31 17:52:39,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=254474.66666666666, ans=0.125
+2024-08-31 17:52:46,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=254528.0, ans=0.125
+2024-08-31 17:52:55,203 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.29 vs. limit=15.0
+2024-08-31 17:53:10,612 INFO [train.py:1114] (0/4) Epoch 20, batch 450, loss[loss=0.2087, simple_loss=0.2826, pruned_loss=0.04782, ctc_loss=0.09797, over 19600.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2699, pruned_loss=0.04836, ctc_loss=0.09067, over 3451494.20 frames. ], batch size: 55, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:53:20,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=254634.66666666666, ans=0.125
+2024-08-31 17:53:31,692 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.627e+02 1.777e+02 2.217e+02 3.582e+02, threshold=3.554e+02, percent-clipped=0.0
+2024-08-31 17:53:46,489 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=254741.33333333334, ans=0.125
+2024-08-31 17:53:58,593 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.48 vs. limit=15.0
+2024-08-31 17:53:58,673 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.50 vs. limit=10.0
+2024-08-31 17:54:15,353 INFO [train.py:1114] (0/4) Epoch 20, batch 500, loss[loss=0.211, simple_loss=0.2748, pruned_loss=0.05355, ctc_loss=0.1004, over 19680.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2692, pruned_loss=0.04806, ctc_loss=0.09022, over 3545952.35 frames. ], batch size: 63, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:54:34,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=254954.66666666666, ans=0.025
+2024-08-31 17:54:54,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=255061.33333333334, ans=0.125
+2024-08-31 17:54:56,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=255061.33333333334, ans=0.125
+2024-08-31 17:55:14,668 INFO [train.py:1114] (0/4) Epoch 20, batch 550, loss[loss=0.2262, simple_loss=0.2913, pruned_loss=0.05725, ctc_loss=0.1164, over 19253.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2694, pruned_loss=0.04804, ctc_loss=0.09027, over 3608173.82 frames. ], batch size: 71, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:55:35,928 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.640e+02 1.908e+02 2.178e+02 3.229e+02, threshold=3.816e+02, percent-clipped=0.0
+2024-08-31 17:55:46,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=255274.66666666666, ans=0.0
+2024-08-31 17:55:52,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=255328.0, ans=0.2
+2024-08-31 17:56:20,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=255381.33333333334, ans=10.0
+2024-08-31 17:56:22,748 INFO [train.py:1114] (0/4) Epoch 20, batch 600, loss[loss=0.2241, simple_loss=0.294, pruned_loss=0.05678, ctc_loss=0.1014, over 19331.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.27, pruned_loss=0.04832, ctc_loss=0.09088, over 3665746.35 frames. ], batch size: 67, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:56:47,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=255541.33333333334, ans=0.125
+2024-08-31 17:57:04,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=255594.66666666666, ans=0.025
+2024-08-31 17:57:07,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.80 vs. limit=15.0
+2024-08-31 17:57:18,488 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.05 vs. limit=12.0
+2024-08-31 17:57:22,366 INFO [train.py:1114] (0/4) Epoch 20, batch 650, loss[loss=0.1978, simple_loss=0.2695, pruned_loss=0.0451, ctc_loss=0.0898, over 19755.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2696, pruned_loss=0.0481, ctc_loss=0.09067, over 3716724.37 frames. ], batch size: 54, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:57:28,821 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=255701.33333333334, ans=0.125
+2024-08-31 17:57:37,710 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.54 vs. limit=6.0
+2024-08-31 17:57:44,316 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.759e+02 2.153e+02 2.838e+02 5.166e+02, threshold=4.306e+02, percent-clipped=8.0
+2024-08-31 17:57:52,733 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:57:55,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=255808.0, ans=0.125
+2024-08-31 17:58:03,512 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=255861.33333333334, ans=0.07
+2024-08-31 17:58:12,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=255914.66666666666, ans=0.0
+2024-08-31 17:58:22,788 INFO [train.py:1114] (0/4) Epoch 20, batch 700, loss[loss=0.2241, simple_loss=0.2827, pruned_loss=0.06002, ctc_loss=0.1137, over 19713.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2704, pruned_loss=0.04829, ctc_loss=0.09091, over 3748489.04 frames. ], batch size: 51, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:58:28,900 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/checkpoint-48000.pt
+2024-08-31 17:58:36,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=256021.33333333334, ans=0.025
+2024-08-31 17:58:36,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=256021.33333333334, ans=0.1
+2024-08-31 17:58:38,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=256021.33333333334, ans=0.025
+2024-08-31 17:59:11,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.55 vs. limit=15.0
+2024-08-31 17:59:24,901 INFO [train.py:1114] (0/4) Epoch 20, batch 750, loss[loss=0.2021, simple_loss=0.2768, pruned_loss=0.04667, ctc_loss=0.08524, over 19516.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2696, pruned_loss=0.04779, ctc_loss=0.08997, over 3775536.21 frames. ], batch size: 54, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 17:59:30,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=256234.66666666666, ans=0.0
+2024-08-31 17:59:38,905 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=256234.66666666666, ans=0.125
+2024-08-31 17:59:58,590 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 1.642e+02 1.855e+02 2.095e+02 3.716e+02, threshold=3.709e+02, percent-clipped=0.0
+2024-08-31 18:00:09,930 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=256341.33333333334, ans=0.025
+2024-08-31 18:00:11,643 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.68 vs. limit=22.5
+2024-08-31 18:00:12,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=256394.66666666666, ans=0.125
+2024-08-31 18:00:18,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=256394.66666666666, ans=0.2
+2024-08-31 18:00:30,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=256448.0, ans=0.125
+2024-08-31 18:00:42,912 INFO [train.py:1114] (0/4) Epoch 20, batch 800, loss[loss=0.1949, simple_loss=0.2594, pruned_loss=0.04564, ctc_loss=0.09784, over 19816.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.27, pruned_loss=0.04819, ctc_loss=0.09077, over 3797324.35 frames. ], batch size: 49, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 18:00:43,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=256501.33333333334, ans=0.125
+2024-08-31 18:00:53,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=256554.66666666666, ans=0.2
+2024-08-31 18:01:02,847 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.77 vs. limit=6.0
+2024-08-31 18:01:02,877 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.50 vs. limit=6.0
+2024-08-31 18:01:07,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=256608.0, ans=0.025
+2024-08-31 18:01:23,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=256661.33333333334, ans=0.125
+2024-08-31 18:01:28,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=256661.33333333334, ans=0.125
+2024-08-31 18:01:35,912 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=256714.66666666666, ans=0.125
+2024-08-31 18:01:43,045 INFO [train.py:1114] (0/4) Epoch 20, batch 850, loss[loss=0.1963, simple_loss=0.2706, pruned_loss=0.04362, ctc_loss=0.08694, over 19647.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2695, pruned_loss=0.04797, ctc_loss=0.09052, over 3816331.02 frames. ], batch size: 59, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:01:50,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=256768.0, ans=0.125
+2024-08-31 18:01:52,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=256768.0, ans=0.125
+2024-08-31 18:02:04,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=256821.33333333334, ans=0.1
+2024-08-31 18:02:04,847 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.69 vs. limit=15.0
+2024-08-31 18:02:05,168 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.672e+02 2.009e+02 2.661e+02 4.692e+02, threshold=4.019e+02, percent-clipped=5.0
+2024-08-31 18:02:12,087 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.40 vs. limit=15.0
+2024-08-31 18:02:23,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.72 vs. limit=15.0
+2024-08-31 18:02:42,831 INFO [train.py:1114] (0/4) Epoch 20, batch 900, loss[loss=0.1851, simple_loss=0.2423, pruned_loss=0.04661, ctc_loss=0.08655, over 19410.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2696, pruned_loss=0.04831, ctc_loss=0.09123, over 3819704.95 frames. ], batch size: 48, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:02:43,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=257034.66666666666, ans=0.1
+2024-08-31 18:02:47,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=257034.66666666666, ans=0.1
+2024-08-31 18:03:14,904 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=8.78 vs. limit=12.0
+2024-08-31 18:03:17,244 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.13 vs. limit=22.5
+2024-08-31 18:03:30,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=257194.66666666666, ans=0.07
+2024-08-31 18:03:36,788 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:03:36,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=257194.66666666666, ans=0.025
+2024-08-31 18:03:48,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=257248.0, ans=0.125
+2024-08-31 18:03:50,691 INFO [train.py:1114] (0/4) Epoch 20, batch 950, loss[loss=0.1922, simple_loss=0.2528, pruned_loss=0.04815, ctc_loss=0.08825, over 19494.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2697, pruned_loss=0.04827, ctc_loss=0.0911, over 3820524.61 frames. ], batch size: 49, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:04:12,191 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.674e+02 1.914e+02 2.385e+02 5.476e+02, threshold=3.829e+02, percent-clipped=1.0
+2024-08-31 18:05:00,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=257461.33333333334, ans=0.125
+2024-08-31 18:05:01,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=257461.33333333334, ans=0.125
+2024-08-31 18:05:20,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=257514.66666666666, ans=0.125
+2024-08-31 18:05:24,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=257568.0, ans=0.125
+2024-08-31 18:05:25,081 INFO [train.py:1114] (0/4) Epoch 20, batch 1000, loss[loss=0.1769, simple_loss=0.2485, pruned_loss=0.03827, ctc_loss=0.07181, over 19855.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2707, pruned_loss=0.04902, ctc_loss=0.09247, over 3817259.76 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:05:40,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=257621.33333333334, ans=0.125
+2024-08-31 18:10:04,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=257621.33333333334, ans=0.125
+2024-08-31 18:11:53,818 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=257728.0, ans=0.125
+2024-08-31 18:12:03,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=257781.33333333334, ans=0.125
+2024-08-31 18:12:06,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=257781.33333333334, ans=0.0
+2024-08-31 18:12:15,980 INFO [train.py:1114] (0/4) Epoch 20, batch 1050, loss[loss=0.1939, simple_loss=0.2671, pruned_loss=0.04334, ctc_loss=0.08471, over 19842.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2699, pruned_loss=0.04845, ctc_loss=0.09131, over 3823486.45 frames. ], batch size: 57, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:12:22,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=257834.66666666666, ans=0.0
+2024-08-31 18:12:25,179 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=20.18 vs. limit=22.5
+2024-08-31 18:12:32,927 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=257888.0, ans=0.0
+2024-08-31 18:12:34,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=257888.0, ans=0.125
+2024-08-31 18:12:37,419 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.683e+02 1.941e+02 2.234e+02 3.103e+02, threshold=3.882e+02, percent-clipped=0.0
+2024-08-31 18:13:01,936 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:13:13,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.whiten.whitening_limit, batch_count=258048.0, ans=12.0
+2024-08-31 18:13:25,853 INFO [train.py:1114] (0/4) Epoch 20, batch 1100, loss[loss=0.1911, simple_loss=0.2625, pruned_loss=0.04364, ctc_loss=0.08088, over 19588.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2701, pruned_loss=0.04855, ctc_loss=0.09135, over 3831554.41 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:13:58,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=258208.0, ans=0.125
+2024-08-31 18:14:26,123 INFO [train.py:1114] (0/4) Epoch 20, batch 1150, loss[loss=0.1985, simple_loss=0.271, pruned_loss=0.0448, ctc_loss=0.09081, over 19585.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2699, pruned_loss=0.04852, ctc_loss=0.09132, over 3828701.45 frames. ], batch size: 52, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:15:12,216 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.657e+02 1.937e+02 2.398e+02 3.976e+02, threshold=3.875e+02, percent-clipped=1.0
+2024-08-31 18:15:35,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=258528.0, ans=0.1
+2024-08-31 18:15:51,938 INFO [train.py:1114] (0/4) Epoch 20, batch 1200, loss[loss=0.2287, simple_loss=0.2975, pruned_loss=0.05827, ctc_loss=0.1082, over 19840.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2708, pruned_loss=0.04873, ctc_loss=0.09188, over 3825325.48 frames. ], batch size: 57, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:16:34,040 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.37 vs. limit=15.0
+2024-08-31 18:16:36,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=258794.66666666666, ans=0.2
+2024-08-31 18:16:54,775 INFO [train.py:1114] (0/4) Epoch 20, batch 1250, loss[loss=0.1978, simple_loss=0.2748, pruned_loss=0.04409, ctc_loss=0.08164, over 19541.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.271, pruned_loss=0.04892, ctc_loss=0.0921, over 3843320.66 frames. ], batch size: 61, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:17:17,015 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.31 vs. limit=10.0
+2024-08-31 18:17:20,830 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.673e+02 1.864e+02 2.243e+02 4.460e+02, threshold=3.727e+02, percent-clipped=1.0
+2024-08-31 18:17:27,279 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.10 vs. limit=6.0
+2024-08-31 18:17:48,937 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=8.394e-02
+2024-08-31 18:18:00,199 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.35 vs. limit=15.0
+2024-08-31 18:18:05,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=259114.66666666666, ans=0.1
+2024-08-31 18:19:03,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=259114.66666666666, ans=0.125
+2024-08-31 18:19:05,802 INFO [train.py:1114] (0/4) Epoch 20, batch 1300, loss[loss=0.2163, simple_loss=0.2805, pruned_loss=0.05623, ctc_loss=0.09898, over 18810.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2701, pruned_loss=0.04846, ctc_loss=0.09129, over 3844907.25 frames. ], batch size: 76, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:19:09,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=259168.0, ans=0.2
+2024-08-31 18:19:09,826 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=259168.0, ans=0.125
+2024-08-31 18:19:12,404 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:19:25,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=259221.33333333334, ans=0.125
+2024-08-31 18:19:30,863 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.94 vs. limit=15.0
+2024-08-31 18:19:38,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259274.66666666666, ans=0.1
+2024-08-31 18:19:49,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=259328.0, ans=0.0
+2024-08-31 18:19:52,650 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.25 vs. limit=22.5
+2024-08-31 18:19:53,482 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.01 vs. limit=10.0
+2024-08-31 18:20:12,168 INFO [train.py:1114] (0/4) Epoch 20, batch 1350, loss[loss=0.1951, simple_loss=0.2658, pruned_loss=0.04398, ctc_loss=0.09078, over 19761.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2695, pruned_loss=0.04808, ctc_loss=0.0906, over 3855646.42 frames. ], batch size: 54, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:20:34,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=259488.0, ans=0.0
+2024-08-31 18:20:38,777 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 1.677e+02 1.917e+02 2.382e+02 4.193e+02, threshold=3.834e+02, percent-clipped=5.0
+2024-08-31 18:20:51,510 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:21:07,780 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.43 vs. limit=22.5
+2024-08-31 18:21:08,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=259648.0, ans=0.125
+2024-08-31 18:21:14,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=259648.0, ans=0.025
+2024-08-31 18:21:16,776 INFO [train.py:1114] (0/4) Epoch 20, batch 1400, loss[loss=0.1715, simple_loss=0.2359, pruned_loss=0.03902, ctc_loss=0.07262, over 19682.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2695, pruned_loss=0.04806, ctc_loss=0.09038, over 3862772.72 frames. ], batch size: 46, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:21:24,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=259701.33333333334, ans=0.2
+2024-08-31 18:21:53,135 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.18 vs. limit=22.5
+2024-08-31 18:22:00,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=259861.33333333334, ans=0.0
+2024-08-31 18:22:01,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=259861.33333333334, ans=0.125
+2024-08-31 18:22:50,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=259914.66666666666, ans=0.2
+2024-08-31 18:22:53,581 INFO [train.py:1114] (0/4) Epoch 20, batch 1450, loss[loss=0.2078, simple_loss=0.2759, pruned_loss=0.05122, ctc_loss=0.09322, over 19667.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2701, pruned_loss=0.04832, ctc_loss=0.09097, over 3861785.17 frames. ], batch size: 63, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:23:06,409 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.32 vs. limit=15.0
+2024-08-31 18:23:17,231 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.776e+02 2.029e+02 2.458e+02 5.712e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-31 18:23:27,415 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=260074.66666666666, ans=0.0
+2024-08-31 18:23:38,813 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=260128.0, ans=0.1
+2024-08-31 18:23:47,690 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.65 vs. limit=22.5
+2024-08-31 18:23:54,002 INFO [train.py:1114] (0/4) Epoch 20, batch 1500, loss[loss=0.2057, simple_loss=0.2798, pruned_loss=0.04819, ctc_loss=0.0882, over 19584.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2703, pruned_loss=0.04827, ctc_loss=0.09099, over 3861372.31 frames. ], batch size: 57, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:24:14,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=260288.0, ans=0.0
+2024-08-31 18:24:20,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=260341.33333333334, ans=0.0
+2024-08-31 18:24:21,778 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:25:19,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=260394.66666666666, ans=0.0
+2024-08-31 18:25:34,752 INFO [train.py:1114] (0/4) Epoch 20, batch 1550, loss[loss=0.2229, simple_loss=0.2908, pruned_loss=0.05664, ctc_loss=0.1043, over 19610.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2702, pruned_loss=0.04845, ctc_loss=0.09129, over 3846392.59 frames. ], batch size: 60, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:26:10,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=260501.33333333334, ans=0.1
+2024-08-31 18:26:24,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=260554.66666666666, ans=10.0
+2024-08-31 18:26:30,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=260554.66666666666, ans=0.04949747468305833
+2024-08-31 18:26:33,183 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.748e+02 2.049e+02 2.466e+02 3.855e+02, threshold=4.097e+02, percent-clipped=0.0
+2024-08-31 18:27:18,517 INFO [train.py:1114] (0/4) Epoch 20, batch 1600, loss[loss=0.2263, simple_loss=0.293, pruned_loss=0.05743, ctc_loss=0.1118, over 19843.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.27, pruned_loss=0.04852, ctc_loss=0.09162, over 3834899.02 frames. ], batch size: 57, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:27:33,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=260821.33333333334, ans=0.0
+2024-08-31 18:27:47,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=260874.66666666666, ans=0.125
+2024-08-31 18:27:55,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=260928.0, ans=0.2
+2024-08-31 18:28:10,039 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=260981.33333333334, ans=0.125
+2024-08-31 18:28:30,409 INFO [train.py:1114] (0/4) Epoch 20, batch 1650, loss[loss=0.1896, simple_loss=0.271, pruned_loss=0.03898, ctc_loss=0.0756, over 19656.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.27, pruned_loss=0.0483, ctc_loss=0.09121, over 3830835.84 frames. ], batch size: 59, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:28:41,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=261088.0, ans=0.2
+2024-08-31 18:28:53,153 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.719e+02 2.026e+02 2.553e+02 4.958e+02, threshold=4.052e+02, percent-clipped=3.0
+2024-08-31 18:28:54,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.79 vs. limit=15.0
+2024-08-31 18:29:21,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=261248.0, ans=0.0
+2024-08-31 18:29:27,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=261248.0, ans=0.04949747468305833
+2024-08-31 18:29:29,537 INFO [train.py:1114] (0/4) Epoch 20, batch 1700, loss[loss=0.1908, simple_loss=0.2448, pruned_loss=0.04995, ctc_loss=0.09213, over 19688.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.27, pruned_loss=0.04829, ctc_loss=0.09112, over 3845161.14 frames. ], batch size: 46, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:29:34,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten.whitening_limit, batch_count=261301.33333333334, ans=15.0
+2024-08-31 18:29:34,972 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.30 vs. limit=15.0
+2024-08-31 18:29:51,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=261354.66666666666, ans=0.2
+2024-08-31 18:30:15,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=261461.33333333334, ans=10.0
+2024-08-31 18:31:14,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=261514.66666666666, ans=0.125
+2024-08-31 18:31:18,021 INFO [train.py:1114] (0/4) Epoch 20, batch 1750, loss[loss=0.1716, simple_loss=0.2408, pruned_loss=0.03694, ctc_loss=0.07129, over 19683.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2691, pruned_loss=0.04786, ctc_loss=0.09037, over 3849342.05 frames. ], batch size: 45, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:31:31,004 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.30 vs. limit=6.0
+2024-08-31 18:31:31,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=261621.33333333334, ans=0.125
+2024-08-31 18:31:39,996 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.679e+02 1.951e+02 2.329e+02 4.159e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-31 18:31:52,940 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.89 vs. limit=15.0
+2024-08-31 18:31:54,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=261728.0, ans=0.125
+2024-08-31 18:32:13,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=261781.33333333334, ans=0.125
+2024-08-31 18:32:13,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=261781.33333333334, ans=0.0
+2024-08-31 18:32:15,183 INFO [train.py:1114] (0/4) Epoch 20, batch 1800, loss[loss=0.2031, simple_loss=0.276, pruned_loss=0.04674, ctc_loss=0.09174, over 19608.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2698, pruned_loss=0.04839, ctc_loss=0.09144, over 3852540.42 frames. ], batch size: 55, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:32:36,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=261888.0, ans=0.125
+2024-08-31 18:33:16,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=261994.66666666666, ans=0.0
+2024-08-31 18:33:20,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=261994.66666666666, ans=0.2
+2024-08-31 18:33:24,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=262048.0, ans=0.1
+2024-08-31 18:33:34,515 INFO [train.py:1114] (0/4) Epoch 20, batch 1850, loss[loss=0.2141, simple_loss=0.2863, pruned_loss=0.05172, ctc_loss=0.09602, over 19577.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2696, pruned_loss=0.04817, ctc_loss=0.091, over 3856244.40 frames. ], batch size: 57, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:33:47,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=262154.6666666667, ans=0.125
+2024-08-31 18:33:56,006 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.842e+02 2.206e+02 3.038e+02 4.306e+02, threshold=4.411e+02, percent-clipped=5.0
+2024-08-31 18:33:57,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=262208.0, ans=0.1
+2024-08-31 18:33:57,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=262208.0, ans=0.125
+2024-08-31 18:34:12,666 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=262261.3333333333, ans=0.125
+2024-08-31 18:34:29,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=262314.6666666667, ans=0.2
+2024-08-31 18:34:36,224 INFO [train.py:1114] (0/4) Epoch 20, batch 1900, loss[loss=0.1796, simple_loss=0.2639, pruned_loss=0.0344, ctc_loss=0.06637, over 19631.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2698, pruned_loss=0.04805, ctc_loss=0.09072, over 3861887.47 frames. ], batch size: 59, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:34:37,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=262368.0, ans=0.0
+2024-08-31 18:34:39,984 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=262368.0, ans=0.125
+2024-08-31 18:34:42,443 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=5.308e-03
+2024-08-31 18:35:04,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=262474.6666666667, ans=0.125
+2024-08-31 18:35:30,449 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.35 vs. limit=15.0
+2024-08-31 18:35:34,437 INFO [train.py:1114] (0/4) Epoch 20, batch 1950, loss[loss=0.1728, simple_loss=0.2518, pruned_loss=0.03434, ctc_loss=0.06314, over 19595.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2705, pruned_loss=0.04799, ctc_loss=0.09056, over 3870448.19 frames. ], batch size: 52, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:35:40,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=262634.6666666667, ans=0.125
+2024-08-31 18:35:50,557 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.45 vs. limit=15.0
+2024-08-31 18:35:55,619 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.650e+02 1.780e+02 2.101e+02 3.496e+02, threshold=3.560e+02, percent-clipped=0.0
+2024-08-31 18:35:55,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=262741.3333333333, ans=0.5
+2024-08-31 18:36:09,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=262794.6666666667, ans=0.125
+2024-08-31 18:36:19,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=262848.0, ans=0.0
+2024-08-31 18:36:23,570 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.27 vs. limit=22.5
+2024-08-31 18:36:25,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=262848.0, ans=0.0
+2024-08-31 18:36:29,411 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.13 vs. limit=10.0
+2024-08-31 18:36:31,281 INFO [train.py:1114] (0/4) Epoch 20, batch 2000, loss[loss=0.1899, simple_loss=0.2517, pruned_loss=0.04571, ctc_loss=0.09178, over 19654.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.271, pruned_loss=0.04819, ctc_loss=0.09107, over 3855021.75 frames. ], batch size: 45, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:36:45,418 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:37:02,717 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.98 vs. limit=10.0
+2024-08-31 18:37:05,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=263008.0, ans=0.125
+2024-08-31 18:37:14,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=263061.3333333333, ans=0.125
+2024-08-31 18:37:31,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=263168.0, ans=0.0
+2024-08-31 18:37:32,649 INFO [train.py:1114] (0/4) Epoch 20, batch 2050, loss[loss=0.1669, simple_loss=0.2355, pruned_loss=0.0358, ctc_loss=0.0668, over 19708.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.27, pruned_loss=0.04815, ctc_loss=0.09072, over 3851251.97 frames. ], batch size: 47, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:37:52,855 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=263221.3333333333, ans=0.0
+2024-08-31 18:38:02,077 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.724e+02 2.041e+02 2.585e+02 3.821e+02, threshold=4.082e+02, percent-clipped=5.0
+2024-08-31 18:38:24,982 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=7.82 vs. limit=15.0
+2024-08-31 18:38:36,463 INFO [train.py:1114] (0/4) Epoch 20, batch 2100, loss[loss=0.1875, simple_loss=0.2571, pruned_loss=0.04325, ctc_loss=0.07879, over 19771.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2691, pruned_loss=0.04769, ctc_loss=0.08996, over 3858598.14 frames. ], batch size: 54, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:38:36,718 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=263434.6666666667, ans=10.0
+2024-08-31 18:39:10,006 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.19 vs. limit=15.0
+2024-08-31 18:39:17,734 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.08 vs. limit=10.0
+2024-08-31 18:39:22,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=263648.0, ans=0.2
+2024-08-31 18:39:29,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=263648.0, ans=0.1
+2024-08-31 18:39:32,896 INFO [train.py:1114] (0/4) Epoch 20, batch 2150, loss[loss=0.2061, simple_loss=0.2733, pruned_loss=0.0505, ctc_loss=0.09496, over 19570.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2683, pruned_loss=0.04735, ctc_loss=0.08904, over 3868795.35 frames. ], batch size: 52, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:39:49,883 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:39:58,526 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.627e+02 1.896e+02 2.393e+02 5.058e+02, threshold=3.792e+02, percent-clipped=5.0
+2024-08-31 18:40:02,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=263808.0, ans=0.125
+2024-08-31 18:40:02,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=263808.0, ans=0.0
+2024-08-31 18:40:04,811 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.90 vs. limit=6.0
+2024-08-31 18:41:03,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=263914.6666666667, ans=0.025
+2024-08-31 18:41:08,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=263968.0, ans=0.2
+2024-08-31 18:41:08,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=263968.0, ans=0.125
+2024-08-31 18:41:09,870 INFO [train.py:1114] (0/4) Epoch 20, batch 2200, loss[loss=0.1812, simple_loss=0.2603, pruned_loss=0.0368, ctc_loss=0.07123, over 19581.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.268, pruned_loss=0.0471, ctc_loss=0.08845, over 3867840.37 frames. ], batch size: 57, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:41:27,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=264021.3333333333, ans=15.0
+2024-08-31 18:41:27,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=264021.3333333333, ans=0.025
+2024-08-31 18:41:35,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=264021.3333333333, ans=0.125
+2024-08-31 18:42:17,185 INFO [train.py:1114] (0/4) Epoch 20, batch 2250, loss[loss=0.2105, simple_loss=0.2782, pruned_loss=0.05194, ctc_loss=0.09704, over 19618.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2684, pruned_loss=0.04737, ctc_loss=0.08891, over 3866799.94 frames. ], batch size: 55, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:42:23,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=264234.6666666667, ans=0.0
+2024-08-31 18:42:31,583 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=264288.0, ans=0.04949747468305833
+2024-08-31 18:42:35,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=264288.0, ans=0.0
+2024-08-31 18:42:42,058 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.705e+02 2.149e+02 2.747e+02 5.291e+02, threshold=4.298e+02, percent-clipped=7.0
+2024-08-31 18:42:54,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=264394.6666666667, ans=0.125
+2024-08-31 18:42:54,705 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:43:03,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=264394.6666666667, ans=0.0
+2024-08-31 18:43:04,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=264448.0, ans=0.09899494936611666
+2024-08-31 18:43:07,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=264448.0, ans=0.2
+2024-08-31 18:43:07,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=264448.0, ans=0.0
+2024-08-31 18:43:16,654 INFO [train.py:1114] (0/4) Epoch 20, batch 2300, loss[loss=0.1914, simple_loss=0.262, pruned_loss=0.04385, ctc_loss=0.08277, over 19496.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2683, pruned_loss=0.04765, ctc_loss=0.08977, over 3859390.45 frames. ], batch size: 49, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:43:29,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=264554.6666666667, ans=0.1
+2024-08-31 18:43:29,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=264554.6666666667, ans=10.0
+2024-08-31 18:43:34,635 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.30 vs. limit=12.0
+2024-08-31 18:43:37,945 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.41 vs. limit=15.0
+2024-08-31 18:43:43,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=264608.0, ans=0.125
+2024-08-31 18:43:52,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=264661.3333333333, ans=0.0
+2024-08-31 18:43:54,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=264661.3333333333, ans=0.1
+2024-08-31 18:43:59,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=264661.3333333333, ans=0.125
+2024-08-31 18:44:02,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=264714.6666666667, ans=0.125
+2024-08-31 18:44:02,493 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.79 vs. limit=15.0
+2024-08-31 18:44:04,193 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=264714.6666666667, ans=0.2
+2024-08-31 18:44:09,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=264714.6666666667, ans=0.035
+2024-08-31 18:44:10,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=264714.6666666667, ans=0.0
+2024-08-31 18:44:12,809 INFO [train.py:1114] (0/4) Epoch 20, batch 2350, loss[loss=0.2247, simple_loss=0.2873, pruned_loss=0.05931, ctc_loss=0.1085, over 19669.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2683, pruned_loss=0.04781, ctc_loss=0.08982, over 3862188.44 frames. ], batch size: 63, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:44:19,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=264768.0, ans=0.125
+2024-08-31 18:44:32,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=264821.3333333333, ans=0.0
+2024-08-31 18:44:47,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=264821.3333333333, ans=0.0
+2024-08-31 18:44:49,416 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.669e+02 1.905e+02 2.325e+02 3.822e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-31 18:45:10,527 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=264928.0, ans=0.125
+2024-08-31 18:45:14,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=264981.3333333333, ans=0.125
+2024-08-31 18:45:24,108 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.90 vs. limit=6.0
+2024-08-31 18:45:26,860 INFO [train.py:1114] (0/4) Epoch 20, batch 2400, loss[loss=0.2131, simple_loss=0.2865, pruned_loss=0.05141, ctc_loss=0.09208, over 19336.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2704, pruned_loss=0.04856, ctc_loss=0.09105, over 3857312.51 frames. ], batch size: 67, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:45:35,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=265034.6666666667, ans=0.125
+2024-08-31 18:45:49,469 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:46:02,492 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.01 vs. limit=15.0
+2024-08-31 18:46:15,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.66 vs. limit=15.0
+2024-08-31 18:46:18,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=265248.0, ans=0.1
+2024-08-31 18:46:21,586 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.13 vs. limit=22.5
+2024-08-31 18:46:23,878 INFO [train.py:1114] (0/4) Epoch 20, batch 2450, loss[loss=0.2568, simple_loss=0.3013, pruned_loss=0.07732, ctc_loss=0.1444, over 13541.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2743, pruned_loss=0.05117, ctc_loss=0.09641, over 3731858.51 frames. ], batch size: 141, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:46:29,247 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=10.73 vs. limit=15.0
+2024-08-31 18:46:36,910 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=265354.6666666667, ans=0.1
+2024-08-31 18:46:38,422 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.26 vs. limit=15.0
+2024-08-31 18:46:43,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=265354.6666666667, ans=0.04949747468305833
+2024-08-31 18:46:45,870 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.663e+02 1.874e+02 2.086e+02 3.013e+02, threshold=3.749e+02, percent-clipped=0.0
+2024-08-31 18:46:47,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=265408.0, ans=0.125
+2024-08-31 18:46:55,859 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.02 vs. limit=6.0
+2024-08-31 18:47:05,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=265461.3333333333, ans=0.07
+2024-08-31 18:47:07,547 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-20.pt
+2024-08-31 18:50:18,403 INFO [train.py:1387] (0/4) Done!
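
The surrounding log files in this commit are unmodified per-rank icefall training output; the `[train.py:1114]` lines carry the per-batch summaries (current-batch loss, running `tot_loss`, batch size, learning rate, grad scale), while the `scaling.py` and `optim.py` lines report per-module diagnostics and gradient clipping. A minimal sketch for recovering the learning curve from those summary lines might look like the following; the regex, `parse_log`, and the file name in the usage stanza are illustrative and assume only the line format visible above, not any icefall API:

```python
import re
from pathlib import Path

# Matches the per-batch summary lines, e.g.
# "... INFO [train.py:1114] (0/4) Epoch 20, batch 2400, loss[...],
#  tot_loss[loss=0.202, ...], batch size: 67, lr: 7.40e-03, grad_scale: 32.0"
PATTERN = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), .*"
    r"tot_loss\[loss=(?P<loss>[\d.]+), .*"
    r"lr: (?P<lr>[\d.e+-]+)"
)

def parse_log(path):
    """Yield (epoch, batch, tot_loss, lr) from one rank's training log."""
    for line in Path(path).read_text().splitlines():
        m = PATTERN.search(line)
        # scaling/optim lines lack "Epoch ..., batch ..." and validation
        # lines lack "batch N", so both are skipped here
        if m:
            yield (int(m["epoch"]), int(m["batch"]),
                   float(m["loss"]), float(m["lr"]))

if __name__ == "__main__":
    # Hypothetical invocation; point it at one of the log files added here.
    for epoch, batch, loss, lr in parse_log("log-train-2024-08-31-13-15-01-1"):
        print(f"epoch {epoch:3d}  batch {batch:5d}  tot_loss {loss:.4f}  lr {lr:.2e}")
```

The extracted tuples can then be plotted (e.g. `tot_loss` against batch index) to compare the ranks' curves without scrolling the raw streams.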
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-1 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-1
new file mode 100644
index 0000000000000000000000000000000000000000..ba19f760f9262ffb9db9cbd9f254e9d3e9b4a09e
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-1
@@ -0,0 +1,1030 @@
+2024-08-31 13:15:01,250 INFO [train.py:1182] (1/4) Training started
+2024-08-31 13:15:01,251 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-31 13:15:02,197 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-31 13:15:02,197 INFO [train.py:1212] (1/4) About to create model
+2024-08-31 13:15:10,397 INFO [train.py:1216] (1/4) Number of model parameters: 66367431
+2024-08-31 13:15:10,438 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-31 13:16:01,653 INFO [train.py:1231] (1/4) Using DDP
+2024-08-31 13:16:07,014 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-31 13:16:07,183 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-31 13:16:07,183 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-31 13:16:07,277 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-31 13:16:07,277 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-31 13:16:07,277 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-31 13:16:07,277 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-31 13:16:07,277 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-31 13:16:07,277 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-31 13:16:08,889 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-31 13:16:08,891 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-31 13:16:09,671 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-31 13:16:10,141 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-31 13:16:10,463 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-31 13:16:10,464 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:22:40,297 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=3.35 vs. limit=3.0
+2024-08-31 13:22:43,898 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13363MB
+2024-08-31 13:22:45,382 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-31 13:23:02,022 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-31 13:23:03,533 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-31 13:24:12,098 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-31 13:24:13,264 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=9.29 vs. limit=5.0
+2024-08-31 13:24:13,685 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-31 13:24:13,707 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-31 13:25:06,949 INFO [train.py:1114] (1/4) Epoch 18, batch 0, loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04853, ctc_loss=0.09187, over 19410.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2527, pruned_loss=0.04853, ctc_loss=0.09187, over 19410.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 13:25:06,950 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-31 13:25:49,897 INFO [train.py:1146] (1/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-31 13:25:49,898 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13490MB
+2024-08-31 13:25:51,798 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.51 vs. limit=15.0
+2024-08-31 13:27:19,496 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.71 vs. limit=6.0
+2024-08-31 13:32:00,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=225733.33333333334, ans=0.0
+2024-08-31 13:35:15,721 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.62 vs. limit=10.0
+2024-08-31 13:44:22,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=225840.0, ans=0.125
+2024-08-31 13:48:08,452 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.31 vs. limit=12.0
+2024-08-31 13:48:13,677 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.934e+02 2.118e+02 2.433e+02 6.228e+02, threshold=4.237e+02, percent-clipped=5.0
+2024-08-31 13:56:46,308 INFO [train.py:1114] (1/4) Epoch 18, batch 50, loss[loss=0.1581, simple_loss=0.2327, pruned_loss=0.03018, ctc_loss=0.05788, over 19735.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2748, pruned_loss=0.05185, ctc_loss=0.09917, over 843693.80 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 13:56:58,782 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.86 vs. limit=6.0
+2024-08-31 13:57:42,689 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 13:57:42,767 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=225946.66666666666, ans=0.125
+2024-08-31 14:00:54,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226000.0, ans=0.125
+2024-08-31 14:08:12,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=226106.66666666666, ans=0.95
+2024-08-31 14:10:04,285 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.05 vs. limit=15.0
+2024-08-31 14:13:50,706 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=226160.0, ans=0.125
+2024-08-31 14:13:52,350 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.15 vs. limit=15.0
+2024-08-31 14:15:00,240 INFO [train.py:1114] (1/4) Epoch 18, batch 100, loss[loss=0.2118, simple_loss=0.2736, pruned_loss=0.05532, ctc_loss=0.09851, over 19719.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.278, pruned_loss=0.05344, ctc_loss=0.1018, over 1498452.85 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:16:32,436 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.25 vs. limit=15.0
+2024-08-31 14:17:26,789 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.61 vs. limit=15.0
+2024-08-31 14:26:16,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=226320.0, ans=0.2
+2024-08-31 14:28:46,390 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 14:28:55,982 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-31 14:30:19,336 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=226373.33333333334, ans=0.0
+2024-08-31 14:32:23,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=226426.66666666666, ans=0.0
+2024-08-31 14:32:51,601 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.685e+02 1.949e+02 2.332e+02 3.525e+02, threshold=3.898e+02, percent-clipped=0.0
+2024-08-31 14:34:13,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=226480.0, ans=0.125
+2024-08-31 14:34:38,796 INFO [train.py:1114] (1/4) Epoch 18, batch 150, loss[loss=0.1912, simple_loss=0.2527, pruned_loss=0.04776, ctc_loss=0.0857, over 19694.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2756, pruned_loss=0.05262, ctc_loss=0.09994, over 2027899.17 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:43:39,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-31 14:44:39,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=226533.33333333334, ans=0.025
+2024-08-31 14:47:14,631 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=226586.66666666666, ans=0.2
+2024-08-31 14:57:50,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=226693.33333333334, ans=0.125
+2024-08-31 15:01:40,800 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:05:15,400 INFO [train.py:1114] (1/4) Epoch 18, batch 200, loss[loss=0.2068, simple_loss=0.2791, pruned_loss=0.04895, ctc_loss=0.0915, over 18231.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2741, pruned_loss=0.05166, ctc_loss=0.09776, over 2435641.98 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:05:49,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=226746.66666666666, ans=0.125
+2024-08-31 15:07:59,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=226800.0, ans=0.125
+2024-08-31 15:08:19,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=226800.0, ans=0.125
+2024-08-31 15:15:21,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-31 15:17:44,784 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.761e+02 2.086e+02 2.524e+02 4.159e+02, threshold=4.172e+02, percent-clipped=2.0
+2024-08-31 15:17:59,767 INFO [train.py:1114] (1/4) Epoch 18, batch 250, loss[loss=0.2447, simple_loss=0.3054, pruned_loss=0.06604, ctc_loss=0.1299, over 19375.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2741, pruned_loss=0.05152, ctc_loss=0.09743, over 2757155.58 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:19:02,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=227013.33333333334, ans=0.025
+2024-08-31 15:19:03,356 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.87 vs. limit=22.5
+2024-08-31 15:19:04,235 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.89 vs. limit=22.5
+2024-08-31 15:19:50,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=227013.33333333334, ans=0.125
+2024-08-31 15:20:38,212 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.12 vs. limit=15.0
+2024-08-31 15:22:03,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=227120.0, ans=0.1
+2024-08-31 15:22:34,723 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.35 vs. limit=10.0
+2024-08-31 15:23:39,296 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.47 vs. limit=15.0
+2024-08-31 15:24:04,304 INFO [train.py:1114] (1/4) Epoch 18, batch 300, loss[loss=0.2333, simple_loss=0.2905, pruned_loss=0.06308, ctc_loss=0.1249, over 19505.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2735, pruned_loss=0.05131, ctc_loss=0.09706, over 3002465.67 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:24:20,886 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.93 vs. limit=22.5
+2024-08-31 15:25:12,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=227333.33333333334, ans=0.125
+2024-08-31 15:28:01,309 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.57 vs. limit=15.0
+2024-08-31 15:28:07,123 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.43 vs. limit=15.0
+2024-08-31 15:29:15,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=227440.0, ans=0.025
+2024-08-31 15:30:47,328 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.680e+02 1.932e+02 2.386e+02 3.920e+02, threshold=3.864e+02, percent-clipped=0.0
+2024-08-31 15:31:02,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=227493.33333333334, ans=0.125
+2024-08-31 15:31:46,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=227546.66666666666, ans=0.025
+2024-08-31 15:31:47,659 INFO [train.py:1114] (1/4) Epoch 18, batch 350, loss[loss=0.1831, simple_loss=0.2466, pruned_loss=0.0438, ctc_loss=0.07996, over 19745.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2738, pruned_loss=0.05127, ctc_loss=0.09692, over 3192448.38 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:32:19,561 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.74 vs. limit=22.5
+2024-08-31 15:32:35,677 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.12 vs. limit=12.0
+2024-08-31 15:33:38,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=227600.0, ans=0.2
+2024-08-31 15:33:50,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=227653.33333333334, ans=0.0
+2024-08-31 15:33:59,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=227706.66666666666, ans=0.125
+2024-08-31 15:34:01,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=227706.66666666666, ans=0.125
+2024-08-31 15:34:57,669 INFO [train.py:1114] (1/4) Epoch 18, batch 400, loss[loss=0.2276, simple_loss=0.2889, pruned_loss=0.06065, ctc_loss=0.1126, over 19499.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2736, pruned_loss=0.05113, ctc_loss=0.09642, over 3342545.79 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:35:13,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=227813.33333333334, ans=0.125
+2024-08-31 15:36:52,695 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.50 vs. limit=12.0
+2024-08-31 15:37:11,053 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.718e+02 1.967e+02 2.336e+02 3.401e+02, threshold=3.934e+02, percent-clipped=0.0
+2024-08-31 15:37:37,963 INFO [train.py:1114] (1/4) Epoch 18, batch 450, loss[loss=0.2014, simple_loss=0.2795, pruned_loss=0.04476, ctc_loss=0.08432, over 19601.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2738, pruned_loss=0.05139, ctc_loss=0.09667, over 3451509.51 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:39:33,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228186.66666666666, ans=0.1
+2024-08-31 15:39:36,463 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.06 vs. limit=15.0
+2024-08-31 15:39:46,369 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:39:56,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=228240.0, ans=0.125
+2024-08-31 15:40:13,729 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.56 vs. limit=22.5
+2024-08-31 15:40:18,507 INFO [train.py:1114] (1/4) Epoch 18, batch 500, loss[loss=0.21, simple_loss=0.285, pruned_loss=0.05015, ctc_loss=0.08691, over 19696.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2728, pruned_loss=0.05088, ctc_loss=0.09594, over 3546801.94 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:40:22,351 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.84 vs. limit=15.0
+2024-08-31 15:40:34,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=228400.0, ans=0.125
+2024-08-31 15:40:38,793 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=228400.0, ans=0.05
+2024-08-31 15:40:39,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=228400.0, ans=0.0
+2024-08-31 15:40:57,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=228506.66666666666, ans=0.09899494936611666
+2024-08-31 15:40:59,812 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.17 vs. limit=6.0
+2024-08-31 15:41:10,338 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.618e+02 1.812e+02 2.329e+02 3.946e+02, threshold=3.624e+02, percent-clipped=1.0
+2024-08-31 15:41:15,347 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=228560.0, ans=0.0
+2024-08-31 15:41:17,480 INFO [train.py:1114] (1/4) Epoch 18, batch 550, loss[loss=0.2307, simple_loss=0.2951, pruned_loss=0.06066, ctc_loss=0.1124, over 19259.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.273, pruned_loss=0.05118, ctc_loss=0.09636, over 3609220.25 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:41:24,098 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.89 vs. limit=15.0
+2024-08-31 15:42:35,777 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:44:01,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=228826.66666666666, ans=0.125
+2024-08-31 15:44:16,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=228826.66666666666, ans=0.125
+2024-08-31 15:44:18,830 INFO [train.py:1114] (1/4) Epoch 18, batch 600, loss[loss=0.2344, simple_loss=0.3042, pruned_loss=0.06002, ctc_loss=0.1114, over 19439.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2734, pruned_loss=0.05124, ctc_loss=0.09633, over 3667203.93 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:44:29,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=228880.0, ans=0.0
+2024-08-31 15:45:03,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=228986.66666666666, ans=0.09899494936611666
+2024-08-31 15:45:10,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.76 vs. limit=8.0
+2024-08-31 15:45:12,648 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.35 vs. limit=10.0
+2024-08-31 15:45:16,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=229040.0, ans=0.125
+2024-08-31 15:45:28,765 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.735e+02 2.092e+02 3.203e+02 5.009e+02, threshold=4.184e+02, percent-clipped=13.0
+2024-08-31 15:45:38,300 INFO [train.py:1114] (1/4) Epoch 18, batch 650, loss[loss=0.2054, simple_loss=0.2752, pruned_loss=0.04922, ctc_loss=0.09283, over 19779.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2724, pruned_loss=0.05078, ctc_loss=0.09546, over 3716775.23 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:46:25,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=229200.0, ans=0.125
+2024-08-31 15:46:37,100 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=229200.0, ans=0.2
+2024-08-31 15:46:40,816 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=229253.33333333334, ans=0.125
+2024-08-31 15:46:50,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-31 15:46:53,998 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.76 vs. limit=22.5
+2024-08-31 15:46:55,991 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:46:59,999 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:47:14,857 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.36 vs. limit=15.0
+2024-08-31 15:47:16,575 INFO [train.py:1114] (1/4) Epoch 18, batch 700, loss[loss=0.1961, simple_loss=0.2617, pruned_loss=0.0477, ctc_loss=0.08768, over 19707.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2727, pruned_loss=0.05073, ctc_loss=0.09538, over 3748465.26 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:47:21,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=229413.33333333334, ans=0.125
+2024-08-31 15:47:25,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=229413.33333333334, ans=0.025
+2024-08-31 15:48:10,583 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.672e+02 1.935e+02 2.401e+02 4.868e+02, threshold=3.870e+02, percent-clipped=1.0
+2024-08-31 15:48:16,539 INFO [train.py:1114] (1/4) Epoch 18, batch 750, loss[loss=0.2018, simple_loss=0.2768, pruned_loss=0.0469, ctc_loss=0.08244, over 19498.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2724, pruned_loss=0.05053, ctc_loss=0.09508, over 3775010.57 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:48:25,716 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.20 vs. limit=15.0
+2024-08-31 15:48:28,665 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=229733.33333333334, ans=0.0
+2024-08-31 15:48:55,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.93 vs. limit=15.0
+2024-08-31 15:48:56,736 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.71 vs. limit=15.0
+2024-08-31 15:49:00,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=229786.66666666666, ans=0.0
+2024-08-31 15:49:10,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=229840.0, ans=0.2
+2024-08-31 15:49:15,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=229893.33333333334, ans=0.0
+2024-08-31 15:49:28,026 INFO [train.py:1114] (1/4) Epoch 18, batch 800, loss[loss=0.1961, simple_loss=0.2641, pruned_loss=0.04711, ctc_loss=0.0848, over 19409.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2721, pruned_loss=0.05015, ctc_loss=0.09452, over 3795416.46 frames. ], batch size: 48, lr: 8.37e-03, grad_scale: 32.0
+2024-08-31 15:49:30,587 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=229946.66666666666, ans=0.125
+2024-08-31 15:49:31,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=229946.66666666666, ans=0.125
+2024-08-31 15:49:31,978 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=229946.66666666666, ans=22.5
+2024-08-31 15:49:56,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=230053.33333333334, ans=0.125
+2024-08-31 15:50:27,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=230160.0, ans=0.025
+2024-08-31 15:50:27,775 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.682e+02 1.957e+02 2.333e+02 3.697e+02, threshold=3.913e+02, percent-clipped=0.0
+2024-08-31 15:50:33,685 INFO [train.py:1114] (1/4) Epoch 18, batch 850, loss[loss=0.2088, simple_loss=0.2816, pruned_loss=0.04877, ctc_loss=0.09612, over 19648.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2722, pruned_loss=0.05024, ctc_loss=0.09467, over 3814604.01 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:50:38,333 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.13 vs. limit=15.0
+2024-08-31 15:51:18,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=230213.33333333334, ans=0.125
+2024-08-31 15:51:29,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=230266.66666666666, ans=0.125
+2024-08-31 15:51:30,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=230266.66666666666, ans=0.2
+2024-08-31 15:51:30,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=230266.66666666666, ans=0.125
+2024-08-31 15:51:35,318 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.03 vs. limit=22.5
+2024-08-31 15:52:04,381 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.85 vs. limit=12.0
+2024-08-31 15:52:15,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=230480.0, ans=0.5
+2024-08-31 15:52:15,925 INFO [train.py:1114] (1/4) Epoch 18, batch 900, loss[loss=0.1971, simple_loss=0.253, pruned_loss=0.05074, ctc_loss=0.09931, over 19809.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2724, pruned_loss=0.05049, ctc_loss=0.09529, over 3819625.95 frames. ], batch size: 49, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:52:16,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=230480.0, ans=0.2
+2024-08-31 15:52:50,565 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.98 vs. limit=15.0
+2024-08-31 15:53:02,126 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.79 vs. limit=15.0
+2024-08-31 15:53:12,032 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.645e+02 1.872e+02 2.411e+02 3.930e+02, threshold=3.745e+02, percent-clipped=1.0
+2024-08-31 15:53:45,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=230746.66666666666, ans=0.125
+2024-08-31 15:53:46,107 INFO [train.py:1114] (1/4) Epoch 18, batch 950, loss[loss=0.1914, simple_loss=0.2568, pruned_loss=0.04631, ctc_loss=0.08355, over 19518.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2729, pruned_loss=0.05103, ctc_loss=0.09632, over 3819326.72 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:53:46,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=230746.66666666666, ans=0.2
+2024-08-31 15:53:54,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=230746.66666666666, ans=0.125
+2024-08-31 15:54:41,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=230960.0, ans=0.125
+2024-08-31 15:54:48,314 INFO [train.py:1114] (1/4) Epoch 18, batch 1000, loss[loss=0.1849, simple_loss=0.254, pruned_loss=0.04222, ctc_loss=0.07855, over 19849.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2735, pruned_loss=0.05124, ctc_loss=0.09666, over 3815683.01 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:55:42,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=231173.33333333334, ans=0.125
+2024-08-31 15:55:50,436 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=231226.66666666666, ans=0.0
+2024-08-31 15:55:55,134 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.660e+02 1.836e+02 2.172e+02 3.389e+02, threshold=3.673e+02, percent-clipped=0.0
+2024-08-31 15:56:01,084 INFO [train.py:1114] (1/4) Epoch 18, batch 1050, loss[loss=0.1946, simple_loss=0.2695, pruned_loss=0.04314, ctc_loss=0.08355, over 19852.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.273, pruned_loss=0.05124, ctc_loss=0.09652, over 3823270.16 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:56:01,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=231280.0, ans=0.2
+2024-08-31 15:56:01,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231280.0, ans=0.1
+2024-08-31 15:56:08,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=231280.0, ans=10.0
+2024-08-31 15:56:31,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=231386.66666666666, ans=0.0
+2024-08-31 15:56:54,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.20 vs. limit=15.0
+2024-08-31 15:57:01,163 INFO [train.py:1114] (1/4) Epoch 18, batch 1100, loss[loss=0.191, simple_loss=0.2635, pruned_loss=0.04255, ctc_loss=0.08364, over 19594.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2724, pruned_loss=0.05063, ctc_loss=0.09553, over 3830526.83 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:57:01,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=231546.66666666666, ans=0.0
+2024-08-31 15:57:29,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=231653.33333333334, ans=0.025
+2024-08-31 15:57:30,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=231653.33333333334, ans=0.0
+2024-08-31 15:57:50,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=231706.66666666666, ans=0.5
+2024-08-31 15:57:58,312 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.608e+02 1.860e+02 2.284e+02 4.941e+02, threshold=3.719e+02, percent-clipped=1.0
+2024-08-31 15:58:04,200 INFO [train.py:1114] (1/4) Epoch 18, batch 1150, loss[loss=0.2214, simple_loss=0.2834, pruned_loss=0.0581, ctc_loss=0.1082, over 19588.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2725, pruned_loss=0.05063, ctc_loss=0.09569, over 3830167.85 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:58:04,881 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.43 vs. limit=12.0
+2024-08-31 15:58:16,372 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.12 vs. limit=15.0
+2024-08-31 15:58:31,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=231866.66666666666, ans=0.04949747468305833
+2024-08-31 15:58:45,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231920.0, ans=0.1
+2024-08-31 15:58:46,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=231920.0, ans=0.125
+2024-08-31 15:58:58,324 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:59:05,551 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=232026.66666666666, ans=0.125
+2024-08-31 15:59:17,267 INFO [train.py:1114] (1/4) Epoch 18, batch 1200, loss[loss=0.196, simple_loss=0.277, pruned_loss=0.04135, ctc_loss=0.08085, over 19831.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.274, pruned_loss=0.05098, ctc_loss=0.09645, over 3825669.59 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:59:28,841 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.25 vs. limit=15.0
+2024-08-31 15:59:34,589 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=232133.33333333334, ans=0.125
+2024-08-31 15:59:40,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=232186.66666666666, ans=0.125
+2024-08-31 16:00:07,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=232293.33333333334, ans=0.125
+2024-08-31 16:00:11,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=232293.33333333334, ans=0.2
+2024-08-31 16:00:12,199 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.681e+02 1.869e+02 2.236e+02 3.755e+02, threshold=3.738e+02, percent-clipped=1.0
+2024-08-31 16:00:12,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=232293.33333333334, ans=0.125
+2024-08-31 16:00:18,295 INFO [train.py:1114] (1/4) Epoch 18, batch 1250, loss[loss=0.2382, simple_loss=0.2988, pruned_loss=0.0645, ctc_loss=0.1214, over 19525.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2743, pruned_loss=0.05122, ctc_loss=0.09672, over 3842781.75 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:00:44,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232453.33333333334, ans=0.1
+2024-08-31 16:00:47,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=232453.33333333334, ans=0.1
+2024-08-31 16:00:54,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=232453.33333333334, ans=0.2
+2024-08-31 16:01:22,428 INFO [train.py:1114] (1/4) Epoch 18, batch 1300, loss[loss=0.2103, simple_loss=0.2835, pruned_loss=0.0505, ctc_loss=0.09039, over 18867.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2733, pruned_loss=0.0508, ctc_loss=0.09586, over 3846959.32 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:01:27,335 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.58 vs. limit=12.0
+2024-08-31 16:01:55,345 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.01 vs. limit=10.0
+2024-08-31 16:02:05,655 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=4.815e-02
+2024-08-31 16:02:19,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=232826.66666666666, ans=0.125
+2024-08-31 16:02:21,669 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 1.758e+02 2.176e+02 2.645e+02 4.342e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-31 16:02:25,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=232826.66666666666, ans=0.0
+2024-08-31 16:02:27,608 INFO [train.py:1114] (1/4) Epoch 18, batch 1350, loss[loss=0.1755, simple_loss=0.2589, pruned_loss=0.03359, ctc_loss=0.06248, over 19771.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2733, pruned_loss=0.05085, ctc_loss=0.09578, over 3856082.84 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:03:05,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=233040.0, ans=0.2
+2024-08-31 16:03:06,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=233040.0, ans=0.125
+2024-08-31 16:03:29,598 INFO [train.py:1114] (1/4) Epoch 18, batch 1400, loss[loss=0.1683, simple_loss=0.2327, pruned_loss=0.03691, ctc_loss=0.07522, over 19659.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.273, pruned_loss=0.05064, ctc_loss=0.09553, over 3863302.16 frames. ], batch size: 46, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:03:48,111 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=233146.66666666666, ans=0.125
+2024-08-31 16:03:59,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=233200.0, ans=0.2
+2024-08-31 16:04:35,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=233360.0, ans=10.0
+2024-08-31 16:04:36,297 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.655e+02 1.916e+02 2.338e+02 3.956e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-31 16:04:38,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=233360.0, ans=0.1
+2024-08-31 16:04:39,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=233360.0, ans=15.0
+2024-08-31 16:04:42,282 INFO [train.py:1114] (1/4) Epoch 18, batch 1450, loss[loss=0.2286, simple_loss=0.2907, pruned_loss=0.06098, ctc_loss=0.1114, over 19656.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2737, pruned_loss=0.05098, ctc_loss=0.09617, over 3861502.71 frames. ], batch size: 63, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:04:55,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=233466.66666666666, ans=0.2
+2024-08-31 16:05:24,211 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.16 vs. limit=12.0
+2024-08-31 16:05:25,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=233573.33333333334, ans=0.2
+2024-08-31 16:05:27,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=233573.33333333334, ans=0.125
+2024-08-31 16:05:32,417 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:05:48,802 INFO [train.py:1114] (1/4) Epoch 18, batch 1500, loss[loss=0.2329, simple_loss=0.2935, pruned_loss=0.06136, ctc_loss=0.1239, over 19574.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2738, pruned_loss=0.05079, ctc_loss=0.09591, over 3860727.16 frames. ], batch size: 57, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:06:24,696 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=233786.66666666666, ans=0.125
+2024-08-31 16:06:25,426 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.54 vs. limit=10.0
+2024-08-31 16:06:49,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=233893.33333333334, ans=0.0
+2024-08-31 16:06:50,190 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.669e+02 1.866e+02 2.355e+02 3.552e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-31 16:07:06,040 INFO [train.py:1114] (1/4) Epoch 18, batch 1550, loss[loss=0.2042, simple_loss=0.2788, pruned_loss=0.0482, ctc_loss=0.08296, over 19621.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2738, pruned_loss=0.05112, ctc_loss=0.0965, over 3845155.05 frames. ], batch size: 60, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:07:24,155 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.49 vs. limit=15.0
+2024-08-31 16:07:31,203 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=234053.33333333334, ans=10.0
+2024-08-31 16:07:32,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=234053.33333333334, ans=0.0
+2024-08-31 16:07:33,551 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:07:39,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=234053.33333333334, ans=0.125
+2024-08-31 16:08:07,325 INFO [train.py:1114] (1/4) Epoch 18, batch 1600, loss[loss=0.2083, simple_loss=0.2824, pruned_loss=0.04838, ctc_loss=0.09364, over 19841.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2735, pruned_loss=0.0512, ctc_loss=0.09653, over 3834361.49 frames. ], batch size: 57, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:08:20,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=234266.66666666666, ans=0.0
+2024-08-31 16:08:21,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=234266.66666666666, ans=0.05
+2024-08-31 16:08:25,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=234266.66666666666, ans=0.125
+2024-08-31 16:08:27,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=234266.66666666666, ans=0.0
+2024-08-31 16:08:47,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=234373.33333333334, ans=0.2
+2024-08-31 16:09:20,625 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.787e+02 2.153e+02 2.672e+02 5.491e+02, threshold=4.305e+02, percent-clipped=8.0
+2024-08-31 16:09:22,693 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.71 vs. limit=22.5
+2024-08-31 16:09:26,597 INFO [train.py:1114] (1/4) Epoch 18, batch 1650, loss[loss=0.1947, simple_loss=0.2716, pruned_loss=0.04349, ctc_loss=0.07737, over 19662.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2728, pruned_loss=0.05074, ctc_loss=0.09569, over 3831307.67 frames. ], batch size: 59, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:09:28,869 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.88 vs. limit=15.0
+2024-08-31 16:09:29,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=234480.0, ans=0.125
+2024-08-31 16:09:31,171 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.11 vs. limit=15.0
+2024-08-31 16:13:02,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=234533.33333333334, ans=0.125
+2024-08-31 16:13:15,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=234533.33333333334, ans=0.125
+2024-08-31 16:13:32,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=234586.66666666666, ans=0.0
+2024-08-31 16:14:10,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=234693.33333333334, ans=0.025
+2024-08-31 16:14:15,791 INFO [train.py:1114] (1/4) Epoch 18, batch 1700, loss[loss=0.1806, simple_loss=0.2423, pruned_loss=0.04302, ctc_loss=0.08185, over 19665.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2729, pruned_loss=0.05066, ctc_loss=0.09536, over 3846215.94 frames. ], batch size: 46, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:14:42,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=234853.33333333334, ans=0.0
+2024-08-31 16:14:55,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=234906.66666666666, ans=0.04949747468305833
+2024-08-31 16:15:02,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=234960.0, ans=0.1
+2024-08-31 16:15:07,769 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.694e+02 2.038e+02 2.484e+02 5.869e+02, threshold=4.076e+02, percent-clipped=3.0
+2024-08-31 16:15:12,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=235013.33333333334, ans=0.2
+2024-08-31 16:15:13,565 INFO [train.py:1114] (1/4) Epoch 18, batch 1750, loss[loss=0.1797, simple_loss=0.2431, pruned_loss=0.04258, ctc_loss=0.07779, over 19628.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2725, pruned_loss=0.05047, ctc_loss=0.09498, over 3851893.90 frames. ], batch size: 45, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:15:43,647 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=235120.0, ans=0.0
+2024-08-31 16:15:53,682 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.96 vs. limit=15.0
+2024-08-31 16:15:54,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=235120.0, ans=0.125
+2024-08-31 16:15:56,527 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=235173.33333333334, ans=0.0
+2024-08-31 16:16:04,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=235173.33333333334, ans=0.125
+2024-08-31 16:16:09,200 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=235226.66666666666, ans=0.125
+2024-08-31 16:16:14,966 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=235226.66666666666, ans=0.1
+2024-08-31 16:16:18,932 INFO [train.py:1114] (1/4) Epoch 18, batch 1800, loss[loss=0.2024, simple_loss=0.2773, pruned_loss=0.04564, ctc_loss=0.09089, over 19601.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2728, pruned_loss=0.05048, ctc_loss=0.095, over 3852585.51 frames. ], batch size: 55, lr: 8.27e-03, grad_scale: 32.0
+2024-08-31 16:16:19,336 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.98 vs. limit=15.0
+2024-08-31 16:16:39,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=235333.33333333334, ans=0.1
+2024-08-31 16:16:44,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=235386.66666666666, ans=0.125
+2024-08-31 16:16:56,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=235440.0, ans=0.0
+2024-08-31 16:17:12,067 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.739e+02 2.099e+02 2.606e+02 4.220e+02, threshold=4.197e+02, percent-clipped=1.0
+2024-08-31 16:17:16,667 INFO [train.py:1114] (1/4) Epoch 18, batch 1850, loss[loss=0.2217, simple_loss=0.2861, pruned_loss=0.05678, ctc_loss=0.1093, over 19574.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2727, pruned_loss=0.05015, ctc_loss=0.09438, over 3855922.66 frames. ], batch size: 57, lr: 8.27e-03, grad_scale: 16.0
+2024-08-31 16:17:35,301 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.25 vs. limit=10.0
+2024-08-31 16:17:36,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=235600.0, ans=0.125
+2024-08-31 16:17:58,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=235706.66666666666, ans=0.125
+2024-08-31 16:18:05,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=235706.66666666666, ans=0.125
+2024-08-31 16:18:06,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=235706.66666666666, ans=0.125
+2024-08-31 16:18:21,111 INFO [train.py:1114] (1/4) Epoch 18, batch 1900, loss[loss=0.2238, simple_loss=0.2891, pruned_loss=0.05746, ctc_loss=0.109, over 19667.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2733, pruned_loss=0.05047, ctc_loss=0.095, over 3860504.63 frames. ], batch size: 59, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:18:35,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=235866.66666666666, ans=0.0
+2024-08-31 16:18:40,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=235866.66666666666, ans=0.1
+2024-08-31 16:18:42,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=235866.66666666666, ans=0.0
+2024-08-31 16:18:44,821 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:18:46,240 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.63 vs. limit=22.5
+2024-08-31 16:18:49,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=235920.0, ans=0.1
+2024-08-31 16:19:07,733 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.16 vs. limit=10.0
+2024-08-31 16:19:11,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=236026.66666666666, ans=0.2
+2024-08-31 16:19:14,248 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.296e+02 1.623e+02 1.837e+02 2.195e+02 5.135e+02, threshold=3.673e+02, percent-clipped=2.0
+2024-08-31 16:19:18,782 INFO [train.py:1114] (1/4) Epoch 18, batch 1950, loss[loss=0.2194, simple_loss=0.2845, pruned_loss=0.05676, ctc_loss=0.1018, over 19581.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.274, pruned_loss=0.05029, ctc_loss=0.09451, over 3869594.56 frames. ], batch size: 52, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:20:39,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=236186.66666666666, ans=0.0
+2024-08-31 16:20:42,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=236186.66666666666, ans=0.1
+2024-08-31 16:20:43,549 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.28 vs. limit=6.0
+2024-08-31 16:21:16,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=236293.33333333334, ans=0.125
+2024-08-31 16:21:20,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=236346.66666666666, ans=0.0
+2024-08-31 16:21:21,687 INFO [train.py:1114] (1/4) Epoch 18, batch 2000, loss[loss=0.1834, simple_loss=0.2447, pruned_loss=0.04456, ctc_loss=0.0826, over 19639.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2748, pruned_loss=0.05073, ctc_loss=0.09524, over 3853760.36 frames. ], batch size: 45, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:21:33,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=236400.0, ans=0.125
+2024-08-31 16:22:04,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=236506.66666666666, ans=0.2
+2024-08-31 16:22:14,728 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.400e+02 1.704e+02 2.096e+02 2.751e+02 4.638e+02, threshold=4.193e+02, percent-clipped=6.0
+2024-08-31 16:22:19,182 INFO [train.py:1114] (1/4) Epoch 18, batch 2050, loss[loss=0.1828, simple_loss=0.2455, pruned_loss=0.04419, ctc_loss=0.07938, over 19710.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2735, pruned_loss=0.05044, ctc_loss=0.09496, over 3849371.92 frames. ], batch size: 47, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:22:44,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=236720.0, ans=0.125
+2024-08-31 16:22:45,078 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:23:05,709 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=236773.33333333334, ans=0.125
+2024-08-31 16:23:12,203 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=236826.66666666666, ans=0.125
+2024-08-31 16:23:21,355 INFO [train.py:1114] (1/4) Epoch 18, batch 2100, loss[loss=0.1979, simple_loss=0.2727, pruned_loss=0.04538, ctc_loss=0.08077, over 19787.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2726, pruned_loss=0.04995, ctc_loss=0.09404, over 3857332.35 frames. ], batch size: 54, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:23:24,751 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=236880.0, ans=0.2
+2024-08-31 16:23:24,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=236880.0, ans=0.025
+2024-08-31 16:23:31,278 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=236880.0, ans=0.0
+2024-08-31 16:23:42,651 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=236933.33333333334, ans=0.125
+2024-08-31 16:23:51,919 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.61 vs. limit=15.0
+2024-08-31 16:23:53,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=236986.66666666666, ans=0.125
+2024-08-31 16:24:09,565 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:24:16,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=237093.33333333334, ans=0.0
+2024-08-31 16:24:27,118 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.628e+02 1.802e+02 2.351e+02 4.404e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 16:24:31,673 INFO [train.py:1114] (1/4) Epoch 18, batch 2150, loss[loss=0.2093, simple_loss=0.2733, pruned_loss=0.05262, ctc_loss=0.1001, over 19581.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2716, pruned_loss=0.0495, ctc_loss=0.09317, over 3867655.65 frames. ], batch size: 52, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:25:07,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=237306.66666666666, ans=0.125
+2024-08-31 16:25:10,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=237306.66666666666, ans=0.0
+2024-08-31 16:25:26,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=237306.66666666666, ans=0.2
+2024-08-31 16:25:27,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=237306.66666666666, ans=0.2
+2024-08-31 16:25:29,767 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=7.84 vs. limit=15.0
+2024-08-31 16:25:38,172 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=237360.0, ans=0.0
+2024-08-31 16:25:40,255 INFO [train.py:1114] (1/4) Epoch 18, batch 2200, loss[loss=0.2103, simple_loss=0.2828, pruned_loss=0.04998, ctc_loss=0.0947, over 19570.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2715, pruned_loss=0.04938, ctc_loss=0.09301, over 3867394.06 frames. ], batch size: 57, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:25:51,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=237413.33333333334, ans=0.125
+2024-08-31 16:25:56,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=237466.66666666666, ans=0.125
+2024-08-31 16:26:11,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=237520.0, ans=0.125
+2024-08-31 16:26:17,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=237573.33333333334, ans=0.125
+2024-08-31 16:26:19,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=237573.33333333334, ans=0.2
+2024-08-31 16:26:33,726 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.652e+02 1.938e+02 2.493e+02 4.901e+02, threshold=3.877e+02, percent-clipped=6.0
+2024-08-31 16:26:38,361 INFO [train.py:1114] (1/4) Epoch 18, batch 2250, loss[loss=0.2019, simple_loss=0.2739, pruned_loss=0.04659, ctc_loss=0.09207, over 19609.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2719, pruned_loss=0.04957, ctc_loss=0.09342, over 3866849.03 frames. ], batch size: 55, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:26:56,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=237680.0, ans=0.2
+2024-08-31 16:27:06,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=237733.33333333334, ans=0.2
+2024-08-31 16:27:31,346 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.45 vs. limit=15.0
+2024-08-31 16:27:35,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=237840.0, ans=0.0
+2024-08-31 16:27:51,909 INFO [train.py:1114] (1/4) Epoch 18, batch 2300, loss[loss=0.2068, simple_loss=0.274, pruned_loss=0.05089, ctc_loss=0.09445, over 19507.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2711, pruned_loss=0.04961, ctc_loss=0.09351, over 3861725.30 frames. ], batch size: 49, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:28:05,760 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=238000.0, ans=0.125
+2024-08-31 16:28:23,767 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=238053.33333333334, ans=0.1
+2024-08-31 16:28:34,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=238106.66666666666, ans=0.125
+2024-08-31 16:28:47,469 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.696e+02 1.848e+02 2.393e+02 3.836e+02, threshold=3.696e+02, percent-clipped=0.0
+2024-08-31 16:29:07,670 INFO [train.py:1114] (1/4) Epoch 18, batch 2350, loss[loss=0.2231, simple_loss=0.2888, pruned_loss=0.05802, ctc_loss=0.1037, over 19669.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2714, pruned_loss=0.04979, ctc_loss=0.09368, over 3864437.57 frames. ], batch size: 63, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:29:16,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=238213.33333333334, ans=0.0
+2024-08-31 16:29:23,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=238266.66666666666, ans=0.125
+2024-08-31 16:29:34,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=238320.0, ans=0.125
+2024-08-31 16:29:36,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=238320.0, ans=0.0
+2024-08-31 16:29:40,021 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.98 vs. limit=22.5
+2024-08-31 16:30:22,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=238373.33333333334, ans=0.125
+2024-08-31 16:30:38,524 INFO [train.py:1114] (1/4) Epoch 18, batch 2400, loss[loss=0.2113, simple_loss=0.2768, pruned_loss=0.05258, ctc_loss=0.1014, over 19258.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2732, pruned_loss=0.05044, ctc_loss=0.09475, over 3859107.89 frames. ], batch size: 71, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:30:40,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=238480.0, ans=0.5
+2024-08-31 16:30:44,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=238480.0, ans=0.2
+2024-08-31 16:30:50,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=238533.33333333334, ans=0.1
+2024-08-31 16:30:52,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=238533.33333333334, ans=0.2
+2024-08-31 16:30:59,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=238533.33333333334, ans=0.1
+2024-08-31 16:31:02,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=238586.66666666666, ans=0.125
+2024-08-31 16:31:04,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=238586.66666666666, ans=0.0
+2024-08-31 16:31:19,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=238640.0, ans=0.025
+2024-08-31 16:31:30,736 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.26 vs. limit=15.0
+2024-08-31 16:31:42,405 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.51 vs. limit=15.0
+2024-08-31 16:31:44,233 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=238693.33333333334, ans=0.125
+2024-08-31 16:31:47,472 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 1.682e+02 1.835e+02 2.125e+02 4.662e+02, threshold=3.671e+02, percent-clipped=5.0
+2024-08-31 16:31:52,085 INFO [train.py:1114] (1/4) Epoch 18, batch 2450, loss[loss=0.2599, simple_loss=0.2987, pruned_loss=0.08088, ctc_loss=0.1485, over 12966.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2773, pruned_loss=0.05318, ctc_loss=0.1005, over 3729718.85 frames. ], batch size: 141, lr: 8.21e-03, grad_scale: 32.0
+2024-08-31 16:32:29,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=238906.66666666666, ans=0.0
+2024-08-31 16:32:30,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=238906.66666666666, ans=0.07
+2024-08-31 16:32:31,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=238906.66666666666, ans=0.125
+2024-08-31 16:33:43,933 INFO [train.py:1114] (1/4) Epoch 19, batch 0, loss[loss=0.2162, simple_loss=0.2716, pruned_loss=0.0588, ctc_loss=0.1081, over 19417.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2716, pruned_loss=0.0588, ctc_loss=0.1081, over 19417.00 frames. ], batch size: 48, lr: 7.99e-03, grad_scale: 32.0
+2024-08-31 16:33:43,934 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-31 16:33:52,403 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.0505, 2.2462, 2.9520, 3.4045], device='cuda:1')
+2024-08-31 16:34:00,545 INFO [train.py:1146] (1/4) Epoch 19, validation: loss=0.1846, simple_loss=0.2728, pruned_loss=0.03584, ctc_loss=0.06159, over 944034.00 frames.
+2024-08-31 16:34:01,380 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13681MB
+2024-08-31 16:34:02,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=238954.66666666666, ans=0.125
+2024-08-31 16:34:21,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=239008.0, ans=0.1
+2024-08-31 16:34:28,856 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=239061.33333333334, ans=0.1
+2024-08-31 16:34:36,687 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.04 vs. limit=15.0
+2024-08-31 16:35:04,408 INFO [train.py:1114] (1/4) Epoch 19, batch 50, loss[loss=0.2033, simple_loss=0.2578, pruned_loss=0.05355, ctc_loss=0.1043, over 19694.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2764, pruned_loss=0.05387, ctc_loss=0.1024, over 843816.80 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:35:12,511 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.795e+02 2.006e+02 2.342e+02 4.821e+02, threshold=4.012e+02, percent-clipped=4.0
+2024-08-31 16:35:36,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=239328.0, ans=0.2
+2024-08-31 16:35:40,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=239381.33333333334, ans=0.025
+2024-08-31 16:35:58,661 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.83 vs. limit=12.0
+2024-08-31 16:35:59,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=239434.66666666666, ans=15.0
+2024-08-31 16:36:03,816 INFO [train.py:1114] (1/4) Epoch 19, batch 100, loss[loss=0.1634, simple_loss=0.2376, pruned_loss=0.03238, ctc_loss=0.06118, over 19715.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2759, pruned_loss=0.05254, ctc_loss=0.09952, over 1498509.86 frames. ], batch size: 51, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:36:14,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=239488.0, ans=0.2
+2024-08-31 16:36:15,584 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.24 vs. limit=22.5
+2024-08-31 16:36:16,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=239541.33333333334, ans=0.2
+2024-08-31 16:36:28,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=239594.66666666666, ans=0.125
+2024-08-31 16:36:50,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=239648.0, ans=0.125
+2024-08-31 16:36:54,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=239701.33333333334, ans=0.125
+2024-08-31 16:37:00,770 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.91 vs. limit=10.0
+2024-08-31 16:37:06,688 INFO [train.py:1114] (1/4) Epoch 19, batch 150, loss[loss=0.1831, simple_loss=0.2456, pruned_loss=0.0445, ctc_loss=0.07927, over 19722.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2732, pruned_loss=0.0508, ctc_loss=0.09615, over 2027222.49 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:37:15,238 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.762e+02 1.953e+02 2.445e+02 3.524e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-31 16:37:24,111 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=239808.0, ans=0.125
+2024-08-31 16:37:44,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=239914.66666666666, ans=0.0
+2024-08-31 16:37:44,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=239914.66666666666, ans=0.0
+2024-08-31 16:37:48,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=239914.66666666666, ans=0.125
+2024-08-31 16:37:52,843 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.41 vs. limit=15.0
+2024-08-31 16:37:55,647 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=239968.0, ans=0.09899494936611666
+2024-08-31 16:37:59,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=239968.0, ans=0.0
+2024-08-31 16:38:07,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=239968.0, ans=0.0
+2024-08-31 16:38:14,093 INFO [train.py:1114] (1/4) Epoch 19, batch 200, loss[loss=0.2249, simple_loss=0.2907, pruned_loss=0.05707, ctc_loss=0.1125, over 18110.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2724, pruned_loss=0.05041, ctc_loss=0.09536, over 2435690.74 frames. ], batch size: 85, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:38:26,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=240074.66666666666, ans=0.125
+2024-08-31 16:38:35,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=240074.66666666666, ans=0.125
+2024-08-31 16:38:37,310 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.96 vs. limit=15.0
+2024-08-31 16:38:39,133 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=240128.0, ans=0.125
+2024-08-31 16:38:46,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=240128.0, ans=10.0
+2024-08-31 16:38:47,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=240128.0, ans=0.95
+2024-08-31 16:39:05,409 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:39:11,661 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.58 vs. limit=22.5
+2024-08-31 16:39:13,565 INFO [train.py:1114] (1/4) Epoch 19, batch 250, loss[loss=0.2361, simple_loss=0.2945, pruned_loss=0.06406, ctc_loss=0.1241, over 19370.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2721, pruned_loss=0.05015, ctc_loss=0.09461, over 2755982.86 frames. ], batch size: 67, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:39:24,853 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:39:26,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=240288.0, ans=0.125
+2024-08-31 16:39:27,165 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 1.733e+02 2.186e+02 2.853e+02 4.755e+02, threshold=4.372e+02, percent-clipped=7.0
+2024-08-31 16:40:02,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=240448.0, ans=0.125
+2024-08-31 16:40:02,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=240448.0, ans=0.125
+2024-08-31 16:40:06,670 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.51 vs. limit=22.5
+2024-08-31 16:40:16,018 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.26 vs. limit=15.0
+2024-08-31 16:40:20,390 INFO [train.py:1114] (1/4) Epoch 19, batch 300, loss[loss=0.2541, simple_loss=0.3092, pruned_loss=0.07284, ctc_loss=0.1333, over 19524.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2715, pruned_loss=0.04979, ctc_loss=0.09376, over 3000158.22 frames. ], batch size: 61, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:40:27,899 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=240554.66666666666, ans=0.125
+2024-08-31 16:40:30,353 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:40:35,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=240608.0, ans=0.0
+2024-08-31 16:40:48,738 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.50 vs. limit=5.0
+2024-08-31 16:40:59,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=240714.66666666666, ans=0.07
+2024-08-31 16:41:01,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=240714.66666666666, ans=0.125
+2024-08-31 16:41:15,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=240768.0, ans=0.0
+2024-08-31 16:41:16,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.42 vs. limit=15.0
+2024-08-31 16:41:21,985 INFO [train.py:1114] (1/4) Epoch 19, batch 350, loss[loss=0.1658, simple_loss=0.2399, pruned_loss=0.03265, ctc_loss=0.06588, over 19750.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.272, pruned_loss=0.04985, ctc_loss=0.09382, over 3190941.52 frames. ], batch size: 48, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:41:24,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240821.33333333334, ans=0.1
+2024-08-31 16:41:30,314 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.653e+02 1.904e+02 2.349e+02 4.016e+02, threshold=3.809e+02, percent-clipped=0.0
+2024-08-31 16:41:45,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=240874.66666666666, ans=0.125
+2024-08-31 16:42:03,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=240981.33333333334, ans=0.2
+2024-08-31 16:42:07,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=240981.33333333334, ans=0.125
+2024-08-31 16:42:14,416 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.80 vs. limit=15.0
+2024-08-31 16:42:25,388 INFO [train.py:1114] (1/4) Epoch 19, batch 400, loss[loss=0.192, simple_loss=0.2688, pruned_loss=0.04239, ctc_loss=0.07595, over 19516.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2715, pruned_loss=0.0495, ctc_loss=0.09312, over 3342063.45 frames. ], batch size: 54, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:42:39,210 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.72 vs. limit=15.0
+2024-08-31 16:42:41,800 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.33 vs. limit=15.0
+2024-08-31 16:42:55,797 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.27 vs. limit=22.5
+2024-08-31 16:42:58,021 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=241194.66666666666, ans=0.125
+2024-08-31 16:43:11,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=241248.0, ans=0.0
+2024-08-31 16:43:20,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=241301.33333333334, ans=0.125
+2024-08-31 16:43:26,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=241301.33333333334, ans=0.2
+2024-08-31 16:43:34,374 INFO [train.py:1114] (1/4) Epoch 19, batch 450, loss[loss=0.2314, simple_loss=0.3037, pruned_loss=0.05776, ctc_loss=0.109, over 19611.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2725, pruned_loss=0.05005, ctc_loss=0.09407, over 3450262.93 frames. ], batch size: 55, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:43:42,750 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.686e+02 1.896e+02 2.370e+02 4.152e+02, threshold=3.792e+02, percent-clipped=1.0
+2024-08-31 16:43:44,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=241354.66666666666, ans=0.0
+2024-08-31 16:43:59,559 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.84 vs. limit=10.0
+2024-08-31 16:44:35,480 INFO [train.py:1114] (1/4) Epoch 19, batch 500, loss[loss=0.2038, simple_loss=0.2766, pruned_loss=0.04752, ctc_loss=0.08992, over 19638.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2716, pruned_loss=0.04959, ctc_loss=0.09334, over 3545831.45 frames. ], batch size: 63, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:44:37,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=241621.33333333334, ans=0.125
+2024-08-31 16:44:45,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=241621.33333333334, ans=0.125
+2024-08-31 16:44:54,047 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.04 vs. limit=15.0
+2024-08-31 16:44:55,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=241674.66666666666, ans=0.0
+2024-08-31 16:44:56,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.75 vs. limit=22.5
+2024-08-31 16:45:11,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=241781.33333333334, ans=0.2
+2024-08-31 16:45:28,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-31 16:46:04,286 INFO [train.py:1114] (1/4) Epoch 19, batch 550, loss[loss=0.2331, simple_loss=0.2967, pruned_loss=0.06188, ctc_loss=0.1142, over 19283.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2715, pruned_loss=0.0495, ctc_loss=0.09311, over 3608551.17 frames. ], batch size: 71, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:46:07,005 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=241888.0, ans=0.125
+2024-08-31 16:46:10,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=241888.0, ans=0.0
+2024-08-31 16:46:12,724 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.697e+02 1.983e+02 2.191e+02 3.507e+02, threshold=3.966e+02, percent-clipped=0.0
+2024-08-31 16:46:13,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff2.min_abs, batch_count=241888.0, ans=0.1
+2024-08-31 16:46:35,561 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=241941.33333333334, ans=0.125
+2024-08-31 16:46:41,233 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=241994.66666666666, ans=0.125
+2024-08-31 16:46:45,232 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.53 vs. limit=15.0
+2024-08-31 16:46:47,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=241994.66666666666, ans=0.0
+2024-08-31 16:46:47,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=241994.66666666666, ans=0.125
+2024-08-31 16:47:06,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=242101.33333333334, ans=0.2
+2024-08-31 16:47:10,170 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=242101.33333333334, ans=0.2
+2024-08-31 16:47:14,509 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.14 vs. limit=10.0
+2024-08-31 16:47:16,282 INFO [train.py:1114] (1/4) Epoch 19, batch 600, loss[loss=0.2385, simple_loss=0.3089, pruned_loss=0.06077, ctc_loss=0.1163, over 19427.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2718, pruned_loss=0.04963, ctc_loss=0.09322, over 3666336.13 frames. ], batch size: 67, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:47:27,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=242154.66666666666, ans=0.125
+2024-08-31 16:48:29,055 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=242368.0, ans=0.0
+2024-08-31 16:48:31,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=242368.0, ans=0.2
+2024-08-31 16:48:39,709 INFO [train.py:1114] (1/4) Epoch 19, batch 650, loss[loss=0.2055, simple_loss=0.2809, pruned_loss=0.04695, ctc_loss=0.0906, over 19773.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2714, pruned_loss=0.04953, ctc_loss=0.09307, over 3717066.29 frames. ], batch size: 54, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:48:39,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff2.min_abs, batch_count=242421.33333333334, ans=0.1
+2024-08-31 16:48:48,390 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.784e+02 2.044e+02 2.793e+02 4.792e+02, threshold=4.088e+02, percent-clipped=6.0
+2024-08-31 16:49:19,135 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.03 vs. limit=15.0
+2024-08-31 16:49:23,716 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=242581.33333333334, ans=0.125
+2024-08-31 16:49:31,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=242634.66666666666, ans=0.125
+2024-08-31 16:49:32,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=242634.66666666666, ans=0.125
+2024-08-31 16:49:38,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=242634.66666666666, ans=0.125
+2024-08-31 16:49:41,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=242634.66666666666, ans=0.05
+2024-08-31 16:50:02,119 INFO [train.py:1114] (1/4) Epoch 19, batch 700, loss[loss=0.1968, simple_loss=0.2607, pruned_loss=0.04727, ctc_loss=0.09596, over 19732.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2718, pruned_loss=0.04941, ctc_loss=0.0929, over 3749165.96 frames. ], batch size: 51, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:50:04,419 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.32 vs. limit=15.0
+2024-08-31 16:50:08,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=242688.0, ans=0.0
+2024-08-31 16:50:10,180 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.89 vs. limit=6.0
+2024-08-31 16:50:13,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=242688.0, ans=0.95
+2024-08-31 16:50:21,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=242741.33333333334, ans=0.0
+2024-08-31 16:50:40,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=242794.66666666666, ans=0.1
+2024-08-31 16:51:20,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=242901.33333333334, ans=0.025
+2024-08-31 16:51:21,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=242901.33333333334, ans=0.125
+2024-08-31 16:51:22,096 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.74 vs. limit=15.0
+2024-08-31 16:52:16,541 INFO [train.py:1114] (1/4) Epoch 19, batch 750, loss[loss=0.2062, simple_loss=0.2806, pruned_loss=0.04712, ctc_loss=0.09386, over 19487.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2713, pruned_loss=0.04927, ctc_loss=0.09282, over 3774464.38 frames. ], batch size: 54, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:52:21,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=242954.66666666666, ans=0.125
+2024-08-31 16:52:40,611 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.707e+02 2.012e+02 2.576e+02 4.596e+02, threshold=4.024e+02, percent-clipped=2.0
+2024-08-31 16:52:45,366 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=243008.0, ans=0.125
+2024-08-31 16:52:48,041 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.50 vs. limit=15.0
+2024-08-31 16:52:58,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=243061.33333333334, ans=0.125
+2024-08-31 16:53:04,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=243061.33333333334, ans=0.125
+2024-08-31 16:53:05,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=243061.33333333334, ans=0.0
+2024-08-31 16:53:05,867 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.49 vs. limit=6.0
+2024-08-31 16:53:37,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=243168.0, ans=0.125
+2024-08-31 16:53:40,085 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:53:40,993 INFO [train.py:1114] (1/4) Epoch 19, batch 800, loss[loss=0.1737, simple_loss=0.2415, pruned_loss=0.03913, ctc_loss=0.06914, over 19841.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2708, pruned_loss=0.04904, ctc_loss=0.09246, over 3796520.43 frames. ], batch size: 49, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:53:45,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=243221.33333333334, ans=0.125
+2024-08-31 16:53:52,269 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:54:10,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=243328.0, ans=0.125
+2024-08-31 16:54:35,760 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=243434.66666666666, ans=0.1
+2024-08-31 16:54:52,032 INFO [train.py:1114] (1/4) Epoch 19, batch 850, loss[loss=0.2213, simple_loss=0.2903, pruned_loss=0.05513, ctc_loss=0.1052, over 19650.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2708, pruned_loss=0.04917, ctc_loss=0.09257, over 3814948.74 frames. ], batch size: 59, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:54:54,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=243488.0, ans=0.09899494936611666
+2024-08-31 16:55:00,086 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.677e+02 1.837e+02 2.316e+02 3.927e+02, threshold=3.675e+02, percent-clipped=0.0
+2024-08-31 16:55:00,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=243488.0, ans=0.1
+2024-08-31 16:55:05,649 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.14 vs. limit=15.0
+2024-08-31 16:55:13,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=243541.33333333334, ans=0.0
+2024-08-31 16:55:20,177 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.19 vs. limit=15.0
+2024-08-31 16:55:26,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=243648.0, ans=0.125
+2024-08-31 16:55:34,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=243648.0, ans=0.125
+2024-08-31 16:55:38,137 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=243648.0, ans=0.125
+2024-08-31 16:55:41,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=243648.0, ans=0.09899494936611666
+2024-08-31 16:55:41,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=243648.0, ans=0.125
+2024-08-31 16:55:43,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=243701.33333333334, ans=0.125
+2024-08-31 16:55:55,902 INFO [train.py:1114] (1/4) Epoch 19, batch 900, loss[loss=0.1762, simple_loss=0.2479, pruned_loss=0.03853, ctc_loss=0.06871, over 19823.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2708, pruned_loss=0.04929, ctc_loss=0.0929, over 3819183.23 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:56:03,894 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.01 vs. limit=8.0
+2024-08-31 16:56:26,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=243861.33333333334, ans=0.0
+2024-08-31 16:56:33,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=243861.33333333334, ans=0.1
+2024-08-31 16:56:38,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=243861.33333333334, ans=0.0
+2024-08-31 16:56:41,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=243914.66666666666, ans=0.125
+2024-08-31 16:57:03,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=243968.0, ans=0.1
+2024-08-31 16:57:05,834 INFO [train.py:1114] (1/4) Epoch 19, batch 950, loss[loss=0.1946, simple_loss=0.2576, pruned_loss=0.04823, ctc_loss=0.08791, over 19491.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.271, pruned_loss=0.04942, ctc_loss=0.09309, over 3820498.17 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:57:10,110 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.17 vs. limit=15.0
+2024-08-31 16:57:14,299 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.751e+02 2.034e+02 2.400e+02 3.857e+02, threshold=4.067e+02, percent-clipped=1.0
+2024-08-31 16:57:27,858 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:57:31,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=244128.0, ans=0.025
+2024-08-31 16:58:04,883 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.03 vs. limit=15.0
+2024-08-31 16:58:06,311 INFO [train.py:1114] (1/4) Epoch 19, batch 1000, loss[loss=0.169, simple_loss=0.2443, pruned_loss=0.03353, ctc_loss=0.06665, over 19867.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2721, pruned_loss=0.04996, ctc_loss=0.0941, over 3816829.39 frames. ], batch size: 52, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 16:58:50,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=244341.33333333334, ans=0.125
+2024-08-31 16:58:56,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=244341.33333333334, ans=0.1
+2024-08-31 17:00:06,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=244501.33333333334, ans=0.1
+2024-08-31 17:00:09,461 INFO [train.py:1114] (1/4) Epoch 19, batch 1050, loss[loss=0.2224, simple_loss=0.286, pruned_loss=0.05777, ctc_loss=0.1083, over 19850.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2714, pruned_loss=0.04985, ctc_loss=0.09402, over 3822541.22 frames. ], batch size: 57, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 17:00:14,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=244554.66666666666, ans=0.125
+2024-08-31 17:00:15,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=244554.66666666666, ans=0.1
+2024-08-31 17:00:17,657 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.651e+02 1.935e+02 2.361e+02 3.363e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-31 17:00:27,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=244608.0, ans=0.125
+2024-08-31 17:00:36,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=244661.33333333334, ans=0.125
+2024-08-31 17:01:12,065 INFO [train.py:1114] (1/4) Epoch 19, batch 1100, loss[loss=0.2024, simple_loss=0.268, pruned_loss=0.0499, ctc_loss=0.09242, over 19592.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2716, pruned_loss=0.04996, ctc_loss=0.09412, over 3830857.76 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:01:28,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=244874.66666666666, ans=0.125
+2024-08-31 17:02:04,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=244981.33333333334, ans=0.2
+2024-08-31 17:02:43,478 INFO [train.py:1114] (1/4) Epoch 19, batch 1150, loss[loss=0.217, simple_loss=0.2791, pruned_loss=0.05689, ctc_loss=0.1028, over 19598.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2718, pruned_loss=0.05011, ctc_loss=0.09442, over 3830026.08 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:03:11,399 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.693e+02 1.899e+02 2.295e+02 3.327e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-31 17:03:53,650 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.94 vs. limit=22.5
+2024-08-31 17:04:04,674 INFO [train.py:1114] (1/4) Epoch 19, batch 1200, loss[loss=0.2019, simple_loss=0.2755, pruned_loss=0.04601, ctc_loss=0.09064, over 19840.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2727, pruned_loss=0.05036, ctc_loss=0.09494, over 3824926.60 frames. ], batch size: 57, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:04:07,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=245354.66666666666, ans=0.025
+2024-08-31 17:04:26,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=245408.0, ans=0.04949747468305833
+2024-08-31 17:04:30,597 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.99 vs. limit=15.0
+2024-08-31 17:04:38,485 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=245461.33333333334, ans=0.0
+2024-08-31 17:04:46,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=245514.66666666666, ans=0.5
+2024-08-31 17:05:06,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=245568.0, ans=0.125
+2024-08-31 17:05:08,562 INFO [train.py:1114] (1/4) Epoch 19, batch 1250, loss[loss=0.2327, simple_loss=0.2932, pruned_loss=0.06306, ctc_loss=0.1152, over 19547.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2738, pruned_loss=0.05091, ctc_loss=0.09565, over 3843041.14 frames. ], batch size: 61, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:05:09,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.10 vs. limit=15.0
+2024-08-31 17:05:16,762 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.647e+02 1.911e+02 2.205e+02 3.499e+02, threshold=3.822e+02, percent-clipped=0.0
+2024-08-31 17:05:35,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=245728.0, ans=0.125
+2024-08-31 17:06:14,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=245834.66666666666, ans=0.025
+2024-08-31 17:06:15,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=245834.66666666666, ans=0.04949747468305833
+2024-08-31 17:06:19,695 INFO [train.py:1114] (1/4) Epoch 19, batch 1300, loss[loss=0.2115, simple_loss=0.2792, pruned_loss=0.05261, ctc_loss=0.09661, over 18776.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2732, pruned_loss=0.05055, ctc_loss=0.09491, over 3846494.21 frames. ], batch size: 76, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:06:24,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=245888.0, ans=0.125
+2024-08-31 17:06:37,592 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=245941.33333333334, ans=0.125
+2024-08-31 17:06:51,803 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.80 vs. limit=10.0
+2024-08-31 17:07:01,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=246048.0, ans=0.0
+2024-08-31 17:07:25,626 INFO [train.py:1114] (1/4) Epoch 19, batch 1350, loss[loss=0.1789, simple_loss=0.2554, pruned_loss=0.03692, ctc_loss=0.07114, over 19783.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2721, pruned_loss=0.04997, ctc_loss=0.09376, over 3856904.11 frames. ], batch size: 54, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:07:35,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246154.66666666666, ans=0.1
+2024-08-31 17:07:39,279 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.765e+02 2.070e+02 2.720e+02 4.418e+02, threshold=4.141e+02, percent-clipped=1.0
+2024-08-31 17:08:18,091 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:08:27,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=246368.0, ans=0.125
+2024-08-31 17:08:35,882 INFO [train.py:1114] (1/4) Epoch 19, batch 1400, loss[loss=0.1559, simple_loss=0.2238, pruned_loss=0.03152, ctc_loss=0.06248, over 19689.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2718, pruned_loss=0.04999, ctc_loss=0.09384, over 3864018.00 frames. ], batch size: 46, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:08:43,652 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.52 vs. limit=22.5
+2024-08-31 17:09:08,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=246474.66666666666, ans=0.125
+2024-08-31 17:09:22,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=246528.0, ans=0.125
+2024-08-31 17:09:34,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=246581.33333333334, ans=0.0
+2024-08-31 17:09:52,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=246688.0, ans=0.125
+2024-08-31 17:09:53,676 INFO [train.py:1114] (1/4) Epoch 19, batch 1450, loss[loss=0.2106, simple_loss=0.2802, pruned_loss=0.05232, ctc_loss=0.09106, over 19664.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2723, pruned_loss=0.05015, ctc_loss=0.09413, over 3863145.24 frames. ], batch size: 63, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:10:02,078 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 1.691e+02 1.919e+02 2.362e+02 3.353e+02, threshold=3.838e+02, percent-clipped=0.0
+2024-08-31 17:10:02,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246688.0, ans=0.1
+2024-08-31 17:10:08,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=246741.33333333334, ans=0.125
+2024-08-31 17:11:07,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=246741.33333333334, ans=0.125
+2024-08-31 17:11:30,391 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=11.07 vs. limit=12.0
+2024-08-31 17:11:42,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=246848.0, ans=0.125
+2024-08-31 17:12:10,413 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:12:12,398 INFO [train.py:1114] (1/4) Epoch 19, batch 1500, loss[loss=0.2263, simple_loss=0.2921, pruned_loss=0.05872, ctc_loss=0.1076, over 19575.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2726, pruned_loss=0.05002, ctc_loss=0.09416, over 3862569.89 frames. ], batch size: 57, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:12:14,716 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.43 vs. limit=15.0
+2024-08-31 17:12:22,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=246954.66666666666, ans=0.0
+2024-08-31 17:13:04,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.75 vs. limit=6.0
+2024-08-31 17:13:08,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=247008.0, ans=0.0
+2024-08-31 17:13:45,561 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.08 vs. limit=15.0
+2024-08-31 17:14:14,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=247114.66666666666, ans=0.0
+2024-08-31 17:14:20,584 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.60 vs. limit=15.0
+2024-08-31 17:14:26,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=247168.0, ans=0.0
+2024-08-31 17:14:33,507 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:14:36,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=247168.0, ans=0.125
+2024-08-31 17:14:36,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=247168.0, ans=0.125
+2024-08-31 17:14:38,417 INFO [train.py:1114] (1/4) Epoch 19, batch 1550, loss[loss=0.2281, simple_loss=0.2896, pruned_loss=0.06051, ctc_loss=0.1137, over 19583.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2725, pruned_loss=0.05013, ctc_loss=0.09462, over 3847411.22 frames. ], batch size: 60, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:14:41,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=247221.33333333334, ans=0.2
+2024-08-31 17:14:43,940 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.07 vs. limit=22.5
+2024-08-31 17:14:46,785 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.654e+02 1.883e+02 2.328e+02 3.879e+02, threshold=3.765e+02, percent-clipped=1.0
+2024-08-31 17:14:47,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=247221.33333333334, ans=0.125
+2024-08-31 17:15:14,920 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:15:15,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=247274.66666666666, ans=0.0
+2024-08-31 17:16:14,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=247381.33333333334, ans=0.0
+2024-08-31 17:16:14,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=247381.33333333334, ans=0.1
+2024-08-31 17:16:19,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=247381.33333333334, ans=0.1
+2024-08-31 17:16:29,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=247434.66666666666, ans=0.025
+2024-08-31 17:16:40,657 INFO [train.py:1114] (1/4) Epoch 19, batch 1600, loss[loss=0.2051, simple_loss=0.2834, pruned_loss=0.04576, ctc_loss=0.08834, over 19843.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2724, pruned_loss=0.05024, ctc_loss=0.09512, over 3836078.74 frames. ], batch size: 57, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:16:53,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=247541.33333333334, ans=0.0
+2024-08-31 17:16:54,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=247541.33333333334, ans=0.125
+2024-08-31 17:16:59,559 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.44 vs. limit=15.0
+2024-08-31 17:17:01,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=247541.33333333334, ans=0.125
+2024-08-31 17:17:25,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=247648.0, ans=0.2
+2024-08-31 17:17:35,491 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.12 vs. limit=10.0
+2024-08-31 17:17:42,018 INFO [train.py:1114] (1/4) Epoch 19, batch 1650, loss[loss=0.2155, simple_loss=0.2882, pruned_loss=0.05147, ctc_loss=0.09983, over 19660.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2721, pruned_loss=0.05015, ctc_loss=0.0948, over 3832558.49 frames. ], batch size: 59, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:17:49,639 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=247754.66666666666, ans=0.0
+2024-08-31 17:17:50,573 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.753e+02 1.927e+02 2.360e+02 4.500e+02, threshold=3.853e+02, percent-clipped=4.0
+2024-08-31 17:17:58,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=247808.0, ans=0.125
+2024-08-31 17:18:36,080 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.92 vs. limit=12.0
+2024-08-31 17:18:44,942 INFO [train.py:1114] (1/4) Epoch 19, batch 1700, loss[loss=0.1859, simple_loss=0.2457, pruned_loss=0.04561, ctc_loss=0.08694, over 19679.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2715, pruned_loss=0.0495, ctc_loss=0.09357, over 3846785.53 frames. ], batch size: 46, lr: 7.84e-03, grad_scale: 64.0
+2024-08-31 17:18:57,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=248074.66666666666, ans=0.125
+2024-08-31 17:19:10,052 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=7.98 vs. limit=22.5
+2024-08-31 17:19:46,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=248234.66666666666, ans=0.0
+2024-08-31 17:19:52,928 INFO [train.py:1114] (1/4) Epoch 19, batch 1750, loss[loss=0.1832, simple_loss=0.246, pruned_loss=0.04336, ctc_loss=0.0842, over 19659.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2712, pruned_loss=0.04945, ctc_loss=0.09322, over 3852102.13 frames. ], batch size: 45, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:19:57,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=248288.0, ans=0.0
+2024-08-31 17:20:02,151 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.715e+02 1.941e+02 2.441e+02 4.524e+02, threshold=3.882e+02, percent-clipped=3.0
+2024-08-31 17:20:15,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.whiten.whitening_limit, batch_count=248394.66666666666, ans=12.0
+2024-08-31 17:20:21,095 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.08 vs. limit=22.5
+2024-08-31 17:20:28,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=248448.0, ans=0.1
+2024-08-31 17:20:35,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=248448.0, ans=0.125
+2024-08-31 17:20:39,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=248501.33333333334, ans=0.0
+2024-08-31 17:20:45,500 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=248501.33333333334, ans=0.07
+2024-08-31 17:20:49,927 INFO [train.py:1114] (1/4) Epoch 19, batch 1800, loss[loss=0.2012, simple_loss=0.2743, pruned_loss=0.04682, ctc_loss=0.08615, over 19617.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2711, pruned_loss=0.04941, ctc_loss=0.09298, over 3853513.81 frames. ], batch size: 55, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:21:00,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=248608.0, ans=0.2
+2024-08-31 17:21:01,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=248608.0, ans=0.2
+2024-08-31 17:21:08,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=248608.0, ans=0.125
+2024-08-31 17:21:36,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=248768.0, ans=0.1
+2024-08-31 17:21:39,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=248768.0, ans=0.025
+2024-08-31 17:21:41,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=248768.0, ans=0.125
+2024-08-31 17:21:46,637 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.26 vs. limit=15.0
+2024-08-31 17:21:47,149 INFO [train.py:1114] (1/4) Epoch 19, batch 1850, loss[loss=0.2161, simple_loss=0.2909, pruned_loss=0.05045, ctc_loss=0.1007, over 19581.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2716, pruned_loss=0.04965, ctc_loss=0.0935, over 3856194.78 frames. ], batch size: 57, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:21:56,043 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.825e+02 2.203e+02 3.044e+02 4.782e+02, threshold=4.406e+02, percent-clipped=6.0
+2024-08-31 17:22:35,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=248981.33333333334, ans=0.1
+2024-08-31 17:22:52,487 INFO [train.py:1114] (1/4) Epoch 19, batch 1900, loss[loss=0.1989, simple_loss=0.2733, pruned_loss=0.04549, ctc_loss=0.08392, over 19643.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2719, pruned_loss=0.04962, ctc_loss=0.09317, over 3860254.22 frames. ], batch size: 59, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:22:53,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=249088.0, ans=0.0
+2024-08-31 17:22:57,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=249088.0, ans=0.0
+2024-08-31 17:23:00,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=249088.0, ans=0.0
+2024-08-31 17:23:30,954 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.53 vs. limit=6.0
+2024-08-31 17:23:36,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=249301.33333333334, ans=0.125
+2024-08-31 17:23:38,300 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.49 vs. limit=22.5
+2024-08-31 17:23:45,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=249301.33333333334, ans=0.125
+2024-08-31 17:23:49,002 INFO [train.py:1114] (1/4) Epoch 19, batch 1950, loss[loss=0.2048, simple_loss=0.269, pruned_loss=0.0522, ctc_loss=0.09071, over 19609.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2732, pruned_loss=0.04963, ctc_loss=0.09343, over 3869695.05 frames. ], batch size: 52, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:23:49,481 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=1.95 vs. limit=6.0
+2024-08-31 17:23:55,357 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.76 vs. limit=6.0
+2024-08-31 17:23:58,747 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.608e+02 1.802e+02 2.157e+02 4.545e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 17:24:04,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=249408.0, ans=0.125
+2024-08-31 17:24:50,835 INFO [train.py:1114] (1/4) Epoch 19, batch 2000, loss[loss=0.1755, simple_loss=0.2382, pruned_loss=0.04067, ctc_loss=0.07859, over 19645.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2739, pruned_loss=0.05002, ctc_loss=0.09443, over 3855107.31 frames. ], batch size: 45, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:25:18,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=249728.0, ans=0.125
+2024-08-31 17:25:33,070 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:25:35,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=249834.66666666666, ans=0.2
+2024-08-31 17:25:45,653 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:25:47,800 INFO [train.py:1114] (1/4) Epoch 19, batch 2050, loss[loss=0.1758, simple_loss=0.253, pruned_loss=0.03547, ctc_loss=0.06902, over 19722.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2728, pruned_loss=0.0499, ctc_loss=0.09424, over 3851539.82 frames. ], batch size: 47, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:25:51,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=249888.0, ans=0.125
+2024-08-31 17:25:57,144 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.719e+02 2.018e+02 2.402e+02 3.677e+02, threshold=4.037e+02, percent-clipped=1.0
+2024-08-31 17:26:02,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=249941.33333333334, ans=0.125
+2024-08-31 17:26:02,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=249941.33333333334, ans=0.125
+2024-08-31 17:26:06,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=249941.33333333334, ans=0.1
+2024-08-31 17:26:15,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=249994.66666666666, ans=0.1
+2024-08-31 17:26:24,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=250048.0, ans=0.0
+2024-08-31 17:26:44,677 INFO [train.py:1114] (1/4) Epoch 19, batch 2100, loss[loss=0.195, simple_loss=0.2715, pruned_loss=0.04337, ctc_loss=0.07943, over 19776.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2725, pruned_loss=0.04982, ctc_loss=0.09408, over 3858510.10 frames. ], batch size: 54, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:26:47,767 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.42 vs. limit=15.0
+2024-08-31 17:27:27,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=250314.66666666666, ans=0.125
+2024-08-31 17:27:35,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:38,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:42,496 INFO [train.py:1114] (1/4) Epoch 19, batch 2150, loss[loss=0.1971, simple_loss=0.2655, pruned_loss=0.04612, ctc_loss=0.09097, over 19581.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2719, pruned_loss=0.0497, ctc_loss=0.09375, over 3868401.60 frames. ], batch size: 52, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:27:51,501 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.672e+02 1.975e+02 2.523e+02 4.782e+02, threshold=3.951e+02, percent-clipped=2.0
+2024-08-31 17:27:58,699 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=250474.66666666666, ans=0.125
+2024-08-31 17:28:10,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=250528.0, ans=0.1
+2024-08-31 17:28:22,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=250581.33333333334, ans=0.07
+2024-08-31 17:28:25,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=250581.33333333334, ans=0.025
+2024-08-31 17:28:39,694 INFO [train.py:1114] (1/4) Epoch 19, batch 2200, loss[loss=0.2226, simple_loss=0.2853, pruned_loss=0.05711, ctc_loss=0.1143, over 19583.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2715, pruned_loss=0.04958, ctc_loss=0.09335, over 3866828.23 frames. ], batch size: 57, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:28:51,949 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.13 vs. limit=22.5
+2024-08-31 17:29:00,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=250741.33333333334, ans=0.04949747468305833
+2024-08-31 17:29:09,123 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.26 vs. limit=15.0
+2024-08-31 17:29:18,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=250848.0, ans=0.0
+2024-08-31 17:29:20,556 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=250848.0, ans=0.2
+2024-08-31 17:29:25,512 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.40 vs. limit=15.0
+2024-08-31 17:29:38,764 INFO [train.py:1114] (1/4) Epoch 19, batch 2250, loss[loss=0.2205, simple_loss=0.2902, pruned_loss=0.05462, ctc_loss=0.104, over 19610.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2716, pruned_loss=0.04972, ctc_loss=0.09352, over 3867414.95 frames. ], batch size: 55, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:29:47,351 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.680e+02 1.896e+02 2.375e+02 5.292e+02, threshold=3.791e+02, percent-clipped=4.0
+2024-08-31 17:30:19,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=251114.66666666666, ans=0.125
+2024-08-31 17:30:32,286 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:30:38,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=251168.0, ans=0.125
+2024-08-31 17:30:40,044 INFO [train.py:1114] (1/4) Epoch 19, batch 2300, loss[loss=0.1959, simple_loss=0.2594, pruned_loss=0.0482, ctc_loss=0.09006, over 19536.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2707, pruned_loss=0.04956, ctc_loss=0.09322, over 3861612.16 frames. ], batch size: 49, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:31:04,565 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.59 vs. limit=15.0
+2024-08-31 17:31:09,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=251328.0, ans=0.125
+2024-08-31 17:31:19,138 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.79 vs. limit=15.0
+2024-08-31 17:31:26,710 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.74 vs. limit=15.0
+2024-08-31 17:31:32,463 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:31:36,393 INFO [train.py:1114] (1/4) Epoch 19, batch 2350, loss[loss=0.2125, simple_loss=0.2791, pruned_loss=0.05307, ctc_loss=0.09944, over 19657.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2707, pruned_loss=0.04951, ctc_loss=0.09307, over 3863892.88 frames. ], batch size: 63, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:31:45,227 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.718e+02 2.013e+02 2.563e+02 3.706e+02, threshold=4.026e+02, percent-clipped=0.0
+2024-08-31 17:32:00,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=251541.33333333334, ans=0.125
+2024-08-31 17:32:10,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=251594.66666666666, ans=0.0
+2024-08-31 17:32:14,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=251648.0, ans=0.0
+2024-08-31 17:32:14,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=251648.0, ans=0.125
+2024-08-31 17:32:14,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.78 vs. limit=15.0
+2024-08-31 17:32:15,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=251648.0, ans=0.025
+2024-08-31 17:32:28,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=251701.33333333334, ans=0.125
+2024-08-31 17:32:36,598 INFO [train.py:1114] (1/4) Epoch 19, batch 2400, loss[loss=0.2215, simple_loss=0.2909, pruned_loss=0.05588, ctc_loss=0.1007, over 19414.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2727, pruned_loss=0.05028, ctc_loss=0.09429, over 3858113.05 frames. ], batch size: 67, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:33:08,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=251861.33333333334, ans=0.2
+2024-08-31 17:33:39,852 INFO [train.py:1114] (1/4) Epoch 19, batch 2450, loss[loss=0.2403, simple_loss=0.2847, pruned_loss=0.07172, ctc_loss=0.1312, over 13285.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2759, pruned_loss=0.05257, ctc_loss=0.09886, over 3731183.79 frames. ], batch size: 141, lr: 7.78e-03, grad_scale: 32.0
+2024-08-31 17:33:42,493 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=252021.33333333334, ans=0.1
+2024-08-31 17:33:48,950 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.610e+02 1.856e+02 2.081e+02 3.075e+02, threshold=3.711e+02, percent-clipped=0.0
+2024-08-31 17:34:17,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=252181.33333333334, ans=0.0
+2024-08-31 17:34:19,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=252181.33333333334, ans=0.0
+2024-08-31 17:36:18,534 INFO [train.py:1114] (1/4) Epoch 20, batch 0, loss[loss=0.2366, simple_loss=0.2866, pruned_loss=0.06771, ctc_loss=0.128, over 19805.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.2866, pruned_loss=0.06771, ctc_loss=0.128, over 19805.00 frames. ], batch size: 49, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:36:18,535 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-31 17:36:23,463 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.4.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.1277, 2.3304, 2.5698, 2.2196], device='cuda:1')
+2024-08-31 17:36:28,439 INFO [train.py:1146] (1/4) Epoch 20, validation: loss=0.1834, simple_loss=0.2715, pruned_loss=0.03542, ctc_loss=0.061, over 944034.00 frames.
+2024-08-31 17:36:28,440 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13681MB
+2024-08-31 17:36:42,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=252288.0, ans=0.025
+2024-08-31 17:36:44,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=252288.0, ans=0.1
+2024-08-31 17:36:49,687 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.42 vs. limit=22.5
+2024-08-31 17:36:57,602 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.57 vs. limit=22.5
+2024-08-31 17:37:11,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=252394.66666666666, ans=0.1
+2024-08-31 17:37:22,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=252448.0, ans=0.0
+2024-08-31 17:37:27,989 INFO [train.py:1114] (1/4) Epoch 20, batch 50, loss[loss=0.1755, simple_loss=0.2421, pruned_loss=0.03919, ctc_loss=0.07613, over 19711.00 frames. ], tot_loss[loss=0.205, simple_loss=0.273, pruned_loss=0.04972, ctc_loss=0.09411, over 845201.66 frames. ], batch size: 47, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:37:43,992 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.64 vs. limit=15.0
+2024-08-31 17:37:51,156 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.696e+02 1.962e+02 2.261e+02 4.473e+02, threshold=3.923e+02, percent-clipped=2.0
+2024-08-31 17:41:27,216 INFO [train.py:1114] (1/4) Epoch 20, batch 100, loss[loss=0.1835, simple_loss=0.249, pruned_loss=0.04294, ctc_loss=0.08039, over 19724.00 frames. ], tot_loss[loss=0.206, simple_loss=0.274, pruned_loss=0.05, ctc_loss=0.09478, over 1498926.28 frames. ], batch size: 51, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:41:30,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=252768.0, ans=0.0
+2024-08-31 17:44:06,481 INFO [train.py:1114] (1/4) Epoch 20, batch 150, loss[loss=0.206, simple_loss=0.2612, pruned_loss=0.05413, ctc_loss=0.1065, over 19725.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2711, pruned_loss=0.04909, ctc_loss=0.09312, over 2028652.43 frames. ], batch size: 47, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:44:59,740 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.634e+02 1.821e+02 2.194e+02 3.683e+02, threshold=3.641e+02, percent-clipped=0.0
+2024-08-31 17:45:25,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=253141.33333333334, ans=0.125
+2024-08-31 17:45:59,900 INFO [train.py:1114] (1/4) Epoch 20, batch 200, loss[loss=0.2289, simple_loss=0.2934, pruned_loss=0.05984, ctc_loss=0.1116, over 18278.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2707, pruned_loss=0.0493, ctc_loss=0.09329, over 2436406.66 frames. ], batch size: 85, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:46:03,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=253301.33333333334, ans=0.125
+2024-08-31 17:46:07,857 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=253301.33333333334, ans=0.125
+2024-08-31 17:46:15,769 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.98 vs. limit=15.0
+2024-08-31 17:46:51,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=253408.0, ans=0.1
+2024-08-31 17:47:14,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=253461.33333333334, ans=0.0
+2024-08-31 17:47:17,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=253514.66666666666, ans=0.07
+2024-08-31 17:47:20,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=253514.66666666666, ans=0.025
+2024-08-31 17:47:32,398 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:47:33,345 INFO [train.py:1114] (1/4) Epoch 20, batch 250, loss[loss=0.2025, simple_loss=0.2742, pruned_loss=0.04797, ctc_loss=0.08746, over 19412.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2709, pruned_loss=0.04902, ctc_loss=0.09255, over 2755617.90 frames. ], batch size: 67, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:47:45,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=253568.0, ans=0.0
+2024-08-31 17:47:58,528 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=253621.33333333334, ans=0.035
+2024-08-31 17:47:59,362 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.745e+02 2.044e+02 2.602e+02 4.259e+02, threshold=4.089e+02, percent-clipped=6.0
+2024-08-31 17:49:24,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=253674.66666666666, ans=0.1
+2024-08-31 17:49:51,098 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.76 vs. limit=15.0
+2024-08-31 17:50:00,350 INFO [train.py:1114] (1/4) Epoch 20, batch 300, loss[loss=0.2364, simple_loss=0.2917, pruned_loss=0.06589, ctc_loss=0.1233, over 19501.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2706, pruned_loss=0.04893, ctc_loss=0.09214, over 3001492.53 frames. ], batch size: 61, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:50:08,309 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.00 vs. limit=10.0
+2024-08-31 17:50:47,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=253994.66666666666, ans=0.125
+2024-08-31 17:50:52,944 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.77 vs. limit=15.0
+2024-08-31 17:51:05,501 INFO [train.py:1114] (1/4) Epoch 20, batch 350, loss[loss=0.1822, simple_loss=0.2503, pruned_loss=0.04173, ctc_loss=0.07652, over 19766.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2703, pruned_loss=0.04899, ctc_loss=0.09246, over 3190578.29 frames. ], batch size: 48, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:51:23,946 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.78 vs. limit=12.0
+2024-08-31 17:51:26,966 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.703e+02 1.946e+02 2.321e+02 4.034e+02, threshold=3.891e+02, percent-clipped=0.0
+2024-08-31 17:51:28,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=254208.0, ans=0.5
+2024-08-31 17:52:04,345 INFO [train.py:1114] (1/4) Epoch 20, batch 400, loss[loss=0.1979, simple_loss=0.2727, pruned_loss=0.04474, ctc_loss=0.08406, over 19857.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2694, pruned_loss=0.04826, ctc_loss=0.09112, over 3344185.38 frames. ], batch size: 55, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:52:20,022 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.54 vs. limit=15.0
+2024-08-31 17:52:42,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=254474.66666666666, ans=0.125
+2024-08-31 17:53:10,613 INFO [train.py:1114] (1/4) Epoch 20, batch 450, loss[loss=0.1812, simple_loss=0.2639, pruned_loss=0.03557, ctc_loss=0.06817, over 19601.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2697, pruned_loss=0.04833, ctc_loss=0.09118, over 3453379.41 frames. ], batch size: 55, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:53:13,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=254634.66666666666, ans=0.0
+2024-08-31 17:53:19,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=254634.66666666666, ans=0.0
+2024-08-31 17:53:31,702 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.627e+02 1.777e+02 2.217e+02 3.582e+02, threshold=3.554e+02, percent-clipped=0.0
+2024-08-31 17:53:42,080 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.65 vs. limit=22.5
+2024-08-31 17:53:48,251 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.39 vs. limit=22.5
+2024-08-31 17:53:50,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=254741.33333333334, ans=0.125
+2024-08-31 17:54:15,355 INFO [train.py:1114] (1/4) Epoch 20, batch 500, loss[loss=0.2202, simple_loss=0.2877, pruned_loss=0.05586, ctc_loss=0.1025, over 19698.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2693, pruned_loss=0.04801, ctc_loss=0.09086, over 3548352.56 frames. ], batch size: 63, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:54:45,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=255008.0, ans=0.125
+2024-08-31 17:55:09,619 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.24 vs. limit=15.0
+2024-08-31 17:55:14,675 INFO [train.py:1114] (1/4) Epoch 20, batch 550, loss[loss=0.21, simple_loss=0.2769, pruned_loss=0.05233, ctc_loss=0.09616, over 19308.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2694, pruned_loss=0.04833, ctc_loss=0.09135, over 3609596.37 frames. ], batch size: 71, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:55:16,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=255168.0, ans=0.2
+2024-08-31 17:55:23,991 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=255168.0, ans=15.0
+2024-08-31 17:55:24,700 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:55:35,939 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.640e+02 1.908e+02 2.178e+02 3.229e+02, threshold=3.816e+02, percent-clipped=0.0
+2024-08-31 17:55:50,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=255274.66666666666, ans=0.025
+2024-08-31 17:55:54,524 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.29 vs. limit=10.0
+2024-08-31 17:56:03,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=255328.0, ans=0.0
+2024-08-31 17:56:16,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=255381.33333333334, ans=0.125
+2024-08-31 17:56:22,749 INFO [train.py:1114] (1/4) Epoch 20, batch 600, loss[loss=0.2371, simple_loss=0.2975, pruned_loss=0.06486, ctc_loss=0.1175, over 19349.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2695, pruned_loss=0.04837, ctc_loss=0.09111, over 3666314.13 frames. ], batch size: 67, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:56:25,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=255434.66666666666, ans=0.95
+2024-08-31 17:56:29,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=255434.66666666666, ans=0.125
+2024-08-31 17:56:31,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=255434.66666666666, ans=0.125
+2024-08-31 17:56:33,976 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.51 vs. limit=15.0
+2024-08-31 17:57:06,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=255594.66666666666, ans=0.025
+2024-08-31 17:57:22,374 INFO [train.py:1114] (1/4) Epoch 20, batch 650, loss[loss=0.1935, simple_loss=0.2756, pruned_loss=0.04124, ctc_loss=0.07218, over 19776.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2689, pruned_loss=0.04805, ctc_loss=0.09046, over 3716846.63 frames. ], batch size: 54, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:57:44,323 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.759e+02 2.153e+02 2.838e+02 5.166e+02, threshold=4.306e+02, percent-clipped=8.0
+2024-08-31 17:57:58,069 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.39 vs. limit=15.0
+2024-08-31 17:57:58,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=255861.33333333334, ans=0.125
+2024-08-31 17:58:22,790 INFO [train.py:1114] (1/4) Epoch 20, batch 700, loss[loss=0.1821, simple_loss=0.2522, pruned_loss=0.04178, ctc_loss=0.07107, over 19719.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2696, pruned_loss=0.04832, ctc_loss=0.09107, over 3749179.31 frames. ], batch size: 51, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:59:11,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=256181.33333333334, ans=0.125
+2024-08-31 17:59:24,262 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.37 vs. limit=12.0
+2024-08-31 17:59:24,904 INFO [train.py:1114] (1/4) Epoch 20, batch 750, loss[loss=0.22, simple_loss=0.2891, pruned_loss=0.05513, ctc_loss=0.1018, over 19487.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2686, pruned_loss=0.04769, ctc_loss=0.08984, over 3775736.67 frames. ], batch size: 54, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 17:59:46,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=256288.0, ans=0.125
+2024-08-31 17:59:57,700 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=256288.0, ans=10.0
+2024-08-31 17:59:58,588 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 1.642e+02 1.855e+02 2.095e+02 3.716e+02, threshold=3.709e+02, percent-clipped=0.0
+2024-08-31 17:59:59,205 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.93 vs. limit=6.0
+2024-08-31 18:00:00,563 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.65 vs. limit=15.0
+2024-08-31 18:00:08,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=256341.33333333334, ans=0.07
+2024-08-31 18:00:22,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=256394.66666666666, ans=0.0
+2024-08-31 18:00:40,627 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=256448.0, ans=0.125
+2024-08-31 18:00:40,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=256448.0, ans=0.0
+2024-08-31 18:00:41,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=256501.33333333334, ans=0.125
+2024-08-31 18:00:42,941 INFO [train.py:1114] (1/4) Epoch 20, batch 800, loss[loss=0.206, simple_loss=0.2657, pruned_loss=0.05255, ctc_loss=0.1031, over 19810.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2687, pruned_loss=0.04762, ctc_loss=0.08974, over 3797557.31 frames. ], batch size: 49, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 18:01:00,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=256554.66666666666, ans=0.2
+2024-08-31 18:01:29,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=256714.66666666666, ans=0.125
+2024-08-31 18:01:30,312 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.67 vs. limit=15.0
+2024-08-31 18:01:31,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=256714.66666666666, ans=0.125
+2024-08-31 18:01:38,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=256714.66666666666, ans=0.125
+2024-08-31 18:01:43,070 INFO [train.py:1114] (1/4) Epoch 20, batch 850, loss[loss=0.2041, simple_loss=0.28, pruned_loss=0.04646, ctc_loss=0.08795, over 19661.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2685, pruned_loss=0.04752, ctc_loss=0.08946, over 3816420.74 frames. ], batch size: 59, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:01:54,698 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=256821.33333333334, ans=0.1
+2024-08-31 18:01:59,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=256821.33333333334, ans=0.125
+2024-08-31 18:02:00,680 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=256821.33333333334, ans=0.2
+2024-08-31 18:02:05,170 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.672e+02 2.009e+02 2.661e+02 4.692e+02, threshold=4.019e+02, percent-clipped=5.0
+2024-08-31 18:02:41,077 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.89 vs. limit=22.5
+2024-08-31 18:02:42,834 INFO [train.py:1114] (1/4) Epoch 20, batch 900, loss[loss=0.1924, simple_loss=0.2531, pruned_loss=0.04902, ctc_loss=0.08416, over 19408.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2688, pruned_loss=0.04796, ctc_loss=0.0903, over 3819263.87 frames. ], batch size: 48, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:02:49,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=257034.66666666666, ans=0.125
+2024-08-31 18:03:01,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=257088.0, ans=0.125
+2024-08-31 18:03:04,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=257088.0, ans=0.0
+2024-08-31 18:03:05,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=257088.0, ans=0.125
+2024-08-31 18:03:15,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=257141.33333333334, ans=0.125
+2024-08-31 18:03:29,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=257194.66666666666, ans=0.0
+2024-08-31 18:03:29,849 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.33 vs. limit=22.5
+2024-08-31 18:03:39,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=257248.0, ans=0.1
+2024-08-31 18:03:45,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=257248.0, ans=10.0
+2024-08-31 18:03:50,708 INFO [train.py:1114] (1/4) Epoch 20, batch 950, loss[loss=0.2093, simple_loss=0.2668, pruned_loss=0.05598, ctc_loss=0.09961, over 19481.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.27, pruned_loss=0.04853, ctc_loss=0.0915, over 3819608.55 frames. ], batch size: 49, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:03:53,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=257301.33333333334, ans=0.125
+2024-08-31 18:04:02,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=257354.66666666666, ans=0.025
+2024-08-31 18:04:08,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten.whitening_limit, batch_count=257354.66666666666, ans=15.0
+2024-08-31 18:04:11,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=257354.66666666666, ans=0.125
+2024-08-31 18:04:12,200 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.674e+02 1.914e+02 2.385e+02 5.476e+02, threshold=3.829e+02, percent-clipped=1.0
+2024-08-31 18:05:02,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=257461.33333333334, ans=0.07
+2024-08-31 18:05:05,279 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.39 vs. limit=6.0
+2024-08-31 18:05:15,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=257514.66666666666, ans=0.09899494936611666
+2024-08-31 18:05:19,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=257514.66666666666, ans=0.05
+2024-08-31 18:05:25,089 INFO [train.py:1114] (1/4) Epoch 20, batch 1000, loss[loss=0.1935, simple_loss=0.2575, pruned_loss=0.04641, ctc_loss=0.0919, over 19840.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2705, pruned_loss=0.04899, ctc_loss=0.0923, over 3816334.17 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:05:29,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=257568.0, ans=0.025
+2024-08-31 18:12:01,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=257781.33333333334, ans=0.125
+2024-08-31 18:12:15,984 INFO [train.py:1114] (1/4) Epoch 20, batch 1050, loss[loss=0.2002, simple_loss=0.2743, pruned_loss=0.04536, ctc_loss=0.08874, over 19825.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2691, pruned_loss=0.04835, ctc_loss=0.09128, over 3821748.40 frames. ], batch size: 57, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:12:22,249 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=257834.66666666666, ans=0.125
+2024-08-31 18:12:34,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=257888.0, ans=0.125
+2024-08-31 18:12:36,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=257888.0, ans=0.04949747468305833
+2024-08-31 18:12:37,419 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.683e+02 1.941e+02 2.234e+02 3.103e+02, threshold=3.882e+02, percent-clipped=0.0
+2024-08-31 18:13:09,818 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.34 vs. limit=6.0
+2024-08-31 18:13:20,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=258048.0, ans=0.125
+2024-08-31 18:13:25,869 INFO [train.py:1114] (1/4) Epoch 20, batch 1100, loss[loss=0.2061, simple_loss=0.2734, pruned_loss=0.05073, ctc_loss=0.09322, over 19589.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2692, pruned_loss=0.04823, ctc_loss=0.09106, over 3829696.72 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:13:26,574 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.58 vs. limit=6.0
+2024-08-31 18:13:28,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=258101.33333333334, ans=0.0
+2024-08-31 18:14:20,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=258314.66666666666, ans=0.0
+2024-08-31 18:14:26,123 INFO [train.py:1114] (1/4) Epoch 20, batch 1150, loss[loss=0.1871, simple_loss=0.2615, pruned_loss=0.04179, ctc_loss=0.07271, over 19589.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2693, pruned_loss=0.04832, ctc_loss=0.09137, over 3829160.70 frames. ], batch size: 52, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:15:12,224 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.657e+02 1.937e+02 2.398e+02 3.976e+02, threshold=3.875e+02, percent-clipped=1.0
+2024-08-31 18:15:19,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=258474.66666666666, ans=0.025
+2024-08-31 18:15:25,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=258528.0, ans=0.125
+2024-08-31 18:15:32,897 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.11 vs. limit=10.0
+2024-08-31 18:15:33,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=258528.0, ans=0.1
+2024-08-31 18:15:51,942 INFO [train.py:1114] (1/4) Epoch 20, batch 1200, loss[loss=0.2099, simple_loss=0.2798, pruned_loss=0.05086, ctc_loss=0.09572, over 19848.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2698, pruned_loss=0.04847, ctc_loss=0.09168, over 3824397.58 frames. ], batch size: 57, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:15:52,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=258634.66666666666, ans=0.125
+2024-08-31 18:16:17,081 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:16:35,317 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.88 vs. limit=15.0
+2024-08-31 18:16:54,797 INFO [train.py:1114] (1/4) Epoch 20, batch 1250, loss[loss=0.217, simple_loss=0.2817, pruned_loss=0.05575, ctc_loss=0.1023, over 19518.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2696, pruned_loss=0.048, ctc_loss=0.09059, over 3842508.95 frames. ], batch size: 61, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:17:05,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=258954.66666666666, ans=0.125
+2024-08-31 18:17:06,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=258954.66666666666, ans=0.125
+2024-08-31 18:17:20,828 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.673e+02 1.864e+02 2.243e+02 4.460e+02, threshold=3.727e+02, percent-clipped=1.0
+2024-08-31 18:17:37,032 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.11 vs. limit=10.0
+2024-08-31 18:17:50,702 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.05 vs. limit=12.0
+2024-08-31 18:17:55,525 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.43 vs. limit=6.0
+2024-08-31 18:18:00,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=259114.66666666666, ans=0.0
+2024-08-31 18:18:05,841 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=259114.66666666666, ans=0.125
+2024-08-31 18:19:05,809 INFO [train.py:1114] (1/4) Epoch 20, batch 1300, loss[loss=0.2345, simple_loss=0.2982, pruned_loss=0.06082, ctc_loss=0.123, over 18844.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2694, pruned_loss=0.04789, ctc_loss=0.09062, over 3846249.38 frames. ], batch size: 76, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:19:11,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=259168.0, ans=0.125
+2024-08-31 18:19:12,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=259168.0, ans=0.125
+2024-08-31 18:20:12,172 INFO [train.py:1114] (1/4) Epoch 20, batch 1350, loss[loss=0.1949, simple_loss=0.2675, pruned_loss=0.04289, ctc_loss=0.09153, over 19790.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2696, pruned_loss=0.04777, ctc_loss=0.09023, over 3857281.69 frames. ], batch size: 54, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:20:13,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=259434.66666666666, ans=0.125
+2024-08-31 18:20:38,783 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 1.677e+02 1.917e+02 2.382e+02 4.193e+02, threshold=3.834e+02, percent-clipped=5.0
+2024-08-31 18:20:41,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=259541.33333333334, ans=0.125
+2024-08-31 18:21:16,779 INFO [train.py:1114] (1/4) Epoch 20, batch 1400, loss[loss=0.1594, simple_loss=0.2303, pruned_loss=0.03243, ctc_loss=0.05891, over 19662.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2694, pruned_loss=0.04778, ctc_loss=0.09024, over 3864220.63 frames. ], batch size: 46, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:21:19,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259701.33333333334, ans=0.1
+2024-08-31 18:21:26,654 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=259701.33333333334, ans=0.0
+2024-08-31 18:21:42,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=259754.66666666666, ans=0.0
+2024-08-31 18:22:00,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259861.33333333334, ans=0.1
+2024-08-31 18:22:13,309 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.35 vs. limit=15.0
+2024-08-31 18:22:30,366 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=259914.66666666666, ans=0.0
+2024-08-31 18:22:31,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259914.66666666666, ans=0.1
+2024-08-31 18:22:33,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=259914.66666666666, ans=0.0
+2024-08-31 18:22:53,605 INFO [train.py:1114] (1/4) Epoch 20, batch 1450, loss[loss=0.214, simple_loss=0.282, pruned_loss=0.05303, ctc_loss=0.1002, over 19660.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2699, pruned_loss=0.04782, ctc_loss=0.09022, over 3862364.72 frames. ], batch size: 63, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:23:11,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=260021.33333333334, ans=0.125
+2024-08-31 18:23:13,278 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.88 vs. limit=15.0
+2024-08-31 18:23:14,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=260021.33333333334, ans=0.125
+2024-08-31 18:23:17,246 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.776e+02 2.029e+02 2.458e+02 5.712e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-31 18:23:19,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=260074.66666666666, ans=0.125
+2024-08-31 18:23:27,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=260074.66666666666, ans=0.125
+2024-08-31 18:23:34,191 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=260128.0, ans=0.125
+2024-08-31 18:23:41,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=260181.33333333334, ans=0.125
+2024-08-31 18:23:42,986 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.12 vs. limit=15.0
+2024-08-31 18:23:49,560 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=260181.33333333334, ans=0.1
+2024-08-31 18:23:50,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=260181.33333333334, ans=0.1
+2024-08-31 18:23:54,007 INFO [train.py:1114] (1/4) Epoch 20, batch 1500, loss[loss=0.1961, simple_loss=0.2718, pruned_loss=0.04438, ctc_loss=0.07923, over 19567.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.27, pruned_loss=0.04776, ctc_loss=0.08992, over 3862278.54 frames. ], batch size: 57, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:24:08,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=260288.0, ans=0.1
+2024-08-31 18:24:18,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=260341.33333333334, ans=0.0
+2024-08-31 18:24:26,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=260341.33333333334, ans=0.125
+2024-08-31 18:24:31,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=260394.66666666666, ans=0.125
+2024-08-31 18:25:34,748 INFO [train.py:1114] (1/4) Epoch 20, batch 1550, loss[loss=0.2083, simple_loss=0.28, pruned_loss=0.04975, ctc_loss=0.09263, over 19615.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2698, pruned_loss=0.0479, ctc_loss=0.0903, over 3845948.64 frames. ], batch size: 60, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:26:01,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=260501.33333333334, ans=0.125
+2024-08-31 18:26:05,544 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=2.514e-03
+2024-08-31 18:26:23,560 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=260554.66666666666, ans=0.04949747468305833
+2024-08-31 18:26:25,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=260554.66666666666, ans=0.125
+2024-08-31 18:26:33,206 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.748e+02 2.049e+02 2.466e+02 3.855e+02, threshold=4.097e+02, percent-clipped=0.0
+2024-08-31 18:27:11,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=260714.66666666666, ans=0.125
+2024-08-31 18:27:11,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=260714.66666666666, ans=0.125
+2024-08-31 18:27:12,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=260714.66666666666, ans=0.125
+2024-08-31 18:27:16,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.47 vs. limit=12.0
+2024-08-31 18:27:18,529 INFO [train.py:1114] (1/4) Epoch 20, batch 1600, loss[loss=0.1904, simple_loss=0.2684, pruned_loss=0.04097, ctc_loss=0.07613, over 19846.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2693, pruned_loss=0.04773, ctc_loss=0.09007, over 3836160.04 frames. ], batch size: 57, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:27:35,804 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.03 vs. limit=12.0
+2024-08-31 18:27:46,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=260874.66666666666, ans=0.05
+2024-08-31 18:28:30,411 INFO [train.py:1114] (1/4) Epoch 20, batch 1650, loss[loss=0.1867, simple_loss=0.2665, pruned_loss=0.0381, ctc_loss=0.07678, over 19659.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2692, pruned_loss=0.0477, ctc_loss=0.09008, over 3834174.80 frames. ], batch size: 59, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:28:31,688 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=261034.66666666666, ans=0.0
+2024-08-31 18:28:53,165 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.719e+02 2.026e+02 2.553e+02 4.958e+02, threshold=4.052e+02, percent-clipped=3.0
+2024-08-31 18:29:18,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=261248.0, ans=0.2
+2024-08-31 18:29:29,547 INFO [train.py:1114] (1/4) Epoch 20, batch 1700, loss[loss=0.1806, simple_loss=0.2416, pruned_loss=0.04409, ctc_loss=0.07844, over 19673.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.269, pruned_loss=0.0474, ctc_loss=0.0894, over 3848230.73 frames. ], batch size: 46, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:29:39,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=261301.33333333334, ans=0.125
+2024-08-31 18:30:04,960 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.14 vs. limit=12.0
+2024-08-31 18:30:05,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=261461.33333333334, ans=0.0
+2024-08-31 18:30:06,647 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=261461.33333333334, ans=0.125
+2024-08-31 18:30:12,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=261461.33333333334, ans=0.1
+2024-08-31 18:30:13,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=261461.33333333334, ans=0.2
+2024-08-31 18:30:16,981 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.88 vs. limit=12.0
+2024-08-31 18:31:18,040 INFO [train.py:1114] (1/4) Epoch 20, batch 1750, loss[loss=0.1609, simple_loss=0.2333, pruned_loss=0.03291, ctc_loss=0.05685, over 19648.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2689, pruned_loss=0.04732, ctc_loss=0.08926, over 3852510.28 frames. ], batch size: 45, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:31:22,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=261568.0, ans=0.05
+2024-08-31 18:31:29,909 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:31:39,999 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.679e+02 1.951e+02 2.329e+02 4.159e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-31 18:31:41,459 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=261674.66666666666, ans=0.2
+2024-08-31 18:31:41,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=261674.66666666666, ans=0.125
+2024-08-31 18:31:49,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=261674.66666666666, ans=0.0
+2024-08-31 18:32:15,183 INFO [train.py:1114] (1/4) Epoch 20, batch 1800, loss[loss=0.1972, simple_loss=0.2709, pruned_loss=0.04465, ctc_loss=0.0854, over 19620.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2694, pruned_loss=0.04754, ctc_loss=0.08965, over 3854196.06 frames. ], batch size: 55, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:32:22,160 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.59 vs. limit=15.0
+2024-08-31 18:32:22,770 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=261834.66666666666, ans=0.125
+2024-08-31 18:33:06,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=261941.33333333334, ans=0.0
+2024-08-31 18:33:10,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=261994.66666666666, ans=0.2
+2024-08-31 18:33:12,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=261994.66666666666, ans=0.125
+2024-08-31 18:33:27,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=262048.0, ans=0.0
+2024-08-31 18:33:34,488 INFO [train.py:1114] (1/4) Epoch 20, batch 1850, loss[loss=0.2338, simple_loss=0.2986, pruned_loss=0.06229, ctc_loss=0.1111, over 19588.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2688, pruned_loss=0.04733, ctc_loss=0.08915, over 3857282.87 frames. ], batch size: 57, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:33:36,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=262101.33333333334, ans=0.0
+2024-08-31 18:33:56,010 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.842e+02 2.206e+02 3.038e+02 4.306e+02, threshold=4.411e+02, percent-clipped=5.0
+2024-08-31 18:34:06,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=262208.0, ans=0.125
+2024-08-31 18:34:19,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=262261.3333333333, ans=0.0
+2024-08-31 18:34:28,906 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.21 vs. limit=15.0
+2024-08-31 18:34:36,265 INFO [train.py:1114] (1/4) Epoch 20, batch 1900, loss[loss=0.2069, simple_loss=0.2819, pruned_loss=0.04726, ctc_loss=0.09333, over 19660.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2698, pruned_loss=0.0479, ctc_loss=0.09013, over 3861469.85 frames. ], batch size: 59, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:34:50,523 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=262421.3333333333, ans=0.0
+2024-08-31 18:34:52,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=262421.3333333333, ans=0.1
+2024-08-31 18:35:08,227 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.51 vs. limit=15.0
+2024-08-31 18:35:11,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=262528.0, ans=0.125
+2024-08-31 18:35:12,544 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.40 vs. limit=12.0
+2024-08-31 18:35:13,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=262528.0, ans=0.2
+2024-08-31 18:35:34,439 INFO [train.py:1114] (1/4) Epoch 20, batch 1950, loss[loss=0.1971, simple_loss=0.262, pruned_loss=0.04786, ctc_loss=0.09118, over 19589.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2708, pruned_loss=0.04826, ctc_loss=0.09073, over 3870567.74 frames. ], batch size: 52, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:35:39,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=262634.6666666667, ans=0.125
+2024-08-31 18:35:55,636 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.650e+02 1.780e+02 2.101e+02 3.496e+02, threshold=3.560e+02, percent-clipped=0.0
+2024-08-31 18:35:58,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=262741.3333333333, ans=0.0
+2024-08-31 18:36:14,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=262794.6666666667, ans=0.2
+2024-08-31 18:36:15,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=262794.6666666667, ans=0.125
+2024-08-31 18:36:16,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=262794.6666666667, ans=10.0
+2024-08-31 18:36:23,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=262848.0, ans=0.125
+2024-08-31 18:36:28,117 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=262848.0, ans=0.1
+2024-08-31 18:36:31,287 INFO [train.py:1114] (1/4) Epoch 20, batch 2000, loss[loss=0.1995, simple_loss=0.2528, pruned_loss=0.053, ctc_loss=0.1006, over 19661.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2717, pruned_loss=0.04878, ctc_loss=0.09184, over 3854852.83 frames. ], batch size: 45, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:36:41,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=262954.6666666667, ans=0.2
+2024-08-31 18:36:50,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=262954.6666666667, ans=0.125
+2024-08-31 18:37:14,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=263061.3333333333, ans=0.0
+2024-08-31 18:37:17,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=263061.3333333333, ans=0.125
+2024-08-31 18:37:32,660 INFO [train.py:1114] (1/4) Epoch 20, batch 2050, loss[loss=0.1951, simple_loss=0.2561, pruned_loss=0.04843, ctc_loss=0.09314, over 19690.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2711, pruned_loss=0.04877, ctc_loss=0.09182, over 3849839.36 frames. ], batch size: 47, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:38:01,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=263221.3333333333, ans=0.125
+2024-08-31 18:38:02,083 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.724e+02 2.041e+02 2.585e+02 3.821e+02, threshold=4.082e+02, percent-clipped=5.0
+2024-08-31 18:38:19,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=263328.0, ans=0.2
+2024-08-31 18:38:36,471 INFO [train.py:1114] (1/4) Epoch 20, batch 2100, loss[loss=0.1913, simple_loss=0.2678, pruned_loss=0.04181, ctc_loss=0.07799, over 19757.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2707, pruned_loss=0.04849, ctc_loss=0.09145, over 3857655.54 frames. ], batch size: 54, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:38:49,583 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.62 vs. limit=22.5
+2024-08-31 18:39:22,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=263648.0, ans=0.0
+2024-08-31 18:39:25,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=263648.0, ans=0.1
+2024-08-31 18:39:32,922 INFO [train.py:1114] (1/4) Epoch 20, batch 2150, loss[loss=0.1795, simple_loss=0.2545, pruned_loss=0.03743, ctc_loss=0.07416, over 19590.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2699, pruned_loss=0.04844, ctc_loss=0.09121, over 3868423.44 frames. ], batch size: 52, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:39:37,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=263701.3333333333, ans=0.125
+2024-08-31 18:39:46,819 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.37 vs. limit=15.0
+2024-08-31 18:39:51,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=263754.6666666667, ans=0.1
+2024-08-31 18:39:58,523 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.627e+02 1.896e+02 2.393e+02 5.058e+02, threshold=3.792e+02, percent-clipped=5.0
+2024-08-31 18:40:00,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=263808.0, ans=0.1
+2024-08-31 18:40:02,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=263808.0, ans=0.0
+2024-08-31 18:40:25,418 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.20 vs. limit=15.0
+2024-08-31 18:40:25,676 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.02 vs. limit=12.0
+2024-08-31 18:40:32,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.06 vs. limit=22.5
+2024-08-31 18:40:33,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=263914.6666666667, ans=0.0
+2024-08-31 18:40:33,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.90 vs. limit=15.0
+2024-08-31 18:41:09,871 INFO [train.py:1114] (1/4) Epoch 20, batch 2200, loss[loss=0.1907, simple_loss=0.2671, pruned_loss=0.04243, ctc_loss=0.0735, over 19583.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2693, pruned_loss=0.048, ctc_loss=0.09043, over 3866680.19 frames. ], batch size: 57, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:41:13,428 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=263968.0, ans=0.125
+2024-08-31 18:41:49,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=264074.6666666667, ans=0.125
+2024-08-31 18:41:55,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=264128.0, ans=0.2
+2024-08-31 18:42:03,856 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=264128.0, ans=0.125
+2024-08-31 18:42:17,184 INFO [train.py:1114] (1/4) Epoch 20, batch 2250, loss[loss=0.2108, simple_loss=0.2838, pruned_loss=0.0501, ctc_loss=0.09403, over 19619.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2699, pruned_loss=0.04822, ctc_loss=0.09092, over 3866736.95 frames. ], batch size: 55, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:42:41,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=264288.0, ans=0.125
+2024-08-31 18:42:42,066 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.705e+02 2.149e+02 2.747e+02 5.291e+02, threshold=4.298e+02, percent-clipped=7.0
+2024-08-31 18:42:43,719 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.36 vs. limit=22.5
+2024-08-31 18:43:04,857 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=264448.0, ans=0.0
+2024-08-31 18:43:12,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.78 vs. limit=15.0
+2024-08-31 18:43:12,664 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.22 vs. limit=15.0
+2024-08-31 18:43:12,731 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.01 vs. limit=15.0
+2024-08-31 18:43:13,127 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.62 vs. limit=15.0
+2024-08-31 18:43:16,695 INFO [train.py:1114] (1/4) Epoch 20, batch 2300, loss[loss=0.1928, simple_loss=0.2551, pruned_loss=0.04794, ctc_loss=0.08668, over 19515.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2692, pruned_loss=0.04819, ctc_loss=0.09093, over 3860575.75 frames. ], batch size: 49, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:43:24,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=264501.3333333333, ans=0.125
+2024-08-31 18:43:37,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=264554.6666666667, ans=0.2
+2024-08-31 18:43:44,139 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.64 vs. limit=15.0
+2024-08-31 18:43:46,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=264608.0, ans=0.2
+2024-08-31 18:43:49,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=264608.0, ans=0.0
+2024-08-31 18:43:49,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=264608.0, ans=0.2
+2024-08-31 18:43:51,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=264661.3333333333, ans=0.0
+2024-08-31 18:44:12,820 INFO [train.py:1114] (1/4) Epoch 20, batch 2350, loss[loss=0.2139, simple_loss=0.2799, pruned_loss=0.05489, ctc_loss=0.09521, over 19677.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2686, pruned_loss=0.04789, ctc_loss=0.09009, over 3863388.31 frames. ], batch size: 63, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:44:15,379 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.70 vs. limit=22.5
+2024-08-31 18:44:18,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=264768.0, ans=0.2
+2024-08-31 18:44:49,425 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.669e+02 1.905e+02 2.325e+02 3.822e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-31 18:44:55,895 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.22 vs. limit=6.0
+2024-08-31 18:45:26,864 INFO [train.py:1114] (1/4) Epoch 20, batch 2400, loss[loss=0.2161, simple_loss=0.2809, pruned_loss=0.05454, ctc_loss=0.1053, over 19227.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2706, pruned_loss=0.04871, ctc_loss=0.0915, over 3857894.04 frames. ], batch size: 71, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:45:27,560 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.01 vs. limit=15.0
+2024-08-31 18:45:33,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=265034.6666666667, ans=0.0
+2024-08-31 18:45:42,217 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.83 vs. limit=15.0
+2024-08-31 18:45:47,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=265088.0, ans=0.05
+2024-08-31 18:46:15,484 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=12.08 vs. limit=15.0
+2024-08-31 18:46:23,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=265301.3333333333, ans=0.125
+2024-08-31 18:46:23,884 INFO [train.py:1114] (1/4) Epoch 20, batch 2450, loss[loss=0.2325, simple_loss=0.2872, pruned_loss=0.06531, ctc_loss=0.1181, over 13318.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2739, pruned_loss=0.05106, ctc_loss=0.09615, over 3731381.34 frames. ], batch size: 140, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:46:30,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=265301.3333333333, ans=0.125
+2024-08-31 18:46:43,068 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.86 vs. limit=15.0
+2024-08-31 18:46:45,877 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.663e+02 1.874e+02 2.086e+02 3.013e+02, threshold=3.749e+02, percent-clipped=0.0
+2024-08-31 18:47:04,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff2.min_abs, batch_count=265461.3333333333, ans=0.1
+2024-08-31 18:47:07,639 INFO [train.py:1387] (1/4) Done!
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-2 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-2
new file mode 100644
index 0000000000000000000000000000000000000000..7dcef8de004a2bbe2d96dd7f78a58440951a1015
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-2
@@ -0,0 +1,1097 @@
+2024-08-31 13:15:01,244 INFO [train.py:1182] (2/4) Training started
+2024-08-31 13:15:02,665 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-31 13:15:02,669 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-31 13:15:02,669 INFO [train.py:1212] (2/4) About to create model
+2024-08-31 13:15:10,388 INFO [train.py:1216] (2/4) Number of model parameters: 66367431
+2024-08-31 13:15:10,438 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-31 13:16:01,722 INFO [train.py:1231] (2/4) Using DDP
+2024-08-31 13:16:07,014 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-31 13:16:07,207 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-31 13:16:07,208 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-31 13:16:07,275 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-31 13:16:07,276 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-31 13:16:07,276 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-31 13:16:07,276 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-31 13:16:07,276 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-31 13:16:07,276 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-31 13:16:08,877 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-31 13:16:08,883 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-31 13:16:09,671 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-31 13:16:10,141 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-31 13:16:10,465 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-31 13:16:10,466 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:22:43,896 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12782MB
+2024-08-31 13:22:45,384 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12849MB
+2024-08-31 13:23:02,017 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-31 13:23:03,248 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=256, metric=8.54 vs. limit=7.5
+2024-08-31 13:23:03,530 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-31 13:24:12,098 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-31 13:24:13,687 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-31 13:24:13,707 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-31 13:25:06,943 INFO [train.py:1114] (2/4) Epoch 18, batch 0, loss[loss=0.1826, simple_loss=0.2456, pruned_loss=0.04306, ctc_loss=0.08405, over 19432.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2456, pruned_loss=0.04306, ctc_loss=0.08405, over 19432.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 13:25:06,944 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-31 13:25:31,865 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3674, 1.1105, 1.6340, 0.7188, 1.6513, 1.7595, 1.8532, 1.6009],
+ device='cuda:2')
+2024-08-31 13:25:49,908 INFO [train.py:1146] (2/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-31 13:25:49,909 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13069MB
+2024-08-31 13:25:51,829 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.87 vs. limit=22.5
+2024-08-31 13:27:19,277 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=225680.0, ans=0.0
+2024-08-31 13:32:33,653 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.39 vs. limit=15.0
+2024-08-31 13:48:13,677 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.934e+02 2.118e+02 2.433e+02 6.228e+02, threshold=4.237e+02, percent-clipped=5.0
+2024-08-31 13:56:45,326 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=225946.66666666666, ans=0.1
+2024-08-31 13:56:46,289 INFO [train.py:1114] (2/4) Epoch 18, batch 50, loss[loss=0.197, simple_loss=0.2517, pruned_loss=0.05073, ctc_loss=0.1018, over 19736.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.277, pruned_loss=0.05294, ctc_loss=0.1004, over 844774.48 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 13:57:26,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=225946.66666666666, ans=0.025
+2024-08-31 13:57:32,050 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.94 vs. limit=15.0
+2024-08-31 14:00:23,349 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.26 vs. limit=15.0
+2024-08-31 14:00:59,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=226053.33333333334, ans=0.0
+2024-08-31 14:09:11,734 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.26 vs. limit=6.0
+2024-08-31 14:11:29,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=226160.0, ans=0.0
+2024-08-31 14:11:30,626 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 14:13:36,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=226160.0, ans=0.125
+2024-08-31 14:13:38,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=226160.0, ans=0.0
+2024-08-31 14:13:50,103 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.48 vs. limit=22.5
+2024-08-31 14:15:00,241 INFO [train.py:1114] (2/4) Epoch 18, batch 100, loss[loss=0.1836, simple_loss=0.2584, pruned_loss=0.03984, ctc_loss=0.07282, over 19708.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2786, pruned_loss=0.05329, ctc_loss=0.09989, over 1499238.30 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:15:17,141 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.12 vs. limit=15.0
+2024-08-31 14:15:17,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten.whitening_limit, batch_count=226213.33333333334, ans=15.0
+2024-08-31 14:16:55,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=226213.33333333334, ans=0.2
+2024-08-31 14:25:28,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=226320.0, ans=0.125
+2024-08-31 14:28:22,027 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.87 vs. limit=15.0
+2024-08-31 14:29:05,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=226373.33333333334, ans=0.1
+2024-08-31 14:30:18,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=226373.33333333334, ans=0.1
+2024-08-31 14:31:33,311 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=226426.66666666666, ans=0.0
+2024-08-31 14:32:50,894 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=226426.66666666666, ans=0.0
+2024-08-31 14:32:51,146 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.29 vs. limit=15.0
+2024-08-31 14:32:51,605 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.685e+02 1.949e+02 2.332e+02 3.525e+02, threshold=3.898e+02, percent-clipped=0.0
+2024-08-31 14:34:38,790 INFO [train.py:1114] (2/4) Epoch 18, batch 150, loss[loss=0.1868, simple_loss=0.2539, pruned_loss=0.04315, ctc_loss=0.08347, over 19719.00 frames. ], tot_loss[loss=0.208, simple_loss=0.275, pruned_loss=0.05127, ctc_loss=0.09632, over 2028180.04 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:45:09,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-31 14:45:55,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-31 14:45:55,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.97 vs. limit=15.0
+2024-08-31 14:49:44,034 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.49 vs. limit=12.0
+2024-08-31 14:55:51,690 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=226640.0, ans=0.2
+2024-08-31 15:01:41,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=226693.33333333334, ans=0.125
+2024-08-31 15:03:26,245 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=5.92 vs. limit=15.0
+2024-08-31 15:05:15,420 INFO [train.py:1114] (2/4) Epoch 18, batch 200, loss[loss=0.2233, simple_loss=0.2852, pruned_loss=0.05773, ctc_loss=0.1151, over 18272.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2739, pruned_loss=0.05108, ctc_loss=0.09591, over 2435566.49 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:07:59,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=226800.0, ans=0.025
+2024-08-31 15:07:59,521 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.60 vs. limit=10.0
+2024-08-31 15:10:48,589 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.34 vs. limit=6.0
+2024-08-31 15:15:18,879 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.94 vs. limit=22.5
+2024-08-31 15:16:13,823 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-31 15:16:14,330 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.28 vs. limit=15.0
+2024-08-31 15:17:11,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=226960.0, ans=0.125
+2024-08-31 15:17:44,784 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.761e+02 2.086e+02 2.524e+02 4.159e+02, threshold=4.172e+02, percent-clipped=2.0
+2024-08-31 15:17:59,747 INFO [train.py:1114] (2/4) Epoch 18, batch 250, loss[loss=0.2549, simple_loss=0.3142, pruned_loss=0.07141, ctc_loss=0.1318, over 19347.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2742, pruned_loss=0.05145, ctc_loss=0.09669, over 2754676.87 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:19:14,674 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.19 vs. limit=10.0
+2024-08-31 15:21:17,326 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.54 vs. limit=22.5
+2024-08-31 15:22:47,238 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.85 vs. limit=15.0
+2024-08-31 15:22:58,180 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=227173.33333333334, ans=0.0
+2024-08-31 15:22:58,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=227173.33333333334, ans=0.125
+2024-08-31 15:24:04,330 INFO [train.py:1114] (2/4) Epoch 18, batch 300, loss[loss=0.2481, simple_loss=0.2983, pruned_loss=0.07267, ctc_loss=0.1314, over 19550.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.274, pruned_loss=0.05152, ctc_loss=0.09687, over 3000148.78 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:30:28,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=227493.33333333334, ans=0.125
+2024-08-31 15:30:47,329 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.680e+02 1.932e+02 2.386e+02 3.920e+02, threshold=3.864e+02, percent-clipped=0.0
+2024-08-31 15:31:47,643 INFO [train.py:1114] (2/4) Epoch 18, batch 350, loss[loss=0.1857, simple_loss=0.249, pruned_loss=0.0446, ctc_loss=0.08302, over 19783.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2748, pruned_loss=0.05187, ctc_loss=0.09758, over 3189797.23 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:32:05,406 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.14 vs. limit=15.0
+2024-08-31 15:32:35,527 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.11 vs. limit=15.0
+2024-08-31 15:33:57,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=227706.66666666666, ans=0.025
+2024-08-31 15:34:33,711 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer_ff3.min_abs, batch_count=227760.0, ans=0.2
+2024-08-31 15:34:39,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=227760.0, ans=0.1
+2024-08-31 15:34:57,666 INFO [train.py:1114] (2/4) Epoch 18, batch 400, loss[loss=0.2101, simple_loss=0.2875, pruned_loss=0.04798, ctc_loss=0.09204, over 19482.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2743, pruned_loss=0.05162, ctc_loss=0.09712, over 3341232.34 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:35:09,737 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=227813.33333333334, ans=0.125
+2024-08-31 15:35:12,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=227813.33333333334, ans=0.025
+2024-08-31 15:35:14,633 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=227813.33333333334, ans=0.125
+2024-08-31 15:37:09,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=228026.66666666666, ans=0.125
+2024-08-31 15:37:11,053 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.718e+02 1.967e+02 2.336e+02 3.401e+02, threshold=3.934e+02, percent-clipped=0.0
+2024-08-31 15:37:37,967 INFO [train.py:1114] (2/4) Epoch 18, batch 450, loss[loss=0.2387, simple_loss=0.3003, pruned_loss=0.06281, ctc_loss=0.1289, over 19625.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2747, pruned_loss=0.05173, ctc_loss=0.09742, over 3450729.30 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:39:32,283 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:39:51,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228240.0, ans=0.1
+2024-08-31 15:39:53,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=228240.0, ans=0.125
+2024-08-31 15:39:56,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=228240.0, ans=0.125
+2024-08-31 15:39:56,660 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.80 vs. limit=10.0
+2024-08-31 15:40:01,058 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=228293.33333333334, ans=0.2
+2024-08-31 15:40:01,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=228293.33333333334, ans=0.0
+2024-08-31 15:40:18,492 INFO [train.py:1114] (2/4) Epoch 18, batch 500, loss[loss=0.2065, simple_loss=0.2849, pruned_loss=0.04699, ctc_loss=0.08539, over 19681.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2738, pruned_loss=0.05124, ctc_loss=0.09659, over 3546019.83 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:40:19,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=228346.66666666666, ans=0.125
+2024-08-31 15:40:39,303 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.03 vs. limit=10.0
+2024-08-31 15:40:41,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=228453.33333333334, ans=0.0
+2024-08-31 15:40:42,632 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.38 vs. limit=6.0
+2024-08-31 15:40:45,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=228453.33333333334, ans=0.125
+2024-08-31 15:40:50,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228453.33333333334, ans=0.1
+2024-08-31 15:40:52,240 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=10.24 vs. limit=15.0
+2024-08-31 15:41:10,342 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.618e+02 1.812e+02 2.329e+02 3.946e+02, threshold=3.624e+02, percent-clipped=1.0
+2024-08-31 15:41:17,481 INFO [train.py:1114] (2/4) Epoch 18, batch 550, loss[loss=0.2256, simple_loss=0.2873, pruned_loss=0.05965, ctc_loss=0.1113, over 19242.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2738, pruned_loss=0.05134, ctc_loss=0.09678, over 3608988.72 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:42:31,152 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=228666.66666666666, ans=0.2
+2024-08-31 15:43:41,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=228773.33333333334, ans=0.2
+2024-08-31 15:44:00,806 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.23 vs. limit=22.5
+2024-08-31 15:44:18,818 INFO [train.py:1114] (2/4) Epoch 18, batch 600, loss[loss=0.2245, simple_loss=0.2926, pruned_loss=0.0577, ctc_loss=0.1026, over 19360.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2732, pruned_loss=0.05072, ctc_loss=0.09536, over 3665746.22 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:44:38,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=228933.33333333334, ans=0.0
+2024-08-31 15:45:28,763 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.735e+02 2.092e+02 3.203e+02 5.009e+02, threshold=4.184e+02, percent-clipped=13.0
+2024-08-31 15:45:30,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229093.33333333334, ans=0.1
+2024-08-31 15:45:38,279 INFO [train.py:1114] (2/4) Epoch 18, batch 650, loss[loss=0.2041, simple_loss=0.2782, pruned_loss=0.04732, ctc_loss=0.08861, over 19756.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2727, pruned_loss=0.05068, ctc_loss=0.09538, over 3716208.52 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:46:20,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=229146.66666666666, ans=0.0
+2024-08-31 15:46:24,699 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.40 vs. limit=15.0
+2024-08-31 15:46:27,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=229200.0, ans=0.025
+2024-08-31 15:46:30,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229200.0, ans=0.1
+2024-08-31 15:46:51,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=229306.66666666666, ans=0.0
+2024-08-31 15:47:16,567 INFO [train.py:1114] (2/4) Epoch 18, batch 700, loss[loss=0.1828, simple_loss=0.2521, pruned_loss=0.04181, ctc_loss=0.07494, over 19722.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2731, pruned_loss=0.05086, ctc_loss=0.09588, over 3749014.97 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:48:03,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=229626.66666666666, ans=0.125
+2024-08-31 15:48:03,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=229626.66666666666, ans=0.125
+2024-08-31 15:48:04,312 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.99 vs. limit=22.5
+2024-08-31 15:48:10,583 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.672e+02 1.935e+02 2.401e+02 4.868e+02, threshold=3.870e+02, percent-clipped=1.0
+2024-08-31 15:48:12,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=229626.66666666666, ans=0.0
+2024-08-31 15:48:16,528 INFO [train.py:1114] (2/4) Epoch 18, batch 750, loss[loss=0.2087, simple_loss=0.2764, pruned_loss=0.05082, ctc_loss=0.09843, over 19859.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2727, pruned_loss=0.05063, ctc_loss=0.09526, over 3775543.53 frames. ], batch size: 55, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:48:38,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=229733.33333333334, ans=0.125
+2024-08-31 15:49:03,503 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=229840.0, ans=6.0
+2024-08-31 15:49:17,264 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=229893.33333333334, ans=0.0
+2024-08-31 15:49:19,736 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=229893.33333333334, ans=0.0
+2024-08-31 15:49:28,065 INFO [train.py:1114] (2/4) Epoch 18, batch 800, loss[loss=0.1791, simple_loss=0.2475, pruned_loss=0.03994, ctc_loss=0.0769, over 19430.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2732, pruned_loss=0.05093, ctc_loss=0.09594, over 3796776.82 frames. ], batch size: 48, lr: 8.37e-03, grad_scale: 32.0
+2024-08-31 15:49:39,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=229946.66666666666, ans=0.0
+2024-08-31 15:50:26,260 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.84 vs. limit=15.0
+2024-08-31 15:50:27,780 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.682e+02 1.957e+02 2.333e+02 3.697e+02, threshold=3.913e+02, percent-clipped=0.0
+2024-08-31 15:50:29,311 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=230160.0, ans=0.1
+2024-08-31 15:50:33,702 INFO [train.py:1114] (2/4) Epoch 18, batch 850, loss[loss=0.2134, simple_loss=0.2943, pruned_loss=0.04873, ctc_loss=0.08778, over 19630.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2725, pruned_loss=0.05061, ctc_loss=0.09535, over 3815628.64 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:51:31,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=230266.66666666666, ans=0.125
+2024-08-31 15:51:33,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=230266.66666666666, ans=0.07
+2024-08-31 15:51:47,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=230320.0, ans=0.2
+2024-08-31 15:52:06,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=230426.66666666666, ans=0.0
+2024-08-31 15:52:06,630 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.10 vs. limit=22.5
+2024-08-31 15:52:07,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=230426.66666666666, ans=0.125
+2024-08-31 15:52:08,892 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=230426.66666666666, ans=0.0
+2024-08-31 15:52:15,954 INFO [train.py:1114] (2/4) Epoch 18, batch 900, loss[loss=0.1701, simple_loss=0.2408, pruned_loss=0.03566, ctc_loss=0.07006, over 19406.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.273, pruned_loss=0.05097, ctc_loss=0.09616, over 3819241.07 frames. ], batch size: 48, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:52:17,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=230480.0, ans=0.1
+2024-08-31 15:52:49,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=230586.66666666666, ans=0.0
+2024-08-31 15:52:57,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=230640.0, ans=0.125
+2024-08-31 15:53:00,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=230640.0, ans=0.125
+2024-08-31 15:53:06,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=230693.33333333334, ans=0.2
+2024-08-31 15:53:07,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=230693.33333333334, ans=0.0
+2024-08-31 15:53:08,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=230693.33333333334, ans=0.0
+2024-08-31 15:53:12,021 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.645e+02 1.872e+02 2.411e+02 3.930e+02, threshold=3.745e+02, percent-clipped=1.0
+2024-08-31 15:53:13,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=230693.33333333334, ans=0.0
+2024-08-31 15:53:46,104 INFO [train.py:1114] (2/4) Epoch 18, batch 950, loss[loss=0.2067, simple_loss=0.2653, pruned_loss=0.05465, ctc_loss=0.09693, over 19504.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2731, pruned_loss=0.05111, ctc_loss=0.09636, over 3821614.48 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:53:47,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=230746.66666666666, ans=0.125
+2024-08-31 15:53:47,924 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.23 vs. limit=15.0
+2024-08-31 15:53:54,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=230746.66666666666, ans=0.0
+2024-08-31 15:53:57,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=230800.0, ans=0.125
+2024-08-31 15:54:03,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=230800.0, ans=0.1
+2024-08-31 15:54:32,767 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=230906.66666666666, ans=0.125
+2024-08-31 15:54:48,344 INFO [train.py:1114] (2/4) Epoch 18, batch 1000, loss[loss=0.2083, simple_loss=0.2823, pruned_loss=0.04853, ctc_loss=0.09287, over 19859.00 frames. ], tot_loss[loss=0.208, simple_loss=0.274, pruned_loss=0.05152, ctc_loss=0.09708, over 3816720.59 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:55:02,820 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=231013.33333333334, ans=0.125
+2024-08-31 15:55:10,604 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.15 vs. limit=10.0
+2024-08-31 15:55:12,590 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:55:15,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=231120.0, ans=0.125
+2024-08-31 15:55:43,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231173.33333333334, ans=0.1
+2024-08-31 15:55:55,150 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.660e+02 1.836e+02 2.172e+02 3.389e+02, threshold=3.673e+02, percent-clipped=0.0
+2024-08-31 15:56:01,095 INFO [train.py:1114] (2/4) Epoch 18, batch 1050, loss[loss=0.2094, simple_loss=0.2888, pruned_loss=0.04775, ctc_loss=0.08633, over 19821.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2739, pruned_loss=0.05162, ctc_loss=0.09737, over 3823860.40 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:56:11,490 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.56 vs. limit=6.0
+2024-08-31 15:56:26,774 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:56:37,881 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.40 vs. limit=15.0
+2024-08-31 15:56:38,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=231440.0, ans=0.125
+2024-08-31 15:56:39,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=231440.0, ans=0.125
+2024-08-31 15:57:01,196 INFO [train.py:1114] (2/4) Epoch 18, batch 1100, loss[loss=0.1915, simple_loss=0.2681, pruned_loss=0.04221, ctc_loss=0.07619, over 19597.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2733, pruned_loss=0.05103, ctc_loss=0.09602, over 3831830.36 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:57:07,417 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.82 vs. limit=15.0
+2024-08-31 15:57:08,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=231546.66666666666, ans=0.125
+2024-08-31 15:57:21,676 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:57:26,486 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.31 vs. limit=10.0
+2024-08-31 15:57:50,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=231706.66666666666, ans=0.0
+2024-08-31 15:57:51,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231760.0, ans=0.1
+2024-08-31 15:57:58,316 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.608e+02 1.860e+02 2.284e+02 4.941e+02, threshold=3.719e+02, percent-clipped=1.0
+2024-08-31 15:58:04,224 INFO [train.py:1114] (2/4) Epoch 18, batch 1150, loss[loss=0.1821, simple_loss=0.2578, pruned_loss=0.0384, ctc_loss=0.07372, over 19594.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2728, pruned_loss=0.0508, ctc_loss=0.09576, over 3830669.25 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:58:18,567 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:58:25,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=231866.66666666666, ans=0.0
+2024-08-31 15:58:31,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=231866.66666666666, ans=0.125
+2024-08-31 15:58:35,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=231866.66666666666, ans=0.2
+2024-08-31 15:58:36,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=231866.66666666666, ans=0.2
+2024-08-31 15:58:48,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231920.0, ans=0.1
+2024-08-31 15:58:52,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=231973.33333333334, ans=0.125
+2024-08-31 15:59:17,232 INFO [train.py:1114] (2/4) Epoch 18, batch 1200, loss[loss=0.199, simple_loss=0.2722, pruned_loss=0.046, ctc_loss=0.08446, over 19844.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2739, pruned_loss=0.05146, ctc_loss=0.09689, over 3825491.91 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:59:30,693 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=232133.33333333334, ans=0.125
+2024-08-31 15:59:31,096 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.35 vs. limit=15.0
+2024-08-31 15:59:43,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=232186.66666666666, ans=0.0
+2024-08-31 15:59:44,126 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.82 vs. limit=15.0
+2024-08-31 15:59:53,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232240.0, ans=0.1
+2024-08-31 16:00:10,264 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.83 vs. limit=10.0
+2024-08-31 16:00:12,205 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.681e+02 1.869e+02 2.236e+02 3.755e+02, threshold=3.738e+02, percent-clipped=1.0
+2024-08-31 16:00:18,285 INFO [train.py:1114] (2/4) Epoch 18, batch 1250, loss[loss=0.2325, simple_loss=0.2955, pruned_loss=0.06174, ctc_loss=0.1153, over 19524.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2747, pruned_loss=0.05163, ctc_loss=0.09711, over 3842929.75 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:00:37,819 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=232400.0, ans=0.0
+2024-08-31 16:00:50,444 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=232453.33333333334, ans=0.0
+2024-08-31 16:00:54,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=232453.33333333334, ans=0.5
+2024-08-31 16:00:56,575 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:01:05,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=232506.66666666666, ans=0.125
+2024-08-31 16:01:19,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=232613.33333333334, ans=0.125
+2024-08-31 16:01:22,451 INFO [train.py:1114] (2/4) Epoch 18, batch 1300, loss[loss=0.2313, simple_loss=0.2931, pruned_loss=0.0625, ctc_loss=0.1113, over 18811.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2741, pruned_loss=0.05132, ctc_loss=0.0964, over 3845733.83 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:01:31,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=232613.33333333334, ans=0.125
+2024-08-31 16:01:36,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=232666.66666666666, ans=0.125
+2024-08-31 16:01:44,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=232666.66666666666, ans=0.125
+2024-08-31 16:01:44,155 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=232666.66666666666, ans=0.125
+2024-08-31 16:02:14,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=232826.66666666666, ans=0.0
+2024-08-31 16:02:19,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=232826.66666666666, ans=0.0
+2024-08-31 16:02:21,667 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 1.758e+02 2.176e+02 2.645e+02 4.342e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-31 16:02:23,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=232826.66666666666, ans=0.0
+2024-08-31 16:02:27,594 INFO [train.py:1114] (2/4) Epoch 18, batch 1350, loss[loss=0.2033, simple_loss=0.2753, pruned_loss=0.0471, ctc_loss=0.09272, over 19781.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2731, pruned_loss=0.05072, ctc_loss=0.0953, over 3856639.95 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:02:31,298 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=232880.0, ans=0.0
+2024-08-31 16:02:37,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232880.0, ans=0.1
+2024-08-31 16:02:49,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=232933.33333333334, ans=0.125
+2024-08-31 16:03:13,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff2.min_abs, batch_count=233040.0, ans=0.1
+2024-08-31 16:03:25,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=233093.33333333334, ans=0.2
+2024-08-31 16:03:29,593 INFO [train.py:1114] (2/4) Epoch 18, batch 1400, loss[loss=0.1901, simple_loss=0.2544, pruned_loss=0.04559, ctc_loss=0.0867, over 19707.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2726, pruned_loss=0.05038, ctc_loss=0.09465, over 3863721.32 frames. ], batch size: 46, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:03:31,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=233146.66666666666, ans=0.125
+2024-08-31 16:03:41,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=233146.66666666666, ans=0.125
+2024-08-31 16:03:47,927 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=233146.66666666666, ans=0.0
+2024-08-31 16:03:51,959 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.57 vs. limit=15.0
+2024-08-31 16:04:14,946 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.14 vs. limit=15.0
+2024-08-31 16:04:16,240 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.62 vs. limit=15.0
+2024-08-31 16:04:31,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=233360.0, ans=0.1
+2024-08-31 16:04:36,287 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.655e+02 1.916e+02 2.338e+02 3.956e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-31 16:04:40,650 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-31 16:04:42,277 INFO [train.py:1114] (2/4) Epoch 18, batch 1450, loss[loss=0.2215, simple_loss=0.2817, pruned_loss=0.05792, ctc_loss=0.1137, over 19658.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2735, pruned_loss=0.05073, ctc_loss=0.09546, over 3861602.59 frames. ], batch size: 63, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:04:42,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=233413.33333333334, ans=0.125
+2024-08-31 16:05:01,734 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.68 vs. limit=15.0
+2024-08-31 16:05:17,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=233520.0, ans=0.125
+2024-08-31 16:05:44,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=233626.66666666666, ans=0.0
+2024-08-31 16:05:48,828 INFO [train.py:1114] (2/4) Epoch 18, batch 1500, loss[loss=0.235, simple_loss=0.2977, pruned_loss=0.06227, ctc_loss=0.1196, over 19583.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2739, pruned_loss=0.05104, ctc_loss=0.09608, over 3861275.12 frames. ], batch size: 57, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:06:09,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=233733.33333333334, ans=0.125
+2024-08-31 16:06:16,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=233786.66666666666, ans=0.0
+2024-08-31 16:06:29,341 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=233840.0, ans=0.0
+2024-08-31 16:06:45,561 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=233893.33333333334, ans=0.1
+2024-08-31 16:06:46,031 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.48 vs. limit=15.0
+2024-08-31 16:06:50,185 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.669e+02 1.866e+02 2.355e+02 3.552e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-31 16:07:06,063 INFO [train.py:1114] (2/4) Epoch 18, batch 1550, loss[loss=0.2282, simple_loss=0.2873, pruned_loss=0.06207, ctc_loss=0.1125, over 19595.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2731, pruned_loss=0.05096, ctc_loss=0.09598, over 3845791.37 frames. ], batch size: 60, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:07:24,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=234000.0, ans=0.125
+2024-08-31 16:07:27,795 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.55 vs. limit=15.0
+2024-08-31 16:07:33,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=234053.33333333334, ans=0.125
+2024-08-31 16:07:37,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=234053.33333333334, ans=0.2
+2024-08-31 16:07:44,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=234106.66666666666, ans=0.0
+2024-08-31 16:07:48,668 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.38 vs. limit=15.0
+2024-08-31 16:08:07,324 INFO [train.py:1114] (2/4) Epoch 18, batch 1600, loss[loss=0.2198, simple_loss=0.2872, pruned_loss=0.05542, ctc_loss=0.1039, over 19853.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2731, pruned_loss=0.05097, ctc_loss=0.09607, over 3836055.87 frames. ], batch size: 57, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:08:07,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=234213.33333333334, ans=0.025
+2024-08-31 16:08:11,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=234213.33333333334, ans=0.0
+2024-08-31 16:08:21,781 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.53 vs. limit=6.0
+2024-08-31 16:08:24,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=234266.66666666666, ans=0.1
+2024-08-31 16:08:43,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff2.min_abs, batch_count=234320.0, ans=0.1
+2024-08-31 16:09:01,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=234426.66666666666, ans=0.0
+2024-08-31 16:09:20,623 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.787e+02 2.153e+02 2.672e+02 5.491e+02, threshold=4.305e+02, percent-clipped=8.0
+2024-08-31 16:09:20,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=234426.66666666666, ans=0.025
+2024-08-31 16:09:26,583 INFO [train.py:1114] (2/4) Epoch 18, batch 1650, loss[loss=0.2001, simple_loss=0.2767, pruned_loss=0.04459, ctc_loss=0.08589, over 19643.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2731, pruned_loss=0.05105, ctc_loss=0.0961, over 3830462.78 frames. ], batch size: 59, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:09:26,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=234480.0, ans=0.125
+2024-08-31 16:12:44,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=234480.0, ans=0.0
+2024-08-31 16:13:15,676 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=234533.33333333334, ans=0.025
+2024-08-31 16:13:28,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=234586.66666666666, ans=0.1
+2024-08-31 16:13:28,903 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.02 vs. limit=22.5
+2024-08-31 16:14:05,443 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=234693.33333333334, ans=0.125
+2024-08-31 16:14:15,749 INFO [train.py:1114] (2/4) Epoch 18, batch 1700, loss[loss=0.1925, simple_loss=0.2453, pruned_loss=0.05027, ctc_loss=0.09769, over 19671.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2732, pruned_loss=0.05083, ctc_loss=0.09581, over 3845647.98 frames. ], batch size: 46, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:14:36,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=234800.0, ans=0.125
+2024-08-31 16:14:43,083 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.31 vs. limit=15.0
+2024-08-31 16:14:51,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=234906.66666666666, ans=0.1
+2024-08-31 16:14:55,768 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=234906.66666666666, ans=0.2
+2024-08-31 16:15:07,762 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.694e+02 2.038e+02 2.484e+02 5.869e+02, threshold=4.076e+02, percent-clipped=3.0
+2024-08-31 16:15:13,535 INFO [train.py:1114] (2/4) Epoch 18, batch 1750, loss[loss=0.1738, simple_loss=0.2427, pruned_loss=0.03789, ctc_loss=0.07289, over 19674.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2723, pruned_loss=0.05024, ctc_loss=0.09466, over 3850710.37 frames. ], batch size: 45, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:15:27,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=235066.66666666666, ans=0.125
+2024-08-31 16:15:34,792 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.07 vs. limit=22.5
+2024-08-31 16:15:35,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=235066.66666666666, ans=0.025
+2024-08-31 16:15:40,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=235066.66666666666, ans=0.125
+2024-08-31 16:15:43,772 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.62 vs. limit=15.0
+2024-08-31 16:15:44,472 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=235120.0, ans=0.125
+2024-08-31 16:15:49,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=235120.0, ans=0.0
+2024-08-31 16:15:51,356 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.17 vs. limit=15.0
+2024-08-31 16:15:58,418 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.18 vs. limit=10.0
+2024-08-31 16:16:03,102 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.29 vs. limit=6.0
+2024-08-31 16:16:18,961 INFO [train.py:1114] (2/4) Epoch 18, batch 1800, loss[loss=0.2056, simple_loss=0.2764, pruned_loss=0.0493, ctc_loss=0.09063, over 19612.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2726, pruned_loss=0.05039, ctc_loss=0.09484, over 3851559.86 frames. ], batch size: 55, lr: 8.27e-03, grad_scale: 32.0
+2024-08-31 16:16:26,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=235280.0, ans=0.125
+2024-08-31 16:16:31,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=235333.33333333334, ans=15.0
+2024-08-31 16:16:46,448 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.19 vs. limit=15.0
+2024-08-31 16:16:48,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=235386.66666666666, ans=0.07
+2024-08-31 16:16:50,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=235386.66666666666, ans=0.125
+2024-08-31 16:16:53,838 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=235440.0, ans=0.0
+2024-08-31 16:17:02,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=235440.0, ans=0.2
+2024-08-31 16:17:12,067 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.739e+02 2.099e+02 2.606e+02 4.220e+02, threshold=4.197e+02, percent-clipped=1.0
+2024-08-31 16:17:14,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=235493.33333333334, ans=0.125
+2024-08-31 16:17:16,686 INFO [train.py:1114] (2/4) Epoch 18, batch 1850, loss[loss=0.2, simple_loss=0.2759, pruned_loss=0.04542, ctc_loss=0.08299, over 19593.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2722, pruned_loss=0.05022, ctc_loss=0.09456, over 3855874.93 frames. ], batch size: 57, lr: 8.27e-03, grad_scale: 16.0
+2024-08-31 16:17:19,387 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.06 vs. limit=15.0
+2024-08-31 16:17:39,868 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=235600.0, ans=0.2
+2024-08-31 16:17:58,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=235706.66666666666, ans=0.0
+2024-08-31 16:18:01,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=235706.66666666666, ans=0.125
+2024-08-31 16:18:02,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=235706.66666666666, ans=0.125
+2024-08-31 16:18:05,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=235706.66666666666, ans=0.025
+2024-08-31 16:18:06,544 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=235706.66666666666, ans=0.025
+2024-08-31 16:18:21,118 INFO [train.py:1114] (2/4) Epoch 18, batch 1900, loss[loss=0.2071, simple_loss=0.2812, pruned_loss=0.04851, ctc_loss=0.08976, over 19650.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2732, pruned_loss=0.05076, ctc_loss=0.09534, over 3861000.92 frames. ], batch size: 59, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:18:23,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=235813.33333333334, ans=0.1
+2024-08-31 16:18:45,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=235920.0, ans=0.125
+2024-08-31 16:18:56,692 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.69 vs. limit=8.0
+2024-08-31 16:19:05,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=235973.33333333334, ans=0.5
+2024-08-31 16:19:13,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=236026.66666666666, ans=0.0
+2024-08-31 16:19:14,244 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.296e+02 1.623e+02 1.837e+02 2.195e+02 5.135e+02, threshold=3.673e+02, percent-clipped=2.0
+2024-08-31 16:19:18,768 INFO [train.py:1114] (2/4) Epoch 18, batch 1950, loss[loss=0.2075, simple_loss=0.2697, pruned_loss=0.05218, ctc_loss=0.1021, over 19583.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2742, pruned_loss=0.0509, ctc_loss=0.09554, over 3869991.12 frames. ], batch size: 52, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:20:28,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=236133.33333333334, ans=0.1
+2024-08-31 16:20:34,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=236133.33333333334, ans=0.0
+2024-08-31 16:20:36,081 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.45 vs. limit=6.0
+2024-08-31 16:20:45,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=236186.66666666666, ans=0.125
+2024-08-31 16:20:45,563 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.34 vs. limit=15.0
+2024-08-31 16:21:03,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=236293.33333333334, ans=0.125
+2024-08-31 16:21:21,708 INFO [train.py:1114] (2/4) Epoch 18, batch 2000, loss[loss=0.1827, simple_loss=0.249, pruned_loss=0.04216, ctc_loss=0.08037, over 19673.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2748, pruned_loss=0.05123, ctc_loss=0.09617, over 3854452.41 frames. ], batch size: 45, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:21:24,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=236346.66666666666, ans=0.125
+2024-08-31 16:21:59,145 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.40 vs. limit=10.0
+2024-08-31 16:22:10,741 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.54 vs. limit=10.0
+2024-08-31 16:22:14,035 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.42 vs. limit=10.0
+2024-08-31 16:22:14,731 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.400e+02 1.704e+02 2.096e+02 2.751e+02 4.638e+02, threshold=4.193e+02, percent-clipped=6.0
+2024-08-31 16:22:19,168 INFO [train.py:1114] (2/4) Epoch 18, batch 2050, loss[loss=0.208, simple_loss=0.262, pruned_loss=0.05668, ctc_loss=0.1017, over 19716.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2742, pruned_loss=0.05129, ctc_loss=0.09632, over 3851418.13 frames. ], batch size: 47, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:22:31,705 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=236666.66666666666, ans=0.2
+2024-08-31 16:22:35,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=236666.66666666666, ans=0.2
+2024-08-31 16:22:41,072 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.76 vs. limit=15.0
+2024-08-31 16:22:59,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=236773.33333333334, ans=0.125
+2024-08-31 16:23:18,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=236826.66666666666, ans=10.0
+2024-08-31 16:23:21,346 INFO [train.py:1114] (2/4) Epoch 18, batch 2100, loss[loss=0.2063, simple_loss=0.2738, pruned_loss=0.05048, ctc_loss=0.09448, over 19773.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2733, pruned_loss=0.05079, ctc_loss=0.09552, over 3858565.95 frames. ], batch size: 54, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:23:29,963 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=236880.0, ans=0.1
+2024-08-31 16:23:33,651 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.24 vs. limit=15.0
+2024-08-31 16:23:34,753 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.84 vs. limit=22.5
+2024-08-31 16:23:56,154 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:24:01,802 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.86 vs. limit=22.5
+2024-08-31 16:24:26,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=237093.33333333334, ans=0.125
+2024-08-31 16:24:27,115 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.628e+02 1.802e+02 2.351e+02 4.404e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 16:24:31,676 INFO [train.py:1114] (2/4) Epoch 18, batch 2150, loss[loss=0.1948, simple_loss=0.2648, pruned_loss=0.04513, ctc_loss=0.08661, over 19870.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2721, pruned_loss=0.05033, ctc_loss=0.09448, over 3868180.37 frames. ], batch size: 52, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:24:35,306 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=237146.66666666666, ans=0.2
+2024-08-31 16:24:47,819 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=237200.0, ans=0.1
+2024-08-31 16:24:51,356 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=237200.0, ans=0.0
+2024-08-31 16:25:25,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.88 vs. limit=15.0
+2024-08-31 16:25:40,266 INFO [train.py:1114] (2/4) Epoch 18, batch 2200, loss[loss=0.238, simple_loss=0.2965, pruned_loss=0.06478, ctc_loss=0.1251, over 19592.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2723, pruned_loss=0.0504, ctc_loss=0.0947, over 3866406.18 frames. ], batch size: 57, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:25:50,098 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=237413.33333333334, ans=0.125
+2024-08-31 16:25:55,818 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.30 vs. limit=15.0
+2024-08-31 16:26:17,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=237573.33333333334, ans=0.125
+2024-08-31 16:26:33,729 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.652e+02 1.938e+02 2.493e+02 4.901e+02, threshold=3.877e+02, percent-clipped=6.0
+2024-08-31 16:26:38,334 INFO [train.py:1114] (2/4) Epoch 18, batch 2250, loss[loss=0.2001, simple_loss=0.2815, pruned_loss=0.04286, ctc_loss=0.08269, over 19616.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2719, pruned_loss=0.05015, ctc_loss=0.09414, over 3865969.40 frames. ], batch size: 55, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:26:53,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=237680.0, ans=0.0
+2024-08-31 16:27:06,411 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.24 vs. limit=15.0
+2024-08-31 16:27:09,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=237733.33333333334, ans=0.125
+2024-08-31 16:27:26,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=237840.0, ans=0.125
+2024-08-31 16:27:51,890 INFO [train.py:1114] (2/4) Epoch 18, batch 2300, loss[loss=0.1878, simple_loss=0.2615, pruned_loss=0.04112, ctc_loss=0.07954, over 19510.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2715, pruned_loss=0.05026, ctc_loss=0.09452, over 3860086.18 frames. ], batch size: 49, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:27:56,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=237946.66666666666, ans=0.0
+2024-08-31 16:28:00,053 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.03 vs. limit=15.0
+2024-08-31 16:28:19,429 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=238053.33333333334, ans=0.025
+2024-08-31 16:28:28,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=238106.66666666666, ans=0.125
+2024-08-31 16:28:30,586 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.44 vs. limit=15.0
+2024-08-31 16:28:47,462 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.696e+02 1.848e+02 2.393e+02 3.836e+02, threshold=3.696e+02, percent-clipped=0.0
+2024-08-31 16:29:07,685 INFO [train.py:1114] (2/4) Epoch 18, batch 2350, loss[loss=0.2023, simple_loss=0.2787, pruned_loss=0.04582, ctc_loss=0.08596, over 19687.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2716, pruned_loss=0.05036, ctc_loss=0.09464, over 3862480.50 frames. ], batch size: 63, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:29:14,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=238213.33333333334, ans=0.125
+2024-08-31 16:29:40,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=238373.33333333334, ans=0.0
+2024-08-31 16:30:30,916 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.88 vs. limit=15.0
+2024-08-31 16:30:36,466 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=238426.66666666666, ans=0.1
+2024-08-31 16:30:38,505 INFO [train.py:1114] (2/4) Epoch 18, batch 2400, loss[loss=0.2199, simple_loss=0.2902, pruned_loss=0.05395, ctc_loss=0.1041, over 19247.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2746, pruned_loss=0.05165, ctc_loss=0.09696, over 3857430.91 frames. ], batch size: 71, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:30:52,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=238533.33333333334, ans=0.125
+2024-08-31 16:30:58,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=238533.33333333334, ans=0.2
+2024-08-31 16:31:03,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=238586.66666666666, ans=0.025
+2024-08-31 16:31:30,449 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=238693.33333333334, ans=0.125
+2024-08-31 16:31:47,473 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 1.682e+02 1.835e+02 2.125e+02 4.662e+02, threshold=3.671e+02, percent-clipped=5.0
+2024-08-31 16:31:52,088 INFO [train.py:1114] (2/4) Epoch 18, batch 2450, loss[loss=0.2773, simple_loss=0.3079, pruned_loss=0.09028, ctc_loss=0.1655, over 14215.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2782, pruned_loss=0.05417, ctc_loss=0.102, over 3733241.97 frames. ], batch size: 140, lr: 8.21e-03, grad_scale: 32.0
+2024-08-31 16:31:52,651 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=11.21 vs. limit=12.0
+2024-08-31 16:32:08,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=238800.0, ans=0.125
+2024-08-31 16:32:22,018 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=238853.33333333334, ans=0.09899494936611666
+2024-08-31 16:32:25,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=238906.66666666666, ans=0.125
+2024-08-31 16:33:43,942 INFO [train.py:1114] (2/4) Epoch 19, batch 0, loss[loss=0.204, simple_loss=0.2631, pruned_loss=0.05302, ctc_loss=0.09728, over 19405.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2631, pruned_loss=0.05302, ctc_loss=0.09728, over 19405.00 frames. ], batch size: 48, lr: 7.99e-03, grad_scale: 32.0
+2024-08-31 16:33:43,943 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-31 16:33:52,861 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.0668, 2.2441, 2.9506, 3.3905], device='cuda:2')
+2024-08-31 16:34:00,536 INFO [train.py:1146] (2/4) Epoch 19, validation: loss=0.1846, simple_loss=0.2728, pruned_loss=0.03584, ctc_loss=0.06159, over 944034.00 frames.
+2024-08-31 16:34:01,380 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13505MB
+2024-08-31 16:34:20,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=239008.0, ans=0.0
+2024-08-31 16:34:26,370 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=239061.33333333334, ans=0.125
+2024-08-31 16:34:54,290 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.57 vs. limit=15.0
+2024-08-31 16:35:04,412 INFO [train.py:1114] (2/4) Epoch 19, batch 50, loss[loss=0.1988, simple_loss=0.2562, pruned_loss=0.05183, ctc_loss=0.09426, over 19697.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2744, pruned_loss=0.05164, ctc_loss=0.0981, over 843613.11 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:35:05,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=239221.33333333334, ans=0.2
+2024-08-31 16:35:09,141 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=239221.33333333334, ans=0.1
+2024-08-31 16:35:12,115 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.19 vs. limit=15.0
+2024-08-31 16:35:12,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=239221.33333333334, ans=6.0
+2024-08-31 16:35:12,518 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.795e+02 2.006e+02 2.342e+02 4.821e+02, threshold=4.012e+02, percent-clipped=4.0
+2024-08-31 16:35:24,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=239274.66666666666, ans=0.04949747468305833
+2024-08-31 16:35:26,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=239328.0, ans=0.2
+2024-08-31 16:35:34,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=239328.0, ans=0.025
+2024-08-31 16:35:43,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=239381.33333333334, ans=0.025
+2024-08-31 16:35:45,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-31 16:35:46,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-31 16:36:03,809 INFO [train.py:1114] (2/4) Epoch 19, batch 100, loss[loss=0.187, simple_loss=0.2547, pruned_loss=0.04367, ctc_loss=0.07987, over 19711.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2758, pruned_loss=0.0514, ctc_loss=0.09745, over 1498173.55 frames. ], batch size: 51, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:36:05,291 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:36:23,598 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=239541.33333333334, ans=0.125
+2024-08-31 16:36:32,095 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.60 vs. limit=12.0
+2024-08-31 16:36:35,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=239594.66666666666, ans=0.125
+2024-08-31 16:36:54,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=239701.33333333334, ans=0.0
+2024-08-31 16:37:00,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=239701.33333333334, ans=0.125
+2024-08-31 16:37:06,673 INFO [train.py:1114] (2/4) Epoch 19, batch 150, loss[loss=0.1964, simple_loss=0.251, pruned_loss=0.05246, ctc_loss=0.09199, over 19712.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2721, pruned_loss=0.0496, ctc_loss=0.0936, over 2027757.76 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:37:14,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=239754.66666666666, ans=0.09899494936611666
+2024-08-31 16:37:15,241 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.762e+02 1.953e+02 2.445e+02 3.524e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-31 16:37:43,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=239914.66666666666, ans=0.1
+2024-08-31 16:37:50,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=239914.66666666666, ans=0.2
+2024-08-31 16:37:52,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.12 vs. limit=10.0
+2024-08-31 16:38:02,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=239968.0, ans=0.0
+2024-08-31 16:38:02,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=239968.0, ans=0.07
+2024-08-31 16:38:14,091 INFO [train.py:1114] (2/4) Epoch 19, batch 200, loss[loss=0.2242, simple_loss=0.2916, pruned_loss=0.05776, ctc_loss=0.1031, over 18193.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2722, pruned_loss=0.04987, ctc_loss=0.0936, over 2435345.63 frames. ], batch size: 85, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:38:15,558 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=240021.33333333334, ans=0.0
+2024-08-31 16:38:17,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=240021.33333333334, ans=0.125
+2024-08-31 16:38:47,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=240128.0, ans=0.0
+2024-08-31 16:38:49,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=240181.33333333334, ans=0.2
+2024-08-31 16:38:50,767 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=240181.33333333334, ans=0.125
+2024-08-31 16:38:54,524 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff3.min_abs, batch_count=240181.33333333334, ans=0.2
+2024-08-31 16:39:03,528 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.13 vs. limit=12.0
+2024-08-31 16:39:13,532 INFO [train.py:1114] (2/4) Epoch 19, batch 250, loss[loss=0.2254, simple_loss=0.2843, pruned_loss=0.06097, ctc_loss=0.1111, over 19434.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2723, pruned_loss=0.05018, ctc_loss=0.09419, over 2756225.88 frames. ], batch size: 67, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:39:18,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=240288.0, ans=0.125
+2024-08-31 16:39:27,166 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 1.733e+02 2.186e+02 2.853e+02 4.755e+02, threshold=4.372e+02, percent-clipped=7.0
+2024-08-31 16:39:30,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=240341.33333333334, ans=0.125
+2024-08-31 16:39:31,483 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.05 vs. limit=22.5
+2024-08-31 16:39:33,380 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=240341.33333333334, ans=0.025
+2024-08-31 16:39:36,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=240341.33333333334, ans=0.0
+2024-08-31 16:39:55,922 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.84 vs. limit=15.0
+2024-08-31 16:39:58,787 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.96 vs. limit=15.0
+2024-08-31 16:40:12,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=240501.33333333334, ans=0.0
+2024-08-31 16:40:14,963 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.29 vs. limit=22.5
+2024-08-31 16:40:15,868 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=240501.33333333334, ans=0.1
+2024-08-31 16:40:20,412 INFO [train.py:1114] (2/4) Epoch 19, batch 300, loss[loss=0.2345, simple_loss=0.2937, pruned_loss=0.06393, ctc_loss=0.1185, over 19508.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2715, pruned_loss=0.04995, ctc_loss=0.09426, over 3000511.74 frames. ], batch size: 61, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:40:20,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=240554.66666666666, ans=0.0
+2024-08-31 16:40:21,698 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=240554.66666666666, ans=0.035
+2024-08-31 16:40:22,244 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.46 vs. limit=15.0
+2024-08-31 16:40:25,521 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=240554.66666666666, ans=0.125
+2024-08-31 16:40:29,100 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=1.83 vs. limit=6.0
+2024-08-31 16:40:37,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=240608.0, ans=0.025
+2024-08-31 16:40:47,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=240661.33333333334, ans=0.125
+2024-08-31 16:41:06,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=240714.66666666666, ans=0.125
+2024-08-31 16:41:21,973 INFO [train.py:1114] (2/4) Epoch 19, batch 350, loss[loss=0.1779, simple_loss=0.2474, pruned_loss=0.03942, ctc_loss=0.07405, over 19796.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2728, pruned_loss=0.05023, ctc_loss=0.09471, over 3190250.24 frames. ], batch size: 48, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:41:30,312 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.653e+02 1.904e+02 2.349e+02 4.016e+02, threshold=3.809e+02, percent-clipped=0.0
+2024-08-31 16:41:35,458 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.11 vs. limit=15.0
+2024-08-31 16:41:46,551 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=15.08 vs. limit=15.0
+2024-08-31 16:42:06,158 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.94 vs. limit=15.0
+2024-08-31 16:42:25,379 INFO [train.py:1114] (2/4) Epoch 19, batch 400, loss[loss=0.2056, simple_loss=0.2768, pruned_loss=0.04867, ctc_loss=0.09252, over 19481.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2719, pruned_loss=0.04954, ctc_loss=0.09334, over 3342450.82 frames. ], batch size: 54, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:42:37,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=241088.0, ans=0.2
+2024-08-31 16:43:12,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=241248.0, ans=0.2
+2024-08-31 16:43:15,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=241248.0, ans=0.04949747468305833
+2024-08-31 16:43:18,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=241248.0, ans=0.0
+2024-08-31 16:43:27,313 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=241301.33333333334, ans=0.2
+2024-08-31 16:43:34,394 INFO [train.py:1114] (2/4) Epoch 19, batch 450, loss[loss=0.2163, simple_loss=0.2834, pruned_loss=0.05428, ctc_loss=0.1016, over 19612.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2721, pruned_loss=0.04976, ctc_loss=0.09369, over 3450474.67 frames. ], batch size: 55, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:43:42,745 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.686e+02 1.896e+02 2.370e+02 4.152e+02, threshold=3.792e+02, percent-clipped=1.0
+2024-08-31 16:43:44,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=241354.66666666666, ans=0.2
+2024-08-31 16:44:01,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=241461.33333333334, ans=0.125
+2024-08-31 16:44:05,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=241461.33333333334, ans=0.2
+2024-08-31 16:44:11,441 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.47 vs. limit=22.5
+2024-08-31 16:44:13,623 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=241514.66666666666, ans=0.0
+2024-08-31 16:44:14,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=241514.66666666666, ans=0.125
+2024-08-31 16:44:35,447 INFO [train.py:1114] (2/4) Epoch 19, batch 500, loss[loss=0.2435, simple_loss=0.3011, pruned_loss=0.06839, ctc_loss=0.1226, over 19693.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2712, pruned_loss=0.04935, ctc_loss=0.09294, over 3545980.66 frames. ], batch size: 63, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:44:35,655 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:44:37,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=241621.33333333334, ans=0.035
+2024-08-31 16:44:51,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=241674.66666666666, ans=0.125
+2024-08-31 16:45:06,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=241728.0, ans=0.1
+2024-08-31 16:45:12,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=241781.33333333334, ans=0.0
+2024-08-31 16:45:28,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-31 16:45:31,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-31 16:45:55,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=241888.0, ans=0.125
+2024-08-31 16:46:04,262 INFO [train.py:1114] (2/4) Epoch 19, batch 550, loss[loss=0.2042, simple_loss=0.2769, pruned_loss=0.04814, ctc_loss=0.08822, over 19316.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2708, pruned_loss=0.04922, ctc_loss=0.09254, over 3608060.51 frames. ], batch size: 71, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:46:12,730 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.697e+02 1.983e+02 2.191e+02 3.507e+02, threshold=3.966e+02, percent-clipped=0.0
+2024-08-31 16:46:25,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=241888.0, ans=0.125
+2024-08-31 16:46:27,367 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=241941.33333333334, ans=0.07
+2024-08-31 16:46:35,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=241941.33333333334, ans=0.2
+2024-08-31 16:46:40,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=241994.66666666666, ans=0.125
+2024-08-31 16:46:58,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=242048.0, ans=0.2
+2024-08-31 16:47:05,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=242101.33333333334, ans=0.2
+2024-08-31 16:47:05,417 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=242101.33333333334, ans=0.0
+2024-08-31 16:47:10,202 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=242101.33333333334, ans=0.0
+2024-08-31 16:47:16,276 INFO [train.py:1114] (2/4) Epoch 19, batch 600, loss[loss=0.2074, simple_loss=0.2757, pruned_loss=0.05032, ctc_loss=0.0962, over 19426.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2707, pruned_loss=0.04899, ctc_loss=0.09211, over 3664988.97 frames. ], batch size: 67, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:47:22,232 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=242154.66666666666, ans=0.125
+2024-08-31 16:47:26,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=242154.66666666666, ans=0.0
+2024-08-31 16:47:30,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=242154.66666666666, ans=0.07
+2024-08-31 16:47:37,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=242208.0, ans=0.0
+2024-08-31 16:47:46,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=242208.0, ans=0.125
+2024-08-31 16:47:54,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=242261.33333333334, ans=0.0
+2024-08-31 16:48:21,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=242314.66666666666, ans=0.0
+2024-08-31 16:48:22,211 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=242314.66666666666, ans=0.1
+2024-08-31 16:48:39,676 INFO [train.py:1114] (2/4) Epoch 19, batch 650, loss[loss=0.1988, simple_loss=0.2691, pruned_loss=0.04677, ctc_loss=0.08712, over 19774.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2705, pruned_loss=0.04901, ctc_loss=0.09245, over 3715999.86 frames. ], batch size: 54, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:48:42,378 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:48:48,388 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.784e+02 2.044e+02 2.793e+02 4.792e+02, threshold=4.088e+02, percent-clipped=6.0
+2024-08-31 16:49:03,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=242474.66666666666, ans=0.125
+2024-08-31 16:49:08,043 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=242528.0, ans=0.2
+2024-08-31 16:49:20,492 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.48 vs. limit=15.0
+2024-08-31 16:49:29,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=242581.33333333334, ans=0.0
+2024-08-31 16:49:40,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=242634.66666666666, ans=0.2
+2024-08-31 16:50:02,079 INFO [train.py:1114] (2/4) Epoch 19, batch 700, loss[loss=0.1932, simple_loss=0.265, pruned_loss=0.04262, ctc_loss=0.09048, over 19734.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2711, pruned_loss=0.0491, ctc_loss=0.09244, over 3749459.92 frames. ], batch size: 51, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:50:14,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=242688.0, ans=0.125
+2024-08-31 16:50:22,507 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=242741.33333333334, ans=0.09899494936611666
+2024-08-31 16:50:30,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=242794.66666666666, ans=0.1
+2024-08-31 16:50:43,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=242848.0, ans=0.95
+2024-08-31 16:50:51,495 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.76 vs. limit=10.0
+2024-08-31 16:51:27,355 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.73 vs. limit=15.0
+2024-08-31 16:52:16,516 INFO [train.py:1114] (2/4) Epoch 19, batch 750, loss[loss=0.2027, simple_loss=0.2674, pruned_loss=0.05036, ctc_loss=0.09327, over 19506.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2705, pruned_loss=0.04888, ctc_loss=0.0919, over 3774607.39 frames. ], batch size: 54, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:52:38,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=242954.66666666666, ans=0.0
+2024-08-31 16:52:40,602 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.707e+02 2.012e+02 2.576e+02 4.596e+02, threshold=4.024e+02, percent-clipped=2.0
+2024-08-31 16:52:42,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=242954.66666666666, ans=0.0
+2024-08-31 16:53:17,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=243114.66666666666, ans=0.0
+2024-08-31 16:53:23,351 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.89 vs. limit=15.0
+2024-08-31 16:53:27,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=243168.0, ans=0.125
+2024-08-31 16:53:40,986 INFO [train.py:1114] (2/4) Epoch 19, batch 800, loss[loss=0.1848, simple_loss=0.2485, pruned_loss=0.04444, ctc_loss=0.08061, over 19799.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2698, pruned_loss=0.04858, ctc_loss=0.09133, over 3796075.64 frames. ], batch size: 49, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:53:43,008 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.31 vs. limit=10.0
+2024-08-31 16:54:07,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=243328.0, ans=0.0
+2024-08-31 16:54:18,805 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.79 vs. limit=15.0
+2024-08-31 16:54:40,503 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=243434.66666666666, ans=0.125
+2024-08-31 16:54:52,038 INFO [train.py:1114] (2/4) Epoch 19, batch 850, loss[loss=0.2007, simple_loss=0.2828, pruned_loss=0.04236, ctc_loss=0.08497, over 19632.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2701, pruned_loss=0.04879, ctc_loss=0.09187, over 3814477.48 frames. ], batch size: 59, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:55:00,087 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.677e+02 1.837e+02 2.316e+02 3.927e+02, threshold=3.675e+02, percent-clipped=0.0
+2024-08-31 16:55:32,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=243648.0, ans=0.125
+2024-08-31 16:55:40,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=243648.0, ans=0.0
+2024-08-31 16:55:41,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=243648.0, ans=0.0
+2024-08-31 16:55:45,623 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:55:52,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=243701.33333333334, ans=0.125
+2024-08-31 16:55:52,696 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=243701.33333333334, ans=0.1
+2024-08-31 16:55:55,939 INFO [train.py:1114] (2/4) Epoch 19, batch 900, loss[loss=0.1734, simple_loss=0.2408, pruned_loss=0.03883, ctc_loss=0.07063, over 19400.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2707, pruned_loss=0.04934, ctc_loss=0.09293, over 3817687.94 frames. ], batch size: 48, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:56:06,947 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=243808.0, ans=0.2
+2024-08-31 16:56:08,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=243808.0, ans=0.0
+2024-08-31 16:56:10,870 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.36 vs. limit=15.0
+2024-08-31 16:56:12,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=243808.0, ans=0.1
+2024-08-31 16:56:14,445 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=243808.0, ans=0.2
+2024-08-31 16:56:19,375 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=243861.33333333334, ans=0.0
+2024-08-31 16:56:53,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=243968.0, ans=0.125
+2024-08-31 16:57:05,831 INFO [train.py:1114] (2/4) Epoch 19, batch 950, loss[loss=0.1791, simple_loss=0.2468, pruned_loss=0.03994, ctc_loss=0.07906, over 19503.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2711, pruned_loss=0.04957, ctc_loss=0.0933, over 3817453.70 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:57:10,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=244021.33333333334, ans=0.0
+2024-08-31 16:57:14,292 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.751e+02 2.034e+02 2.400e+02 3.857e+02, threshold=4.067e+02, percent-clipped=1.0
+2024-08-31 16:57:14,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=244021.33333333334, ans=0.0
+2024-08-31 16:57:18,242 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=244074.66666666666, ans=0.125
+2024-08-31 16:57:48,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=244181.33333333334, ans=0.07
+2024-08-31 16:57:51,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=244181.33333333334, ans=0.2
+2024-08-31 16:57:52,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=244234.66666666666, ans=0.95
+2024-08-31 16:57:59,448 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=244234.66666666666, ans=0.125
+2024-08-31 16:58:06,281 INFO [train.py:1114] (2/4) Epoch 19, batch 1000, loss[loss=0.1944, simple_loss=0.2605, pruned_loss=0.04676, ctc_loss=0.08708, over 19847.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2721, pruned_loss=0.05018, ctc_loss=0.09451, over 3813881.18 frames. ], batch size: 52, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 16:58:30,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=244288.0, ans=0.125
+2024-08-31 16:58:52,288 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=244341.33333333334, ans=0.0
+2024-08-31 16:59:43,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=244448.0, ans=0.125
+2024-08-31 16:59:50,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=244448.0, ans=0.025
+2024-08-31 17:00:09,436 INFO [train.py:1114] (2/4) Epoch 19, batch 1050, loss[loss=0.1893, simple_loss=0.2656, pruned_loss=0.04027, ctc_loss=0.08089, over 19837.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2713, pruned_loss=0.04961, ctc_loss=0.09347, over 3821505.79 frames. ], batch size: 57, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 17:00:09,620 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=244554.66666666666, ans=0.0
+2024-08-31 17:00:15,863 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.97 vs. limit=15.0
+2024-08-31 17:00:17,655 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.651e+02 1.935e+02 2.361e+02 3.363e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-31 17:00:23,871 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.24 vs. limit=6.0
+2024-08-31 17:00:27,637 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.12 vs. limit=15.0
+2024-08-31 17:01:12,074 INFO [train.py:1114] (2/4) Epoch 19, batch 1100, loss[loss=0.207, simple_loss=0.2733, pruned_loss=0.05189, ctc_loss=0.09218, over 19574.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2711, pruned_loss=0.0492, ctc_loss=0.09283, over 3828692.34 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:01:46,022 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.45 vs. limit=22.5
+2024-08-31 17:02:31,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=245034.66666666666, ans=0.07
+2024-08-31 17:02:40,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=245034.66666666666, ans=0.0
+2024-08-31 17:02:43,470 INFO [train.py:1114] (2/4) Epoch 19, batch 1150, loss[loss=0.1951, simple_loss=0.2585, pruned_loss=0.04856, ctc_loss=0.08652, over 19586.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2713, pruned_loss=0.04932, ctc_loss=0.09286, over 3826076.37 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:02:44,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=245088.0, ans=0.0
+2024-08-31 17:03:11,404 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.693e+02 1.899e+02 2.295e+02 3.327e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-31 17:03:14,162 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=245141.33333333334, ans=0.125
+2024-08-31 17:03:36,455 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=245194.66666666666, ans=0.1
+2024-08-31 17:03:56,462 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:04:04,712 INFO [train.py:1114] (2/4) Epoch 19, batch 1200, loss[loss=0.224, simple_loss=0.2874, pruned_loss=0.05756, ctc_loss=0.1139, over 19843.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2722, pruned_loss=0.04967, ctc_loss=0.09346, over 3821922.92 frames. ], batch size: 57, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:04:20,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=245408.0, ans=0.125
+2024-08-31 17:04:23,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=245408.0, ans=0.125
+2024-08-31 17:04:29,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff2.min_abs, batch_count=245408.0, ans=0.1
+2024-08-31 17:04:31,466 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.52 vs. limit=12.0
+2024-08-31 17:04:35,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=245461.33333333334, ans=0.125
+2024-08-31 17:05:08,583 INFO [train.py:1114] (2/4) Epoch 19, batch 1250, loss[loss=0.2319, simple_loss=0.2911, pruned_loss=0.0627, ctc_loss=0.1184, over 19548.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2734, pruned_loss=0.05026, ctc_loss=0.09439, over 3840961.06 frames. ], batch size: 61, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:05:16,751 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.647e+02 1.911e+02 2.205e+02 3.499e+02, threshold=3.822e+02, percent-clipped=0.0
+2024-08-31 17:05:18,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=245621.33333333334, ans=0.0
+2024-08-31 17:05:24,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=245674.66666666666, ans=0.125
+2024-08-31 17:06:04,934 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.20 vs. limit=15.0
+2024-08-31 17:06:19,724 INFO [train.py:1114] (2/4) Epoch 19, batch 1300, loss[loss=0.2118, simple_loss=0.279, pruned_loss=0.05196, ctc_loss=0.1015, over 18921.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2721, pruned_loss=0.04953, ctc_loss=0.09306, over 3844775.59 frames. ], batch size: 76, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:06:29,465 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.42 vs. limit=22.5
+2024-08-31 17:07:01,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=246048.0, ans=0.125
+2024-08-31 17:07:10,917 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=246101.33333333334, ans=0.125
+2024-08-31 17:07:12,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=246101.33333333334, ans=0.125
+2024-08-31 17:07:25,631 INFO [train.py:1114] (2/4) Epoch 19, batch 1350, loss[loss=0.1903, simple_loss=0.2634, pruned_loss=0.04161, ctc_loss=0.0848, over 19770.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2713, pruned_loss=0.04908, ctc_loss=0.09232, over 3855306.68 frames. ], batch size: 54, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:07:28,108 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=246154.66666666666, ans=0.125
+2024-08-31 17:07:39,280 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.765e+02 2.070e+02 2.720e+02 4.418e+02, threshold=4.141e+02, percent-clipped=1.0
+2024-08-31 17:07:39,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=246154.66666666666, ans=0.0
+2024-08-31 17:07:47,591 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.22 vs. limit=15.0
+2024-08-31 17:07:48,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=246208.0, ans=0.0
+2024-08-31 17:07:53,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=246208.0, ans=0.125
+2024-08-31 17:07:57,143 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=246261.33333333334, ans=0.0
+2024-08-31 17:08:06,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=246314.66666666666, ans=0.07
+2024-08-31 17:08:18,168 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=246314.66666666666, ans=0.125
+2024-08-31 17:08:35,887 INFO [train.py:1114] (2/4) Epoch 19, batch 1400, loss[loss=0.1738, simple_loss=0.2423, pruned_loss=0.03858, ctc_loss=0.07047, over 19671.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2708, pruned_loss=0.04893, ctc_loss=0.09195, over 3863274.26 frames. ], batch size: 46, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:08:46,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=246474.66666666666, ans=0.1
+2024-08-31 17:09:02,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=246474.66666666666, ans=0.125
+2024-08-31 17:09:13,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=246528.0, ans=0.125
+2024-08-31 17:09:24,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=246581.33333333334, ans=0.125
+2024-08-31 17:09:25,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=246581.33333333334, ans=0.125
+2024-08-31 17:09:27,574 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=246581.33333333334, ans=0.5
+2024-08-31 17:09:53,658 INFO [train.py:1114] (2/4) Epoch 19, batch 1450, loss[loss=0.2193, simple_loss=0.2862, pruned_loss=0.05525, ctc_loss=0.1049, over 19636.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2719, pruned_loss=0.04963, ctc_loss=0.09341, over 3860184.88 frames. ], batch size: 63, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:10:02,066 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 1.691e+02 1.919e+02 2.362e+02 3.353e+02, threshold=3.838e+02, percent-clipped=0.0
+2024-08-31 17:10:07,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=246741.33333333334, ans=0.05
+2024-08-31 17:11:23,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246741.33333333334, ans=0.1
+2024-08-31 17:11:25,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=246794.66666666666, ans=0.1
+2024-08-31 17:11:50,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=246848.0, ans=0.2
+2024-08-31 17:11:53,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=246901.33333333334, ans=0.1
+2024-08-31 17:12:07,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.71 vs. limit=15.0
+2024-08-31 17:12:07,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=246901.33333333334, ans=0.0
+2024-08-31 17:12:12,393 INFO [train.py:1114] (2/4) Epoch 19, batch 1500, loss[loss=0.1912, simple_loss=0.2729, pruned_loss=0.03948, ctc_loss=0.07671, over 19590.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2723, pruned_loss=0.0496, ctc_loss=0.0934, over 3861306.88 frames. ], batch size: 57, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:12:27,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=247008.0, ans=10.0
+2024-08-31 17:13:58,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=247114.66666666666, ans=0.0
+2024-08-31 17:14:38,401 INFO [train.py:1114] (2/4) Epoch 19, batch 1550, loss[loss=0.2226, simple_loss=0.2863, pruned_loss=0.05834, ctc_loss=0.1055, over 19613.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2723, pruned_loss=0.04988, ctc_loss=0.09389, over 3845734.36 frames. ], batch size: 60, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:14:42,804 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.77 vs. limit=15.0
+2024-08-31 17:14:46,788 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.654e+02 1.883e+02 2.328e+02 3.879e+02, threshold=3.765e+02, percent-clipped=1.0
+2024-08-31 17:15:14,291 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.06 vs. limit=15.0
+2024-08-31 17:15:15,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=247274.66666666666, ans=10.0
+2024-08-31 17:16:14,423 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=247381.33333333334, ans=0.2
+2024-08-31 17:16:29,403 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.64 vs. limit=15.0
+2024-08-31 17:16:40,633 INFO [train.py:1114] (2/4) Epoch 19, batch 1600, loss[loss=0.2214, simple_loss=0.2933, pruned_loss=0.0538, ctc_loss=0.1048, over 19825.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.272, pruned_loss=0.04952, ctc_loss=0.09334, over 3835103.23 frames. ], batch size: 57, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:16:48,650 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=247488.0, ans=0.125
+2024-08-31 17:16:54,602 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=247541.33333333334, ans=0.125
+2024-08-31 17:17:19,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=247648.0, ans=0.125
+2024-08-31 17:17:39,837 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=247701.33333333334, ans=0.0
+2024-08-31 17:17:42,004 INFO [train.py:1114] (2/4) Epoch 19, batch 1650, loss[loss=0.2071, simple_loss=0.2786, pruned_loss=0.04891, ctc_loss=0.0948, over 19643.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2716, pruned_loss=0.04935, ctc_loss=0.09297, over 3830846.76 frames. ], batch size: 59, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:17:50,571 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.753e+02 1.927e+02 2.360e+02 4.500e+02, threshold=3.853e+02, percent-clipped=4.0
+2024-08-31 17:17:52,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=247754.66666666666, ans=0.0
+2024-08-31 17:18:00,443 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.60 vs. limit=22.5
+2024-08-31 17:18:01,207 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=247808.0, ans=0.0
+2024-08-31 17:18:44,908 INFO [train.py:1114] (2/4) Epoch 19, batch 1700, loss[loss=0.2146, simple_loss=0.2654, pruned_loss=0.06034, ctc_loss=0.1081, over 19654.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2717, pruned_loss=0.0496, ctc_loss=0.09336, over 3846161.58 frames. ], batch size: 46, lr: 7.84e-03, grad_scale: 64.0
+2024-08-31 17:18:49,054 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.58 vs. limit=15.0
+2024-08-31 17:18:56,848 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.35 vs. limit=15.0
+2024-08-31 17:19:05,046 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=248074.66666666666, ans=0.125
+2024-08-31 17:19:25,991 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.54 vs. limit=15.0
+2024-08-31 17:19:33,468 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.69 vs. limit=15.0
+2024-08-31 17:19:52,916 INFO [train.py:1114] (2/4) Epoch 19, batch 1750, loss[loss=0.1998, simple_loss=0.2589, pruned_loss=0.05042, ctc_loss=0.09954, over 19643.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2711, pruned_loss=0.04927, ctc_loss=0.09278, over 3852816.79 frames. ], batch size: 45, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:19:57,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=248288.0, ans=0.1
+2024-08-31 17:20:02,157 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.715e+02 1.941e+02 2.441e+02 4.524e+02, threshold=3.882e+02, percent-clipped=3.0
+2024-08-31 17:20:03,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=248341.33333333334, ans=0.2
+2024-08-31 17:20:04,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=248341.33333333334, ans=0.2
+2024-08-31 17:20:07,753 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=248341.33333333334, ans=0.125
+2024-08-31 17:20:15,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=248394.66666666666, ans=0.125
+2024-08-31 17:20:40,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=248501.33333333334, ans=0.07
+2024-08-31 17:20:47,052 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.62 vs. limit=6.0
+2024-08-31 17:20:47,905 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.40 vs. limit=15.0
+2024-08-31 17:20:49,896 INFO [train.py:1114] (2/4) Epoch 19, batch 1800, loss[loss=0.2257, simple_loss=0.2927, pruned_loss=0.05794, ctc_loss=0.1072, over 19631.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2711, pruned_loss=0.04934, ctc_loss=0.09287, over 3852949.68 frames. ], batch size: 55, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:20:54,122 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.22 vs. limit=22.5
+2024-08-31 17:20:58,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=248554.66666666666, ans=0.125
+2024-08-31 17:21:02,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=248608.0, ans=0.0
+2024-08-31 17:21:05,974 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=248608.0, ans=0.0
+2024-08-31 17:21:07,545 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.88 vs. limit=6.0
+2024-08-31 17:21:10,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=248608.0, ans=0.125
+2024-08-31 17:21:16,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=248661.33333333334, ans=0.125
+2024-08-31 17:21:30,086 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.39 vs. limit=15.0
+2024-08-31 17:21:47,154 INFO [train.py:1114] (2/4) Epoch 19, batch 1850, loss[loss=0.2023, simple_loss=0.2777, pruned_loss=0.04666, ctc_loss=0.08398, over 19557.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2712, pruned_loss=0.04937, ctc_loss=0.09307, over 3855688.77 frames. ], batch size: 57, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:21:56,052 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.825e+02 2.203e+02 3.044e+02 4.782e+02, threshold=4.406e+02, percent-clipped=6.0
+2024-08-31 17:22:08,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=248874.66666666666, ans=0.05
+2024-08-31 17:22:18,190 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.12 vs. limit=15.0
+2024-08-31 17:22:28,378 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.32 vs. limit=15.0
+2024-08-31 17:22:37,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=248981.33333333334, ans=0.2
+2024-08-31 17:22:52,468 INFO [train.py:1114] (2/4) Epoch 19, batch 1900, loss[loss=0.2002, simple_loss=0.2751, pruned_loss=0.04529, ctc_loss=0.0869, over 19664.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2716, pruned_loss=0.04935, ctc_loss=0.09284, over 3860408.64 frames. ], batch size: 59, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:23:19,556 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.87 vs. limit=22.5
+2024-08-31 17:23:20,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=249194.66666666666, ans=0.0
+2024-08-31 17:23:44,486 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=249301.33333333334, ans=0.0
+2024-08-31 17:23:48,204 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.84 vs. limit=15.0
+2024-08-31 17:23:48,979 INFO [train.py:1114] (2/4) Epoch 19, batch 1950, loss[loss=0.188, simple_loss=0.2566, pruned_loss=0.04346, ctc_loss=0.08116, over 19576.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2725, pruned_loss=0.04944, ctc_loss=0.09308, over 3869360.89 frames. ], batch size: 52, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:23:55,509 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.59 vs. limit=22.5
+2024-08-31 17:23:56,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=249354.66666666666, ans=0.125
+2024-08-31 17:23:58,758 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.608e+02 1.802e+02 2.157e+02 4.545e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 17:24:00,242 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:24:00,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=249408.0, ans=0.125
+2024-08-31 17:24:06,526 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.91 vs. limit=15.0
+2024-08-31 17:24:25,322 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=249514.66666666666, ans=0.0
+2024-08-31 17:24:32,282 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.83 vs. limit=22.5
+2024-08-31 17:24:48,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=249568.0, ans=0.0
+2024-08-31 17:24:50,861 INFO [train.py:1114] (2/4) Epoch 19, batch 2000, loss[loss=0.1783, simple_loss=0.2398, pruned_loss=0.04225, ctc_loss=0.08084, over 19698.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2736, pruned_loss=0.05028, ctc_loss=0.09447, over 3854395.22 frames. ], batch size: 45, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:24:50,903 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=249621.33333333334, ans=0.125
+2024-08-31 17:25:21,304 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.78 vs. limit=22.5
+2024-08-31 17:25:43,635 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=249834.66666666666, ans=0.2
+2024-08-31 17:25:47,794 INFO [train.py:1114] (2/4) Epoch 19, batch 2050, loss[loss=0.1946, simple_loss=0.2478, pruned_loss=0.05164, ctc_loss=0.09563, over 19725.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2727, pruned_loss=0.05016, ctc_loss=0.09432, over 3851573.45 frames. ], batch size: 47, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:25:48,393 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=249888.0, ans=0.2
+2024-08-31 17:25:57,143 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.719e+02 2.018e+02 2.402e+02 3.677e+02, threshold=4.037e+02, percent-clipped=1.0
+2024-08-31 17:26:00,930 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=249941.33333333334, ans=0.0
+2024-08-31 17:26:06,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=249941.33333333334, ans=0.125
+2024-08-31 17:26:44,693 INFO [train.py:1114] (2/4) Epoch 19, batch 2100, loss[loss=0.1956, simple_loss=0.2726, pruned_loss=0.04244, ctc_loss=0.08413, over 19766.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2716, pruned_loss=0.04951, ctc_loss=0.0931, over 3858392.60 frames. ], batch size: 54, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:26:46,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=250154.66666666666, ans=0.0
+2024-08-31 17:26:54,369 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=6.59 vs. limit=15.0
+2024-08-31 17:27:02,835 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=250208.0, ans=0.0
+2024-08-31 17:27:29,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:32,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=250368.0, ans=0.0
+2024-08-31 17:27:37,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:42,518 INFO [train.py:1114] (2/4) Epoch 19, batch 2150, loss[loss=0.1887, simple_loss=0.2574, pruned_loss=0.04329, ctc_loss=0.08363, over 19837.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2712, pruned_loss=0.04917, ctc_loss=0.09243, over 3869130.50 frames. ], batch size: 52, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:27:42,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=250421.33333333334, ans=0.1
+2024-08-31 17:27:43,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=250421.33333333334, ans=0.125
+2024-08-31 17:27:51,503 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.672e+02 1.975e+02 2.523e+02 4.782e+02, threshold=3.951e+02, percent-clipped=2.0
+2024-08-31 17:28:03,504 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:28:05,832 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=250528.0, ans=0.0
+2024-08-31 17:28:10,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=250528.0, ans=0.0
+2024-08-31 17:28:14,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=250528.0, ans=0.1
+2024-08-31 17:28:20,118 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.55 vs. limit=15.0
+2024-08-31 17:28:33,215 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=250634.66666666666, ans=0.2
+2024-08-31 17:28:39,682 INFO [train.py:1114] (2/4) Epoch 19, batch 2200, loss[loss=0.2261, simple_loss=0.2888, pruned_loss=0.05851, ctc_loss=0.116, over 19582.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2714, pruned_loss=0.04918, ctc_loss=0.09261, over 3867627.29 frames. ], batch size: 57, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:28:59,687 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.40 vs. limit=10.0
+2024-08-31 17:29:16,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=250848.0, ans=0.125
+2024-08-31 17:29:19,325 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=250848.0, ans=0.125
+2024-08-31 17:29:20,500 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=250848.0, ans=0.0
+2024-08-31 17:29:38,740 INFO [train.py:1114] (2/4) Epoch 19, batch 2250, loss[loss=0.1834, simple_loss=0.2628, pruned_loss=0.03846, ctc_loss=0.06771, over 19610.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2716, pruned_loss=0.04934, ctc_loss=0.09308, over 3867632.10 frames. ], batch size: 55, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:29:45,516 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=250954.66666666666, ans=0.125
+2024-08-31 17:29:47,361 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.680e+02 1.896e+02 2.375e+02 5.292e+02, threshold=3.791e+02, percent-clipped=4.0
+2024-08-31 17:29:57,508 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.21 vs. limit=15.0
+2024-08-31 17:30:21,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=251114.66666666666, ans=0.07
+2024-08-31 17:30:22,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=251114.66666666666, ans=0.0
+2024-08-31 17:30:34,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=251168.0, ans=0.125
+2024-08-31 17:30:38,156 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.23 vs. limit=15.0
+2024-08-31 17:30:40,025 INFO [train.py:1114] (2/4) Epoch 19, batch 2300, loss[loss=0.2046, simple_loss=0.264, pruned_loss=0.05219, ctc_loss=0.1021, over 19515.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2707, pruned_loss=0.04921, ctc_loss=0.09276, over 3861449.56 frames. ], batch size: 49, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:30:52,122 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.64 vs. limit=15.0
+2024-08-31 17:31:27,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.60 vs. limit=15.0
+2024-08-31 17:31:34,699 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.76 vs. limit=15.0
+2024-08-31 17:31:36,421 INFO [train.py:1114] (2/4) Epoch 19, batch 2350, loss[loss=0.1846, simple_loss=0.2687, pruned_loss=0.03581, ctc_loss=0.07242, over 19701.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2706, pruned_loss=0.04923, ctc_loss=0.09257, over 3864096.56 frames. ], batch size: 63, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:31:43,451 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.57 vs. limit=15.0
+2024-08-31 17:31:45,233 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.718e+02 2.013e+02 2.563e+02 3.706e+02, threshold=4.026e+02, percent-clipped=0.0
+2024-08-31 17:31:52,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=251541.33333333334, ans=0.125
+2024-08-31 17:32:12,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=251594.66666666666, ans=0.0
+2024-08-31 17:32:18,993 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=251648.0, ans=0.125
+2024-08-31 17:32:20,118 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:32:26,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=251701.33333333334, ans=0.025
+2024-08-31 17:32:28,501 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.18 vs. limit=22.5
+2024-08-31 17:32:36,577 INFO [train.py:1114] (2/4) Epoch 19, batch 2400, loss[loss=0.2026, simple_loss=0.2665, pruned_loss=0.04979, ctc_loss=0.0978, over 19333.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2721, pruned_loss=0.04986, ctc_loss=0.09355, over 3857815.16 frames. ], batch size: 71, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:32:37,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=251754.66666666666, ans=0.125
+2024-08-31 17:32:49,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=251808.0, ans=0.0
+2024-08-31 17:32:54,132 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.89 vs. limit=15.0
+2024-08-31 17:33:15,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=251914.66666666666, ans=0.0
+2024-08-31 17:33:18,120 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=251914.66666666666, ans=0.0
+2024-08-31 17:33:39,841 INFO [train.py:1114] (2/4) Epoch 19, batch 2450, loss[loss=0.2899, simple_loss=0.3219, pruned_loss=0.09196, ctc_loss=0.1851, over 13397.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2761, pruned_loss=0.0525, ctc_loss=0.09895, over 3731230.38 frames. ], batch size: 140, lr: 7.78e-03, grad_scale: 32.0
+2024-08-31 17:33:48,954 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.610e+02 1.856e+02 2.081e+02 3.075e+02, threshold=3.711e+02, percent-clipped=0.0
+2024-08-31 17:33:51,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=252074.66666666666, ans=0.04949747468305833
+2024-08-31 17:34:00,698 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=252074.66666666666, ans=0.125
+2024-08-31 17:34:02,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=252128.0, ans=0.2
+2024-08-31 17:34:03,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=252128.0, ans=0.125
+2024-08-31 17:34:09,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=252128.0, ans=0.125
+2024-08-31 17:36:18,554 INFO [train.py:1114] (2/4) Epoch 20, batch 0, loss[loss=0.2266, simple_loss=0.2783, pruned_loss=0.06332, ctc_loss=0.1208, over 19812.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2783, pruned_loss=0.06332, ctc_loss=0.1208, over 19812.00 frames. ], batch size: 49, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:36:18,554 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-31 17:36:23,601 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.4.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.1220, 2.2929, 2.5668, 2.2020], device='cuda:2')
+2024-08-31 17:36:28,429 INFO [train.py:1146] (2/4) Epoch 20, validation: loss=0.1834, simple_loss=0.2715, pruned_loss=0.03542, ctc_loss=0.061, over 944034.00 frames.
+2024-08-31 17:36:28,429 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13705MB
+2024-08-31 17:36:41,924 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=252288.0, ans=0.025
+2024-08-31 17:36:47,342 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.24 vs. limit=15.0
+2024-08-31 17:36:50,249 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=252288.0, ans=0.125
+2024-08-31 17:37:06,696 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=252394.66666666666, ans=0.025
+2024-08-31 17:37:18,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=252448.0, ans=0.125
+2024-08-31 17:37:27,974 INFO [train.py:1114] (2/4) Epoch 20, batch 50, loss[loss=0.1885, simple_loss=0.2487, pruned_loss=0.04669, ctc_loss=0.08728, over 19724.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2738, pruned_loss=0.05018, ctc_loss=0.09484, over 844974.64 frames. ], batch size: 47, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:37:36,498 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=252501.33333333334, ans=0.125
+2024-08-31 17:37:37,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=252501.33333333334, ans=0.0
+2024-08-31 17:37:51,148 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.696e+02 1.962e+02 2.261e+02 4.473e+02, threshold=3.923e+02, percent-clipped=2.0
+2024-08-31 17:38:34,794 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=15.82 vs. limit=15.0
+2024-08-31 17:41:08,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=252714.66666666666, ans=0.125
+2024-08-31 17:41:27,187 INFO [train.py:1114] (2/4) Epoch 20, batch 100, loss[loss=0.1955, simple_loss=0.2628, pruned_loss=0.04602, ctc_loss=0.09063, over 19701.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2748, pruned_loss=0.05027, ctc_loss=0.09532, over 1499556.84 frames. ], batch size: 51, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:41:53,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=252821.33333333334, ans=0.125
+2024-08-31 17:42:08,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=252821.33333333334, ans=0.2
+2024-08-31 17:43:46,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=252928.0, ans=0.1
+2024-08-31 17:43:51,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=252981.33333333334, ans=0.125
+2024-08-31 17:44:05,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=253034.66666666666, ans=0.0
+2024-08-31 17:44:06,482 INFO [train.py:1114] (2/4) Epoch 20, batch 150, loss[loss=0.1987, simple_loss=0.259, pruned_loss=0.05054, ctc_loss=0.0934, over 19725.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2723, pruned_loss=0.04923, ctc_loss=0.0934, over 2027658.68 frames. ], batch size: 47, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:44:11,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=253034.66666666666, ans=0.125
+2024-08-31 17:44:57,848 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=253088.0, ans=0.125
+2024-08-31 17:44:59,739 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.634e+02 1.821e+02 2.194e+02 3.683e+02, threshold=3.641e+02, percent-clipped=0.0
+2024-08-31 17:45:25,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=253141.33333333334, ans=0.125
+2024-08-31 17:45:33,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=253194.66666666666, ans=0.125
+2024-08-31 17:45:39,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=253194.66666666666, ans=0.05
+2024-08-31 17:45:39,801 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=253194.66666666666, ans=0.125
+2024-08-31 17:45:59,902 INFO [train.py:1114] (2/4) Epoch 20, batch 200, loss[loss=0.2369, simple_loss=0.2993, pruned_loss=0.06305, ctc_loss=0.1208, over 18199.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.271, pruned_loss=0.04879, ctc_loss=0.09245, over 2435022.78 frames. ], batch size: 85, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:46:41,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=253408.0, ans=0.125
+2024-08-31 17:46:48,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=253408.0, ans=0.07
+2024-08-31 17:46:48,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=253408.0, ans=10.0
+2024-08-31 17:47:03,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=253461.33333333334, ans=0.2
+2024-08-31 17:47:09,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=253461.33333333334, ans=0.1
+2024-08-31 17:47:11,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=253461.33333333334, ans=0.125
+2024-08-31 17:47:33,348 INFO [train.py:1114] (2/4) Epoch 20, batch 250, loss[loss=0.2386, simple_loss=0.2965, pruned_loss=0.0656, ctc_loss=0.1235, over 19424.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2715, pruned_loss=0.04916, ctc_loss=0.09299, over 2755868.39 frames. ], batch size: 67, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:47:44,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=253568.0, ans=0.125
+2024-08-31 17:47:57,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=253621.33333333334, ans=0.2
+2024-08-31 17:47:59,369 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.745e+02 2.044e+02 2.602e+02 4.259e+02, threshold=4.089e+02, percent-clipped=6.0
+2024-08-31 17:48:00,886 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=253674.66666666666, ans=0.025
+2024-08-31 17:49:29,736 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.15 vs. limit=22.5
+2024-08-31 17:49:51,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=253781.33333333334, ans=0.125
+2024-08-31 17:49:59,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=253834.66666666666, ans=0.1
+2024-08-31 17:50:00,339 INFO [train.py:1114] (2/4) Epoch 20, batch 300, loss[loss=0.2223, simple_loss=0.2862, pruned_loss=0.05767, ctc_loss=0.1075, over 19523.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2706, pruned_loss=0.04832, ctc_loss=0.09118, over 3002011.17 frames. ], batch size: 61, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:50:03,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=253834.66666666666, ans=0.2
+2024-08-31 17:50:12,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=253888.0, ans=0.125
+2024-08-31 17:50:21,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=253888.0, ans=0.125
+2024-08-31 17:50:27,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=253941.33333333334, ans=0.1
+2024-08-31 17:51:05,483 INFO [train.py:1114] (2/4) Epoch 20, batch 350, loss[loss=0.2015, simple_loss=0.2577, pruned_loss=0.05141, ctc_loss=0.1061, over 19767.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2706, pruned_loss=0.04841, ctc_loss=0.09131, over 3190861.06 frames. ], batch size: 48, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:51:23,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=254154.66666666666, ans=0.125
+2024-08-31 17:51:26,962 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.703e+02 1.946e+02 2.321e+02 4.034e+02, threshold=3.891e+02, percent-clipped=0.0
+2024-08-31 17:51:27,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=254154.66666666666, ans=0.05
+2024-08-31 17:51:36,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=254208.0, ans=0.125
+2024-08-31 17:51:41,672 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.43 vs. limit=10.0
+2024-08-31 17:51:45,262 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.45 vs. limit=15.0
+2024-08-31 17:51:58,902 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=254314.66666666666, ans=0.025
+2024-08-31 17:52:04,334 INFO [train.py:1114] (2/4) Epoch 20, batch 400, loss[loss=0.2073, simple_loss=0.284, pruned_loss=0.04875, ctc_loss=0.08288, over 19488.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2702, pruned_loss=0.04823, ctc_loss=0.09073, over 3341925.11 frames. ], batch size: 54, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:52:21,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=254421.33333333334, ans=0.125
+2024-08-31 17:52:23,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=254421.33333333334, ans=0.125
+2024-08-31 17:52:29,871 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=254421.33333333334, ans=0.0
+2024-08-31 17:52:47,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=254528.0, ans=0.07
+2024-08-31 17:53:01,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=254581.33333333334, ans=0.0
+2024-08-31 17:53:10,616 INFO [train.py:1114] (2/4) Epoch 20, batch 450, loss[loss=0.1961, simple_loss=0.2694, pruned_loss=0.04407, ctc_loss=0.08644, over 19610.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2701, pruned_loss=0.0482, ctc_loss=0.09055, over 3450704.69 frames. ], batch size: 55, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:53:15,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=254634.66666666666, ans=0.2
+2024-08-31 17:53:31,692 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.627e+02 1.777e+02 2.217e+02 3.582e+02, threshold=3.554e+02, percent-clipped=0.0
+2024-08-31 17:54:15,354 INFO [train.py:1114] (2/4) Epoch 20, batch 500, loss[loss=0.2308, simple_loss=0.298, pruned_loss=0.05965, ctc_loss=0.1106, over 19677.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2689, pruned_loss=0.04761, ctc_loss=0.08954, over 3546812.11 frames. ], batch size: 63, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:54:20,717 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.21 vs. limit=6.0
+2024-08-31 17:55:14,672 INFO [train.py:1114] (2/4) Epoch 20, batch 550, loss[loss=0.213, simple_loss=0.2803, pruned_loss=0.05362, ctc_loss=0.09645, over 19343.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2687, pruned_loss=0.04761, ctc_loss=0.08967, over 3609053.13 frames. ], batch size: 71, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:55:14,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=255168.0, ans=0.1
+2024-08-31 17:55:16,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=255168.0, ans=0.0
+2024-08-31 17:55:29,707 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.81 vs. limit=15.0
+2024-08-31 17:55:35,927 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.640e+02 1.908e+02 2.178e+02 3.229e+02, threshold=3.816e+02, percent-clipped=0.0
+2024-08-31 17:55:49,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=255274.66666666666, ans=0.125
+2024-08-31 17:56:19,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=255381.33333333334, ans=0.025
+2024-08-31 17:56:21,761 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=255434.66666666666, ans=0.125
+2024-08-31 17:56:22,749 INFO [train.py:1114] (2/4) Epoch 20, batch 600, loss[loss=0.2385, simple_loss=0.3059, pruned_loss=0.06208, ctc_loss=0.1173, over 19390.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2692, pruned_loss=0.04787, ctc_loss=0.09002, over 3665874.64 frames. ], batch size: 67, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:56:36,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=255488.0, ans=0.0
+2024-08-31 17:56:47,924 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=255541.33333333334, ans=0.0
+2024-08-31 17:56:55,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=255541.33333333334, ans=0.2
+2024-08-31 17:57:22,369 INFO [train.py:1114] (2/4) Epoch 20, batch 650, loss[loss=0.2026, simple_loss=0.2643, pruned_loss=0.05021, ctc_loss=0.1011, over 19779.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2692, pruned_loss=0.04784, ctc_loss=0.0901, over 3715968.33 frames. ], batch size: 54, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:57:44,318 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.759e+02 2.153e+02 2.838e+02 5.166e+02, threshold=4.306e+02, percent-clipped=8.0
+2024-08-31 17:57:46,923 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=255808.0, ans=0.125
+2024-08-31 17:57:49,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=255808.0, ans=0.1
+2024-08-31 17:57:51,956 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.88 vs. limit=6.0
+2024-08-31 17:58:05,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=255861.33333333334, ans=0.2
+2024-08-31 17:58:22,799 INFO [train.py:1114] (2/4) Epoch 20, batch 700, loss[loss=0.1768, simple_loss=0.2504, pruned_loss=0.03804, ctc_loss=0.0676, over 19692.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2697, pruned_loss=0.04816, ctc_loss=0.09053, over 3747565.94 frames. ], batch size: 51, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:58:27,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=255968.0, ans=0.2
+2024-08-31 17:58:39,539 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=256021.33333333334, ans=0.125
+2024-08-31 17:58:57,857 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.54 vs. limit=6.0
+2024-08-31 17:59:00,761 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=256128.0, ans=0.0
+2024-08-31 17:59:06,731 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.52 vs. limit=15.0
+2024-08-31 17:59:11,242 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=256181.33333333334, ans=0.125
+2024-08-31 17:59:17,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=256181.33333333334, ans=0.0
+2024-08-31 17:59:17,477 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.32 vs. limit=15.0
+2024-08-31 17:59:24,908 INFO [train.py:1114] (2/4) Epoch 20, batch 750, loss[loss=0.2088, simple_loss=0.2794, pruned_loss=0.04879, ctc_loss=0.1016, over 19505.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2698, pruned_loss=0.04816, ctc_loss=0.09076, over 3773491.09 frames. ], batch size: 54, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 17:59:29,442 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=256234.66666666666, ans=0.125
+2024-08-31 17:59:46,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=256288.0, ans=0.05
+2024-08-31 17:59:58,592 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 1.642e+02 1.855e+02 2.095e+02 3.716e+02, threshold=3.709e+02, percent-clipped=0.0
+2024-08-31 18:00:01,392 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.33 vs. limit=15.0
+2024-08-31 18:00:03,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=256341.33333333334, ans=0.0
+2024-08-31 18:00:12,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=256394.66666666666, ans=0.0
+2024-08-31 18:00:15,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=256394.66666666666, ans=0.07
+2024-08-31 18:00:22,035 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=256394.66666666666, ans=0.2
+2024-08-31 18:00:23,185 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=256394.66666666666, ans=0.125
+2024-08-31 18:00:42,916 INFO [train.py:1114] (2/4) Epoch 20, batch 800, loss[loss=0.1706, simple_loss=0.2439, pruned_loss=0.03501, ctc_loss=0.06802, over 19396.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2697, pruned_loss=0.04805, ctc_loss=0.09056, over 3795249.99 frames. ], batch size: 48, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 18:00:43,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=256501.33333333334, ans=0.0
+2024-08-31 18:00:44,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=256501.33333333334, ans=0.1
+2024-08-31 18:00:54,122 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.38 vs. limit=6.0
+2024-08-31 18:00:59,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=256554.66666666666, ans=10.0
+2024-08-31 18:01:28,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=256661.33333333334, ans=0.0
+2024-08-31 18:01:40,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=256714.66666666666, ans=0.5
+2024-08-31 18:01:43,053 INFO [train.py:1114] (2/4) Epoch 20, batch 850, loss[loss=0.1964, simple_loss=0.2782, pruned_loss=0.04071, ctc_loss=0.08291, over 19678.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2696, pruned_loss=0.04817, ctc_loss=0.09069, over 3813468.42 frames. ], batch size: 59, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:01:43,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=256768.0, ans=15.0
+2024-08-31 18:01:53,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=256768.0, ans=0.125
+2024-08-31 18:02:05,172 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.672e+02 2.009e+02 2.661e+02 4.692e+02, threshold=4.019e+02, percent-clipped=5.0
+2024-08-31 18:02:31,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=256981.33333333334, ans=0.125
+2024-08-31 18:02:33,511 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=8.83 vs. limit=22.5
+2024-08-31 18:02:42,849 INFO [train.py:1114] (2/4) Epoch 20, batch 900, loss[loss=0.203, simple_loss=0.2574, pruned_loss=0.05524, ctc_loss=0.09515, over 19823.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2699, pruned_loss=0.04838, ctc_loss=0.09107, over 3818082.07 frames. ], batch size: 49, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:02:43,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=257034.66666666666, ans=0.0
+2024-08-31 18:02:47,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=257034.66666666666, ans=0.2
+2024-08-31 18:02:51,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=257034.66666666666, ans=0.0
+2024-08-31 18:02:51,530 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=257034.66666666666, ans=0.125
+2024-08-31 18:03:03,081 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.65 vs. limit=15.0
+2024-08-31 18:03:20,624 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:03:23,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=257194.66666666666, ans=0.0
+2024-08-31 18:03:25,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=257194.66666666666, ans=0.2
+2024-08-31 18:03:32,209 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.04 vs. limit=15.0
+2024-08-31 18:03:45,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=257248.0, ans=0.05
+2024-08-31 18:03:49,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=257301.33333333334, ans=0.125
+2024-08-31 18:03:49,915 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.60 vs. limit=15.0
+2024-08-31 18:03:50,697 INFO [train.py:1114] (2/4) Epoch 20, batch 950, loss[loss=0.1765, simple_loss=0.2424, pruned_loss=0.04025, ctc_loss=0.07544, over 19523.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2698, pruned_loss=0.04832, ctc_loss=0.09102, over 3819962.07 frames. ], batch size: 49, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:03:50,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=257301.33333333334, ans=0.035
+2024-08-31 18:03:51,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=257301.33333333334, ans=0.125
+2024-08-31 18:03:53,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=257301.33333333334, ans=0.125
+2024-08-31 18:03:55,546 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=257301.33333333334, ans=0.125
+2024-08-31 18:04:12,191 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.674e+02 1.914e+02 2.385e+02 5.476e+02, threshold=3.829e+02, percent-clipped=1.0
+2024-08-31 18:04:48,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=257408.0, ans=0.035
+2024-08-31 18:04:48,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=257408.0, ans=0.125
+2024-08-31 18:05:05,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=257461.33333333334, ans=10.0
+2024-08-31 18:05:12,756 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.89 vs. limit=6.0
+2024-08-31 18:05:18,604 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=1.98 vs. limit=6.0
+2024-08-31 18:05:25,082 INFO [train.py:1114] (2/4) Epoch 20, batch 1000, loss[loss=0.1882, simple_loss=0.2565, pruned_loss=0.0433, ctc_loss=0.08345, over 19868.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2705, pruned_loss=0.0485, ctc_loss=0.0914, over 3817270.55 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:05:27,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=257568.0, ans=0.0
+2024-08-31 18:10:43,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=257728.0, ans=0.0
+2024-08-31 18:11:53,006 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.94 vs. limit=22.5
+2024-08-31 18:12:06,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=257781.33333333334, ans=0.035
+2024-08-31 18:12:10,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=257781.33333333334, ans=0.1
+2024-08-31 18:12:15,980 INFO [train.py:1114] (2/4) Epoch 20, batch 1050, loss[loss=0.2247, simple_loss=0.2908, pruned_loss=0.05773, ctc_loss=0.1078, over 19844.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2698, pruned_loss=0.04824, ctc_loss=0.09094, over 3823601.14 frames. ], batch size: 57, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:12:21,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=257834.66666666666, ans=0.125
+2024-08-31 18:12:37,422 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.683e+02 1.941e+02 2.234e+02 3.103e+02, threshold=3.882e+02, percent-clipped=0.0
+2024-08-31 18:12:38,177 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.97 vs. limit=6.0
+2024-08-31 18:12:46,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=257941.33333333334, ans=0.125
+2024-08-31 18:13:11,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=258048.0, ans=0.2
+2024-08-31 18:13:25,850 INFO [train.py:1114] (2/4) Epoch 20, batch 1100, loss[loss=0.1899, simple_loss=0.2603, pruned_loss=0.04352, ctc_loss=0.0814, over 19588.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2698, pruned_loss=0.04813, ctc_loss=0.09086, over 3830386.52 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:13:30,949 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=258101.33333333334, ans=0.125
+2024-08-31 18:13:45,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=258154.66666666666, ans=0.1
+2024-08-31 18:13:59,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=258208.0, ans=0.125
+2024-08-31 18:14:00,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=258208.0, ans=0.125
+2024-08-31 18:14:02,704 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=258261.33333333334, ans=0.0
+2024-08-31 18:14:13,687 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.14 vs. limit=15.0
+2024-08-31 18:14:26,128 INFO [train.py:1114] (2/4) Epoch 20, batch 1150, loss[loss=0.2038, simple_loss=0.2729, pruned_loss=0.04946, ctc_loss=0.08953, over 19588.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2701, pruned_loss=0.04854, ctc_loss=0.09172, over 3830406.24 frames. ], batch size: 52, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:14:29,408 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.84 vs. limit=15.0
+2024-08-31 18:15:02,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=258421.33333333334, ans=0.125
+2024-08-31 18:15:02,931 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:15:04,054 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=258421.33333333334, ans=0.0
+2024-08-31 18:15:04,484 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=21.34 vs. limit=22.5
+2024-08-31 18:15:12,227 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.657e+02 1.937e+02 2.398e+02 3.976e+02, threshold=3.875e+02, percent-clipped=1.0
+2024-08-31 18:15:12,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=258421.33333333334, ans=0.025
+2024-08-31 18:15:33,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=258528.0, ans=0.125
+2024-08-31 18:15:38,134 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=258581.33333333334, ans=0.025
+2024-08-31 18:15:51,964 INFO [train.py:1114] (2/4) Epoch 20, batch 1200, loss[loss=0.1929, simple_loss=0.2748, pruned_loss=0.04053, ctc_loss=0.07496, over 19834.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2707, pruned_loss=0.04838, ctc_loss=0.09146, over 3826157.79 frames. ], batch size: 57, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:16:21,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=258741.33333333334, ans=0.0
+2024-08-31 18:16:53,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=258901.33333333334, ans=0.125
+2024-08-31 18:16:54,775 INFO [train.py:1114] (2/4) Epoch 20, batch 1250, loss[loss=0.219, simple_loss=0.2845, pruned_loss=0.05522, ctc_loss=0.1075, over 19528.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2713, pruned_loss=0.04862, ctc_loss=0.09158, over 3844326.38 frames. ], batch size: 61, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:16:58,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=258901.33333333334, ans=0.125
+2024-08-31 18:17:19,400 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.17 vs. limit=10.0
+2024-08-31 18:17:20,837 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.673e+02 1.864e+02 2.243e+02 4.460e+02, threshold=3.727e+02, percent-clipped=1.0
+2024-08-31 18:17:41,315 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=259061.33333333334, ans=0.0
+2024-08-31 18:17:42,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=259061.33333333334, ans=0.125
+2024-08-31 18:17:51,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=259061.33333333334, ans=0.125
+2024-08-31 18:19:03,734 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.32 vs. limit=15.0
+2024-08-31 18:19:05,804 INFO [train.py:1114] (2/4) Epoch 20, batch 1300, loss[loss=0.2409, simple_loss=0.306, pruned_loss=0.06445, ctc_loss=0.1173, over 18879.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2702, pruned_loss=0.04811, ctc_loss=0.09049, over 3848500.15 frames. ], batch size: 76, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:19:09,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=259168.0, ans=0.0
+2024-08-31 18:19:49,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=259328.0, ans=0.125
+2024-08-31 18:19:54,003 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=259328.0, ans=0.125
+2024-08-31 18:20:01,456 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.27 vs. limit=12.0
+2024-08-31 18:20:12,176 INFO [train.py:1114] (2/4) Epoch 20, batch 1350, loss[loss=0.2074, simple_loss=0.2771, pruned_loss=0.04942, ctc_loss=0.09713, over 19750.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2701, pruned_loss=0.04788, ctc_loss=0.09003, over 3859791.35 frames. ], batch size: 54, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:20:15,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=259434.66666666666, ans=0.025
+2024-08-31 18:20:24,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=259488.0, ans=0.0
+2024-08-31 18:20:31,620 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=259488.0, ans=0.125
+2024-08-31 18:20:38,792 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 1.677e+02 1.917e+02 2.382e+02 4.193e+02, threshold=3.834e+02, percent-clipped=5.0
+2024-08-31 18:21:01,744 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.17 vs. limit=15.0
+2024-08-31 18:21:06,766 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.50 vs. limit=22.5
+2024-08-31 18:21:16,778 INFO [train.py:1114] (2/4) Epoch 20, batch 1400, loss[loss=0.1471, simple_loss=0.2213, pruned_loss=0.02578, ctc_loss=0.05327, over 19673.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2699, pruned_loss=0.04767, ctc_loss=0.08976, over 3866412.50 frames. ], batch size: 46, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:21:18,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=259701.33333333334, ans=0.025
+2024-08-31 18:21:38,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=259754.66666666666, ans=0.07
+2024-08-31 18:21:51,646 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=259808.0, ans=0.1
+2024-08-31 18:22:05,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=259861.33333333334, ans=0.0
+2024-08-31 18:22:06,349 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=259861.33333333334, ans=0.09899494936611666
+2024-08-31 18:22:07,453 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=259861.33333333334, ans=0.2
+2024-08-31 18:22:50,432 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.72 vs. limit=15.0
+2024-08-31 18:22:53,584 INFO [train.py:1114] (2/4) Epoch 20, batch 1450, loss[loss=0.2143, simple_loss=0.2876, pruned_loss=0.05071, ctc_loss=0.09878, over 19651.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2706, pruned_loss=0.04804, ctc_loss=0.09051, over 3863949.13 frames. ], batch size: 63, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:23:08,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=260021.33333333334, ans=0.0
+2024-08-31 18:23:09,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=260021.33333333334, ans=0.125
+2024-08-31 18:23:09,941 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.61 vs. limit=15.0
+2024-08-31 18:23:17,227 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.776e+02 2.029e+02 2.458e+02 5.712e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-31 18:23:28,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=260074.66666666666, ans=0.2
+2024-08-31 18:23:36,942 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.65 vs. limit=15.0
+2024-08-31 18:23:39,870 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:23:53,214 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=260234.66666666666, ans=0.0
+2024-08-31 18:23:54,006 INFO [train.py:1114] (2/4) Epoch 20, batch 1500, loss[loss=0.2014, simple_loss=0.2739, pruned_loss=0.04705, ctc_loss=0.08707, over 19614.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2707, pruned_loss=0.04807, ctc_loss=0.09049, over 3863395.95 frames. ], batch size: 57, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:24:05,446 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=260288.0, ans=0.0
+2024-08-31 18:24:14,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=260288.0, ans=0.1
+2024-08-31 18:24:31,620 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:24:35,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=260394.66666666666, ans=0.0
+2024-08-31 18:25:24,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=260448.0, ans=0.125
+2024-08-31 18:25:34,759 INFO [train.py:1114] (2/4) Epoch 20, batch 1550, loss[loss=0.2261, simple_loss=0.2841, pruned_loss=0.06219, ctc_loss=0.1091, over 19609.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2704, pruned_loss=0.04818, ctc_loss=0.09069, over 3847375.28 frames. ], batch size: 60, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:26:01,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=260501.33333333334, ans=0.125
+2024-08-31 18:26:25,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=260554.66666666666, ans=0.125
+2024-08-31 18:26:31,694 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=260554.66666666666, ans=0.1
+2024-08-31 18:26:33,184 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.748e+02 2.049e+02 2.466e+02 3.855e+02, threshold=4.097e+02, percent-clipped=0.0
+2024-08-31 18:26:44,825 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:26:51,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=260661.33333333334, ans=0.0
+2024-08-31 18:26:56,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=260661.33333333334, ans=0.125
+2024-08-31 18:26:56,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=260661.33333333334, ans=0.2
+2024-08-31 18:27:11,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=260714.66666666666, ans=0.1
+2024-08-31 18:27:11,679 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.64 vs. limit=22.5
+2024-08-31 18:27:18,521 INFO [train.py:1114] (2/4) Epoch 20, batch 1600, loss[loss=0.1954, simple_loss=0.2666, pruned_loss=0.04476, ctc_loss=0.08689, over 19838.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2697, pruned_loss=0.0477, ctc_loss=0.09006, over 3837212.03 frames. ], batch size: 57, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:27:26,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=260768.0, ans=0.125
+2024-08-31 18:27:45,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=260874.66666666666, ans=0.025
+2024-08-31 18:28:07,550 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=260981.33333333334, ans=0.125
+2024-08-31 18:28:15,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=260981.33333333334, ans=0.0
+2024-08-31 18:28:28,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=260981.33333333334, ans=0.125
+2024-08-31 18:28:30,410 INFO [train.py:1114] (2/4) Epoch 20, batch 1650, loss[loss=0.2043, simple_loss=0.2785, pruned_loss=0.04714, ctc_loss=0.08965, over 19667.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2694, pruned_loss=0.0478, ctc_loss=0.09017, over 3834348.45 frames. ], batch size: 59, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:28:37,229 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=16.09 vs. limit=22.5
+2024-08-31 18:28:53,164 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.719e+02 2.026e+02 2.553e+02 4.958e+02, threshold=4.052e+02, percent-clipped=3.0
+2024-08-31 18:28:54,563 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:29:06,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=261194.66666666666, ans=0.0
+2024-08-31 18:29:07,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=261194.66666666666, ans=0.0
+2024-08-31 18:29:19,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=261248.0, ans=0.07
+2024-08-31 18:29:20,946 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.96 vs. limit=15.0
+2024-08-31 18:29:26,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=261248.0, ans=0.125
+2024-08-31 18:29:29,542 INFO [train.py:1114] (2/4) Epoch 20, batch 1700, loss[loss=0.1842, simple_loss=0.2411, pruned_loss=0.04628, ctc_loss=0.08683, over 19659.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2689, pruned_loss=0.04731, ctc_loss=0.08917, over 3848218.58 frames. ], batch size: 46, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:29:30,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=261301.33333333334, ans=0.1
+2024-08-31 18:29:41,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=261354.66666666666, ans=0.125
+2024-08-31 18:29:54,466 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=261408.0, ans=0.125
+2024-08-31 18:29:58,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=261408.0, ans=0.125
+2024-08-31 18:30:04,344 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=261408.0, ans=0.2
+2024-08-31 18:30:10,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=261461.33333333334, ans=0.2
+2024-08-31 18:31:08,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff3.min_abs, batch_count=261514.66666666666, ans=0.2
+2024-08-31 18:31:18,019 INFO [train.py:1114] (2/4) Epoch 20, batch 1750, loss[loss=0.1892, simple_loss=0.2508, pruned_loss=0.04694, ctc_loss=0.08414, over 19694.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2688, pruned_loss=0.04723, ctc_loss=0.08893, over 3853221.62 frames. ], batch size: 45, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:31:24,384 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.01 vs. limit=22.5
+2024-08-31 18:31:28,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=261621.33333333334, ans=0.125
+2024-08-31 18:31:32,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=261621.33333333334, ans=0.1
+2024-08-31 18:31:40,012 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.679e+02 1.951e+02 2.329e+02 4.159e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-31 18:31:40,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=261674.66666666666, ans=0.125
+2024-08-31 18:31:53,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=261728.0, ans=0.0
+2024-08-31 18:31:57,638 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.97 vs. limit=10.0
+2024-08-31 18:32:02,296 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.41 vs. limit=15.0
+2024-08-31 18:32:06,990 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.85 vs. limit=15.0
+2024-08-31 18:32:15,177 INFO [train.py:1114] (2/4) Epoch 20, batch 1800, loss[loss=0.2156, simple_loss=0.2862, pruned_loss=0.05252, ctc_loss=0.09997, over 19607.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2692, pruned_loss=0.04766, ctc_loss=0.08991, over 3854716.77 frames. ], batch size: 55, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:32:36,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=261888.0, ans=0.125
+2024-08-31 18:32:59,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=261941.33333333334, ans=0.07
+2024-08-31 18:33:06,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=261941.33333333334, ans=0.1
+2024-08-31 18:33:13,457 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=6.52 vs. limit=15.0
+2024-08-31 18:33:14,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=261994.66666666666, ans=0.0
+2024-08-31 18:33:31,522 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.43 vs. limit=15.0
+2024-08-31 18:33:34,490 INFO [train.py:1114] (2/4) Epoch 20, batch 1850, loss[loss=0.2079, simple_loss=0.2818, pruned_loss=0.04822, ctc_loss=0.09384, over 19587.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2692, pruned_loss=0.04785, ctc_loss=0.09001, over 3858477.29 frames. ], batch size: 57, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:33:40,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=262101.33333333334, ans=0.2
+2024-08-31 18:33:51,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=262154.6666666667, ans=0.125
+2024-08-31 18:33:56,004 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.842e+02 2.206e+02 3.038e+02 4.306e+02, threshold=4.411e+02, percent-clipped=5.0
+2024-08-31 18:33:57,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=262208.0, ans=0.1
+2024-08-31 18:34:04,960 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.64 vs. limit=15.0
+2024-08-31 18:34:07,855 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:34:19,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=262261.3333333333, ans=0.125
+2024-08-31 18:34:36,223 INFO [train.py:1114] (2/4) Epoch 20, batch 1900, loss[loss=0.1825, simple_loss=0.267, pruned_loss=0.03561, ctc_loss=0.06667, over 19647.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2696, pruned_loss=0.04809, ctc_loss=0.09039, over 3863146.02 frames. ], batch size: 59, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:34:41,344 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.15 vs. limit=12.0
+2024-08-31 18:35:07,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=262474.6666666667, ans=0.125
+2024-08-31 18:35:34,445 INFO [train.py:1114] (2/4) Epoch 20, batch 1950, loss[loss=0.1821, simple_loss=0.2528, pruned_loss=0.04053, ctc_loss=0.0758, over 19589.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2701, pruned_loss=0.04785, ctc_loss=0.08996, over 3871529.26 frames. ], batch size: 52, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:35:43,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=262634.6666666667, ans=0.125
+2024-08-31 18:35:47,982 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=262688.0, ans=0.0
+2024-08-31 18:35:54,906 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.62 vs. limit=15.0
+2024-08-31 18:35:55,625 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.650e+02 1.780e+02 2.101e+02 3.496e+02, threshold=3.560e+02, percent-clipped=0.0
+2024-08-31 18:36:00,642 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=262741.3333333333, ans=0.0
+2024-08-31 18:36:14,445 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.21 vs. limit=6.0
+2024-08-31 18:36:24,836 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.66 vs. limit=15.0
+2024-08-31 18:36:31,323 INFO [train.py:1114] (2/4) Epoch 20, batch 2000, loss[loss=0.1738, simple_loss=0.2359, pruned_loss=0.04065, ctc_loss=0.07592, over 19617.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2706, pruned_loss=0.04828, ctc_loss=0.09092, over 3855804.95 frames. ], batch size: 45, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:36:31,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=262901.3333333333, ans=0.0
+2024-08-31 18:36:40,189 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.65 vs. limit=6.0
+2024-08-31 18:36:51,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=262954.6666666667, ans=0.0
+2024-08-31 18:36:55,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=263008.0, ans=0.1
+2024-08-31 18:37:04,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=263008.0, ans=0.125
+2024-08-31 18:37:06,563 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.26 vs. limit=22.5
+2024-08-31 18:37:32,668 INFO [train.py:1114] (2/4) Epoch 20, batch 2050, loss[loss=0.1788, simple_loss=0.2491, pruned_loss=0.03983, ctc_loss=0.07203, over 19726.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.27, pruned_loss=0.04839, ctc_loss=0.091, over 3851100.32 frames. ], batch size: 47, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:37:37,613 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.88 vs. limit=15.0
+2024-08-31 18:37:51,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=263221.3333333333, ans=0.0
+2024-08-31 18:37:53,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=263221.3333333333, ans=0.125
+2024-08-31 18:38:02,084 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.724e+02 2.041e+02 2.585e+02 3.821e+02, threshold=4.082e+02, percent-clipped=5.0
+2024-08-31 18:38:32,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=263381.3333333333, ans=0.04949747468305833
+2024-08-31 18:38:36,465 INFO [train.py:1114] (2/4) Epoch 20, batch 2100, loss[loss=0.1988, simple_loss=0.2663, pruned_loss=0.04759, ctc_loss=0.09013, over 19766.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2695, pruned_loss=0.04808, ctc_loss=0.09028, over 3857313.01 frames. ], batch size: 54, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:38:38,090 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.46 vs. limit=15.0
+2024-08-31 18:38:53,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=263488.0, ans=0.125
+2024-08-31 18:39:07,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=263541.3333333333, ans=0.125
+2024-08-31 18:39:32,897 INFO [train.py:1114] (2/4) Epoch 20, batch 2150, loss[loss=0.2043, simple_loss=0.2724, pruned_loss=0.0491, ctc_loss=0.09524, over 19843.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2686, pruned_loss=0.04763, ctc_loss=0.08941, over 3867882.17 frames. ], batch size: 52, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:39:33,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=263701.3333333333, ans=0.125
+2024-08-31 18:39:37,273 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=263701.3333333333, ans=0.2
+2024-08-31 18:39:58,528 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.627e+02 1.896e+02 2.393e+02 5.058e+02, threshold=3.792e+02, percent-clipped=5.0
+2024-08-31 18:40:03,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=263808.0, ans=0.125
+2024-08-31 18:40:05,606 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.87 vs. limit=10.0
+2024-08-31 18:40:07,708 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=263808.0, ans=0.125
+2024-08-31 18:40:10,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=263861.3333333333, ans=0.0
+2024-08-31 18:40:33,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=263914.6666666667, ans=0.0
+2024-08-31 18:41:03,445 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=263914.6666666667, ans=0.0
+2024-08-31 18:41:09,871 INFO [train.py:1114] (2/4) Epoch 20, batch 2200, loss[loss=0.1871, simple_loss=0.2626, pruned_loss=0.04053, ctc_loss=0.07599, over 19596.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2688, pruned_loss=0.0476, ctc_loss=0.08923, over 3866356.65 frames. ], batch size: 57, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:41:45,061 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.16 vs. limit=22.5
+2024-08-31 18:42:03,871 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=264128.0, ans=0.2
+2024-08-31 18:42:06,209 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:42:17,182 INFO [train.py:1114] (2/4) Epoch 20, batch 2250, loss[loss=0.2022, simple_loss=0.2822, pruned_loss=0.04396, ctc_loss=0.08545, over 19614.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2698, pruned_loss=0.04772, ctc_loss=0.08955, over 3866128.11 frames. ], batch size: 55, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:42:24,125 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=264234.6666666667, ans=0.0
+2024-08-31 18:42:42,056 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.705e+02 2.149e+02 2.747e+02 5.291e+02, threshold=4.298e+02, percent-clipped=7.0
+2024-08-31 18:42:49,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=264341.3333333333, ans=0.2
+2024-08-31 18:43:02,523 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=264394.6666666667, ans=0.025
+2024-08-31 18:43:16,659 INFO [train.py:1114] (2/4) Epoch 20, batch 2300, loss[loss=0.1774, simple_loss=0.2547, pruned_loss=0.03553, ctc_loss=0.07252, over 19497.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2694, pruned_loss=0.04793, ctc_loss=0.08987, over 3860959.48 frames. ], batch size: 49, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:43:17,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=264501.3333333333, ans=0.125
+2024-08-31 18:43:21,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=264501.3333333333, ans=0.0
+2024-08-31 18:43:30,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=264554.6666666667, ans=0.1
+2024-08-31 18:43:34,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=264554.6666666667, ans=0.0
+2024-08-31 18:43:36,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=264554.6666666667, ans=0.0
+2024-08-31 18:43:39,080 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.18 vs. limit=15.0
+2024-08-31 18:43:41,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=264608.0, ans=0.1
+2024-08-31 18:43:42,676 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=264608.0, ans=0.125
+2024-08-31 18:43:56,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=264661.3333333333, ans=0.2
+2024-08-31 18:44:01,457 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.23 vs. limit=10.0
+2024-08-31 18:44:12,816 INFO [train.py:1114] (2/4) Epoch 20, batch 2350, loss[loss=0.2356, simple_loss=0.2934, pruned_loss=0.06348, ctc_loss=0.1269, over 19642.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2694, pruned_loss=0.04783, ctc_loss=0.08962, over 3863166.37 frames. ], batch size: 63, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:44:34,828 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=264821.3333333333, ans=0.1
+2024-08-31 18:44:47,446 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=264821.3333333333, ans=0.125
+2024-08-31 18:44:49,422 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.669e+02 1.905e+02 2.325e+02 3.822e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-31 18:45:09,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=264928.0, ans=0.125
+2024-08-31 18:45:26,862 INFO [train.py:1114] (2/4) Epoch 20, batch 2400, loss[loss=0.2183, simple_loss=0.287, pruned_loss=0.05441, ctc_loss=0.1018, over 19315.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2712, pruned_loss=0.04846, ctc_loss=0.09065, over 3857478.20 frames. ], batch size: 71, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:45:32,625 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=265034.6666666667, ans=0.07
+2024-08-31 18:45:33,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=265034.6666666667, ans=0.125
+2024-08-31 18:45:39,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=265088.0, ans=0.025
+2024-08-31 18:45:44,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=265088.0, ans=0.2
+2024-08-31 18:45:44,398 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.00 vs. limit=15.0
+2024-08-31 18:45:46,361 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:45:50,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=265141.3333333333, ans=0.1
+2024-08-31 18:46:08,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=265194.6666666667, ans=0.0
+2024-08-31 18:46:13,106 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.16 vs. limit=6.0
+2024-08-31 18:46:20,832 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=265248.0, ans=0.0
+2024-08-31 18:46:23,887 INFO [train.py:1114] (2/4) Epoch 20, batch 2450, loss[loss=0.2717, simple_loss=0.3115, pruned_loss=0.08417, ctc_loss=0.159, over 13610.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2747, pruned_loss=0.05094, ctc_loss=0.09589, over 3726544.93 frames. ], batch size: 140, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:46:45,868 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.663e+02 1.874e+02 2.086e+02 3.013e+02, threshold=3.749e+02, percent-clipped=0.0
+2024-08-31 18:46:48,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=265408.0, ans=0.0
+2024-08-31 18:47:07,520 INFO [train.py:1387] (2/4) Done!
diff --git a/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-3 b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-3
new file mode 100644
index 0000000000000000000000000000000000000000..3c42374eb5d19d13f4612c0a06cae77ec5ba7e60
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/log/log-train-2024-08-31-13-15-01-3
@@ -0,0 +1,1045 @@
+2024-08-31 13:15:01,249 INFO [train.py:1182] (3/4) Training started
+2024-08-31 13:15:01,971 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-31 13:15:02,198 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2535.int.cedar.computecanada.ca', 'IP address': '172.16.145.228'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 18, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': True, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-31 13:15:02,198 INFO [train.py:1212] (3/4) About to create model
+2024-08-31 13:15:10,412 INFO [train.py:1216] (3/4) Number of model parameters: 66367431
+2024-08-31 13:15:10,438 INFO [checkpoint.py:112] (3/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/streaming/exp/epoch-17.pt
+2024-08-31 13:16:01,730 INFO [train.py:1231] (3/4) Using DDP
+2024-08-31 13:16:07,016 INFO [train.py:1243] (3/4) Loading optimizer state dict
+2024-08-31 13:16:39,979 INFO [train.py:1251] (3/4) Loading scheduler state dict
+2024-08-31 13:16:39,979 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-31 13:16:39,986 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-31 13:16:39,987 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-31 13:16:39,987 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-31 13:16:39,987 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-31 13:16:39,987 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-31 13:16:39,988 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-31 13:16:41,571 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-31 13:16:41,848 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-31 13:16:41,851 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-31 13:16:41,852 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-31 13:16:42,173 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-31 13:16:42,173 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-31 13:22:43,896 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12808MB
+2024-08-31 13:22:45,377 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-31 13:23:02,019 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 12885MB
+2024-08-31 13:23:03,015 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=192, metric=4.30 vs. limit=5.0
+2024-08-31 13:23:03,528 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-31 13:24:12,095 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-31 13:24:13,681 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-31 13:24:13,703 INFO [train.py:1344] (3/4) Loading grad scaler state dict
+2024-08-31 13:25:06,159 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-31 13:25:06,950 INFO [train.py:1114] (3/4) Epoch 18, batch 0, loss[loss=0.1891, simple_loss=0.2515, pruned_loss=0.04522, ctc_loss=0.09044, over 19798.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2515, pruned_loss=0.04522, ctc_loss=0.09044, over 19798.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 13:25:06,951 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-31 13:25:28,552 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([1.3005, 0.9812, 1.4801, 0.7409, 1.4295, 1.5490, 1.6372, 1.3385],
+ device='cuda:3')
+2024-08-31 13:25:49,895 INFO [train.py:1146] (3/4) Epoch 18, validation: loss=0.1864, simple_loss=0.2743, pruned_loss=0.03646, ctc_loss=0.06397, over 944034.00 frames.
+2024-08-31 13:25:49,896 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13097MB
+2024-08-31 13:27:49,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=225680.0, ans=0.1
+2024-08-31 13:38:35,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-31 13:41:44,609 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=225786.66666666666, ans=0.125
+2024-08-31 13:44:41,910 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=225840.0, ans=0.125
+2024-08-31 13:48:13,677 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.934e+02 2.118e+02 2.433e+02 6.228e+02, threshold=4.237e+02, percent-clipped=5.0
+2024-08-31 13:56:45,348 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=225946.66666666666, ans=0.0
+2024-08-31 13:56:46,282 INFO [train.py:1114] (3/4) Epoch 18, batch 50, loss[loss=0.182, simple_loss=0.2481, pruned_loss=0.04203, ctc_loss=0.0794, over 19711.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2764, pruned_loss=0.05246, ctc_loss=0.09993, over 844773.76 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-31 14:00:22,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=226000.0, ans=0.0
+2024-08-31 14:00:57,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=226000.0, ans=0.2
+2024-08-31 14:01:09,608 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=226053.33333333334, ans=0.125
+2024-08-31 14:02:51,357 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=226053.33333333334, ans=0.125
+2024-08-31 14:15:00,238 INFO [train.py:1114] (3/4) Epoch 18, batch 100, loss[loss=0.19, simple_loss=0.2623, pruned_loss=0.04262, ctc_loss=0.08109, over 19730.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2781, pruned_loss=0.0526, ctc_loss=0.1003, over 1499130.94 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:16:49,963 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=226213.33333333334, ans=0.2
+2024-08-31 14:17:06,231 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.84 vs. limit=6.0
+2024-08-31 14:26:09,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=226320.0, ans=0.125
+2024-08-31 14:26:20,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=226320.0, ans=0.125
+2024-08-31 14:28:23,803 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=226320.0, ans=0.025
+2024-08-31 14:28:47,723 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=226373.33333333334, ans=0.125
+2024-08-31 14:32:43,711 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=226426.66666666666, ans=0.125
+2024-08-31 14:32:51,603 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.685e+02 1.949e+02 2.332e+02 3.525e+02, threshold=3.898e+02, percent-clipped=0.0
+2024-08-31 14:34:38,797 INFO [train.py:1114] (3/4) Epoch 18, batch 150, loss[loss=0.1807, simple_loss=0.2471, pruned_loss=0.04195, ctc_loss=0.07635, over 19710.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2754, pruned_loss=0.05175, ctc_loss=0.0982, over 2028556.29 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-31 14:44:38,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-31 14:45:09,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-31 14:45:10,864 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.91 vs. limit=15.0
+2024-08-31 14:47:42,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226586.66666666666, ans=0.125
+2024-08-31 15:05:15,398 INFO [train.py:1114] (3/4) Epoch 18, batch 200, loss[loss=0.2423, simple_loss=0.3004, pruned_loss=0.06577, ctc_loss=0.1317, over 18383.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2747, pruned_loss=0.05163, ctc_loss=0.09788, over 2435908.49 frames. ], batch size: 86, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:06:06,479 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.30 vs. limit=15.0
+2024-08-31 15:15:15,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=226906.66666666666, ans=0.2
+2024-08-31 15:17:44,777 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 1.761e+02 2.086e+02 2.524e+02 4.159e+02, threshold=4.172e+02, percent-clipped=2.0
+2024-08-31 15:17:59,744 INFO [train.py:1114] (3/4) Epoch 18, batch 250, loss[loss=0.2346, simple_loss=0.2974, pruned_loss=0.06208, ctc_loss=0.119, over 19385.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2738, pruned_loss=0.05107, ctc_loss=0.09641, over 2755239.67 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-31 15:19:52,573 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=227066.66666666666, ans=0.125
+2024-08-31 15:22:02,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=227120.0, ans=0.125
+2024-08-31 15:22:32,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=227120.0, ans=0.125
+2024-08-31 15:23:12,306 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=227173.33333333334, ans=0.125
+2024-08-31 15:24:04,301 INFO [train.py:1114] (3/4) Epoch 18, batch 300, loss[loss=0.2284, simple_loss=0.2895, pruned_loss=0.06192, ctc_loss=0.1087, over 19510.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2733, pruned_loss=0.05096, ctc_loss=0.09586, over 3000722.02 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:24:59,901 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=227333.33333333334, ans=0.125
+2024-08-31 15:25:24,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=227333.33333333334, ans=0.125
+2024-08-31 15:28:09,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=227386.66666666666, ans=0.125
+2024-08-31 15:30:47,324 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.305e+02 1.680e+02 1.932e+02 2.386e+02 3.920e+02, threshold=3.864e+02, percent-clipped=0.0
+2024-08-31 15:31:32,984 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=227493.33333333334, ans=0.125
+2024-08-31 15:31:47,640 INFO [train.py:1114] (3/4) Epoch 18, batch 350, loss[loss=0.17, simple_loss=0.2372, pruned_loss=0.0366, ctc_loss=0.07378, over 19798.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2744, pruned_loss=0.05153, ctc_loss=0.09691, over 3190837.21 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-31 15:31:49,937 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=227546.66666666666, ans=0.0
+2024-08-31 15:32:20,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=227600.0, ans=0.125
+2024-08-31 15:33:58,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-31 15:34:22,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=227760.0, ans=0.0
+2024-08-31 15:34:33,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=227760.0, ans=0.125
+2024-08-31 15:34:57,662 INFO [train.py:1114] (3/4) Epoch 18, batch 400, loss[loss=0.1921, simple_loss=0.2766, pruned_loss=0.03874, ctc_loss=0.07516, over 19478.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2741, pruned_loss=0.05131, ctc_loss=0.09655, over 3342300.72 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:35:52,826 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=227920.0, ans=0.125
+2024-08-31 15:36:35,607 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.56 vs. limit=10.0
+2024-08-31 15:37:04,976 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=228026.66666666666, ans=0.0
+2024-08-31 15:37:11,046 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.718e+02 1.967e+02 2.336e+02 3.401e+02, threshold=3.934e+02, percent-clipped=0.0
+2024-08-31 15:37:37,964 INFO [train.py:1114] (3/4) Epoch 18, batch 450, loss[loss=0.197, simple_loss=0.271, pruned_loss=0.04471, ctc_loss=0.08391, over 19604.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2744, pruned_loss=0.05149, ctc_loss=0.09684, over 3450789.67 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-31 15:39:20,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228133.33333333334, ans=0.1
+2024-08-31 15:39:21,873 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.56 vs. limit=15.0
+2024-08-31 15:39:36,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=228186.66666666666, ans=0.125
+2024-08-31 15:39:48,905 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=228240.0, ans=0.0
+2024-08-31 15:39:50,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=228240.0, ans=0.125
+2024-08-31 15:40:18,494 INFO [train.py:1114] (3/4) Epoch 18, batch 500, loss[loss=0.2439, simple_loss=0.3041, pruned_loss=0.06675, ctc_loss=0.1255, over 19664.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2733, pruned_loss=0.05084, ctc_loss=0.09569, over 3545366.65 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:40:26,996 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=228346.66666666666, ans=0.2
+2024-08-31 15:40:30,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=228400.0, ans=0.125
+2024-08-31 15:40:36,364 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=228400.0, ans=0.0
+2024-08-31 15:40:53,091 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=228506.66666666666, ans=0.2
+2024-08-31 15:41:02,530 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=228506.66666666666, ans=0.125
+2024-08-31 15:41:10,332 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.618e+02 1.812e+02 2.329e+02 3.946e+02, threshold=3.624e+02, percent-clipped=1.0
+2024-08-31 15:41:10,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=228560.0, ans=0.0
+2024-08-31 15:41:17,484 INFO [train.py:1114] (3/4) Epoch 18, batch 550, loss[loss=0.256, simple_loss=0.3078, pruned_loss=0.0744, ctc_loss=0.1389, over 19350.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2736, pruned_loss=0.05109, ctc_loss=0.0961, over 3608466.68 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-31 15:42:27,602 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=228666.66666666666, ans=0.0
+2024-08-31 15:43:26,719 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=228720.0, ans=0.0
+2024-08-31 15:43:27,106 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.39 vs. limit=15.0
+2024-08-31 15:43:28,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=228720.0, ans=0.125
+2024-08-31 15:43:37,736 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=228720.0, ans=0.05
+2024-08-31 15:44:18,825 INFO [train.py:1114] (3/4) Epoch 18, batch 600, loss[loss=0.2144, simple_loss=0.2744, pruned_loss=0.0553, ctc_loss=0.1095, over 19320.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2734, pruned_loss=0.05108, ctc_loss=0.09605, over 3665320.01 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:44:56,517 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.98 vs. limit=10.0
+2024-08-31 15:45:05,006 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.66 vs. limit=6.0
+2024-08-31 15:45:07,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=228986.66666666666, ans=0.1
+2024-08-31 15:45:28,758 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.314e+02 1.735e+02 2.092e+02 3.203e+02 5.009e+02, threshold=4.184e+02, percent-clipped=13.0
+2024-08-31 15:45:38,283 INFO [train.py:1114] (3/4) Epoch 18, batch 650, loss[loss=0.2017, simple_loss=0.2752, pruned_loss=0.04644, ctc_loss=0.08838, over 19775.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2729, pruned_loss=0.05069, ctc_loss=0.0954, over 3715876.17 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-31 15:46:17,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=229146.66666666666, ans=0.125
+2024-08-31 15:46:31,788 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=229200.0, ans=0.05
+2024-08-31 15:46:39,508 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=229253.33333333334, ans=0.2
+2024-08-31 15:46:51,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-31 15:46:59,817 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=229306.66666666666, ans=0.1
+2024-08-31 15:47:10,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=229360.0, ans=0.125
+2024-08-31 15:47:16,570 INFO [train.py:1114] (3/4) Epoch 18, batch 700, loss[loss=0.1723, simple_loss=0.2481, pruned_loss=0.03487, ctc_loss=0.06689, over 19735.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2737, pruned_loss=0.05118, ctc_loss=0.09642, over 3746552.88 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:47:26,744 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=229413.33333333334, ans=0.125
+2024-08-31 15:47:30,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=229466.66666666666, ans=0.0
+2024-08-31 15:47:30,894 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.98 vs. limit=15.0
+2024-08-31 15:47:34,154 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.98 vs. limit=15.0
+2024-08-31 15:48:10,576 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.672e+02 1.935e+02 2.401e+02 4.868e+02, threshold=3.870e+02, percent-clipped=1.0
+2024-08-31 15:48:14,761 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.52 vs. limit=15.0
+2024-08-31 15:48:16,519 INFO [train.py:1114] (3/4) Epoch 18, batch 750, loss[loss=0.2169, simple_loss=0.2779, pruned_loss=0.05625, ctc_loss=0.1083, over 19502.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2729, pruned_loss=0.05082, ctc_loss=0.09591, over 3773339.17 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 16.0
+2024-08-31 15:48:20,242 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=229680.0, ans=0.125
+2024-08-31 15:48:22,386 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=229680.0, ans=0.125
+2024-08-31 15:49:08,259 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.62 vs. limit=15.0
+2024-08-31 15:49:10,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229840.0, ans=0.1
+2024-08-31 15:49:17,258 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229893.33333333334, ans=0.1
+2024-08-31 15:49:28,025 INFO [train.py:1114] (3/4) Epoch 18, batch 800, loss[loss=0.194, simple_loss=0.2544, pruned_loss=0.04952, ctc_loss=0.08633, over 19800.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2732, pruned_loss=0.05098, ctc_loss=0.0961, over 3794534.88 frames. ], batch size: 49, lr: 8.37e-03, grad_scale: 32.0
+2024-08-31 15:49:37,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=229946.66666666666, ans=0.2
+2024-08-31 15:49:56,677 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.46 vs. limit=15.0
+2024-08-31 15:49:58,651 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=230053.33333333334, ans=0.125
+2024-08-31 15:49:59,897 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=230053.33333333334, ans=0.0
+2024-08-31 15:50:00,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=230053.33333333334, ans=0.025
+2024-08-31 15:50:24,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=230160.0, ans=0.2
+2024-08-31 15:50:24,838 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=230160.0, ans=22.5
+2024-08-31 15:50:27,782 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.682e+02 1.957e+02 2.333e+02 3.697e+02, threshold=3.913e+02, percent-clipped=0.0
+2024-08-31 15:50:33,684 INFO [train.py:1114] (3/4) Epoch 18, batch 850, loss[loss=0.2164, simple_loss=0.2837, pruned_loss=0.05471, ctc_loss=0.09926, over 19618.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2727, pruned_loss=0.05112, ctc_loss=0.09613, over 3812874.07 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:50:40,242 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.10 vs. limit=22.5
+2024-08-31 15:51:33,823 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=230266.66666666666, ans=0.95
+2024-08-31 15:51:38,200 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=230320.0, ans=0.125
+2024-08-31 15:51:50,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=230373.33333333334, ans=0.0
+2024-08-31 15:51:58,107 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.28 vs. limit=15.0
+2024-08-31 15:51:59,453 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.33 vs. limit=12.0
+2024-08-31 15:52:15,347 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.22 vs. limit=12.0
+2024-08-31 15:52:15,925 INFO [train.py:1114] (3/4) Epoch 18, batch 900, loss[loss=0.1792, simple_loss=0.2437, pruned_loss=0.04221, ctc_loss=0.0756, over 19824.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2729, pruned_loss=0.05134, ctc_loss=0.09645, over 3817990.98 frames. ], batch size: 49, lr: 8.36e-03, grad_scale: 32.0
+2024-08-31 15:52:20,901 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.40 vs. limit=15.0
+2024-08-31 15:52:23,175 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 15:52:26,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=230533.33333333334, ans=0.2
+2024-08-31 15:52:47,908 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.14 vs. limit=10.0
+2024-08-31 15:53:02,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=230640.0, ans=0.05
+2024-08-31 15:53:09,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=230693.33333333334, ans=0.025
+2024-08-31 15:53:12,022 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.645e+02 1.872e+02 2.411e+02 3.930e+02, threshold=3.745e+02, percent-clipped=1.0
+2024-08-31 15:53:16,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=230693.33333333334, ans=0.1
+2024-08-31 15:53:46,110 INFO [train.py:1114] (3/4) Epoch 18, batch 950, loss[loss=0.2131, simple_loss=0.2681, pruned_loss=0.05739, ctc_loss=0.1085, over 19489.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2733, pruned_loss=0.05165, ctc_loss=0.09697, over 3820036.99 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:53:46,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=230746.66666666666, ans=0.1
+2024-08-31 15:53:50,430 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.54 vs. limit=22.5
+2024-08-31 15:53:53,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=230746.66666666666, ans=0.1
+2024-08-31 15:54:35,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=230960.0, ans=0.2
+2024-08-31 15:54:36,611 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=230960.0, ans=0.95
+2024-08-31 15:54:40,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=230960.0, ans=0.0
+2024-08-31 15:54:48,313 INFO [train.py:1114] (3/4) Epoch 18, batch 1000, loss[loss=0.2018, simple_loss=0.2724, pruned_loss=0.04639, ctc_loss=0.09608, over 19864.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2745, pruned_loss=0.05203, ctc_loss=0.09765, over 3816317.46 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-31 15:54:57,911 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=231013.33333333334, ans=0.0
+2024-08-31 15:55:12,674 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.70 vs. limit=22.5
+2024-08-31 15:55:38,616 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231173.33333333334, ans=0.1
+2024-08-31 15:55:51,785 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=231226.66666666666, ans=0.0
+2024-08-31 15:55:55,126 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.660e+02 1.836e+02 2.172e+02 3.389e+02, threshold=3.673e+02, percent-clipped=0.0
+2024-08-31 15:56:01,085 INFO [train.py:1114] (3/4) Epoch 18, batch 1050, loss[loss=0.2064, simple_loss=0.2823, pruned_loss=0.0466, ctc_loss=0.0931, over 19827.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2737, pruned_loss=0.05162, ctc_loss=0.097, over 3823782.84 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:56:03,658 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=231280.0, ans=0.125
+2024-08-31 15:56:13,090 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=231333.33333333334, ans=0.2
+2024-08-31 15:57:01,167 INFO [train.py:1114] (3/4) Epoch 18, batch 1100, loss[loss=0.2189, simple_loss=0.2822, pruned_loss=0.05634, ctc_loss=0.1072, over 19595.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2736, pruned_loss=0.05133, ctc_loss=0.09652, over 3831868.12 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 32.0
+2024-08-31 15:57:01,383 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=231546.66666666666, ans=0.0
+2024-08-31 15:57:10,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=231546.66666666666, ans=0.125
+2024-08-31 15:57:13,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=231546.66666666666, ans=0.125
+2024-08-31 15:57:20,781 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.96 vs. limit=22.5
+2024-08-31 15:57:44,654 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=231706.66666666666, ans=0.025
+2024-08-31 15:57:58,310 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.608e+02 1.860e+02 2.284e+02 4.941e+02, threshold=3.719e+02, percent-clipped=1.0
+2024-08-31 15:58:04,206 INFO [train.py:1114] (3/4) Epoch 18, batch 1150, loss[loss=0.1967, simple_loss=0.2678, pruned_loss=0.04517, ctc_loss=0.08796, over 19585.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2736, pruned_loss=0.05139, ctc_loss=0.09646, over 3828941.83 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:58:26,720 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=3.92 vs. limit=12.0
+2024-08-31 15:58:27,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=231866.66666666666, ans=0.95
+2024-08-31 15:58:37,893 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=231920.0, ans=0.1
+2024-08-31 15:58:45,205 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=231920.0, ans=0.125
+2024-08-31 15:58:57,200 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=231973.33333333334, ans=0.95
+2024-08-31 15:59:03,437 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.31 vs. limit=10.0
+2024-08-31 15:59:17,232 INFO [train.py:1114] (3/4) Epoch 18, batch 1200, loss[loss=0.205, simple_loss=0.2802, pruned_loss=0.04791, ctc_loss=0.08501, over 19846.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.274, pruned_loss=0.05123, ctc_loss=0.09636, over 3824642.10 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-31 15:59:33,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=232133.33333333334, ans=0.0
+2024-08-31 15:59:34,501 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=232133.33333333334, ans=0.125
+2024-08-31 15:59:37,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=232133.33333333334, ans=0.1
+2024-08-31 15:59:46,575 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.41 vs. limit=15.0
+2024-08-31 15:59:48,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=232186.66666666666, ans=0.025
+2024-08-31 15:59:50,345 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.17 vs. limit=6.0
+2024-08-31 15:59:52,625 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.25 vs. limit=10.0
+2024-08-31 16:00:06,511 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=232293.33333333334, ans=0.0
+2024-08-31 16:00:12,202 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.681e+02 1.869e+02 2.236e+02 3.755e+02, threshold=3.738e+02, percent-clipped=1.0
+2024-08-31 16:00:18,304 INFO [train.py:1114] (3/4) Epoch 18, batch 1250, loss[loss=0.2303, simple_loss=0.2992, pruned_loss=0.05875, ctc_loss=0.1096, over 19519.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2746, pruned_loss=0.05129, ctc_loss=0.09653, over 3842414.52 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:00:24,668 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=232346.66666666666, ans=0.2
+2024-08-31 16:01:12,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=232560.0, ans=0.2
+2024-08-31 16:01:15,059 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=232560.0, ans=0.125
+2024-08-31 16:01:22,409 INFO [train.py:1114] (3/4) Epoch 18, batch 1300, loss[loss=0.2172, simple_loss=0.2789, pruned_loss=0.05567, ctc_loss=0.1103, over 18885.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2742, pruned_loss=0.05105, ctc_loss=0.09628, over 3845881.93 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 32.0
+2024-08-31 16:01:41,024 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.30 vs. limit=10.0
+2024-08-31 16:01:48,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=232720.0, ans=0.125
+2024-08-31 16:02:21,662 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 1.758e+02 2.176e+02 2.645e+02 4.342e+02, threshold=4.353e+02, percent-clipped=3.0
+2024-08-31 16:02:25,783 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.99 vs. limit=15.0
+2024-08-31 16:02:27,588 INFO [train.py:1114] (3/4) Epoch 18, batch 1350, loss[loss=0.2047, simple_loss=0.2731, pruned_loss=0.04981, ctc_loss=0.09156, over 19779.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2739, pruned_loss=0.05112, ctc_loss=0.09623, over 3857636.80 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:02:34,246 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=232880.0, ans=0.0
+2024-08-31 16:02:52,804 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.95 vs. limit=15.0
+2024-08-31 16:03:04,599 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.98 vs. limit=15.0
+2024-08-31 16:03:25,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=233093.33333333334, ans=0.125
+2024-08-31 16:03:29,585 INFO [train.py:1114] (3/4) Epoch 18, batch 1400, loss[loss=0.1888, simple_loss=0.2552, pruned_loss=0.04496, ctc_loss=0.0811, over 19661.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2739, pruned_loss=0.05112, ctc_loss=0.09614, over 3864000.41 frames. ], batch size: 46, lr: 8.31e-03, grad_scale: 32.0
+2024-08-31 16:03:39,940 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=233146.66666666666, ans=0.125
+2024-08-31 16:03:40,439 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.18 vs. limit=12.0
+2024-08-31 16:04:00,349 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.90 vs. limit=15.0
+2024-08-31 16:04:06,600 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.00 vs. limit=22.5
+2024-08-31 16:04:29,892 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=17.45 vs. limit=22.5
+2024-08-31 16:04:36,291 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.655e+02 1.916e+02 2.338e+02 3.956e+02, threshold=3.832e+02, percent-clipped=0.0
+2024-08-31 16:04:42,274 INFO [train.py:1114] (3/4) Epoch 18, batch 1450, loss[loss=0.2494, simple_loss=0.3061, pruned_loss=0.07002, ctc_loss=0.1318, over 19631.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2748, pruned_loss=0.05125, ctc_loss=0.09651, over 3862140.30 frames. ], batch size: 63, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:04:49,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=233413.33333333334, ans=0.2
+2024-08-31 16:05:12,955 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=233520.0, ans=0.1
+2024-08-31 16:05:22,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=233573.33333333334, ans=0.025
+2024-08-31 16:05:35,763 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=233626.66666666666, ans=0.0
+2024-08-31 16:05:48,827 INFO [train.py:1114] (3/4) Epoch 18, batch 1500, loss[loss=0.18, simple_loss=0.2528, pruned_loss=0.03902, ctc_loss=0.07278, over 19582.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2749, pruned_loss=0.05123, ctc_loss=0.09641, over 3862528.34 frames. ], batch size: 57, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:06:04,159 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=233733.33333333334, ans=0.125
+2024-08-31 16:06:15,484 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.99 vs. limit=22.5
+2024-08-31 16:06:42,783 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=233893.33333333334, ans=0.125
+2024-08-31 16:06:50,190 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.669e+02 1.866e+02 2.355e+02 3.552e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-31 16:07:06,042 INFO [train.py:1114] (3/4) Epoch 18, batch 1550, loss[loss=0.2309, simple_loss=0.2948, pruned_loss=0.06067, ctc_loss=0.1139, over 19583.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2747, pruned_loss=0.05132, ctc_loss=0.0966, over 3847134.59 frames. ], batch size: 60, lr: 8.30e-03, grad_scale: 32.0
+2024-08-31 16:07:20,031 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=234000.0, ans=0.025
+2024-08-31 16:07:26,261 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=234000.0, ans=0.0
+2024-08-31 16:07:44,565 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=234106.66666666666, ans=0.125
+2024-08-31 16:07:45,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=234106.66666666666, ans=0.07
+2024-08-31 16:08:07,315 INFO [train.py:1114] (3/4) Epoch 18, batch 1600, loss[loss=0.2111, simple_loss=0.2823, pruned_loss=0.05051, ctc_loss=0.09738, over 19822.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2744, pruned_loss=0.05131, ctc_loss=0.09687, over 3835614.43 frames. ], batch size: 57, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:08:11,280 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=234213.33333333334, ans=0.0
+2024-08-31 16:08:21,757 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.54 vs. limit=6.0
+2024-08-31 16:08:36,982 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=234320.0, ans=0.5
+2024-08-31 16:08:42,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=234320.0, ans=0.125
+2024-08-31 16:08:54,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=234373.33333333334, ans=0.125
+2024-08-31 16:09:02,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=234426.66666666666, ans=0.2
+2024-08-31 16:09:20,622 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.787e+02 2.153e+02 2.672e+02 5.491e+02, threshold=4.305e+02, percent-clipped=8.0
+2024-08-31 16:09:26,584 INFO [train.py:1114] (3/4) Epoch 18, batch 1650, loss[loss=0.2215, simple_loss=0.2882, pruned_loss=0.05662, ctc_loss=0.1036, over 19641.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2743, pruned_loss=0.05136, ctc_loss=0.09679, over 3831937.21 frames. ], batch size: 59, lr: 8.29e-03, grad_scale: 32.0
+2024-08-31 16:13:29,934 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=234586.66666666666, ans=0.2
+2024-08-31 16:13:36,124 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=234640.0, ans=0.125
+2024-08-31 16:14:12,438 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=234693.33333333334, ans=0.125
+2024-08-31 16:14:15,750 INFO [train.py:1114] (3/4) Epoch 18, batch 1700, loss[loss=0.1724, simple_loss=0.2347, pruned_loss=0.03996, ctc_loss=0.07529, over 19662.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2736, pruned_loss=0.05071, ctc_loss=0.09555, over 3845944.45 frames. ], batch size: 46, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:14:17,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=234746.66666666666, ans=0.025
+2024-08-31 16:14:35,859 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=234800.0, ans=0.0
+2024-08-31 16:14:39,127 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=234853.33333333334, ans=0.125
+2024-08-31 16:14:43,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=234853.33333333334, ans=0.0
+2024-08-31 16:15:07,764 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.694e+02 2.038e+02 2.484e+02 5.869e+02, threshold=4.076e+02, percent-clipped=3.0
+2024-08-31 16:15:09,318 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=234960.0, ans=0.1
+2024-08-31 16:15:13,538 INFO [train.py:1114] (3/4) Epoch 18, batch 1750, loss[loss=0.2153, simple_loss=0.2678, pruned_loss=0.06074, ctc_loss=0.103, over 19645.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2733, pruned_loss=0.05062, ctc_loss=0.09524, over 3850764.40 frames. ], batch size: 45, lr: 8.28e-03, grad_scale: 32.0
+2024-08-31 16:15:22,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=235013.33333333334, ans=0.0
+2024-08-31 16:15:37,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=235066.66666666666, ans=0.0
+2024-08-31 16:15:45,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=235120.0, ans=0.125
+2024-08-31 16:15:50,629 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=5.75 vs. limit=15.0
+2024-08-31 16:15:59,024 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=235173.33333333334, ans=0.1
+2024-08-31 16:16:05,178 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=235173.33333333334, ans=22.5
+2024-08-31 16:16:07,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=235226.66666666666, ans=0.0
+2024-08-31 16:16:18,932 INFO [train.py:1114] (3/4) Epoch 18, batch 1800, loss[loss=0.2146, simple_loss=0.2811, pruned_loss=0.05412, ctc_loss=0.09974, over 19616.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2737, pruned_loss=0.05086, ctc_loss=0.09548, over 3851681.83 frames. ], batch size: 55, lr: 8.27e-03, grad_scale: 32.0
+2024-08-31 16:16:33,610 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.56 vs. limit=15.0
+2024-08-31 16:16:40,109 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=235333.33333333334, ans=0.125
+2024-08-31 16:16:42,508 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=235386.66666666666, ans=0.125
+2024-08-31 16:17:12,064 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.739e+02 2.099e+02 2.606e+02 4.220e+02, threshold=4.197e+02, percent-clipped=1.0
+2024-08-31 16:17:16,664 INFO [train.py:1114] (3/4) Epoch 18, batch 1850, loss[loss=0.1984, simple_loss=0.2759, pruned_loss=0.0426, ctc_loss=0.08941, over 19615.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2734, pruned_loss=0.05086, ctc_loss=0.09559, over 3855891.58 frames. ], batch size: 57, lr: 8.27e-03, grad_scale: 16.0
+2024-08-31 16:17:26,483 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=235546.66666666666, ans=0.2
+2024-08-31 16:17:43,325 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=235600.0, ans=0.125
+2024-08-31 16:18:06,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=235706.66666666666, ans=0.2
+2024-08-31 16:18:08,938 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=235760.0, ans=0.125
+2024-08-31 16:18:09,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=235760.0, ans=0.125
+2024-08-31 16:18:12,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=235760.0, ans=0.5
+2024-08-31 16:18:21,105 INFO [train.py:1114] (3/4) Epoch 18, batch 1900, loss[loss=0.2063, simple_loss=0.2855, pruned_loss=0.0456, ctc_loss=0.0899, over 19669.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2739, pruned_loss=0.05101, ctc_loss=0.09583, over 3860883.18 frames. ], batch size: 59, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:18:24,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=235813.33333333334, ans=0.125
+2024-08-31 16:18:27,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=235813.33333333334, ans=0.035
+2024-08-31 16:18:38,426 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=235866.66666666666, ans=15.0
+2024-08-31 16:18:43,945 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.42 vs. limit=22.5
+2024-08-31 16:18:46,843 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=235920.0, ans=0.1
+2024-08-31 16:19:08,683 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=236026.66666666666, ans=0.125
+2024-08-31 16:19:14,238 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.296e+02 1.623e+02 1.837e+02 2.195e+02 5.135e+02, threshold=3.673e+02, percent-clipped=2.0
+2024-08-31 16:19:18,766 INFO [train.py:1114] (3/4) Epoch 18, batch 1950, loss[loss=0.1837, simple_loss=0.2565, pruned_loss=0.04076, ctc_loss=0.07339, over 19593.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.275, pruned_loss=0.05132, ctc_loss=0.09665, over 3869736.26 frames. ], batch size: 52, lr: 8.26e-03, grad_scale: 16.0
+2024-08-31 16:19:29,038 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=236133.33333333334, ans=0.1
+2024-08-31 16:20:21,374 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=236133.33333333334, ans=0.07
+2024-08-31 16:20:28,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=236133.33333333334, ans=0.125
+2024-08-31 16:20:39,099 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=236186.66666666666, ans=0.0
+2024-08-31 16:20:48,769 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=236186.66666666666, ans=0.125
+2024-08-31 16:20:49,017 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.82 vs. limit=15.0
+2024-08-31 16:21:07,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=236293.33333333334, ans=0.125
+2024-08-31 16:21:21,681 INFO [train.py:1114] (3/4) Epoch 18, batch 2000, loss[loss=0.2004, simple_loss=0.2528, pruned_loss=0.05436, ctc_loss=0.09824, over 19652.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2753, pruned_loss=0.05158, ctc_loss=0.09726, over 3853632.70 frames. ], batch size: 45, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:21:27,692 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=236346.66666666666, ans=0.125
+2024-08-31 16:21:43,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=236453.33333333334, ans=0.125
+2024-08-31 16:21:58,790 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=236506.66666666666, ans=0.125
+2024-08-31 16:22:04,471 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=236506.66666666666, ans=0.1
+2024-08-31 16:22:12,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=236560.0, ans=0.1
+2024-08-31 16:22:14,727 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.400e+02 1.704e+02 2.096e+02 2.751e+02 4.638e+02, threshold=4.193e+02, percent-clipped=6.0
+2024-08-31 16:22:19,167 INFO [train.py:1114] (3/4) Epoch 18, batch 2050, loss[loss=0.1865, simple_loss=0.2481, pruned_loss=0.04505, ctc_loss=0.08716, over 19737.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2743, pruned_loss=0.05137, ctc_loss=0.09676, over 3850559.28 frames. ], batch size: 47, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:22:19,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=236613.33333333334, ans=0.125
+2024-08-31 16:22:25,214 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.43 vs. limit=15.0
+2024-08-31 16:22:38,439 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.30 vs. limit=15.0
+2024-08-31 16:22:48,713 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.91 vs. limit=22.5
+2024-08-31 16:23:04,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=236773.33333333334, ans=0.04949747468305833
+2024-08-31 16:23:21,344 INFO [train.py:1114] (3/4) Epoch 18, batch 2100, loss[loss=0.1964, simple_loss=0.2689, pruned_loss=0.04561, ctc_loss=0.08157, over 19756.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2737, pruned_loss=0.05083, ctc_loss=0.09584, over 3857749.41 frames. ], batch size: 54, lr: 8.25e-03, grad_scale: 32.0
+2024-08-31 16:23:37,136 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.17 vs. limit=22.5
+2024-08-31 16:23:38,053 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=236933.33333333334, ans=0.0
+2024-08-31 16:23:42,503 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=236933.33333333334, ans=0.025
+2024-08-31 16:23:57,213 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=236986.66666666666, ans=0.2
+2024-08-31 16:23:58,766 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.31 vs. limit=10.0
+2024-08-31 16:24:26,184 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=237093.33333333334, ans=0.125
+2024-08-31 16:24:27,117 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.363e+02 1.628e+02 1.802e+02 2.351e+02 4.404e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 16:24:31,668 INFO [train.py:1114] (3/4) Epoch 18, batch 2150, loss[loss=0.1863, simple_loss=0.2609, pruned_loss=0.04013, ctc_loss=0.07876, over 19829.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2734, pruned_loss=0.05087, ctc_loss=0.09591, over 3869587.38 frames. ], batch size: 52, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:24:41,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=237146.66666666666, ans=0.0
+2024-08-31 16:24:42,489 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.whiten.whitening_limit, batch_count=237146.66666666666, ans=12.0
+2024-08-31 16:24:59,304 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=237253.33333333334, ans=0.0
+2024-08-31 16:25:29,331 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=237360.0, ans=0.125
+2024-08-31 16:25:40,259 INFO [train.py:1114] (3/4) Epoch 18, batch 2200, loss[loss=0.2231, simple_loss=0.2828, pruned_loss=0.05852, ctc_loss=0.1156, over 19580.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2729, pruned_loss=0.05039, ctc_loss=0.095, over 3867603.63 frames. ], batch size: 57, lr: 8.24e-03, grad_scale: 32.0
+2024-08-31 16:26:07,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=237520.0, ans=0.0
+2024-08-31 16:26:10,139 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=237520.0, ans=0.2
+2024-08-31 16:26:15,228 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.38 vs. limit=15.0
+2024-08-31 16:26:33,721 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.369e+02 1.652e+02 1.938e+02 2.493e+02 4.901e+02, threshold=3.877e+02, percent-clipped=6.0
+2024-08-31 16:26:38,341 INFO [train.py:1114] (3/4) Epoch 18, batch 2250, loss[loss=0.214, simple_loss=0.2869, pruned_loss=0.05116, ctc_loss=0.09722, over 19620.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2727, pruned_loss=0.05007, ctc_loss=0.09455, over 3867278.58 frames. ], batch size: 55, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:26:58,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten.whitening_limit, batch_count=237680.0, ans=15.0
+2024-08-31 16:27:03,810 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=237733.33333333334, ans=0.125
+2024-08-31 16:27:13,734 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=237733.33333333334, ans=0.0
+2024-08-31 16:27:39,578 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=237893.33333333334, ans=0.125
+2024-08-31 16:27:51,885 INFO [train.py:1114] (3/4) Epoch 18, batch 2300, loss[loss=0.1943, simple_loss=0.2611, pruned_loss=0.0466, ctc_loss=0.08589, over 19511.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.272, pruned_loss=0.05018, ctc_loss=0.09476, over 3861205.89 frames. ], batch size: 49, lr: 8.23e-03, grad_scale: 32.0
+2024-08-31 16:28:04,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=238000.0, ans=0.125
+2024-08-31 16:28:22,643 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=238053.33333333334, ans=0.95
+2024-08-31 16:28:28,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=238106.66666666666, ans=0.125
+2024-08-31 16:28:33,221 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=238106.66666666666, ans=0.125
+2024-08-31 16:28:40,164 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.63 vs. limit=15.0
+2024-08-31 16:28:42,367 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=238160.0, ans=0.0
+2024-08-31 16:28:47,462 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.355e+02 1.696e+02 1.848e+02 2.393e+02 3.836e+02, threshold=3.696e+02, percent-clipped=0.0
+2024-08-31 16:29:03,702 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.60 vs. limit=6.0
+2024-08-31 16:29:07,667 INFO [train.py:1114] (3/4) Epoch 18, batch 2350, loss[loss=0.196, simple_loss=0.2699, pruned_loss=0.0447, ctc_loss=0.08199, over 19631.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2718, pruned_loss=0.04993, ctc_loss=0.09426, over 3863894.69 frames. ], batch size: 63, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:29:33,111 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.91 vs. limit=22.5
+2024-08-31 16:29:34,989 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=238320.0, ans=0.1
+2024-08-31 16:29:35,540 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.00 vs. limit=15.0
+2024-08-31 16:29:37,717 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.89 vs. limit=10.0
+2024-08-31 16:30:29,253 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:30:31,525 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=238426.66666666666, ans=0.125
+2024-08-31 16:30:31,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=238426.66666666666, ans=0.1
+2024-08-31 16:30:32,736 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=238426.66666666666, ans=0.125
+2024-08-31 16:30:38,498 INFO [train.py:1114] (3/4) Epoch 18, batch 2400, loss[loss=0.2115, simple_loss=0.2842, pruned_loss=0.05022, ctc_loss=0.09604, over 19319.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2738, pruned_loss=0.05054, ctc_loss=0.09521, over 3857630.46 frames. ], batch size: 71, lr: 8.22e-03, grad_scale: 32.0
+2024-08-31 16:30:38,680 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=238480.0, ans=0.5
+2024-08-31 16:30:45,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=238480.0, ans=0.0
+2024-08-31 16:30:58,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=238533.33333333334, ans=0.125
+2024-08-31 16:31:00,254 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:31:14,444 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=238586.66666666666, ans=0.025
+2024-08-31 16:31:46,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=238693.33333333334, ans=0.2
+2024-08-31 16:31:47,468 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 1.682e+02 1.835e+02 2.125e+02 4.662e+02, threshold=3.671e+02, percent-clipped=5.0
+2024-08-31 16:31:52,086 INFO [train.py:1114] (3/4) Epoch 18, batch 2450, loss[loss=0.2635, simple_loss=0.3014, pruned_loss=0.08033, ctc_loss=0.1624, over 13216.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2777, pruned_loss=0.05338, ctc_loss=0.1009, over 3730768.58 frames. ], batch size: 140, lr: 8.21e-03, grad_scale: 32.0
+2024-08-31 16:32:21,039 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=238853.33333333334, ans=0.0
+2024-08-31 16:32:22,499 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=10.32 vs. limit=10.0
+2024-08-31 16:32:27,974 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=238906.66666666666, ans=0.07
+2024-08-31 16:32:31,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=238906.66666666666, ans=0.0
+2024-08-31 16:32:33,819 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.35 vs. limit=6.0
+2024-08-31 16:33:43,960 INFO [train.py:1114] (3/4) Epoch 19, batch 0, loss[loss=0.2179, simple_loss=0.2769, pruned_loss=0.05762, ctc_loss=0.109, over 19809.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2769, pruned_loss=0.05762, ctc_loss=0.109, over 19809.00 frames. ], batch size: 49, lr: 7.99e-03, grad_scale: 32.0
+2024-08-31 16:33:43,961 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-31 16:33:52,624 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.0211, 2.1922, 2.9664, 3.3492], device='cuda:3')
+2024-08-31 16:34:00,542 INFO [train.py:1146] (3/4) Epoch 19, validation: loss=0.1846, simple_loss=0.2728, pruned_loss=0.03584, ctc_loss=0.06159, over 944034.00 frames.
+2024-08-31 16:34:01,381 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13816MB
+2024-08-31 16:34:02,718 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=238954.66666666666, ans=0.125
+2024-08-31 16:34:08,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=238954.66666666666, ans=0.1
+2024-08-31 16:34:48,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=239114.66666666666, ans=0.125
+2024-08-31 16:34:58,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=239168.0, ans=10.0
+2024-08-31 16:35:04,408 INFO [train.py:1114] (3/4) Epoch 19, batch 50, loss[loss=0.1781, simple_loss=0.2498, pruned_loss=0.03856, ctc_loss=0.0734, over 19714.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.275, pruned_loss=0.05142, ctc_loss=0.0985, over 844167.97 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:35:09,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=239221.33333333334, ans=0.125
+2024-08-31 16:35:12,506 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.795e+02 2.006e+02 2.342e+02 4.821e+02, threshold=4.012e+02, percent-clipped=4.0
+2024-08-31 16:35:15,069 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=239274.66666666666, ans=0.05
+2024-08-31 16:35:21,383 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.19 vs. limit=15.0
+2024-08-31 16:35:23,464 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=239274.66666666666, ans=0.125
+2024-08-31 16:35:31,749 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=239328.0, ans=0.07
+2024-08-31 16:35:40,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-31 16:36:03,808 INFO [train.py:1114] (3/4) Epoch 19, batch 100, loss[loss=0.2017, simple_loss=0.2619, pruned_loss=0.05181, ctc_loss=0.09444, over 19723.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2773, pruned_loss=0.05192, ctc_loss=0.09843, over 1498340.35 frames. ], batch size: 51, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:36:12,172 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.63 vs. limit=22.5
+2024-08-31 16:36:16,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=239541.33333333334, ans=0.09899494936611666
+2024-08-31 16:36:35,912 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.97 vs. limit=10.0
+2024-08-31 16:36:48,407 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=239648.0, ans=0.2
+2024-08-31 16:37:02,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=239701.33333333334, ans=0.125
+2024-08-31 16:37:06,676 INFO [train.py:1114] (3/4) Epoch 19, batch 150, loss[loss=0.1985, simple_loss=0.2594, pruned_loss=0.05091, ctc_loss=0.08925, over 19736.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2744, pruned_loss=0.05081, ctc_loss=0.09598, over 2027311.34 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-31 16:37:15,238 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.762e+02 1.953e+02 2.445e+02 3.524e+02, threshold=3.906e+02, percent-clipped=0.0
+2024-08-31 16:37:17,823 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=239808.0, ans=0.05
+2024-08-31 16:37:39,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=239861.33333333334, ans=0.125
+2024-08-31 16:37:42,892 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=239861.33333333334, ans=0.0
+2024-08-31 16:37:44,002 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=239914.66666666666, ans=0.125
+2024-08-31 16:37:44,469 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.58 vs. limit=10.0
+2024-08-31 16:37:59,529 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.76 vs. limit=15.0
+2024-08-31 16:38:07,875 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=239968.0, ans=0.125
+2024-08-31 16:38:14,090 INFO [train.py:1114] (3/4) Epoch 19, batch 200, loss[loss=0.224, simple_loss=0.2887, pruned_loss=0.05587, ctc_loss=0.1187, over 18332.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2728, pruned_loss=0.04988, ctc_loss=0.09404, over 2435699.92 frames. ], batch size: 85, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:38:19,001 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=240021.33333333334, ans=0.025
+2024-08-31 16:38:35,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=240074.66666666666, ans=0.2
+2024-08-31 16:38:39,114 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=240128.0, ans=0.125
+2024-08-31 16:38:47,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=240128.0, ans=10.0
+2024-08-31 16:38:49,842 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=240181.33333333334, ans=0.125
+2024-08-31 16:39:10,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=240234.66666666666, ans=0.0
+2024-08-31 16:39:13,534 INFO [train.py:1114] (3/4) Epoch 19, batch 250, loss[loss=0.2275, simple_loss=0.2883, pruned_loss=0.06071, ctc_loss=0.1133, over 19409.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2731, pruned_loss=0.05043, ctc_loss=0.09509, over 2756001.92 frames. ], batch size: 67, lr: 7.97e-03, grad_scale: 32.0
+2024-08-31 16:39:27,162 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 1.733e+02 2.186e+02 2.853e+02 4.755e+02, threshold=4.372e+02, percent-clipped=7.0
+2024-08-31 16:40:02,634 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=240448.0, ans=0.125
+2024-08-31 16:40:03,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240448.0, ans=0.1
+2024-08-31 16:40:03,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=240448.0, ans=0.125
+2024-08-31 16:40:07,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=240501.33333333334, ans=0.1
+2024-08-31 16:40:11,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=240501.33333333334, ans=22.5
+2024-08-31 16:40:19,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=240554.66666666666, ans=0.2
+2024-08-31 16:40:20,385 INFO [train.py:1114] (3/4) Epoch 19, batch 300, loss[loss=0.2432, simple_loss=0.3044, pruned_loss=0.06703, ctc_loss=0.1196, over 19526.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2724, pruned_loss=0.05022, ctc_loss=0.09469, over 3001126.27 frames. ], batch size: 61, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:40:20,629 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=240554.66666666666, ans=0.0
+2024-08-31 16:40:21,066 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.46 vs. limit=15.0
+2024-08-31 16:40:37,643 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=240608.0, ans=0.125
+2024-08-31 16:40:49,119 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=240661.33333333334, ans=0.1
+2024-08-31 16:40:58,463 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:40:59,616 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=240714.66666666666, ans=0.125
+2024-08-31 16:41:03,194 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=240714.66666666666, ans=0.125
+2024-08-31 16:41:04,454 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=240714.66666666666, ans=0.0
+2024-08-31 16:41:06,210 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.34 vs. limit=15.0
+2024-08-31 16:41:09,304 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=240768.0, ans=0.2
+2024-08-31 16:41:10,635 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=240768.0, ans=0.1
+2024-08-31 16:41:13,016 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=240768.0, ans=0.125
+2024-08-31 16:41:21,962 INFO [train.py:1114] (3/4) Epoch 19, batch 350, loss[loss=0.1807, simple_loss=0.2453, pruned_loss=0.04256, ctc_loss=0.07761, over 19758.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2728, pruned_loss=0.05017, ctc_loss=0.09431, over 3191307.89 frames. ], batch size: 48, lr: 7.96e-03, grad_scale: 32.0
+2024-08-31 16:41:26,451 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.69 vs. limit=15.0
+2024-08-31 16:41:30,311 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 1.653e+02 1.904e+02 2.349e+02 4.016e+02, threshold=3.809e+02, percent-clipped=0.0
+2024-08-31 16:42:08,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=240981.33333333334, ans=0.0
+2024-08-31 16:42:25,376 INFO [train.py:1114] (3/4) Epoch 19, batch 400, loss[loss=0.2116, simple_loss=0.2799, pruned_loss=0.05236, ctc_loss=0.09635, over 19488.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2724, pruned_loss=0.05006, ctc_loss=0.09425, over 3343080.18 frames. ], batch size: 54, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:42:53,304 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=241141.33333333334, ans=0.2
+2024-08-31 16:42:56,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=241194.66666666666, ans=0.0
+2024-08-31 16:43:02,981 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.27 vs. limit=15.0
+2024-08-31 16:43:06,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=241194.66666666666, ans=0.125
+2024-08-31 16:43:12,159 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=241248.0, ans=0.0
+2024-08-31 16:43:18,071 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=241248.0, ans=0.1
+2024-08-31 16:43:23,873 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=241301.33333333334, ans=0.025
+2024-08-31 16:43:34,373 INFO [train.py:1114] (3/4) Epoch 19, batch 450, loss[loss=0.2368, simple_loss=0.301, pruned_loss=0.06349, ctc_loss=0.1139, over 19594.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2722, pruned_loss=0.04984, ctc_loss=0.09376, over 3450659.63 frames. ], batch size: 55, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:43:42,751 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.686e+02 1.896e+02 2.370e+02 4.152e+02, threshold=3.792e+02, percent-clipped=1.0
+2024-08-31 16:43:45,314 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=241408.0, ans=0.0
+2024-08-31 16:43:46,673 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=241408.0, ans=0.0
+2024-08-31 16:43:53,704 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=241408.0, ans=0.125
+2024-08-31 16:43:54,742 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=241408.0, ans=0.0
+2024-08-31 16:44:19,845 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.31 vs. limit=15.0
+2024-08-31 16:44:33,125 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=241568.0, ans=0.125
+2024-08-31 16:44:35,446 INFO [train.py:1114] (3/4) Epoch 19, batch 500, loss[loss=0.2048, simple_loss=0.2745, pruned_loss=0.049, ctc_loss=0.09275, over 19678.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.271, pruned_loss=0.0493, ctc_loss=0.09283, over 3546334.18 frames. ], batch size: 63, lr: 7.95e-03, grad_scale: 32.0
+2024-08-31 16:44:35,656 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=241621.33333333334, ans=0.1
+2024-08-31 16:44:40,625 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=241621.33333333334, ans=0.2
+2024-08-31 16:44:47,236 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.10 vs. limit=15.0
+2024-08-31 16:45:12,617 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=241781.33333333334, ans=0.0
+2024-08-31 16:45:12,967 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.58 vs. limit=15.0
+2024-08-31 16:45:13,855 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=241781.33333333334, ans=0.025
+2024-08-31 16:46:04,267 INFO [train.py:1114] (3/4) Epoch 19, batch 550, loss[loss=0.2198, simple_loss=0.2887, pruned_loss=0.05598, ctc_loss=0.09733, over 19290.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.271, pruned_loss=0.04948, ctc_loss=0.09316, over 3608127.09 frames. ], batch size: 71, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:46:11,223 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.47 vs. limit=22.5
+2024-08-31 16:46:12,720 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.697e+02 1.983e+02 2.191e+02 3.507e+02, threshold=3.966e+02, percent-clipped=0.0
+2024-08-31 16:46:48,224 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=241994.66666666666, ans=0.125
+2024-08-31 16:46:56,732 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=242048.0, ans=0.015
+2024-08-31 16:47:00,436 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=242048.0, ans=0.5
+2024-08-31 16:47:08,141 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=3.94 vs. limit=12.0
+2024-08-31 16:47:11,585 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=242101.33333333334, ans=0.0
+2024-08-31 16:47:12,884 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=242101.33333333334, ans=10.0
+2024-08-31 16:47:13,266 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.65 vs. limit=15.0
+2024-08-31 16:47:16,285 INFO [train.py:1114] (3/4) Epoch 19, batch 600, loss[loss=0.2402, simple_loss=0.3063, pruned_loss=0.06301, ctc_loss=0.1201, over 19361.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2722, pruned_loss=0.04989, ctc_loss=0.09405, over 3665674.33 frames. ], batch size: 67, lr: 7.94e-03, grad_scale: 32.0
+2024-08-31 16:47:23,285 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=242154.66666666666, ans=0.125
+2024-08-31 16:47:29,487 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.55 vs. limit=12.0
+2024-08-31 16:47:45,787 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.19 vs. limit=15.0
+2024-08-31 16:47:47,085 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.16 vs. limit=15.0
+2024-08-31 16:47:49,471 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.46 vs. limit=12.0
+2024-08-31 16:48:11,519 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:48:13,126 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.61 vs. limit=15.0
+2024-08-31 16:48:18,018 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.50 vs. limit=15.0
+2024-08-31 16:48:22,288 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=242314.66666666666, ans=0.0
+2024-08-31 16:48:29,262 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.77 vs. limit=15.0
+2024-08-31 16:48:31,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=242368.0, ans=0.125
+2024-08-31 16:48:39,685 INFO [train.py:1114] (3/4) Epoch 19, batch 650, loss[loss=0.2058, simple_loss=0.2733, pruned_loss=0.0499, ctc_loss=0.09602, over 19775.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2712, pruned_loss=0.0494, ctc_loss=0.09299, over 3716489.95 frames. ], batch size: 54, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:48:45,294 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.42 vs. limit=15.0
+2024-08-31 16:48:48,383 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 1.784e+02 2.044e+02 2.793e+02 4.792e+02, threshold=4.088e+02, percent-clipped=6.0
+2024-08-31 16:48:50,948 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=242474.66666666666, ans=0.2
+2024-08-31 16:49:28,153 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=242581.33333333334, ans=0.1
+2024-08-31 16:49:36,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=242634.66666666666, ans=0.1
+2024-08-31 16:50:01,010 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=242688.0, ans=0.125
+2024-08-31 16:50:02,078 INFO [train.py:1114] (3/4) Epoch 19, batch 700, loss[loss=0.2035, simple_loss=0.2655, pruned_loss=0.05033, ctc_loss=0.1022, over 19733.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2723, pruned_loss=0.05036, ctc_loss=0.09454, over 3749037.36 frames. ], batch size: 51, lr: 7.93e-03, grad_scale: 32.0
+2024-08-31 16:50:06,791 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.39 vs. limit=15.0
+2024-08-31 16:50:10,824 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=242688.0, ans=0.0
+2024-08-31 16:50:43,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=242848.0, ans=0.125
+2024-08-31 16:51:24,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=242901.33333333334, ans=0.025
+2024-08-31 16:52:16,517 INFO [train.py:1114] (3/4) Epoch 19, batch 750, loss[loss=0.1894, simple_loss=0.2648, pruned_loss=0.04117, ctc_loss=0.07917, over 19495.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2715, pruned_loss=0.04996, ctc_loss=0.09392, over 3775690.54 frames. ], batch size: 54, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:52:17,953 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=242954.66666666666, ans=0.2
+2024-08-31 16:52:18,238 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.41 vs. limit=12.0
+2024-08-31 16:52:40,610 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.707e+02 2.012e+02 2.576e+02 4.596e+02, threshold=4.024e+02, percent-clipped=2.0
+2024-08-31 16:52:40,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=242954.66666666666, ans=0.0
+2024-08-31 16:52:47,543 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=243008.0, ans=0.125
+2024-08-31 16:52:57,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=243061.33333333334, ans=0.2
+2024-08-31 16:53:01,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.min_positive, batch_count=243061.33333333334, ans=0.05
+2024-08-31 16:53:12,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=243114.66666666666, ans=0.0
+2024-08-31 16:53:40,995 INFO [train.py:1114] (3/4) Epoch 19, batch 800, loss[loss=0.1907, simple_loss=0.254, pruned_loss=0.04586, ctc_loss=0.08915, over 19406.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2712, pruned_loss=0.04968, ctc_loss=0.09337, over 3797056.26 frames. ], batch size: 48, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:53:48,458 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=243221.33333333334, ans=0.1
+2024-08-31 16:53:48,987 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.35 vs. limit=15.0
+2024-08-31 16:53:55,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=243274.66666666666, ans=0.125
+2024-08-31 16:54:01,012 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.28 vs. limit=15.0
+2024-08-31 16:54:09,649 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.19 vs. limit=10.0
+2024-08-31 16:54:13,813 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=243328.0, ans=0.125
+2024-08-31 16:54:24,404 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=243381.33333333334, ans=0.125
+2024-08-31 16:54:37,460 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.99 vs. limit=22.5
+2024-08-31 16:54:40,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=243434.66666666666, ans=0.0
+2024-08-31 16:54:52,040 INFO [train.py:1114] (3/4) Epoch 19, batch 850, loss[loss=0.2151, simple_loss=0.2889, pruned_loss=0.0516, ctc_loss=0.09536, over 19632.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2706, pruned_loss=0.04929, ctc_loss=0.09257, over 3816610.56 frames. ], batch size: 59, lr: 7.92e-03, grad_scale: 32.0
+2024-08-31 16:54:58,253 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.20 vs. limit=15.0
+2024-08-31 16:55:00,086 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 1.677e+02 1.837e+02 2.316e+02 3.927e+02, threshold=3.675e+02, percent-clipped=0.0
+2024-08-31 16:55:24,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=243594.66666666666, ans=0.2
+2024-08-31 16:55:46,874 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=243701.33333333334, ans=0.125
+2024-08-31 16:55:55,904 INFO [train.py:1114] (3/4) Epoch 19, batch 900, loss[loss=0.1863, simple_loss=0.2522, pruned_loss=0.04413, ctc_loss=0.08042, over 19423.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2711, pruned_loss=0.04967, ctc_loss=0.09342, over 3820995.48 frames. ], batch size: 48, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:56:01,168 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.60 vs. limit=15.0
+2024-08-31 16:56:23,822 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.12 vs. limit=12.0
+2024-08-31 16:56:26,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=243861.33333333334, ans=0.2
+2024-08-31 16:56:39,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=243914.66666666666, ans=0.025
+2024-08-31 16:56:46,392 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=243914.66666666666, ans=0.125
+2024-08-31 16:57:04,906 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=244021.33333333334, ans=0.125
+2024-08-31 16:57:05,823 INFO [train.py:1114] (3/4) Epoch 19, batch 950, loss[loss=0.1849, simple_loss=0.2555, pruned_loss=0.04189, ctc_loss=0.07609, over 19517.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2714, pruned_loss=0.04986, ctc_loss=0.09364, over 3820732.71 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-31 16:57:07,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=244021.33333333334, ans=0.0
+2024-08-31 16:57:10,863 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 16:57:14,284 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.751e+02 2.034e+02 2.400e+02 3.857e+02, threshold=4.067e+02, percent-clipped=1.0
+2024-08-31 16:57:24,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=244074.66666666666, ans=0.125
+2024-08-31 16:57:26,488 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=244074.66666666666, ans=0.125
+2024-08-31 16:57:51,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=244181.33333333334, ans=0.0
+2024-08-31 16:57:58,397 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.45 vs. limit=15.0
+2024-08-31 16:58:06,275 INFO [train.py:1114] (3/4) Epoch 19, batch 1000, loss[loss=0.1735, simple_loss=0.2502, pruned_loss=0.0351, ctc_loss=0.06664, over 19864.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2726, pruned_loss=0.05022, ctc_loss=0.09441, over 3816160.43 frames. ], batch size: 52, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 16:59:54,716 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=244501.33333333334, ans=0.025
+2024-08-31 17:00:06,104 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=244501.33333333334, ans=0.1
+2024-08-31 17:00:07,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=244501.33333333334, ans=0.0
+2024-08-31 17:00:09,431 INFO [train.py:1114] (3/4) Epoch 19, batch 1050, loss[loss=0.2118, simple_loss=0.2823, pruned_loss=0.05171, ctc_loss=0.09477, over 19837.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2719, pruned_loss=0.05003, ctc_loss=0.09397, over 3823259.35 frames. ], batch size: 57, lr: 7.90e-03, grad_scale: 32.0
+2024-08-31 17:00:17,649 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.651e+02 1.935e+02 2.361e+02 3.363e+02, threshold=3.870e+02, percent-clipped=0.0
+2024-08-31 17:00:17,832 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=244554.66666666666, ans=0.1
+2024-08-31 17:00:38,395 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=244661.33333333334, ans=0.125
+2024-08-31 17:00:39,545 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=244661.33333333334, ans=0.125
+2024-08-31 17:00:42,949 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.53 vs. limit=12.0
+2024-08-31 17:00:47,397 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=244714.66666666666, ans=0.0
+2024-08-31 17:00:52,397 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.12 vs. limit=22.5
+2024-08-31 17:01:12,071 INFO [train.py:1114] (3/4) Epoch 19, batch 1100, loss[loss=0.1919, simple_loss=0.2604, pruned_loss=0.04507, ctc_loss=0.08305, over 19584.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2712, pruned_loss=0.04943, ctc_loss=0.09303, over 3831325.97 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:01:52,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=244928.0, ans=0.125
+2024-08-31 17:01:55,162 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=244928.0, ans=0.0
+2024-08-31 17:02:07,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=244981.33333333334, ans=0.125
+2024-08-31 17:02:43,476 INFO [train.py:1114] (3/4) Epoch 19, batch 1150, loss[loss=0.2256, simple_loss=0.2805, pruned_loss=0.06213, ctc_loss=0.1159, over 19576.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2712, pruned_loss=0.04963, ctc_loss=0.09344, over 3829831.16 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:03:11,405 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.693e+02 1.899e+02 2.295e+02 3.327e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-31 17:03:11,608 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=245088.0, ans=0.125
+2024-08-31 17:03:12,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=245088.0, ans=0.125
+2024-08-31 17:03:49,556 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=245248.0, ans=0.2
+2024-08-31 17:03:50,957 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=245248.0, ans=0.1
+2024-08-31 17:03:56,456 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=245301.33333333334, ans=0.0
+2024-08-31 17:03:56,831 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.61 vs. limit=22.5
+2024-08-31 17:04:04,674 INFO [train.py:1114] (3/4) Epoch 19, batch 1200, loss[loss=0.1896, simple_loss=0.2671, pruned_loss=0.04057, ctc_loss=0.07747, over 19836.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2722, pruned_loss=0.04996, ctc_loss=0.09411, over 3824867.73 frames. ], batch size: 57, lr: 7.89e-03, grad_scale: 32.0
+2024-08-31 17:04:30,510 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.72 vs. limit=15.0
+2024-08-31 17:04:46,821 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=245514.66666666666, ans=0.125
+2024-08-31 17:04:49,095 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=245514.66666666666, ans=0.125
+2024-08-31 17:04:52,977 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=245514.66666666666, ans=0.1
+2024-08-31 17:04:55,159 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=245568.0, ans=0.125
+2024-08-31 17:05:08,565 INFO [train.py:1114] (3/4) Epoch 19, batch 1250, loss[loss=0.2459, simple_loss=0.2992, pruned_loss=0.07012, ctc_loss=0.1306, over 19549.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2727, pruned_loss=0.05006, ctc_loss=0.09422, over 3843427.90 frames. ], batch size: 61, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:05:16,755 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.647e+02 1.911e+02 2.205e+02 3.499e+02, threshold=3.822e+02, percent-clipped=0.0
+2024-08-31 17:05:39,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=245728.0, ans=0.2
+2024-08-31 17:05:50,132 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:06:03,306 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=245781.33333333334, ans=0.2
+2024-08-31 17:06:15,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=245834.66666666666, ans=15.0
+2024-08-31 17:06:18,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=245888.0, ans=0.125
+2024-08-31 17:06:19,691 INFO [train.py:1114] (3/4) Epoch 19, batch 1300, loss[loss=0.2119, simple_loss=0.282, pruned_loss=0.05119, ctc_loss=0.09866, over 18812.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.272, pruned_loss=0.04998, ctc_loss=0.09405, over 3845729.62 frames. ], batch size: 76, lr: 7.88e-03, grad_scale: 32.0
+2024-08-31 17:06:35,595 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=7.10 vs. limit=15.0
+2024-08-31 17:06:38,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=245941.33333333334, ans=0.2
+2024-08-31 17:06:41,170 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=245941.33333333334, ans=0.0
+2024-08-31 17:06:54,820 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=245994.66666666666, ans=0.1
+2024-08-31 17:07:15,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=246101.33333333334, ans=0.0
+2024-08-31 17:07:22,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=246101.33333333334, ans=0.2
+2024-08-31 17:07:25,630 INFO [train.py:1114] (3/4) Epoch 19, batch 1350, loss[loss=0.1712, simple_loss=0.2488, pruned_loss=0.0341, ctc_loss=0.06333, over 19764.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2711, pruned_loss=0.04915, ctc_loss=0.09238, over 3857959.38 frames. ], batch size: 54, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:07:39,286 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.765e+02 2.070e+02 2.720e+02 4.418e+02, threshold=4.141e+02, percent-clipped=1.0
+2024-08-31 17:08:07,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=246314.66666666666, ans=0.125
+2024-08-31 17:08:35,879 INFO [train.py:1114] (3/4) Epoch 19, batch 1400, loss[loss=0.1736, simple_loss=0.2353, pruned_loss=0.04081, ctc_loss=0.07566, over 19666.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2704, pruned_loss=0.04884, ctc_loss=0.09181, over 3865333.02 frames. ], batch size: 46, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:08:44,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=246421.33333333334, ans=0.125
+2024-08-31 17:08:44,536 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=246421.33333333334, ans=0.2
+2024-08-31 17:09:09,498 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=246474.66666666666, ans=0.1
+2024-08-31 17:09:34,549 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=246581.33333333334, ans=0.1
+2024-08-31 17:09:36,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=246634.66666666666, ans=0.025
+2024-08-31 17:09:36,940 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=246634.66666666666, ans=0.0
+2024-08-31 17:09:53,651 INFO [train.py:1114] (3/4) Epoch 19, batch 1450, loss[loss=0.2121, simple_loss=0.2818, pruned_loss=0.05175, ctc_loss=0.09734, over 19675.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2712, pruned_loss=0.04929, ctc_loss=0.09278, over 3863411.14 frames. ], batch size: 63, lr: 7.87e-03, grad_scale: 64.0
+2024-08-31 17:10:02,074 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 1.691e+02 1.919e+02 2.362e+02 3.353e+02, threshold=3.838e+02, percent-clipped=0.0
+2024-08-31 17:10:08,575 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=246741.33333333334, ans=0.125
+2024-08-31 17:11:07,228 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=246741.33333333334, ans=0.0
+2024-08-31 17:11:25,140 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246794.66666666666, ans=0.1
+2024-08-31 17:11:31,577 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.82 vs. limit=15.0
+2024-08-31 17:11:47,130 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.92 vs. limit=22.5
+2024-08-31 17:11:53,256 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=246901.33333333334, ans=0.125
+2024-08-31 17:12:02,033 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.11 vs. limit=15.0
+2024-08-31 17:12:11,940 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-31 17:12:12,390 INFO [train.py:1114] (3/4) Epoch 19, batch 1500, loss[loss=0.2086, simple_loss=0.2889, pruned_loss=0.04672, ctc_loss=0.08717, over 19559.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.272, pruned_loss=0.04963, ctc_loss=0.09359, over 3862802.71 frames. ], batch size: 57, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:14:38,395 INFO [train.py:1114] (3/4) Epoch 19, batch 1550, loss[loss=0.2032, simple_loss=0.2881, pruned_loss=0.04248, ctc_loss=0.0832, over 19587.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2719, pruned_loss=0.04961, ctc_loss=0.09344, over 3847731.08 frames. ], batch size: 60, lr: 7.86e-03, grad_scale: 64.0
+2024-08-31 17:14:46,790 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.654e+02 1.883e+02 2.328e+02 3.879e+02, threshold=3.765e+02, percent-clipped=1.0
+2024-08-31 17:14:48,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=247221.33333333334, ans=0.125
+2024-08-31 17:15:08,672 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=247274.66666666666, ans=0.125
+2024-08-31 17:15:32,182 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.10 vs. limit=12.0
+2024-08-31 17:16:15,716 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=247381.33333333334, ans=0.0
+2024-08-31 17:16:15,842 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=247381.33333333334, ans=0.1
+2024-08-31 17:16:40,629 INFO [train.py:1114] (3/4) Epoch 19, batch 1600, loss[loss=0.2071, simple_loss=0.2805, pruned_loss=0.04894, ctc_loss=0.08984, over 19839.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2717, pruned_loss=0.04959, ctc_loss=0.09346, over 3836938.66 frames. ], batch size: 57, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:17:00,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=247541.33333333334, ans=0.125
+2024-08-31 17:17:11,661 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.22 vs. limit=22.5
+2024-08-31 17:17:20,042 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.19 vs. limit=15.0
+2024-08-31 17:17:35,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=247701.33333333334, ans=0.0
+2024-08-31 17:17:41,216 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.47 vs. limit=15.0
+2024-08-31 17:17:41,997 INFO [train.py:1114] (3/4) Epoch 19, batch 1650, loss[loss=0.1891, simple_loss=0.2738, pruned_loss=0.03846, ctc_loss=0.06853, over 19648.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2713, pruned_loss=0.04964, ctc_loss=0.09376, over 3833338.69 frames. ], batch size: 59, lr: 7.85e-03, grad_scale: 64.0
+2024-08-31 17:17:46,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=247754.66666666666, ans=0.1
+2024-08-31 17:17:50,571 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.753e+02 1.927e+02 2.360e+02 4.500e+02, threshold=3.853e+02, percent-clipped=4.0
+2024-08-31 17:18:30,042 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.96 vs. limit=15.0
+2024-08-31 17:18:38,063 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=247968.0, ans=0.125
+2024-08-31 17:18:44,908 INFO [train.py:1114] (3/4) Epoch 19, batch 1700, loss[loss=0.1991, simple_loss=0.254, pruned_loss=0.05162, ctc_loss=0.1025, over 19679.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2708, pruned_loss=0.04899, ctc_loss=0.09251, over 3847403.35 frames. ], batch size: 46, lr: 7.84e-03, grad_scale: 64.0
+2024-08-31 17:18:53,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=248021.33333333334, ans=0.125
+2024-08-31 17:19:02,719 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=248074.66666666666, ans=0.0
+2024-08-31 17:19:10,916 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=248128.0, ans=0.0
+2024-08-31 17:19:24,171 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.93 vs. limit=15.0
+2024-08-31 17:19:32,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=248181.33333333334, ans=0.0
+2024-08-31 17:19:32,183 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=248181.33333333334, ans=0.025
+2024-08-31 17:19:42,642 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.61 vs. limit=15.0
+2024-08-31 17:19:52,915 INFO [train.py:1114] (3/4) Epoch 19, batch 1750, loss[loss=0.1921, simple_loss=0.2525, pruned_loss=0.04941, ctc_loss=0.08211, over 19628.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2706, pruned_loss=0.04896, ctc_loss=0.09225, over 3851796.27 frames. ], batch size: 45, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:20:02,144 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.715e+02 1.941e+02 2.441e+02 4.524e+02, threshold=3.882e+02, percent-clipped=3.0
+2024-08-31 17:20:28,277 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=248448.0, ans=0.125
+2024-08-31 17:20:35,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=248448.0, ans=0.1
+2024-08-31 17:20:49,898 INFO [train.py:1114] (3/4) Epoch 19, batch 1800, loss[loss=0.2033, simple_loss=0.2817, pruned_loss=0.04603, ctc_loss=0.08216, over 19609.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.271, pruned_loss=0.04888, ctc_loss=0.09214, over 3853062.75 frames. ], batch size: 55, lr: 7.84e-03, grad_scale: 32.0
+2024-08-31 17:20:51,260 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=248554.66666666666, ans=0.0
+2024-08-31 17:21:14,183 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=6.153e-03
+2024-08-31 17:21:22,358 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.57 vs. limit=22.5
+2024-08-31 17:21:36,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=248768.0, ans=0.125
+2024-08-31 17:21:47,143 INFO [train.py:1114] (3/4) Epoch 19, batch 1850, loss[loss=0.2155, simple_loss=0.2882, pruned_loss=0.05173, ctc_loss=0.09813, over 19589.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2708, pruned_loss=0.04887, ctc_loss=0.09222, over 3856845.91 frames. ], batch size: 57, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:21:53,501 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.17 vs. limit=22.5
+2024-08-31 17:21:56,046 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.825e+02 2.203e+02 3.044e+02 4.782e+02, threshold=4.406e+02, percent-clipped=6.0
+2024-08-31 17:22:11,105 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=248874.66666666666, ans=0.025
+2024-08-31 17:22:47,375 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=249034.66666666666, ans=0.125
+2024-08-31 17:22:51,498 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=249088.0, ans=0.125
+2024-08-31 17:22:52,470 INFO [train.py:1114] (3/4) Epoch 19, batch 1900, loss[loss=0.1986, simple_loss=0.2663, pruned_loss=0.04762, ctc_loss=0.08905, over 19656.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.271, pruned_loss=0.04878, ctc_loss=0.09183, over 3862137.32 frames. ], batch size: 59, lr: 7.83e-03, grad_scale: 32.0
+2024-08-31 17:23:16,046 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=249194.66666666666, ans=0.125
+2024-08-31 17:23:18,132 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=249194.66666666666, ans=0.2
+2024-08-31 17:23:45,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=249301.33333333334, ans=0.04949747468305833
+2024-08-31 17:23:48,976 INFO [train.py:1114] (3/4) Epoch 19, batch 1950, loss[loss=0.1863, simple_loss=0.2483, pruned_loss=0.04522, ctc_loss=0.08497, over 19588.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2718, pruned_loss=0.04907, ctc_loss=0.09228, over 3871034.46 frames. ], batch size: 52, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:23:58,749 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.608e+02 1.802e+02 2.157e+02 4.545e+02, threshold=3.604e+02, percent-clipped=1.0
+2024-08-31 17:24:01,477 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=249408.0, ans=0.0
+2024-08-31 17:24:11,127 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.39 vs. limit=15.0
+2024-08-31 17:24:36,744 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=8.55 vs. limit=22.5
+2024-08-31 17:24:39,031 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:24:50,837 INFO [train.py:1114] (3/4) Epoch 19, batch 2000, loss[loss=0.1846, simple_loss=0.2436, pruned_loss=0.0449, ctc_loss=0.08945, over 19668.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2723, pruned_loss=0.04924, ctc_loss=0.09261, over 3856443.69 frames. ], batch size: 45, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:25:03,533 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=249674.66666666666, ans=10.0
+2024-08-31 17:25:15,413 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.57 vs. limit=15.0
+2024-08-31 17:25:35,422 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=249834.66666666666, ans=0.125
+2024-08-31 17:25:35,450 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=249834.66666666666, ans=0.125
+2024-08-31 17:25:46,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=249888.0, ans=0.125
+2024-08-31 17:25:47,795 INFO [train.py:1114] (3/4) Epoch 19, batch 2050, loss[loss=0.1951, simple_loss=0.2578, pruned_loss=0.04819, ctc_loss=0.09005, over 19732.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2711, pruned_loss=0.04884, ctc_loss=0.09196, over 3853661.78 frames. ], batch size: 47, lr: 7.82e-03, grad_scale: 32.0
+2024-08-31 17:25:54,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=249888.0, ans=0.0
+2024-08-31 17:25:57,136 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.719e+02 2.018e+02 2.402e+02 3.677e+02, threshold=4.037e+02, percent-clipped=1.0
+2024-08-31 17:26:01,784 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=249941.33333333334, ans=0.0
+2024-08-31 17:26:18,001 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.45 vs. limit=22.5
+2024-08-31 17:26:22,914 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=250048.0, ans=0.125
+2024-08-31 17:26:44,670 INFO [train.py:1114] (3/4) Epoch 19, batch 2100, loss[loss=0.2095, simple_loss=0.2769, pruned_loss=0.05215, ctc_loss=0.09478, over 19771.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2703, pruned_loss=0.04833, ctc_loss=0.09105, over 3860442.32 frames. ], batch size: 54, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:26:48,738 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.48 vs. limit=15.0
+2024-08-31 17:26:49,566 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=250154.66666666666, ans=0.1
+2024-08-31 17:27:06,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=250261.33333333334, ans=0.0
+2024-08-31 17:27:10,798 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=250261.33333333334, ans=0.0
+2024-08-31 17:27:12,309 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.55 vs. limit=22.5
+2024-08-31 17:27:16,765 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=250261.33333333334, ans=0.125
+2024-08-31 17:27:29,077 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.90 vs. limit=15.0
+2024-08-31 17:27:35,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:36,902 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=250368.0, ans=0.0
+2024-08-31 17:27:38,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=250368.0, ans=0.125
+2024-08-31 17:27:42,498 INFO [train.py:1114] (3/4) Epoch 19, batch 2150, loss[loss=0.1845, simple_loss=0.2589, pruned_loss=0.04006, ctc_loss=0.07504, over 19850.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2699, pruned_loss=0.04827, ctc_loss=0.09096, over 3871247.27 frames. ], batch size: 52, lr: 7.81e-03, grad_scale: 32.0
+2024-08-31 17:27:42,793 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=250421.33333333334, ans=0.2
+2024-08-31 17:27:51,498 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 1.672e+02 1.975e+02 2.523e+02 4.782e+02, threshold=3.951e+02, percent-clipped=2.0
+2024-08-31 17:27:51,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=250421.33333333334, ans=0.125
+2024-08-31 17:28:07,117 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=250528.0, ans=0.125
+2024-08-31 17:28:18,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=250581.33333333334, ans=0.125
+2024-08-31 17:28:31,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=250634.66666666666, ans=0.0
+2024-08-31 17:28:35,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=250634.66666666666, ans=0.2
+2024-08-31 17:28:39,679 INFO [train.py:1114] (3/4) Epoch 19, batch 2200, loss[loss=0.2016, simple_loss=0.2703, pruned_loss=0.04785, ctc_loss=0.09283, over 19589.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2699, pruned_loss=0.0483, ctc_loss=0.09093, over 3869966.80 frames. ], batch size: 57, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:28:52,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=250741.33333333334, ans=0.1
+2024-08-31 17:28:53,820 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=250741.33333333334, ans=0.125
+2024-08-31 17:29:11,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=250794.66666666666, ans=0.0
+2024-08-31 17:29:11,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=250794.66666666666, ans=0.1
+2024-08-31 17:29:27,398 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=250901.33333333334, ans=0.0
+2024-08-31 17:29:38,744 INFO [train.py:1114] (3/4) Epoch 19, batch 2250, loss[loss=0.2266, simple_loss=0.3, pruned_loss=0.05582, ctc_loss=0.1037, over 19610.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2707, pruned_loss=0.04869, ctc_loss=0.09164, over 3869114.50 frames. ], batch size: 55, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:29:47,351 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.680e+02 1.896e+02 2.375e+02 5.292e+02, threshold=3.791e+02, percent-clipped=4.0
+2024-08-31 17:29:57,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=251008.0, ans=0.0
+2024-08-31 17:29:58,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=251008.0, ans=0.0
+2024-08-31 17:29:59,330 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=251008.0, ans=0.0
+2024-08-31 17:30:01,774 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:30:03,108 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=12.0
+2024-08-31 17:30:12,274 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.57 vs. limit=15.0
+2024-08-31 17:30:33,388 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=251168.0, ans=0.125
+2024-08-31 17:30:40,030 INFO [train.py:1114] (3/4) Epoch 19, batch 2300, loss[loss=0.1767, simple_loss=0.2504, pruned_loss=0.037, ctc_loss=0.07256, over 19483.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2697, pruned_loss=0.04856, ctc_loss=0.09145, over 3863046.08 frames. ], batch size: 49, lr: 7.80e-03, grad_scale: 32.0
+2024-08-31 17:30:43,899 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=251221.33333333334, ans=0.0
+2024-08-31 17:30:46,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=251221.33333333334, ans=0.0
+2024-08-31 17:30:55,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=251274.66666666666, ans=0.125
+2024-08-31 17:31:03,247 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.28 vs. limit=15.0
+2024-08-31 17:31:05,414 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.19 vs. limit=15.0
+2024-08-31 17:31:05,765 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.14 vs. limit=15.0
+2024-08-31 17:31:36,399 INFO [train.py:1114] (3/4) Epoch 19, batch 2350, loss[loss=0.2021, simple_loss=0.2764, pruned_loss=0.04688, ctc_loss=0.08488, over 19682.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2702, pruned_loss=0.04873, ctc_loss=0.09152, over 3865447.04 frames. ], batch size: 63, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:31:43,234 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=251488.0, ans=0.0
+2024-08-31 17:31:45,225 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.718e+02 2.013e+02 2.563e+02 3.706e+02, threshold=4.026e+02, percent-clipped=0.0
+2024-08-31 17:31:56,476 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=251541.33333333334, ans=0.07
+2024-08-31 17:32:01,963 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=251594.66666666666, ans=0.0
+2024-08-31 17:32:12,257 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=251594.66666666666, ans=0.1
+2024-08-31 17:32:14,287 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=251648.0, ans=0.125
+2024-08-31 17:32:32,332 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=251701.33333333334, ans=0.0
+2024-08-31 17:32:36,571 INFO [train.py:1114] (3/4) Epoch 19, batch 2400, loss[loss=0.2284, simple_loss=0.2933, pruned_loss=0.05933, ctc_loss=0.1124, over 19361.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2726, pruned_loss=0.04985, ctc_loss=0.09325, over 3858986.01 frames. ], batch size: 71, lr: 7.79e-03, grad_scale: 32.0
+2024-08-31 17:32:43,487 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=251754.66666666666, ans=0.0
+2024-08-31 17:32:44,728 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=251754.66666666666, ans=0.0
+2024-08-31 17:32:45,965 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=251754.66666666666, ans=0.0
+2024-08-31 17:32:46,080 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=251754.66666666666, ans=0.05
+2024-08-31 17:32:48,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=251808.0, ans=0.125
+2024-08-31 17:32:49,361 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=251808.0, ans=0.2
+2024-08-31 17:32:50,411 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=251808.0, ans=0.125
+2024-08-31 17:33:25,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=251914.66666666666, ans=0.0
+2024-08-31 17:33:39,839 INFO [train.py:1114] (3/4) Epoch 19, batch 2450, loss[loss=0.2636, simple_loss=0.3023, pruned_loss=0.08088, ctc_loss=0.158, over 12773.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2764, pruned_loss=0.05257, ctc_loss=0.09876, over 3730665.26 frames. ], batch size: 141, lr: 7.78e-03, grad_scale: 32.0
+2024-08-31 17:33:44,812 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=252021.33333333334, ans=0.1
+2024-08-31 17:33:48,951 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.610e+02 1.856e+02 2.081e+02 3.075e+02, threshold=3.711e+02, percent-clipped=0.0
+2024-08-31 17:33:57,265 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=252074.66666666666, ans=0.125
+2024-08-31 17:34:04,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=252128.0, ans=0.025
+2024-08-31 17:34:11,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=252128.0, ans=0.2
+2024-08-31 17:34:14,072 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=252181.33333333334, ans=0.0
+2024-08-31 17:34:18,920 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.76 vs. limit=15.0
+2024-08-31 17:36:18,534 INFO [train.py:1114] (3/4) Epoch 20, batch 0, loss[loss=0.2192, simple_loss=0.2735, pruned_loss=0.05901, ctc_loss=0.117, over 19388.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2735, pruned_loss=0.05901, ctc_loss=0.117, over 19388.00 frames. ], batch size: 48, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:36:18,534 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-31 17:36:23,457 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.5962, 3.1128, 2.1584, 2.8092], device='cuda:3')
+2024-08-31 17:36:28,434 INFO [train.py:1146] (3/4) Epoch 20, validation: loss=0.1834, simple_loss=0.2715, pruned_loss=0.03542, ctc_loss=0.061, over 944034.00 frames.
+2024-08-31 17:36:28,434 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13816MB
+2024-08-31 17:37:17,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=252448.0, ans=0.04949747468305833
+2024-08-31 17:37:20,794 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=252448.0, ans=0.125
+2024-08-31 17:37:23,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=252448.0, ans=0.0
+2024-08-31 17:37:27,970 INFO [train.py:1114] (3/4) Epoch 20, batch 50, loss[loss=0.1903, simple_loss=0.257, pruned_loss=0.04383, ctc_loss=0.08962, over 19735.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2741, pruned_loss=0.05092, ctc_loss=0.0973, over 843375.14 frames. ], batch size: 47, lr: 7.58e-03, grad_scale: 32.0
+2024-08-31 17:37:51,151 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.696e+02 1.962e+02 2.261e+02 4.473e+02, threshold=3.923e+02, percent-clipped=2.0
+2024-08-31 17:40:00,455 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=252661.33333333334, ans=0.1
+2024-08-31 17:41:27,191 INFO [train.py:1114] (3/4) Epoch 20, batch 100, loss[loss=0.1856, simple_loss=0.2579, pruned_loss=0.04111, ctc_loss=0.07769, over 19726.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.275, pruned_loss=0.05043, ctc_loss=0.09577, over 1497870.57 frames. ], batch size: 51, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:41:32,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=252768.0, ans=0.025
+2024-08-31 17:42:11,158 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:42:17,204 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=252821.33333333334, ans=0.0
+2024-08-31 17:42:26,801 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=252821.33333333334, ans=0.125
+2024-08-31 17:42:36,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=252874.66666666666, ans=0.125
+2024-08-31 17:42:44,109 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=252874.66666666666, ans=0.0
+2024-08-31 17:43:01,180 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=12.0
+2024-08-31 17:43:48,182 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=252981.33333333334, ans=0.04949747468305833
+2024-08-31 17:43:59,681 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=252981.33333333334, ans=0.0
+2024-08-31 17:43:59,831 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=252981.33333333334, ans=0.1
+2024-08-31 17:44:06,483 INFO [train.py:1114] (3/4) Epoch 20, batch 150, loss[loss=0.1703, simple_loss=0.237, pruned_loss=0.03723, ctc_loss=0.07265, over 19717.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2716, pruned_loss=0.04855, ctc_loss=0.09182, over 2027723.87 frames. ], batch size: 47, lr: 7.57e-03, grad_scale: 32.0
+2024-08-31 17:44:06,819 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=253034.66666666666, ans=0.125
+2024-08-31 17:44:59,737 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.634e+02 1.821e+02 2.194e+02 3.683e+02, threshold=3.641e+02, percent-clipped=0.0
+2024-08-31 17:45:32,936 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=253194.66666666666, ans=0.04949747468305833
+2024-08-31 17:45:59,899 INFO [train.py:1114] (3/4) Epoch 20, batch 200, loss[loss=0.2032, simple_loss=0.2739, pruned_loss=0.04742, ctc_loss=0.09421, over 18525.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2706, pruned_loss=0.04839, ctc_loss=0.09115, over 2435784.70 frames. ], batch size: 85, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:46:03,545 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=253301.33333333334, ans=0.125
+2024-08-31 17:46:05,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=253301.33333333334, ans=0.0
+2024-08-31 17:46:49,995 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 17:46:58,366 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=253461.33333333334, ans=0.0
+2024-08-31 17:47:12,468 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=253461.33333333334, ans=0.0
+2024-08-31 17:47:33,343 INFO [train.py:1114] (3/4) Epoch 20, batch 250, loss[loss=0.2162, simple_loss=0.2871, pruned_loss=0.0532, ctc_loss=0.09738, over 19355.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2696, pruned_loss=0.04781, ctc_loss=0.09011, over 2756034.48 frames. ], batch size: 67, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:47:56,509 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.99 vs. limit=10.0
+2024-08-31 17:47:59,372 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.745e+02 2.044e+02 2.602e+02 4.259e+02, threshold=4.089e+02, percent-clipped=6.0
+2024-08-31 17:49:00,941 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=253674.66666666666, ans=0.0
+2024-08-31 17:49:45,834 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=253728.0, ans=0.0
+2024-08-31 17:49:54,110 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=253781.33333333334, ans=0.125
+2024-08-31 17:50:00,349 INFO [train.py:1114] (3/4) Epoch 20, batch 300, loss[loss=0.2186, simple_loss=0.287, pruned_loss=0.05508, ctc_loss=0.1, over 19518.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2694, pruned_loss=0.04748, ctc_loss=0.08951, over 3000612.69 frames. ], batch size: 61, lr: 7.56e-03, grad_scale: 32.0
+2024-08-31 17:50:14,391 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.57 vs. limit=10.0
+2024-08-31 17:50:34,181 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.63 vs. limit=15.0
+2024-08-31 17:50:42,504 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.53 vs. limit=15.0
+2024-08-31 17:50:56,234 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=254048.0, ans=0.0
+2024-08-31 17:51:05,490 INFO [train.py:1114] (3/4) Epoch 20, batch 350, loss[loss=0.1775, simple_loss=0.2409, pruned_loss=0.04109, ctc_loss=0.07976, over 19756.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2701, pruned_loss=0.04792, ctc_loss=0.09024, over 3189836.52 frames. ], batch size: 48, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:51:10,498 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=254101.33333333334, ans=0.035
+2024-08-31 17:51:14,194 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=254101.33333333334, ans=0.125
+2024-08-31 17:51:26,965 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.703e+02 1.946e+02 2.321e+02 4.034e+02, threshold=3.891e+02, percent-clipped=0.0
+2024-08-31 17:51:45,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=254261.33333333334, ans=0.125
+2024-08-31 17:51:46,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=254261.33333333334, ans=0.125
+2024-08-31 17:52:04,338 INFO [train.py:1114] (3/4) Epoch 20, batch 400, loss[loss=0.213, simple_loss=0.2739, pruned_loss=0.05448, ctc_loss=0.1079, over 19496.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2696, pruned_loss=0.04775, ctc_loss=0.09009, over 3341479.33 frames. ], batch size: 54, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:52:10,473 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.41 vs. limit=22.5
+2024-08-31 17:52:11,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=254368.0, ans=0.025
+2024-08-31 17:52:20,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=254421.33333333334, ans=0.1
+2024-08-31 17:52:58,758 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=254581.33333333334, ans=0.125
+2024-08-31 17:53:10,612 INFO [train.py:1114] (3/4) Epoch 20, batch 450, loss[loss=0.2178, simple_loss=0.2877, pruned_loss=0.05247, ctc_loss=0.1073, over 19610.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2694, pruned_loss=0.04778, ctc_loss=0.09006, over 3448990.92 frames. ], batch size: 55, lr: 7.55e-03, grad_scale: 32.0
+2024-08-31 17:53:16,988 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=254634.66666666666, ans=0.125
+2024-08-31 17:53:18,986 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.47 vs. limit=10.0
+2024-08-31 17:53:23,029 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=254688.0, ans=0.125
+2024-08-31 17:53:31,688 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.627e+02 1.777e+02 2.217e+02 3.582e+02, threshold=3.554e+02, percent-clipped=0.0
+2024-08-31 17:53:42,858 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=254741.33333333334, ans=0.125
+2024-08-31 17:54:01,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=254794.66666666666, ans=0.125
+2024-08-31 17:54:08,527 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=254848.0, ans=0.2
+2024-08-31 17:54:15,350 INFO [train.py:1114] (3/4) Epoch 20, batch 500, loss[loss=0.2191, simple_loss=0.2964, pruned_loss=0.05141, ctc_loss=0.09738, over 19642.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2687, pruned_loss=0.04764, ctc_loss=0.08968, over 3545247.99 frames. ], batch size: 63, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:54:50,515 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=255061.33333333334, ans=0.125
+2024-08-31 17:54:55,361 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=255061.33333333334, ans=0.125
+2024-08-31 17:55:14,677 INFO [train.py:1114] (3/4) Epoch 20, batch 550, loss[loss=0.2286, simple_loss=0.2928, pruned_loss=0.05927, ctc_loss=0.1146, over 19211.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2688, pruned_loss=0.04781, ctc_loss=0.09013, over 3606801.94 frames. ], batch size: 71, lr: 7.54e-03, grad_scale: 32.0
+2024-08-31 17:55:14,967 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=255168.0, ans=0.125
+2024-08-31 17:55:27,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=255221.33333333334, ans=0.125
+2024-08-31 17:55:35,929 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.640e+02 1.908e+02 2.178e+02 3.229e+02, threshold=3.816e+02, percent-clipped=0.0
+2024-08-31 17:55:56,205 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.47 vs. limit=15.0
+2024-08-31 17:56:03,546 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=255328.0, ans=0.0
+2024-08-31 17:56:04,844 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=255328.0, ans=0.0
+2024-08-31 17:56:07,213 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=255381.33333333334, ans=0.125
+2024-08-31 17:56:22,748 INFO [train.py:1114] (3/4) Epoch 20, batch 600, loss[loss=0.2408, simple_loss=0.2994, pruned_loss=0.06423, ctc_loss=0.1342, over 19330.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.27, pruned_loss=0.04856, ctc_loss=0.09137, over 3663846.54 frames. ], batch size: 67, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:56:23,155 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=255434.66666666666, ans=0.125
+2024-08-31 17:56:36,133 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=255488.0, ans=0.1
+2024-08-31 17:56:49,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=255541.33333333334, ans=0.125
+2024-08-31 17:57:01,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=255594.66666666666, ans=0.125
+2024-08-31 17:57:02,543 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=255594.66666666666, ans=0.125
+2024-08-31 17:57:11,936 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=255648.0, ans=0.1
+2024-08-31 17:57:15,627 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=255648.0, ans=0.125
+2024-08-31 17:57:22,395 INFO [train.py:1114] (3/4) Epoch 20, batch 650, loss[loss=0.1994, simple_loss=0.2744, pruned_loss=0.04576, ctc_loss=0.0824, over 19774.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2697, pruned_loss=0.04837, ctc_loss=0.09075, over 3714515.65 frames. ], batch size: 54, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:57:25,627 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.88 vs. limit=15.0
+2024-08-31 17:57:39,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=255754.66666666666, ans=0.0
+2024-08-31 17:57:44,325 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.759e+02 2.153e+02 2.838e+02 5.166e+02, threshold=4.306e+02, percent-clipped=8.0
+2024-08-31 17:57:55,268 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=255808.0, ans=0.09899494936611666
+2024-08-31 17:58:22,793 INFO [train.py:1114] (3/4) Epoch 20, batch 700, loss[loss=0.199, simple_loss=0.2641, pruned_loss=0.04807, ctc_loss=0.09417, over 19716.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2703, pruned_loss=0.04858, ctc_loss=0.09106, over 3746641.94 frames. ], batch size: 51, lr: 7.53e-03, grad_scale: 32.0
+2024-08-31 17:58:34,060 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=255968.0, ans=0.0
+2024-08-31 17:58:42,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=256021.33333333334, ans=0.125
+2024-08-31 17:58:59,679 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=256128.0, ans=0.0
+2024-08-31 17:59:04,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=256128.0, ans=0.025
+2024-08-31 17:59:12,495 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=256181.33333333334, ans=0.125
+2024-08-31 17:59:24,898 INFO [train.py:1114] (3/4) Epoch 20, batch 750, loss[loss=0.1972, simple_loss=0.2751, pruned_loss=0.04377, ctc_loss=0.07934, over 19493.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2698, pruned_loss=0.04821, ctc_loss=0.09042, over 3772097.76 frames. ], batch size: 54, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 17:59:37,864 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=256234.66666666666, ans=0.0
+2024-08-31 17:59:54,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=256288.0, ans=0.0
+2024-08-31 17:59:58,591 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 1.642e+02 1.855e+02 2.095e+02 3.716e+02, threshold=3.709e+02, percent-clipped=0.0
+2024-08-31 17:59:58,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=256288.0, ans=0.125
+2024-08-31 18:00:13,439 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=256394.66666666666, ans=0.025
+2024-08-31 18:00:18,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=256394.66666666666, ans=0.125
+2024-08-31 18:00:23,217 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=256394.66666666666, ans=0.2
+2024-08-31 18:00:24,612 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=256448.0, ans=0.0
+2024-08-31 18:00:28,048 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=256448.0, ans=0.0
+2024-08-31 18:00:42,914 INFO [train.py:1114] (3/4) Epoch 20, batch 800, loss[loss=0.1804, simple_loss=0.2542, pruned_loss=0.03854, ctc_loss=0.07378, over 19413.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2694, pruned_loss=0.0478, ctc_loss=0.08962, over 3794382.28 frames. ], batch size: 48, lr: 7.52e-03, grad_scale: 32.0
+2024-08-31 18:00:43,098 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=256501.33333333334, ans=0.025
+2024-08-31 18:00:43,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=256501.33333333334, ans=0.0
+2024-08-31 18:00:43,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=256501.33333333334, ans=0.125
+2024-08-31 18:00:46,564 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=256501.33333333334, ans=0.0
+2024-08-31 18:01:02,597 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=256554.66666666666, ans=0.125
+2024-08-31 18:01:14,882 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.87 vs. limit=22.5
+2024-08-31 18:01:14,949 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.18 vs. limit=22.5
+2024-08-31 18:01:42,134 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=256768.0, ans=0.2
+2024-08-31 18:01:43,047 INFO [train.py:1114] (3/4) Epoch 20, batch 850, loss[loss=0.2142, simple_loss=0.2812, pruned_loss=0.05276, ctc_loss=0.1043, over 19650.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2691, pruned_loss=0.04798, ctc_loss=0.0904, over 3814968.13 frames. ], batch size: 59, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:01:50,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=256768.0, ans=0.125
+2024-08-31 18:02:00,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=256821.33333333334, ans=15.0
+2024-08-31 18:02:05,173 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.672e+02 2.009e+02 2.661e+02 4.692e+02, threshold=4.019e+02, percent-clipped=5.0
+2024-08-31 18:02:21,133 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.35 vs. limit=15.0
+2024-08-31 18:02:42,833 INFO [train.py:1114] (3/4) Epoch 20, batch 900, loss[loss=0.1972, simple_loss=0.2577, pruned_loss=0.04924, ctc_loss=0.09548, over 19400.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2701, pruned_loss=0.04882, ctc_loss=0.09193, over 3818685.56 frames. ], batch size: 48, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:02:44,279 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=257034.66666666666, ans=0.09899494936611666
+2024-08-31 18:02:48,901 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=257034.66666666666, ans=0.025
+2024-08-31 18:03:12,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=257141.33333333334, ans=0.0
+2024-08-31 18:03:13,413 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=257141.33333333334, ans=0.125
+2024-08-31 18:03:13,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=257141.33333333334, ans=0.125
+2024-08-31 18:03:44,086 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.69 vs. limit=10.0
+2024-08-31 18:03:50,701 INFO [train.py:1114] (3/4) Epoch 20, batch 950, loss[loss=0.2256, simple_loss=0.2769, pruned_loss=0.06302, ctc_loss=0.1207, over 19498.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2707, pruned_loss=0.04897, ctc_loss=0.09251, over 3819873.11 frames. ], batch size: 49, lr: 7.51e-03, grad_scale: 32.0
+2024-08-31 18:03:53,582 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.53 vs. limit=6.0
+2024-08-31 18:04:12,191 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.674e+02 1.914e+02 2.385e+02 5.476e+02, threshold=3.829e+02, percent-clipped=1.0
+2024-08-31 18:04:12,484 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=257354.66666666666, ans=0.025
+2024-08-31 18:04:51,464 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=257408.0, ans=0.0
+2024-08-31 18:05:02,241 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=257461.33333333334, ans=0.125
+2024-08-31 18:05:08,616 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=257461.33333333334, ans=0.09899494936611666
+2024-08-31 18:05:13,600 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=257514.66666666666, ans=0.2
+2024-08-31 18:05:19,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=257514.66666666666, ans=0.0
+2024-08-31 18:05:20,838 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=257514.66666666666, ans=0.0
+2024-08-31 18:05:25,097 INFO [train.py:1114] (3/4) Epoch 20, batch 1000, loss[loss=0.1948, simple_loss=0.2574, pruned_loss=0.04837, ctc_loss=0.08875, over 19858.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2713, pruned_loss=0.04916, ctc_loss=0.09292, over 3816153.61 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:05:29,121 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=257568.0, ans=0.125
+2024-08-31 18:05:35,473 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=257568.0, ans=0.2
+2024-08-31 18:10:14,808 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.50 vs. limit=15.0
+2024-08-31 18:10:30,567 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=4.66 vs. limit=15.0
+2024-08-31 18:12:15,977 INFO [train.py:1114] (3/4) Epoch 20, batch 1050, loss[loss=0.1993, simple_loss=0.2768, pruned_loss=0.04446, ctc_loss=0.08224, over 19836.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2703, pruned_loss=0.04882, ctc_loss=0.09232, over 3821136.31 frames. ], batch size: 57, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:12:37,418 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.683e+02 1.941e+02 2.234e+02 3.103e+02, threshold=3.882e+02, percent-clipped=0.0
+2024-08-31 18:12:47,780 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=257941.33333333334, ans=0.035
+2024-08-31 18:12:49,413 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.40 vs. limit=22.5
+2024-08-31 18:13:14,129 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=258048.0, ans=0.125
+2024-08-31 18:13:23,121 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=258101.33333333334, ans=0.2
+2024-08-31 18:13:25,854 INFO [train.py:1114] (3/4) Epoch 20, batch 1100, loss[loss=0.2068, simple_loss=0.2775, pruned_loss=0.04947, ctc_loss=0.09292, over 19579.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2705, pruned_loss=0.04877, ctc_loss=0.09227, over 3828453.10 frames. ], batch size: 52, lr: 7.50e-03, grad_scale: 32.0
+2024-08-31 18:13:28,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=258101.33333333334, ans=0.025
+2024-08-31 18:13:37,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=258154.66666666666, ans=10.0
+2024-08-31 18:14:05,904 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=9.38 vs. limit=15.0
+2024-08-31 18:14:15,126 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=9.62 vs. limit=12.0
+2024-08-31 18:14:25,220 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=258368.0, ans=0.125
+2024-08-31 18:14:26,124 INFO [train.py:1114] (3/4) Epoch 20, batch 1150, loss[loss=0.1897, simple_loss=0.2673, pruned_loss=0.04021, ctc_loss=0.07939, over 19584.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2707, pruned_loss=0.04897, ctc_loss=0.09261, over 3827007.66 frames. ], batch size: 52, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:15:11,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=258421.33333333334, ans=0.1
+2024-08-31 18:15:12,227 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.349e+02 1.657e+02 1.937e+02 2.398e+02 3.976e+02, threshold=3.875e+02, percent-clipped=1.0
+2024-08-31 18:15:27,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=258528.0, ans=0.125
+2024-08-31 18:15:36,366 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.18 vs. limit=10.0
+2024-08-31 18:15:42,876 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.09 vs. limit=10.0
+2024-08-31 18:15:51,946 INFO [train.py:1114] (3/4) Epoch 20, batch 1200, loss[loss=0.2169, simple_loss=0.2849, pruned_loss=0.05328, ctc_loss=0.106, over 19843.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2708, pruned_loss=0.04849, ctc_loss=0.09181, over 3823359.18 frames. ], batch size: 57, lr: 7.49e-03, grad_scale: 32.0
+2024-08-31 18:16:02,845 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=258688.0, ans=0.125
+2024-08-31 18:16:16,152 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.87 vs. limit=15.0
+2024-08-31 18:16:27,757 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=258794.66666666666, ans=0.0
+2024-08-31 18:16:31,612 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.89 vs. limit=15.0
+2024-08-31 18:16:36,408 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.74 vs. limit=15.0
+2024-08-31 18:16:54,771 INFO [train.py:1114] (3/4) Epoch 20, batch 1250, loss[loss=0.249, simple_loss=0.3005, pruned_loss=0.07195, ctc_loss=0.1339, over 19512.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2714, pruned_loss=0.04883, ctc_loss=0.09218, over 3841902.61 frames. ], batch size: 61, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:17:10,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=258954.66666666666, ans=0.025
+2024-08-31 18:17:17,869 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=258954.66666666666, ans=0.0
+2024-08-31 18:17:20,836 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.673e+02 1.864e+02 2.243e+02 4.460e+02, threshold=3.727e+02, percent-clipped=1.0
+2024-08-31 18:17:55,118 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=259061.33333333334, ans=0.125
+2024-08-31 18:18:01,106 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=259114.66666666666, ans=0.125
+2024-08-31 18:18:06,102 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.47 vs. limit=22.5
+2024-08-31 18:19:05,809 INFO [train.py:1114] (3/4) Epoch 20, batch 1300, loss[loss=0.2442, simple_loss=0.3034, pruned_loss=0.06667, ctc_loss=0.1291, over 18920.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2706, pruned_loss=0.0485, ctc_loss=0.09132, over 3845313.70 frames. ], batch size: 76, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:19:13,472 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=259168.0, ans=0.2
+2024-08-31 18:19:20,362 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.28 vs. limit=15.0
+2024-08-31 18:19:38,315 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259274.66666666666, ans=0.1
+2024-08-31 18:20:04,223 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.76 vs. limit=6.0
+2024-08-31 18:20:12,175 INFO [train.py:1114] (3/4) Epoch 20, batch 1350, loss[loss=0.1966, simple_loss=0.2636, pruned_loss=0.04745, ctc_loss=0.08659, over 19766.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2705, pruned_loss=0.04838, ctc_loss=0.09094, over 3856715.66 frames. ], batch size: 54, lr: 7.48e-03, grad_scale: 32.0
+2024-08-31 18:20:15,674 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=259434.66666666666, ans=0.0
+2024-08-31 18:20:38,785 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 1.677e+02 1.917e+02 2.382e+02 4.193e+02, threshold=3.834e+02, percent-clipped=5.0
+2024-08-31 18:20:47,554 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=259541.33333333334, ans=0.125
+2024-08-31 18:20:52,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=259594.66666666666, ans=0.0
+2024-08-31 18:20:56,413 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=259594.66666666666, ans=0.0
+2024-08-31 18:20:58,982 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=259594.66666666666, ans=0.0
+2024-08-31 18:21:14,720 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=259648.0, ans=0.125
+2024-08-31 18:21:14,894 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=259648.0, ans=0.125
+2024-08-31 18:21:16,783 INFO [train.py:1114] (3/4) Epoch 20, batch 1400, loss[loss=0.1636, simple_loss=0.2311, pruned_loss=0.03548, ctc_loss=0.06281, over 19657.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2703, pruned_loss=0.04826, ctc_loss=0.09081, over 3863644.08 frames. ], batch size: 46, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:21:34,492 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=259754.66666666666, ans=0.0
+2024-08-31 18:21:57,856 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=259861.33333333334, ans=0.125
+2024-08-31 18:21:57,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=259861.33333333334, ans=0.125
+2024-08-31 18:22:29,031 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=259914.66666666666, ans=0.0
+2024-08-31 18:22:29,147 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259914.66666666666, ans=0.1
+2024-08-31 18:22:30,277 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=259914.66666666666, ans=0.1
+2024-08-31 18:22:33,638 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=259914.66666666666, ans=0.1
+2024-08-31 18:22:53,590 INFO [train.py:1114] (3/4) Epoch 20, batch 1450, loss[loss=0.2188, simple_loss=0.2931, pruned_loss=0.05211, ctc_loss=0.1006, over 19646.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2708, pruned_loss=0.04829, ctc_loss=0.0908, over 3862602.45 frames. ], batch size: 63, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:23:01,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=259968.0, ans=0.025
+2024-08-31 18:23:15,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=260021.33333333334, ans=0.125
+2024-08-31 18:23:17,226 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.776e+02 2.029e+02 2.458e+02 5.712e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-31 18:23:21,208 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=260074.66666666666, ans=0.125
+2024-08-31 18:23:21,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=260074.66666666666, ans=0.125
+2024-08-31 18:23:25,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=260074.66666666666, ans=0.125
+2024-08-31 18:23:28,398 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=260074.66666666666, ans=0.125
+2024-08-31 18:23:33,018 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=260128.0, ans=0.1
+2024-08-31 18:23:35,818 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=6.21 vs. limit=15.0
+2024-08-31 18:23:39,942 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=260128.0, ans=0.2
+2024-08-31 18:23:41,631 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.85 vs. limit=15.0
+2024-08-31 18:23:54,008 INFO [train.py:1114] (3/4) Epoch 20, batch 1500, loss[loss=0.2228, simple_loss=0.2911, pruned_loss=0.05604, ctc_loss=0.1058, over 19585.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2709, pruned_loss=0.04799, ctc_loss=0.09035, over 3862617.62 frames. ], batch size: 57, lr: 7.47e-03, grad_scale: 32.0
+2024-08-31 18:23:54,440 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=260234.66666666666, ans=10.0
+2024-08-31 18:23:57,008 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=260234.66666666666, ans=0.025
+2024-08-31 18:24:19,229 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=260341.33333333334, ans=0.1
+2024-08-31 18:24:20,447 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=260341.33333333334, ans=0.1
+2024-08-31 18:24:26,707 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:24:27,866 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=260341.33333333334, ans=0.0
+2024-08-31 18:24:29,290 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=260394.66666666666, ans=0.0
+2024-08-31 18:24:35,264 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=260394.66666666666, ans=0.025
+2024-08-31 18:24:37,914 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.88 vs. limit=6.0
+2024-08-31 18:25:34,752 INFO [train.py:1114] (3/4) Epoch 20, batch 1550, loss[loss=0.2069, simple_loss=0.281, pruned_loss=0.04877, ctc_loss=0.08821, over 19635.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2711, pruned_loss=0.0483, ctc_loss=0.09099, over 3846805.53 frames. ], batch size: 60, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:26:28,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=260554.66666666666, ans=0.0
+2024-08-31 18:26:33,197 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 1.748e+02 2.049e+02 2.466e+02 3.855e+02, threshold=4.097e+02, percent-clipped=0.0
+2024-08-31 18:26:39,385 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=260608.0, ans=0.125
+2024-08-31 18:27:18,518 INFO [train.py:1114] (3/4) Epoch 20, batch 1600, loss[loss=0.2032, simple_loss=0.2781, pruned_loss=0.04688, ctc_loss=0.08615, over 19854.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2712, pruned_loss=0.04852, ctc_loss=0.0913, over 3836004.15 frames. ], batch size: 57, lr: 7.46e-03, grad_scale: 32.0
+2024-08-31 18:27:32,587 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.73 vs. limit=22.5
+2024-08-31 18:27:48,503 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=12.0
+2024-08-31 18:27:53,731 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=260928.0, ans=0.125
+2024-08-31 18:28:03,884 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.09 vs. limit=15.0
+2024-08-31 18:28:30,428 INFO [train.py:1114] (3/4) Epoch 20, batch 1650, loss[loss=0.1991, simple_loss=0.2779, pruned_loss=0.04328, ctc_loss=0.08423, over 19670.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2707, pruned_loss=0.04853, ctc_loss=0.09151, over 3832121.53 frames. ], batch size: 59, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:28:34,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=261034.66666666666, ans=0.125
+2024-08-31 18:28:35,521 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=261034.66666666666, ans=0.0
+2024-08-31 18:28:53,153 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 1.719e+02 2.026e+02 2.553e+02 4.958e+02, threshold=4.052e+02, percent-clipped=3.0
+2024-08-31 18:29:25,617 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.04 vs. limit=15.0
+2024-08-31 18:29:28,861 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.91 vs. limit=6.0
+2024-08-31 18:29:29,542 INFO [train.py:1114] (3/4) Epoch 20, batch 1700, loss[loss=0.1714, simple_loss=0.2353, pruned_loss=0.03886, ctc_loss=0.07429, over 19682.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2704, pruned_loss=0.04819, ctc_loss=0.09096, over 3846224.64 frames. ], batch size: 46, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:29:30,306 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.22 vs. limit=15.0
+2024-08-31 18:29:36,995 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=261301.33333333334, ans=0.2
+2024-08-31 18:29:38,160 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=261301.33333333334, ans=0.0
+2024-08-31 18:29:45,496 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=261354.66666666666, ans=0.0
+2024-08-31 18:29:52,114 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=261354.66666666666, ans=0.125
+2024-08-31 18:30:08,789 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=261461.33333333334, ans=0.2
+2024-08-31 18:30:17,693 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=261514.66666666666, ans=0.125
+2024-08-31 18:31:14,498 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=261514.66666666666, ans=0.05
+2024-08-31 18:31:17,101 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=261568.0, ans=0.125
+2024-08-31 18:31:18,020 INFO [train.py:1114] (3/4) Epoch 20, batch 1750, loss[loss=0.1914, simple_loss=0.2524, pruned_loss=0.04752, ctc_loss=0.08852, over 19644.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2703, pruned_loss=0.04815, ctc_loss=0.09108, over 3851220.45 frames. ], batch size: 45, lr: 7.45e-03, grad_scale: 32.0
+2024-08-31 18:31:39,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=261621.33333333334, ans=0.07
+2024-08-31 18:31:39,630 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.98 vs. limit=15.0
+2024-08-31 18:31:39,759 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.07 vs. limit=15.0
+2024-08-31 18:31:40,001 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.679e+02 1.951e+02 2.329e+02 4.159e+02, threshold=3.901e+02, percent-clipped=0.0
+2024-08-31 18:31:58,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=261728.0, ans=0.0
+2024-08-31 18:31:58,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=261728.0, ans=0.125
+2024-08-31 18:32:15,181 INFO [train.py:1114] (3/4) Epoch 20, batch 1800, loss[loss=0.2058, simple_loss=0.2758, pruned_loss=0.04911, ctc_loss=0.09426, over 19626.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.27, pruned_loss=0.04824, ctc_loss=0.09116, over 3852909.19 frames. ], batch size: 55, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:32:24,176 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.53 vs. limit=22.5
+2024-08-31 18:33:20,837 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:33:26,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=262048.0, ans=0.125
+2024-08-31 18:33:27,490 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=262048.0, ans=0.95
+2024-08-31 18:33:28,575 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=262048.0, ans=0.125
+2024-08-31 18:33:34,492 INFO [train.py:1114] (3/4) Epoch 20, batch 1850, loss[loss=0.2048, simple_loss=0.2788, pruned_loss=0.04824, ctc_loss=0.08582, over 19584.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2697, pruned_loss=0.04817, ctc_loss=0.09081, over 3857665.79 frames. ], batch size: 57, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:33:52,719 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=262154.6666666667, ans=0.0
+2024-08-31 18:33:55,997 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.842e+02 2.206e+02 3.038e+02 4.306e+02, threshold=4.411e+02, percent-clipped=5.0
+2024-08-31 18:34:05,630 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=262208.0, ans=0.125
+2024-08-31 18:34:06,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=262208.0, ans=0.2
+2024-08-31 18:34:21,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=262261.3333333333, ans=0.125
+2024-08-31 18:34:36,231 INFO [train.py:1114] (3/4) Epoch 20, batch 1900, loss[loss=0.1958, simple_loss=0.2728, pruned_loss=0.04359, ctc_loss=0.07898, over 19647.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2701, pruned_loss=0.04837, ctc_loss=0.09108, over 3862953.43 frames. ], batch size: 59, lr: 7.44e-03, grad_scale: 32.0
+2024-08-31 18:34:37,555 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=262368.0, ans=0.5
+2024-08-31 18:34:49,141 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=262421.3333333333, ans=0.125
+2024-08-31 18:35:06,502 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=262474.6666666667, ans=0.125
+2024-08-31 18:35:11,138 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=262528.0, ans=0.125
+2024-08-31 18:35:24,652 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=262581.3333333333, ans=0.125
+2024-08-31 18:35:34,443 INFO [train.py:1114] (3/4) Epoch 20, batch 1950, loss[loss=0.1892, simple_loss=0.2562, pruned_loss=0.0439, ctc_loss=0.08589, over 19603.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2712, pruned_loss=0.04862, ctc_loss=0.09146, over 3871290.78 frames. ], batch size: 52, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:35:38,086 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=262634.6666666667, ans=0.125
+2024-08-31 18:35:48,143 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.29 vs. limit=15.0
+2024-08-31 18:35:55,629 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 1.650e+02 1.780e+02 2.101e+02 3.496e+02, threshold=3.560e+02, percent-clipped=0.0
+2024-08-31 18:36:16,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=262794.6666666667, ans=0.0
+2024-08-31 18:36:31,282 INFO [train.py:1114] (3/4) Epoch 20, batch 2000, loss[loss=0.1692, simple_loss=0.2366, pruned_loss=0.03722, ctc_loss=0.06816, over 19675.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2719, pruned_loss=0.04905, ctc_loss=0.09217, over 3854870.91 frames. ], batch size: 45, lr: 7.43e-03, grad_scale: 32.0
+2024-08-31 18:36:33,858 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=262901.3333333333, ans=0.04949747468305833
+2024-08-31 18:36:47,803 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=262954.6666666667, ans=0.1
+2024-08-31 18:37:21,822 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=263114.6666666667, ans=0.125
+2024-08-31 18:37:32,654 INFO [train.py:1114] (3/4) Epoch 20, batch 2050, loss[loss=0.1789, simple_loss=0.2446, pruned_loss=0.04172, ctc_loss=0.07438, over 19699.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2706, pruned_loss=0.04883, ctc_loss=0.09176, over 3850390.55 frames. ], batch size: 47, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:37:34,923 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=263168.0, ans=0.025
+2024-08-31 18:37:47,116 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=263168.0, ans=10.0
+2024-08-31 18:38:01,698 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.44 vs. limit=6.0
+2024-08-31 18:38:02,078 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.724e+02 2.041e+02 2.585e+02 3.821e+02, threshold=4.082e+02, percent-clipped=5.0
+2024-08-31 18:38:28,027 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=263381.3333333333, ans=0.0
+2024-08-31 18:38:28,978 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=263381.3333333333, ans=0.125
+2024-08-31 18:38:36,463 INFO [train.py:1114] (3/4) Epoch 20, batch 2100, loss[loss=0.1916, simple_loss=0.2629, pruned_loss=0.04316, ctc_loss=0.08522, over 19774.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2699, pruned_loss=0.04823, ctc_loss=0.09048, over 3858025.09 frames. ], batch size: 54, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:38:36,741 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=263434.6666666667, ans=0.0
+2024-08-31 18:38:42,298 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=263434.6666666667, ans=0.1
+2024-08-31 18:38:44,666 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=263434.6666666667, ans=0.0
+2024-08-31 18:39:07,273 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=263541.3333333333, ans=0.5
+2024-08-31 18:39:28,991 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.47 vs. limit=12.0
+2024-08-31 18:39:32,892 INFO [train.py:1114] (3/4) Epoch 20, batch 2150, loss[loss=0.1921, simple_loss=0.2628, pruned_loss=0.04463, ctc_loss=0.08035, over 19861.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2692, pruned_loss=0.0481, ctc_loss=0.09034, over 3870347.82 frames. ], batch size: 52, lr: 7.42e-03, grad_scale: 32.0
+2024-08-31 18:39:57,643 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=263754.6666666667, ans=0.05
+2024-08-31 18:39:58,529 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.627e+02 1.896e+02 2.393e+02 5.058e+02, threshold=3.792e+02, percent-clipped=5.0
+2024-08-31 18:40:10,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=263861.3333333333, ans=0.1
+2024-08-31 18:40:11,051 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=263861.3333333333, ans=0.0
+2024-08-31 18:41:09,868 INFO [train.py:1114] (3/4) Epoch 20, batch 2200, loss[loss=0.2244, simple_loss=0.2962, pruned_loss=0.05599, ctc_loss=0.1016, over 19581.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2689, pruned_loss=0.04783, ctc_loss=0.08987, over 3868929.77 frames. ], batch size: 57, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:41:19,124 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.57 vs. limit=15.0
+2024-08-31 18:41:21,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.28 vs. limit=6.0
+2024-08-31 18:41:22,682 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=7.00 vs. limit=15.0
+2024-08-31 18:41:24,479 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=264021.3333333333, ans=0.125
+2024-08-31 18:41:25,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=264021.3333333333, ans=0.125
+2024-08-31 18:41:42,440 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.37 vs. limit=15.0
+2024-08-31 18:41:49,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=264074.6666666667, ans=22.5
+2024-08-31 18:42:17,186 INFO [train.py:1114] (3/4) Epoch 20, batch 2250, loss[loss=0.1798, simple_loss=0.2571, pruned_loss=0.0364, ctc_loss=0.07441, over 19609.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2697, pruned_loss=0.04832, ctc_loss=0.09081, over 3867828.09 frames. ], batch size: 55, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:42:39,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=264288.0, ans=0.2
+2024-08-31 18:42:41,062 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:42:42,063 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.705e+02 2.149e+02 2.747e+02 5.291e+02, threshold=4.298e+02, percent-clipped=7.0
+2024-08-31 18:42:56,771 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=264394.6666666667, ans=0.1
+2024-08-31 18:42:58,011 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=264394.6666666667, ans=0.125
+2024-08-31 18:43:00,433 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=264394.6666666667, ans=0.1
+2024-08-31 18:43:16,656 INFO [train.py:1114] (3/4) Epoch 20, batch 2300, loss[loss=0.1902, simple_loss=0.2546, pruned_loss=0.04724, ctc_loss=0.07806, over 19514.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2691, pruned_loss=0.04848, ctc_loss=0.0912, over 3862372.82 frames. ], batch size: 49, lr: 7.41e-03, grad_scale: 32.0
+2024-08-31 18:43:25,207 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=264501.3333333333, ans=0.0
+2024-08-31 18:43:27,525 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=264554.6666666667, ans=0.125
+2024-08-31 18:44:04,757 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.94 vs. limit=22.5
+2024-08-31 18:44:12,819 INFO [train.py:1114] (3/4) Epoch 20, batch 2350, loss[loss=0.2098, simple_loss=0.2824, pruned_loss=0.05107, ctc_loss=0.08755, over 19659.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2687, pruned_loss=0.04814, ctc_loss=0.09044, over 3865153.71 frames. ], batch size: 63, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:44:46,311 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=264821.3333333333, ans=0.0
+2024-08-31 18:44:47,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=264821.3333333333, ans=0.125
+2024-08-31 18:44:49,420 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.669e+02 1.905e+02 2.325e+02 3.822e+02, threshold=3.811e+02, percent-clipped=0.0
+2024-08-31 18:44:55,077 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=264874.6666666667, ans=0.1
+2024-08-31 18:45:05,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=264928.0, ans=0.025
+2024-08-31 18:45:13,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=264928.0, ans=0.125
+2024-08-31 18:45:16,792 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=264981.3333333333, ans=0.0
+2024-08-31 18:45:25,809 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=265034.6666666667, ans=0.125
+2024-08-31 18:45:26,868 INFO [train.py:1114] (3/4) Epoch 20, batch 2400, loss[loss=0.1963, simple_loss=0.2751, pruned_loss=0.04241, ctc_loss=0.0818, over 19338.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2707, pruned_loss=0.04858, ctc_loss=0.09108, over 3860083.88 frames. ], batch size: 71, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:45:45,225 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=265088.0, ans=0.125
+2024-08-31 18:45:46,301 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=265088.0, ans=0.0
+2024-08-31 18:45:47,306 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=265088.0, ans=0.125
+2024-08-31 18:45:54,055 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=265141.3333333333, ans=0.125
+2024-08-31 18:46:05,603 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=265194.6666666667, ans=0.125
+2024-08-31 18:46:21,125 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.35 vs. limit=15.0
+2024-08-31 18:46:23,157 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-31 18:46:23,881 INFO [train.py:1114] (3/4) Epoch 20, batch 2450, loss[loss=0.2392, simple_loss=0.286, pruned_loss=0.06858, ctc_loss=0.1383, over 13237.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2747, pruned_loss=0.05145, ctc_loss=0.09681, over 3732985.70 frames. ], batch size: 140, lr: 7.40e-03, grad_scale: 32.0
+2024-08-31 18:46:25,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=265301.3333333333, ans=0.125
+2024-08-31 18:46:32,380 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.14 vs. limit=15.0
+2024-08-31 18:46:44,954 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=265354.6666666667, ans=0.0
+2024-08-31 18:46:45,873 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.336e+02 1.663e+02 1.874e+02 2.086e+02 3.013e+02, threshold=3.749e+02, percent-clipped=0.0
+2024-08-31 18:46:55,236 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=265408.0, ans=0.0
+2024-08-31 18:47:07,497 INFO [train.py:1387] (3/4) Done!
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724577812.cdr2654.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724577812.cdr2654.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..9848dd70f4bb724c2ba22a28e7a93d7925dadbef
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724577812.cdr2654.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61365b5c7358e64414972303d682b805806f18146b07e25a3e8a95c0934f424e
+size 417133
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724923212.cdr2655.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724923212.cdr2655.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..769bd1330db2480e4baaf5884da05eecaa5be86a
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724923212.cdr2655.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29e2313dc5ee6ffce015e5b0da45e5903a4c879641a95b6aace79fd890873715
+size 980
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724952574.cdr2558.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724952574.cdr2558.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..ec3798e82b9087d5269b1c59712caa4f1c84b5ca
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724952574.cdr2558.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6a912ba9d25e5e3cbbcb911907c87324439c2494a113b49c26c774b718ce81a
+size 980
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724962117.cdr2549.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724962117.cdr2549.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..3a50d932560dc9013122e6d741d896f167bd5551
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1724962117.cdr2549.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:766081f198edf21416a04cc0f43b2d1af6937368b383556a113aa33f918f5972
+size 100694
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725047086.cdr2651.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725047086.cdr2651.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..18eed61ad5e9655df186afa213efa2065b813fa7
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725047086.cdr2651.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea2bce5b0c4d605a2d724515cc55837e6ed3064d4361cace12cabc3af6b90daa
+size 47396
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725073612.cdr2647.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725073612.cdr2647.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..dd6cf1f6f16b476bc382b38309417cbe1c0c1b86
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725073612.cdr2647.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47d6be3336e0b5cb7abdbf85fc8114886e8bb8a987a6a1262456fdc31a96fa07
+size 980
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725078928.cdr2539.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725078928.cdr2539.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..e2a30c911bd6ce8247dcf1ce5ae977ac8ab56fde
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725078928.cdr2539.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a4fdcef438b1f175c7fed56eec6ebe788bacc1ab9e3022dd2e7c6abed7d327f
+size 1579
diff --git a/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725135301.cdr2535.int.cedar.computecanada.ca.70.0 b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725135301.cdr2535.int.cedar.computecanada.ca.70.0
new file mode 100644
index 0000000000000000000000000000000000000000..38bbe352f73a7bd343b85bee6e82ee9bb21949af
--- /dev/null
+++ b/zipformer/pretrained/ctc/causal/exp/tensorboard/events.out.tfevents.1725135301.cdr2535.int.cedar.computecanada.ca.70.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a91a068436231020e1a2a58c7069f34d7265400a62e8da451d8f9bd7cee8cac
+size 90817
diff --git a/zipformer/pretrained/ctc/non_causal/exp/best-train-loss.pt b/zipformer/pretrained/ctc/non_causal/exp/best-train-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7e8a42d8bfab6d5f006cfe72255f5a2c4176ef0
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/best-train-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39613c48594e073e86c1f1a4f05015119693bd7ba9f29f168fb8637f28137e91
+size 1053872782
diff --git a/zipformer/pretrained/ctc/non_causal/exp/best-valid-loss.pt b/zipformer/pretrained/ctc/non_causal/exp/best-valid-loss.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7e8a42d8bfab6d5f006cfe72255f5a2c4176ef0
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/best-valid-loss.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39613c48594e073e86c1f1a4f05015119693bd7ba9f29f168fb8637f28137e91
+size 1053872782
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-12000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-12000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1812bcdd348c2c3f24ef52984b63d2ca72667ac7
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-12000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a03f71fa4973649c9ce4e0adc9e881497f158f03b9956e32a15d663f17b6c97
+size 1053888534
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-16000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-16000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..02a875e7b571c01da57a1c8882b7a47106ef6168
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-16000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:452c1c9add2d8e35fa96f03e76c917c914855b4399a32dd92f4108f8904d04bf
+size 1053888598
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-20000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-20000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5e39e483bfd1245939114a5a93a17b142fe05db4
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-20000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6fa76ed9c42df5965f0a58012ffbefb2446c580d4faa636ff048cf2be14ec92
+size 1053888726
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-24000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-24000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f00fcd4b1a6a45422af3513698468944adde6b02
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-24000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:377b58a1738a23e97d93f8e9d4f4dafb1130263ef115ff229a46691ff613d121
+size 1053888790
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-28000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-28000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b5d33d1c925be52c6983c10c4e0ea612dd3e1db5
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-28000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef1792231251c44b52b1e96a0f6610bd95b6a5eb6ffb0a56b4b984cc200dc1cd
+size 1053888854
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-32000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-32000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9543dec1824651baa08ec3deef386589cb000824
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-32000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53c54341953015d6e084043ff3c2e80e2346f452fb5bb28c3bfabed04a435f09
+size 1053888918
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-36000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-36000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5949aa50c41de1071555611e5744c9134968caaa
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-36000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a42412fc7a42729ae29e5b956802bdc691e87f49efb6a504159ee58c6378495
+size 1053889046
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-4000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-4000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d3ec0c3935b3430baa63a5ba212b40584e8586bf
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-4000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0348de91c692178d305149ea54b8afb5eeadf64a2dc2a666a3a6150e14e696b8
+size 1053886533
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-40000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-40000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..086ddd6f1535a0edb78c3bc71271135c34cd29a6
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-40000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dcc6ed2ec68be840a092307ebe8c928fdbbf5307bf2cb0c6ae194d94f3a5b3a
+size 1053889110
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-44000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-44000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5e48ee882aa31fd3d78395410920f28571d70359
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-44000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92785d8f33bdc5ca4d83cdc0d4df2f6e209f64c32ae3ca5c04d22f810dbcec5e
+size 1053889174
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-48000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-48000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1253f150130d03b2162b762cb8e3f8532bcb80de
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-48000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:439531d69ca244e649341af26c1dad99341439b1420936105bfeb79de052fb2b
+size 1053888534
diff --git a/zipformer/pretrained/ctc/non_causal/exp/checkpoint-8000.pt b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-8000.pt
new file mode 100644
index 0000000000000000000000000000000000000000..aaf261fd57c6ffd3c157a268d06257b5864ad881
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/checkpoint-8000.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b82a36e8171480c5d1493864382843feba7803477fc8cb952d52a5c7293f2a2d
+size 1053886533
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-1.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-1.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bce37f495126e68f529f710faa288f0ba071716b
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-1.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f5e43c3d0bc9ec0b6f4e9b5cbd80e4ddf5ee11c9530a03489a68313c88c4685
+size 1053870333
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-10.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-10.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b3ea7fd07be88f8ad3d02c05681518c49ebe5f47
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-10.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b9d535a0817a35ab6adf221c8f86b64fe4c363d39ba0a8d2fc7d0281d859342
+size 1053873038
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-11.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-11.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3fe400c25ef60897b7cec6b549fd8048f9b8d328
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-11.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7a8d4f5bf64077e9104310d0748e81caf194bb5d42d8776163fca49b70ac308
+size 1053873038
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-12.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-12.pt
new file mode 100644
index 0000000000000000000000000000000000000000..13922058bf2c0d289ee9c46c26cf37b82b42dcd5
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-12.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18b111699c4e98018557a70a1ad479f6c99558a7684a37a243471e5e0da05886
+size 1053873102
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-13.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-13.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9a2dbc30feb2a0c3646f9f0d9ce2ee308c8f88af
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-13.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c74f2dd4a22e061fec4448b74bcbd107405bed86a55377f314b5b695d940b2d
+size 1053873166
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-14.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-14.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e1c1632dec3c6a73f9d030779645ecf9009740d8
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-14.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a06c97df3fe7a1448ce02471b8e86cdea20642ba2723faa2f96ed8dd9fdcb6f
+size 1053873230
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-15.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-15.pt
new file mode 100644
index 0000000000000000000000000000000000000000..44d1b454dabdb40f8b7a80d31e125c83bb80e4d4
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-15.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d63ebb0ebc7307699a8657c1dffa0e35e9b914571b1c10fc7cd2aa953bc5abb5
+size 1053873230
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-16.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-16.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2cb03e26848d2640c658c9273d1a57a5d9f04a8e
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-16.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:171ce9f91b7160d56723bb1c6943270006d4300b0e4664cffa82f9635b698b02
+size 1053873294
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-17.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-17.pt
new file mode 100644
index 0000000000000000000000000000000000000000..302f67754c534033caa297b50539e15317215923
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-17.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eee22e74fb1973a95bf92a6731af315ab330eec794700e350e262409897aed46
+size 1053873358
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-18.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-18.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9f1767cd0a28e1bbc67fe55689188b99c9e63380
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-18.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e218ba9aaa427938661fa0887b9be04940ca605fa759e2daa733b339ba87c8e
+size 1053873422
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-19.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-19.pt
new file mode 100644
index 0000000000000000000000000000000000000000..90c702ef625932cfe0fb0a988b55d431df1bde6b
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-19.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dc77c019568361746d60eb1a507e8999ea825a15407e628f5403e6c070cf63a
+size 1053872718
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-2.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-2.pt
new file mode 100644
index 0000000000000000000000000000000000000000..36cf7fcdebc45307c2de7e6dc5c8634651488e66
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-2.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c4e9bcb42450dfab31a1ff1848b38370921a736ae3340791642c4fbdec9858e
+size 1053870397
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-20.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-20.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7e8a42d8bfab6d5f006cfe72255f5a2c4176ef0
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-20.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39613c48594e073e86c1f1a4f05015119693bd7ba9f29f168fb8637f28137e91
+size 1053872782
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-3.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-3.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f61d521cda3edbd3c54140d2e0f034f7953ba474
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-3.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f818990a5e33be5dbef3c10608f46e0f14a217c3b9f834ce594677e437b0ed2b
+size 1053870461
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-4.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-4.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1a350366383e53515feafbd0f293c72a64d0295c
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-4.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1caa70c64a7a9fdd71224616ecb713c8672bd95bfbd618279cf12acb03bd92f0
+size 1053870397
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-5.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-5.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d3bec02c2302ea301d2d3559879a254244222a80
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-5.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3871823e9e09c729e5b344f99e464dd5adcb68a92cf028b9da2c04da73e5f2c
+size 1053870461
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-6.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-6.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ab5a67438471f93b1155f4ec5c36b77124d3d1b2
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-6.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71c4f9c0c9ee3ea053f9d98b64ddcc7b222a54c64f7220a1e38423dd5c7ecf8d
+size 1053870525
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-7.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-7.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2135269678933733b619d9f98bc824992dc8ddff
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-7.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93285a7734e563ed969d0720e16267075585483e4af82d159ceffd40b3d4fa2d
+size 1053870525
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-8.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-8.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3f77e6881a24c380f1c572967120e7ed6ecb14b7
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-8.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6789aa0156a77182f0c3d20166e199f16fa65097d40d76a86992dcaba888a8f
+size 1053870589
diff --git a/zipformer/pretrained/ctc/non_causal/exp/epoch-9.pt b/zipformer/pretrained/ctc/non_causal/exp/epoch-9.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bad7362160055bad51e8fbc7151b496b64a1df02
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/epoch-9.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8beaae1488d5394a20e300ed0cdc4cc4f549f047e1f664ef19bf6cbda4769969
+size 1053870653
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-0 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-0
new file mode 100644
index 0000000000000000000000000000000000000000..c7ee70b45380029850c414923931c55ecd2bfc27
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-0
@@ -0,0 +1,1144 @@
+2024-08-25 03:46:09,034 INFO [train.py:1182] (0/4) Training started
+2024-08-25 03:46:09,039 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-25 03:46:09,373 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-25 03:46:09,373 INFO [train.py:1212] (0/4) About to create model
+2024-08-25 03:46:10,481 INFO [train.py:1216] (0/4) Number of model parameters: 65805511
+2024-08-25 03:46:11,267 INFO [train.py:1231] (0/4) Using DDP
+2024-08-25 03:46:14,820 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-25 03:46:14,899 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-25 03:46:14,899 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-25 03:46:16,485 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-25 03:46:16,488 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-25 03:46:16,584 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-25 03:46:16,613 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-25 03:46:16,931 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-25 03:46:16,931 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 03:50:49,730 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=49.69 vs. limit=7.5
+2024-08-25 03:50:50,512 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 11612MB
+2024-08-25 03:50:51,645 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 11612MB
+2024-08-25 03:51:20,163 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 11612MB
+2024-08-25 03:51:21,409 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 11612MB
+2024-08-25 03:51:43,054 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 11612MB
+2024-08-25 03:51:44,345 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 11612MB
+2024-08-25 03:53:11,522 INFO [train.py:1114] (0/4) Epoch 1, batch 0, loss[loss=8.717, simple_loss=7.066, pruned_loss=6.906, ctc_loss=4.795, over 19814.00 frames. ], tot_loss[loss=8.717, simple_loss=7.066, pruned_loss=6.906, ctc_loss=4.795, over 19814.00 frames. ], batch size: 49, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 03:53:11,523 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 03:53:26,569 INFO [train.py:1146] (0/4) Epoch 1, validation: loss=8.842, simple_loss=7.151, pruned_loss=6.961, ctc_loss=4.966, over 944034.00 frames.
+2024-08-25 03:53:26,570 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 11981MB
+2024-08-25 03:53:28,073 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.11 vs. limit=3.0
+2024-08-25 03:53:35,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=0.0, ans=0.5
+2024-08-25 03:53:41,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.31 vs. limit=7.5
+2024-08-25 03:53:42,410 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=0.0, ans=0.2
+2024-08-25 03:53:46,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=27.55 vs. limit=7.5
+2024-08-25 03:54:08,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=0.0, ans=0.9
+2024-08-25 03:54:36,708 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.008e+03 4.149e+03 4.360e+03 5.530e+03 5.553e+03, threshold=1.744e+04, percent-clipped=0.0
+2024-08-25 03:54:39,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=53.333333333333336, ans=0.198
+2024-08-25 03:54:55,967 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=183.33 vs. limit=5.026666666666666
+2024-08-25 03:55:46,191 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.063e+03 1.598e+03 4.141e+03 5.530e+03 6.572e+03, threshold=1.656e+04, percent-clipped=0.0
+2024-08-25 03:57:08,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=277.20 vs. limit=7.54
+2024-08-25 03:57:26,145 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=88.29 vs. limit=5.04
+2024-08-25 03:57:39,313 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=19.90 vs. limit=5.04
+2024-08-25 03:57:39,807 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=98.65 vs. limit=5.0
+2024-08-25 04:00:14,861 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.873e+02 1.048e+03 1.328e+03 4.149e+03 6.572e+03, threshold=5.310e+03, percent-clipped=0.0
+2024-08-25 04:00:18,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=213.33333333333334, ans=0.49
+2024-08-25 04:00:27,894 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.47 vs. limit=7.58
+2024-08-25 04:00:39,849 INFO [train.py:1114] (0/4) Epoch 1, batch 50, loss[loss=1.546, simple_loss=1.018, pruned_loss=1.171, ctc_loss=1.987, over 19697.00 frames. ], tot_loss[loss=3.752, simple_loss=2.911, pruned_loss=2.556, ctc_loss=2.894, over 845725.26 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 04:00:51,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=266.6666666666667, ans=0.8906666666666667
+2024-08-25 04:01:03,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=320.0, ans=0.098
+2024-08-25 04:01:14,399 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=153.91 vs. limit=5.16
+2024-08-25 04:01:20,935 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=12.61 vs. limit=4.128
+2024-08-25 04:01:25,592 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=9.68 vs. limit=5.08
+2024-08-25 04:01:50,686 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=22.67 vs. limit=5.1866666666666665
+2024-08-25 04:01:50,755 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=311.42 vs. limit=5.1866666666666665
+2024-08-25 04:02:04,478 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=10.79 vs. limit=4.1706666666666665
+2024-08-25 04:02:05,349 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=20.71 vs. limit=7.66
+2024-08-25 04:02:06,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=426.6666666666667, ans=0.8850666666666667
+2024-08-25 04:02:39,187 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=57.10 vs. limit=7.86
+2024-08-25 04:02:39,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=480.0, ans=0.0892
+2024-08-25 04:02:53,827 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=29.28 vs. limit=7.86
+2024-08-25 04:02:59,895 INFO [train.py:1114] (0/4) Epoch 1, batch 100, loss[loss=1.406, simple_loss=0.9854, pruned_loss=1.239, ctc_loss=1.328, over 19718.00 frames. ], tot_loss[loss=2.582, simple_loss=1.908, pruned_loss=1.862, ctc_loss=2.359, over 1499439.12 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 04:03:07,083 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.807e+02 4.974e+02 8.674e+02 1.328e+03 6.572e+03, threshold=1.735e+03, percent-clipped=0.0
+2024-08-25 04:03:17,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=533.3333333333334, ans=0.18
+2024-08-25 04:03:19,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=586.6666666666666, ans=0.5
+2024-08-25 04:03:36,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.33 vs. limit=4.256
+2024-08-25 04:03:51,963 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=20.02 vs. limit=5.346666666666667
+2024-08-25 04:03:53,759 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.80 vs. limit=3.104
+2024-08-25 04:03:57,195 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=693.3333333333334, ans=0.29306666666666664
+2024-08-25 04:04:01,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff2.min_abs, batch_count=693.3333333333334, ans=0.017333333333333336
+2024-08-25 04:04:03,521 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=12.51 vs. limit=7.76
+2024-08-25 04:04:04,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=746.6666666666666, ans=0.04766666666666667
+2024-08-25 04:04:16,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=746.6666666666666, ans=0.46499999999999997
+2024-08-25 04:04:18,335 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=383.26 vs. limit=7.78
+2024-08-25 04:04:19,552 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=365.52 vs. limit=7.78
+2024-08-25 04:04:22,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=800.0, ans=0.5
+2024-08-25 04:04:22,850 INFO [train.py:1114] (0/4) Epoch 1, batch 150, loss[loss=1.148, simple_loss=0.79, pruned_loss=1.006, ctc_loss=1.093, over 19717.00 frames. ], tot_loss[loss=2.05, simple_loss=1.492, pruned_loss=1.569, ctc_loss=1.873, over 2027737.98 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 04:04:24,835 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=39.53 vs. limit=8.1
+2024-08-25 04:04:25,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=205.18 vs. limit=7.8
+2024-08-25 04:04:27,527 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=78.34 vs. limit=5.0
+2024-08-25 04:04:33,708 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=27.84 vs. limit=8.1
+2024-08-25 04:04:34,665 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=17.06 vs. limit=7.82
+2024-08-25 04:04:34,674 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=11.01 vs. limit=5.213333333333333
+2024-08-25 04:04:40,119 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=22.79 vs. limit=7.82
+2024-08-25 04:04:58,182 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.79 vs. limit=8.18
+2024-08-25 04:05:07,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=906.6666666666666, ans=0.2136
+2024-08-25 04:05:24,337 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=50.09 vs. limit=8.22
+2024-08-25 04:05:40,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=1013.3333333333334, ans=0.4525
+2024-08-25 04:05:42,042 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=12.96 vs. limit=5.253333333333333
+2024-08-25 04:05:44,372 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=26.06 vs. limit=7.88
+2024-08-25 04:05:49,016 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=12.97 vs. limit=7.88
+2024-08-25 04:05:51,954 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=175.34 vs. limit=7.88
+2024-08-25 04:05:53,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=1066.6666666666667, ans=0.45
+2024-08-25 04:05:53,521 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=151.52 vs. limit=7.9
+2024-08-25 04:05:53,952 INFO [train.py:1114] (0/4) Epoch 1, batch 200, loss[loss=1.263, simple_loss=0.8703, pruned_loss=1.013, ctc_loss=1.218, over 18215.00 frames. ], tot_loss[loss=1.761, simple_loss=1.265, pruned_loss=1.381, ctc_loss=1.625, over 2435361.75 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 04:05:57,465 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 7.117e+01 1.191e+02 1.554e+02 2.219e+02 5.914e+02, threshold=3.108e+02, percent-clipped=0.0
+2024-08-25 04:06:01,474 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.20 vs. limit=7.9
+2024-08-25 04:06:03,912 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.83 vs. limit=4.426666666666667
+2024-08-25 04:06:04,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=1120.0, ans=0.0748
+2024-08-25 04:06:16,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=1173.3333333333333, ans=0.35333333333333333
+2024-08-25 04:06:17,810 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=26.12 vs. limit=8.38
+2024-08-25 04:06:17,852 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.93 vs. limit=7.94
+2024-08-25 04:06:21,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=9.59 vs. limit=5.293333333333333
+2024-08-25 04:06:22,628 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=12.77 vs. limit=7.94
+2024-08-25 04:06:25,967 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=11.77 vs. limit=4.469333333333333
+2024-08-25 04:06:25,990 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=7.70 vs. limit=4.469333333333333
+2024-08-25 04:06:36,587 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.68 vs. limit=8.42
+2024-08-25 04:06:52,980 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=21.73 vs. limit=8.46
+2024-08-25 04:06:57,326 INFO [train.py:1114] (0/4) Epoch 1, batch 250, loss[loss=1.249, simple_loss=0.8448, pruned_loss=0.9912, ctc_loss=1.232, over 19443.00 frames. ], tot_loss[loss=1.587, simple_loss=1.126, pruned_loss=1.254, ctc_loss=1.484, over 2755446.34 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 04:07:04,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.whiten.whitening_limit, batch_count=1333.3333333333333, ans=4.533333333333333
+2024-08-25 04:07:04,975 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=45.33 vs. limit=5.666666666666667
+2024-08-25 04:07:42,076 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=214.04 vs. limit=8.02
+2024-08-25 04:07:50,954 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.54 vs. limit=8.54
+2024-08-25 04:07:53,538 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=267.32 vs. limit=8.02
+2024-08-25 04:07:54,707 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=18.68 vs. limit=8.02
+2024-08-25 04:07:56,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=1440.0, ans=0.4325
+2024-08-25 04:08:01,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=1440.0, ans=0.8496
+2024-08-25 04:08:10,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.67 vs. limit=4.597333333333333
+2024-08-25 04:08:18,548 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=3.96 vs. limit=4.298666666666667
+2024-08-25 04:08:22,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=1546.6666666666667, ans=0.04516666666666667
+2024-08-25 04:08:23,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=1546.6666666666667, ans=0.4275
+2024-08-25 04:09:06,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1546.6666666666667, ans=0.2845333333333333
+2024-08-25 04:09:08,294 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=1546.6666666666667, ans=0.2232
+2024-08-25 04:09:11,357 INFO [train.py:1114] (0/4) Epoch 1, batch 300, loss[loss=1.225, simple_loss=0.8202, pruned_loss=0.956, ctc_loss=1.206, over 19507.00 frames. ], tot_loss[loss=1.471, simple_loss=1.031, pruned_loss=1.162, ctc_loss=1.392, over 2998983.33 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 04:09:11,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1600.0, ans=0.284
+2024-08-25 04:09:14,912 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 8.125e+01 1.367e+02 1.753e+02 2.332e+02 3.681e+02, threshold=3.505e+02, percent-clipped=6.0
+2024-08-25 04:09:15,588 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=39.01 vs. limit=8.1
+2024-08-25 04:09:20,316 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=34.34 vs. limit=8.1
+2024-08-25 04:09:34,636 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=115.66 vs. limit=8.12
+2024-08-25 04:10:19,928 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=207.46 vs. limit=8.12
+2024-08-25 04:10:26,748 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=59.49 vs. limit=8.12
+2024-08-25 04:10:30,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=1706.6666666666667, ans=0.42
+2024-08-25 04:10:46,816 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=126.08 vs. limit=8.16
+2024-08-25 04:11:02,936 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=8.293e-01
+2024-08-25 04:11:12,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=1866.6666666666667, ans=0.13
+2024-08-25 04:11:13,105 INFO [train.py:1114] (0/4) Epoch 1, batch 350, loss[loss=1.071, simple_loss=0.7124, pruned_loss=0.8183, ctc_loss=1.052, over 19768.00 frames. ], tot_loss[loss=1.393, simple_loss=0.9657, pruned_loss=1.093, ctc_loss=1.329, over 3189785.81 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 04:11:16,010 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.99 vs. limit=8.9
+2024-08-25 04:11:29,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=1920.0, ans=0.41000000000000003
+2024-08-25 04:11:32,064 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=18.24 vs. limit=8.22
+2024-08-25 04:11:39,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=36.51 vs. limit=8.24
+2024-08-25 04:11:43,822 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.10 vs. limit=4.789333333333333
+2024-08-25 04:11:44,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=18.98 vs. limit=8.98
+2024-08-25 04:11:44,969 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=30.43 vs. limit=8.24
+2024-08-25 04:11:46,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=53.49 vs. limit=8.24
+2024-08-25 04:11:46,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=2026.6666666666667, ans=0.8290666666666667
+2024-08-25 04:11:46,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 04:11:51,887 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=21.55 vs. limit=8.26
+2024-08-25 04:11:54,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=2026.6666666666667, ans=0.0544
+2024-08-25 04:11:56,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 04:11:57,692 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.44 vs. limit=9.02
+2024-08-25 04:11:58,120 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=14.48 vs. limit=8.26
+2024-08-25 04:12:00,077 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.13 vs. limit=9.06
+2024-08-25 04:12:11,681 INFO [train.py:1114] (0/4) Epoch 1, batch 400, loss[loss=1.178, simple_loss=0.7908, pruned_loss=0.8549, ctc_loss=1.134, over 19500.00 frames. ], tot_loss[loss=1.332, simple_loss=0.916, pruned_loss=1.034, ctc_loss=1.278, over 3341471.66 frames. ], batch size: 54, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 04:12:13,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=2133.3333333333335, ans=0.4
+2024-08-25 04:12:15,152 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 9.241e+01 1.644e+02 2.144e+02 2.768e+02 4.713e+02, threshold=4.287e+02, percent-clipped=10.0
+2024-08-25 04:12:19,329 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=24.73 vs. limit=8.3
+2024-08-25 04:12:27,437 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=12.92 vs. limit=8.32
+2024-08-25 04:12:32,943 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=1.147e+00
+2024-08-25 04:12:37,827 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=30.50 vs. limit=8.34
+2024-08-25 04:12:37,884 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.66 vs. limit=4.896
+2024-08-25 04:12:38,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=17.44 vs. limit=8.34
+2024-08-25 04:12:42,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=21.16 vs. limit=8.34
+2024-08-25 04:12:47,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=2240.0, ans=0.395
+2024-08-25 04:12:54,554 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=34.55 vs. limit=8.36
+2024-08-25 04:12:56,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=2293.3333333333335, ans=0.08566666666666667
+2024-08-25 04:13:04,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=2346.6666666666665, ans=0.112
+2024-08-25 04:13:04,691 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.86 vs. limit=5.586666666666667
+2024-08-25 04:13:12,060 INFO [train.py:1114] (0/4) Epoch 1, batch 450, loss[loss=1.107, simple_loss=0.7555, pruned_loss=0.7382, ctc_loss=1.065, over 19617.00 frames. ], tot_loss[loss=1.281, simple_loss=0.8774, pruned_loss=0.9725, ctc_loss=1.231, over 3449265.00 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 04:13:12,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=2400.0, ans=0.27599999999999997
+2024-08-25 04:13:14,003 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=18.76 vs. limit=9.3
+2024-08-25 04:14:01,748 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.27 vs. limit=8.42
+2024-08-25 04:14:08,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=2453.3333333333335, ans=0.385
+2024-08-25 04:14:10,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=2453.3333333333335, ans=0.8141333333333334
+2024-08-25 04:14:18,765 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.72 vs. limit=5.626666666666667
+2024-08-25 04:14:24,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=2560.0, ans=0.2744
+2024-08-25 04:14:24,765 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.13 vs. limit=6.28
+2024-08-25 04:14:30,426 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.93 vs. limit=8.46
+2024-08-25 04:14:35,649 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.16 vs. limit=8.46
+2024-08-25 04:14:40,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=2613.3333333333335, ans=0.8085333333333333
+2024-08-25 04:14:45,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=2613.3333333333335, ans=0.102
+2024-08-25 04:14:53,361 INFO [train.py:1114] (0/4) Epoch 1, batch 500, loss[loss=1.061, simple_loss=0.7373, pruned_loss=0.6441, ctc_loss=1.033, over 19666.00 frames. ], tot_loss[loss=1.218, simple_loss=0.8357, pruned_loss=0.8939, ctc_loss=1.171, over 3545091.46 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:14:53,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=2666.6666666666665, ans=0.2733333333333333
+2024-08-25 04:14:57,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=2666.6666666666665, ans=0.375
+2024-08-25 04:14:59,588 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 2.224e+02 2.884e+02 3.405e+02 7.334e+02, threshold=5.768e+02, percent-clipped=15.0
+2024-08-25 04:15:11,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=2720.0, ans=0.3725
+2024-08-25 04:15:14,765 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.28 vs. limit=8.52
+2024-08-25 04:15:14,839 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.52 vs. limit=9.54
+2024-08-25 04:15:17,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=2773.3333333333335, ans=0.8029333333333334
+2024-08-25 04:15:19,510 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.78 vs. limit=5.109333333333334
+2024-08-25 04:15:20,490 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.90 vs. limit=8.54
+2024-08-25 04:15:23,996 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.44 vs. limit=8.54
+2024-08-25 04:15:32,066 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.72 vs. limit=8.56
+2024-08-25 04:15:46,713 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.36 vs. limit=8.58
+2024-08-25 04:15:50,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=2880.0, ans=0.035199999999999995
+2024-08-25 04:15:52,589 INFO [train.py:1114] (0/4) Epoch 1, batch 550, loss[loss=0.9895, simple_loss=0.6945, pruned_loss=0.5728, ctc_loss=0.9569, over 19288.00 frames. ], tot_loss[loss=1.152, simple_loss=0.793, pruned_loss=0.8138, ctc_loss=1.109, over 3607779.82 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:16:01,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=2933.3333333333335, ans=0.27066666666666667
+2024-08-25 04:16:14,560 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.81 vs. limit=5.76
+2024-08-25 04:16:23,825 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.48 vs. limit=8.64
+2024-08-25 04:16:40,774 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=3093.3333333333335, ans=0.11333333333333334
+2024-08-25 04:16:57,297 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.57 vs. limit=5.786666666666667
+2024-08-25 04:16:59,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=3200.0, ans=0.09999999999999998
+2024-08-25 04:17:00,426 INFO [train.py:1114] (0/4) Epoch 1, batch 600, loss[loss=0.7956, simple_loss=0.5745, pruned_loss=0.4204, ctc_loss=0.7587, over 19369.00 frames. ], tot_loss[loss=1.081, simple_loss=0.7491, pruned_loss=0.733, ctc_loss=1.041, over 3664209.30 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:17:03,770 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.809e+02 3.766e+02 4.633e+02 8.655e+02, threshold=7.532e+02, percent-clipped=12.0
+2024-08-25 04:17:04,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=3200.0, ans=0.027999999999999997
+2024-08-25 04:17:08,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=3200.0, ans=0.35
+2024-08-25 04:17:09,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3200.0, ans=0.35
+2024-08-25 04:17:12,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=3253.3333333333335, ans=0.03983333333333333
+2024-08-25 04:17:14,220 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.58 vs. limit=9.94
+2024-08-25 04:17:51,581 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.75 vs. limit=5.826666666666666
+2024-08-25 04:17:58,331 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.65 vs. limit=9.98
+2024-08-25 04:18:05,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=3360.0, ans=0.3425
+2024-08-25 04:18:26,482 INFO [train.py:1114] (0/4) Epoch 1, batch 650, loss[loss=0.7809, simple_loss=0.5732, pruned_loss=0.3854, ctc_loss=0.7475, over 19771.00 frames. ], tot_loss[loss=1.008, simple_loss=0.7046, pruned_loss=0.6546, ctc_loss=0.9691, over 3714614.08 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:18:29,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=3466.6666666666665, ans=0.3375
+2024-08-25 04:18:36,857 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.30 vs. limit=5.88
+2024-08-25 04:18:52,151 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.61 vs. limit=8.82
+2024-08-25 04:18:52,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=3573.3333333333335, ans=0.26426666666666665
+2024-08-25 04:19:05,157 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.17 vs. limit=8.86
+2024-08-25 04:19:07,590 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.17 vs. limit=6.8133333333333335
+2024-08-25 04:19:08,421 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.72 vs. limit=10.22
+2024-08-25 04:19:21,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 04:19:23,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=3680.0, ans=0.2552
+2024-08-25 04:20:32,374 INFO [train.py:1114] (0/4) Epoch 1, batch 700, loss[loss=0.6967, simple_loss=0.5113, pruned_loss=0.343, ctc_loss=0.6625, over 19712.00 frames. ], tot_loss[loss=0.9455, simple_loss=0.6672, pruned_loss=0.5877, ctc_loss=0.9048, over 3746967.74 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:20:35,542 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.600e+02 3.309e+02 4.487e+02 1.180e+03, threshold=6.619e+02, percent-clipped=3.0
+2024-08-25 04:20:40,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=3733.3333333333335, ans=0.325
+2024-08-25 04:20:45,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=3786.6666666666665, ans=0.3225
+2024-08-25 04:21:08,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=3893.3333333333335, ans=0.3175
+2024-08-25 04:21:14,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=3893.3333333333335, ans=0.3175
+2024-08-25 04:21:19,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=3946.6666666666665, ans=0.26053333333333334
+2024-08-25 04:21:23,011 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.67 vs. limit=5.578666666666667
+2024-08-25 04:21:26,550 INFO [train.py:1114] (0/4) Epoch 1, batch 750, loss[loss=0.6988, simple_loss=0.5283, pruned_loss=0.3246, ctc_loss=0.6379, over 19497.00 frames. ], tot_loss[loss=0.8839, simple_loss=0.6308, pruned_loss=0.5266, ctc_loss=0.8393, over 3772518.90 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:21:26,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 04:21:26,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=4000.0, ans=0.04999999999999999
+2024-08-25 04:21:33,519 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=10.43 vs. limit=10.5
+2024-08-25 04:21:38,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=4053.3333333333335, ans=0.07466666666666667
+2024-08-25 04:21:55,648 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.01 vs. limit=9.040000000000001
+2024-08-25 04:22:04,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=4160.0, ans=0.04933333333333333
+2024-08-25 04:22:06,467 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=4160.0, ans=0.7544
+2024-08-25 04:22:39,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=4266.666666666667, ans=0.2573333333333333
+2024-08-25 04:22:40,630 INFO [train.py:1114] (0/4) Epoch 1, batch 800, loss[loss=0.5632, simple_loss=0.4393, pruned_loss=0.2488, ctc_loss=0.4864, over 19798.00 frames. ], tot_loss[loss=0.8271, simple_loss=0.5981, pruned_loss=0.4724, ctc_loss=0.7761, over 3793423.11 frames. ], batch size: 49, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:22:40,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=4266.666666666667, ans=0.7506666666666667
+2024-08-25 04:22:43,867 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.484e+02 3.479e+02 4.307e+02 9.603e+02, threshold=6.957e+02, percent-clipped=4.0
+2024-08-25 04:22:46,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=4266.666666666667, ans=0.3
+2024-08-25 04:22:55,150 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:23:06,056 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.31 vs. limit=9.14
+2024-08-25 04:23:10,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=4373.333333333333, ans=0.29500000000000004
+2024-08-25 04:23:31,165 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.31 vs. limit=9.18
+2024-08-25 04:23:42,176 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.94 vs. limit=9.2
+2024-08-25 04:23:42,668 INFO [train.py:1114] (0/4) Epoch 1, batch 850, loss[loss=0.6521, simple_loss=0.5082, pruned_loss=0.2914, ctc_loss=0.556, over 19652.00 frames. ], tot_loss[loss=0.7775, simple_loss=0.5702, pruned_loss=0.4261, ctc_loss=0.7195, over 3813331.40 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:23:47,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=4533.333333333333, ans=0.2875
+2024-08-25 04:23:50,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=4533.333333333333, ans=0.7413333333333334
+2024-08-25 04:24:16,062 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.32 vs. limit=9.26
+2024-08-25 04:24:21,610 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.73 vs. limit=9.26
+2024-08-25 04:24:24,088 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.57 vs. limit=6.173333333333333
+2024-08-25 04:24:24,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=4746.666666666667, ans=0.00983768115942029
+2024-08-25 04:24:31,450 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.44 vs. limit=6.1866666666666665
+2024-08-25 04:24:34,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4746.666666666667, ans=0.25253333333333333
+2024-08-25 04:24:36,254 INFO [train.py:1114] (0/4) Epoch 1, batch 900, loss[loss=0.5163, simple_loss=0.4195, pruned_loss=0.2131, ctc_loss=0.4178, over 19815.00 frames. ], tot_loss[loss=0.7367, simple_loss=0.5476, pruned_loss=0.3887, ctc_loss=0.6713, over 3817847.63 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:24:39,546 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.433e+02 3.203e+02 4.513e+02 7.559e+02, threshold=6.406e+02, percent-clipped=2.0
+2024-08-25 04:24:58,715 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.38 vs. limit=11.18
+2024-08-25 04:24:59,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=4906.666666666667, ans=0.0
+2024-08-25 04:25:04,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=4906.666666666667, ans=0.25093333333333334
+2024-08-25 04:25:16,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=4960.0, ans=0.26749999999999996
+2024-08-25 04:25:19,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=4960.0, ans=8.1
+2024-08-25 04:25:26,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=5013.333333333333, ans=0.009779710144927536
+2024-08-25 04:25:32,737 INFO [train.py:1114] (0/4) Epoch 1, batch 950, loss[loss=0.505, simple_loss=0.4167, pruned_loss=0.1983, ctc_loss=0.4131, over 19497.00 frames. ], tot_loss[loss=0.7001, simple_loss=0.5275, pruned_loss=0.3561, ctc_loss=0.628, over 3819682.03 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:25:38,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=5066.666666666667, ans=0.24933333333333332
+2024-08-25 04:25:43,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=5120.0, ans=0.0
+2024-08-25 04:25:49,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=5120.0, ans=0.26
+2024-08-25 04:25:51,197 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=5120.0, ans=0.009756521739130435
+2024-08-25 04:25:55,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=5173.333333333333, ans=0.2575
+2024-08-25 04:26:14,595 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.19 vs. limit=11.42
+2024-08-25 04:26:15,394 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=14.02 vs. limit=11.42
+2024-08-25 04:26:16,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=5226.666666666667, ans=0.255
+2024-08-25 04:26:33,406 INFO [train.py:1114] (0/4) Epoch 1, batch 1000, loss[loss=0.4993, simple_loss=0.418, pruned_loss=0.1933, ctc_loss=0.3975, over 19848.00 frames. ], tot_loss[loss=0.6702, simple_loss=0.5117, pruned_loss=0.3299, ctc_loss=0.5911, over 3816190.86 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:26:36,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.07 vs. limit=9.5
+2024-08-25 04:26:36,698 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.226e+02 2.758e+02 3.479e+02 9.619e+02, threshold=5.516e+02, percent-clipped=3.0
+2024-08-25 04:26:39,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=5333.333333333333, ans=0.025
+2024-08-25 04:26:41,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 04:26:42,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=5333.333333333333, ans=0.24666666666666667
+2024-08-25 04:26:44,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=5386.666666666667, ans=0.2475
+2024-08-25 04:27:10,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=5493.333333333333, ans=0.2824
+2024-08-25 04:27:22,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=5546.666666666667, ans=0.24
+2024-08-25 04:27:25,673 INFO [train.py:1114] (0/4) Epoch 1, batch 1050, loss[loss=0.5363, simple_loss=0.4486, pruned_loss=0.2094, ctc_loss=0.4279, over 19842.00 frames. ], tot_loss[loss=0.6384, simple_loss=0.4944, pruned_loss=0.3042, ctc_loss=0.5534, over 3821745.37 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:27:58,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=5760.0, ans=0.22999999999999998
+2024-08-25 04:28:04,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=5760.0, ans=0.22999999999999998
+2024-08-25 04:28:08,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=5813.333333333333, ans=0.24186666666666667
+2024-08-25 04:28:10,626 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=5813.333333333333, ans=0.04244444444444445
+2024-08-25 04:28:11,626 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=5813.333333333333, ans=0.22749999999999998
+2024-08-25 04:28:20,181 INFO [train.py:1114] (0/4) Epoch 1, batch 1100, loss[loss=0.464, simple_loss=0.3976, pruned_loss=0.1749, ctc_loss=0.3582, over 19601.00 frames. ], tot_loss[loss=0.6107, simple_loss=0.4798, pruned_loss=0.2821, ctc_loss=0.5204, over 3828941.86 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:28:23,253 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.143e+02 2.593e+02 3.421e+02 4.407e+02, threshold=5.186e+02, percent-clipped=0.0
+2024-08-25 04:28:38,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=5920.0, ans=0.009582608695652174
+2024-08-25 04:28:52,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=6026.666666666667, ans=0.6890666666666667
+2024-08-25 04:29:06,910 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=6080.0, ans=0.2392
+2024-08-25 04:29:15,902 INFO [train.py:1114] (0/4) Epoch 1, batch 1150, loss[loss=0.4494, simple_loss=0.395, pruned_loss=0.1631, ctc_loss=0.3361, over 19591.00 frames. ], tot_loss[loss=0.5878, simple_loss=0.4678, pruned_loss=0.2642, ctc_loss=0.4928, over 3828275.11 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 04:29:19,536 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=2.687e-03
+2024-08-25 04:29:20,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=6133.333333333333, ans=8.066666666666666
+2024-08-25 04:32:27,988 INFO [train.py:1114] (0/4) Epoch 1, batch 1200, loss[loss=0.4863, simple_loss=0.4188, pruned_loss=0.1863, ctc_loss=0.3696, over 19841.00 frames. ], tot_loss[loss=0.5705, simple_loss=0.4595, pruned_loss=0.2504, ctc_loss=0.4709, over 3823615.31 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:32:31,062 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.077e+02 2.797e+02 3.799e+02 8.339e+02, threshold=5.594e+02, percent-clipped=11.0
+2024-08-25 04:32:31,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=6400.0, ans=0.2
+2024-08-25 04:32:34,731 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.05 vs. limit=8.2
+2024-08-25 04:33:03,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:06,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:19,303 INFO [train.py:1114] (0/4) Epoch 1, batch 1250, loss[loss=0.5004, simple_loss=0.4324, pruned_loss=0.1936, ctc_loss=0.3746, over 19507.00 frames. ], tot_loss[loss=0.5497, simple_loss=0.4495, pruned_loss=0.235, ctc_loss=0.4456, over 3841835.80 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:33:32,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.78 vs. limit=6.688000000000001
+2024-08-25 04:33:44,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=6773.333333333333, ans=0.03844444444444445
+2024-08-25 04:33:52,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=6826.666666666667, ans=0.18
+2024-08-25 04:33:53,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=6826.666666666667, ans=0.18
+2024-08-25 04:34:10,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=6880.0, ans=0.1775
+2024-08-25 04:34:10,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=6880.0, ans=9.3
+2024-08-25 04:34:12,504 INFO [train.py:1114] (0/4) Epoch 1, batch 1300, loss[loss=0.5163, simple_loss=0.4384, pruned_loss=0.2045, ctc_loss=0.4015, over 18813.00 frames. ], tot_loss[loss=0.5313, simple_loss=0.4398, pruned_loss=0.2226, ctc_loss=0.4241, over 3845162.98 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:34:14,084 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=13.25 vs. limit=12.7
+2024-08-25 04:34:15,550 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.007e+02 2.492e+02 3.309e+02 5.533e+02, threshold=4.985e+02, percent-clipped=0.0
+2024-08-25 04:34:26,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=6986.666666666667, ans=0.1725
+2024-08-25 04:34:32,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=6986.666666666667, ans=0.03755555555555556
+2024-08-25 04:34:50,517 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.56 vs. limit=12.82
+2024-08-25 04:34:55,152 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.84 vs. limit=6.773333333333333
+2024-08-25 04:35:11,278 INFO [train.py:1114] (0/4) Epoch 1, batch 1350, loss[loss=0.4564, simple_loss=0.4054, pruned_loss=0.1712, ctc_loss=0.3337, over 19757.00 frames. ], tot_loss[loss=0.5149, simple_loss=0.4319, pruned_loss=0.2114, ctc_loss=0.4049, over 3855296.72 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:35:19,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=7200.0, ans=0.22799999999999998
+2024-08-25 04:35:22,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=7253.333333333333, ans=0.15999999999999998
+2024-08-25 04:35:24,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=7253.333333333333, ans=0.036444444444444446
+2024-08-25 04:35:28,785 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=7253.333333333333, ans=0.15999999999999998
+2024-08-25 04:35:36,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=7306.666666666667, ans=0.15749999999999997
+2024-08-25 04:35:45,175 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.73 vs. limit=13.02
+2024-08-25 04:35:57,536 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.22 vs. limit=8.706666666666667
+2024-08-25 04:36:02,103 INFO [train.py:1114] (0/4) Epoch 1, batch 1400, loss[loss=0.371, simple_loss=0.3386, pruned_loss=0.1334, ctc_loss=0.2681, over 19686.00 frames. ], tot_loss[loss=0.5016, simple_loss=0.4255, pruned_loss=0.2027, ctc_loss=0.3889, over 3862562.08 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:36:05,092 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.980e+02 2.233e+02 2.820e+02 5.701e+02, threshold=4.466e+02, percent-clipped=2.0
+2024-08-25 04:36:05,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=7466.666666666667, ans=0.6386666666666667
+2024-08-25 04:36:27,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=7573.333333333333, ans=0.009223188405797101
+2024-08-25 04:36:50,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=7680.0, ans=0.14
+2024-08-25 04:36:53,008 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:36:54,777 INFO [train.py:1114] (0/4) Epoch 1, batch 1450, loss[loss=0.4547, simple_loss=0.4137, pruned_loss=0.1675, ctc_loss=0.3249, over 19680.00 frames. ], tot_loss[loss=0.4909, simple_loss=0.4209, pruned_loss=0.1956, ctc_loss=0.3763, over 3860643.92 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:37:01,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=7733.333333333333, ans=0.1375
+2024-08-25 04:37:33,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=7893.333333333333, ans=0.0
+2024-08-25 04:37:48,632 INFO [train.py:1114] (0/4) Epoch 1, batch 1500, loss[loss=0.4652, simple_loss=0.4177, pruned_loss=0.1744, ctc_loss=0.3449, over 19588.00 frames. ], tot_loss[loss=0.4803, simple_loss=0.4162, pruned_loss=0.1889, ctc_loss=0.3641, over 3860583.50 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:37:52,706 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=8000.0, ans=0.0
+2024-08-25 04:37:54,386 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 1.987e+02 2.351e+02 3.240e+02 5.717e+02, threshold=4.702e+02, percent-clipped=4.0
+2024-08-25 04:37:58,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=8000.0, ans=0.03333333333333334
+2024-08-25 04:38:13,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=8106.666666666667, ans=0.125
+2024-08-25 04:38:38,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=8160.0, ans=0.09899494936611666
+2024-08-25 04:38:56,143 INFO [train.py:1114] (0/4) Epoch 1, batch 1550, loss[loss=0.4712, simple_loss=0.4275, pruned_loss=0.1759, ctc_loss=0.3455, over 19602.00 frames. ], tot_loss[loss=0.4718, simple_loss=0.4126, pruned_loss=0.1838, ctc_loss=0.3546, over 3844875.38 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:07,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=8320.0, ans=0.032
+2024-08-25 04:39:07,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=8320.0, ans=0.125
+2024-08-25 04:39:10,303 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.18 vs. limit=4.248
+2024-08-25 04:39:14,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=8320.0, ans=0.025
+2024-08-25 04:39:26,313 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:39:47,214 INFO [train.py:1114] (0/4) Epoch 1, batch 1600, loss[loss=0.4155, simple_loss=0.39, pruned_loss=0.1503, ctc_loss=0.2911, over 19842.00 frames. ], tot_loss[loss=0.4631, simple_loss=0.4087, pruned_loss=0.179, ctc_loss=0.3446, over 3835647.09 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:52,850 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.044e+02 2.368e+02 2.950e+02 6.795e+02, threshold=4.737e+02, percent-clipped=6.0
+2024-08-25 04:39:56,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=8533.333333333334, ans=0.009014492753623189
+2024-08-25 04:40:05,847 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.74 vs. limit=13.94
+2024-08-25 04:40:11,743 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=8640.0, ans=10.0
+2024-08-25 04:40:30,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=8693.333333333334, ans=0.125
+2024-08-25 04:40:36,680 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.49 vs. limit=14.059999999999999
+2024-08-25 04:40:40,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=8746.666666666666, ans=0.030222222222222227
+2024-08-25 04:40:42,981 INFO [train.py:1114] (0/4) Epoch 1, batch 1650, loss[loss=0.4339, simple_loss=0.3985, pruned_loss=0.1614, ctc_loss=0.3189, over 19671.00 frames. ], tot_loss[loss=0.4541, simple_loss=0.4047, pruned_loss=0.174, ctc_loss=0.3354, over 3832259.73 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:40:44,571 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.29 vs. limit=7.52
+2024-08-25 04:41:50,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=8800.0, ans=10.8
+2024-08-25 04:43:05,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=8906.666666666666, ans=0.21093333333333333
+2024-08-25 04:43:09,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=8960.0, ans=0.125
+2024-08-25 04:43:14,469 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.77 vs. limit=14.219999999999999
+2024-08-25 04:43:28,616 INFO [train.py:1114] (0/4) Epoch 1, batch 1700, loss[loss=0.3771, simple_loss=0.3534, pruned_loss=0.1377, ctc_loss=0.2741, over 19670.00 frames. ], tot_loss[loss=0.4446, simple_loss=0.4005, pruned_loss=0.1688, ctc_loss=0.3257, over 3846772.86 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:43:31,592 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.986e+02 2.386e+02 2.791e+02 4.935e+02, threshold=4.772e+02, percent-clipped=1.0
+2024-08-25 04:43:35,755 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=9066.666666666666, ans=0.5826666666666667
+2024-08-25 04:44:00,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=9226.666666666666, ans=0.5770666666666667
+2024-08-25 04:44:06,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=9280.0, ans=0.125
+2024-08-25 04:45:26,296 INFO [train.py:1114] (0/4) Epoch 1, batch 1750, loss[loss=0.3362, simple_loss=0.3367, pruned_loss=0.1169, ctc_loss=0.2155, over 19689.00 frames. ], tot_loss[loss=0.436, simple_loss=0.3969, pruned_loss=0.1644, ctc_loss=0.3166, over 3851413.56 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:45:31,448 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.01 vs. limit=11.0
+2024-08-25 04:45:34,969 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=9386.666666666666, ans=0.125
+2024-08-25 04:45:40,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=9386.666666666666, ans=0.125
+2024-08-25 04:45:43,475 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:46:00,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=9493.333333333334, ans=0.125
+2024-08-25 04:46:04,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=9546.666666666666, ans=14.66
+2024-08-25 04:46:13,426 INFO [train.py:1114] (0/4) Epoch 1, batch 1800, loss[loss=0.4074, simple_loss=0.3946, pruned_loss=0.1475, ctc_loss=0.281, over 19621.00 frames. ], tot_loss[loss=0.4294, simple_loss=0.3945, pruned_loss=0.161, ctc_loss=0.3101, over 3852102.84 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:46:16,183 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.025e+02 2.321e+02 2.784e+02 4.120e+02, threshold=4.643e+02, percent-clipped=0.0
+2024-08-25 04:48:23,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=9813.333333333334, ans=0.025
+2024-08-25 04:48:28,777 INFO [train.py:1114] (0/4) Epoch 1, batch 1850, loss[loss=0.4153, simple_loss=0.4009, pruned_loss=0.1509, ctc_loss=0.2959, over 19592.00 frames. ], tot_loss[loss=0.4223, simple_loss=0.3918, pruned_loss=0.1575, ctc_loss=0.303, over 3856242.32 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:48:33,528 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:48:34,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=9866.666666666666, ans=0.02555555555555556
+2024-08-25 04:48:35,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=9866.666666666666, ans=0.125
+2024-08-25 04:48:36,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=9866.666666666666, ans=0.125
+2024-08-25 04:48:40,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=9920.0, ans=0.025
+2024-08-25 04:48:48,578 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=9973.333333333334, ans=0.00870144927536232
+2024-08-25 04:49:15,878 INFO [train.py:1114] (0/4) Epoch 1, batch 1900, loss[loss=0.4444, simple_loss=0.4266, pruned_loss=0.1647, ctc_loss=0.3161, over 19637.00 frames. ], tot_loss[loss=0.4179, simple_loss=0.391, pruned_loss=0.1554, ctc_loss=0.2986, over 3862203.38 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:49:18,610 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.031e+02 2.370e+02 2.878e+02 5.610e+02, threshold=4.739e+02, percent-clipped=2.0
+2024-08-25 04:50:13,142 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.62 vs. limit=15.219999999999999
+2024-08-25 04:50:14,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=10293.333333333334, ans=0.008631884057971015
+2024-08-25 04:50:15,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=10293.333333333334, ans=0.125
+2024-08-25 04:50:23,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=10346.666666666666, ans=0.02355555555555556
+2024-08-25 04:50:25,916 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.84 vs. limit=11.379999999999999
+2024-08-25 04:50:31,844 INFO [train.py:1114] (0/4) Epoch 1, batch 1950, loss[loss=0.3974, simple_loss=0.3868, pruned_loss=0.1462, ctc_loss=0.2812, over 19597.00 frames. ], tot_loss[loss=0.4127, simple_loss=0.3901, pruned_loss=0.1528, ctc_loss=0.294, over 3870430.16 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:50:42,083 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=10453.333333333334, ans=0.035
+2024-08-25 04:51:05,993 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.47 vs. limit=15.42
+2024-08-25 04:51:16,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10560.0, ans=0.1944
+2024-08-25 04:51:35,613 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.03 vs. limit=11.48
+2024-08-25 04:52:05,800 INFO [train.py:1114] (0/4) Epoch 1, batch 2000, loss[loss=0.3463, simple_loss=0.3346, pruned_loss=0.1291, ctc_loss=0.2493, over 19630.00 frames. ], tot_loss[loss=0.4094, simple_loss=0.3894, pruned_loss=0.1515, ctc_loss=0.2911, over 3853410.72 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:52:09,678 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 1.861e+02 2.137e+02 2.685e+02 4.799e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-25 04:53:45,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=10720.0, ans=0.022000000000000002
+2024-08-25 04:54:18,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=10826.666666666666, ans=0.125
+2024-08-25 04:54:18,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=10826.666666666666, ans=0.125
+2024-08-25 04:54:24,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=10880.0, ans=0.125
+2024-08-25 04:54:32,956 INFO [train.py:1114] (0/4) Epoch 1, batch 2050, loss[loss=0.3273, simple_loss=0.338, pruned_loss=0.1142, ctc_loss=0.2205, over 19705.00 frames. ], tot_loss[loss=0.403, simple_loss=0.3866, pruned_loss=0.1487, ctc_loss=0.2855, over 3850116.75 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:54:41,273 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.39 vs. limit=8.373333333333335
+2024-08-25 04:54:51,342 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.90 vs. limit=15.74
+2024-08-25 04:55:21,831 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=11093.333333333334, ans=0.5117333333333334
+2024-08-25 04:55:31,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=11146.666666666666, ans=0.125
+2024-08-25 04:55:40,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=11146.666666666666, ans=0.020222222222222228
+2024-08-25 04:55:42,298 INFO [train.py:1114] (0/4) Epoch 1, batch 2100, loss[loss=0.3654, simple_loss=0.3727, pruned_loss=0.1299, ctc_loss=0.2463, over 19773.00 frames. ], tot_loss[loss=0.3967, simple_loss=0.3838, pruned_loss=0.1458, ctc_loss=0.28, over 3857796.26 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:56:36,116 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 1.936e+02 2.214e+02 2.535e+02 3.885e+02, threshold=4.428e+02, percent-clipped=0.0
+2024-08-25 04:56:36,445 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11200.0, ans=0.188
+2024-08-25 04:57:15,247 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=6.30 vs. limit=8.522666666666666
+2024-08-25 04:57:21,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=11360.0, ans=0.3704
+2024-08-25 04:57:35,985 INFO [train.py:1114] (0/4) Epoch 1, batch 2150, loss[loss=0.3549, simple_loss=0.355, pruned_loss=0.1283, ctc_loss=0.2454, over 19587.00 frames. ], tot_loss[loss=0.3895, simple_loss=0.3802, pruned_loss=0.1423, ctc_loss=0.2734, over 3868506.34 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:58:59,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=11520.0, ans=0.125
+2024-08-25 04:59:34,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff2.min_abs, batch_count=11680.0, ans=0.1
+2024-08-25 04:59:34,919 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.47 vs. limit=11.879999999999999
+2024-08-25 04:59:36,285 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.61 vs. limit=4.76
+2024-08-25 04:59:36,673 INFO [train.py:1114] (0/4) Epoch 1, batch 2200, loss[loss=0.3758, simple_loss=0.3766, pruned_loss=0.1341, ctc_loss=0.267, over 19580.00 frames. ], tot_loss[loss=0.3854, simple_loss=0.3786, pruned_loss=0.1404, ctc_loss=0.2697, over 3867344.24 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:59:36,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11733.333333333334, ans=0.18266666666666664
+2024-08-25 04:59:40,220 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 1.884e+02 2.153e+02 2.810e+02 4.673e+02, threshold=4.307e+02, percent-clipped=1.0
+2024-08-25 04:59:56,021 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.61 vs. limit=7.96
+2024-08-25 04:59:56,930 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.89 vs. limit=8.736
+2024-08-25 05:00:06,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=11893.333333333334, ans=0.017111111111111105
+2024-08-25 05:00:27,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=11946.666666666666, ans=0.025
+2024-08-25 05:00:34,226 INFO [train.py:1114] (0/4) Epoch 1, batch 2250, loss[loss=0.3639, simple_loss=0.3761, pruned_loss=0.1258, ctc_loss=0.25, over 19620.00 frames. ], tot_loss[loss=0.3821, simple_loss=0.3772, pruned_loss=0.1387, ctc_loss=0.2667, over 3868369.72 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:00:38,385 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.07 vs. limit=4.8
+2024-08-25 05:00:41,527 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=12000.0, ans=0.48000000000000004
+2024-08-25 05:01:15,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.93 vs. limit=4.816
+2024-08-25 05:01:28,254 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.06 vs. limit=12.059999999999999
+2024-08-25 05:01:37,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=12213.333333333334, ans=0.125
+2024-08-25 05:01:43,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=12266.666666666666, ans=0.17733333333333334
+2024-08-25 05:01:44,079 INFO [train.py:1114] (0/4) Epoch 1, batch 2300, loss[loss=0.3462, simple_loss=0.3559, pruned_loss=0.1203, ctc_loss=0.2396, over 19478.00 frames. ], tot_loss[loss=0.3787, simple_loss=0.3754, pruned_loss=0.1373, ctc_loss=0.2632, over 3862515.72 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:01:47,659 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.926e+02 2.114e+02 2.507e+02 4.625e+02, threshold=4.228e+02, percent-clipped=3.0
+2024-08-25 05:01:49,954 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.80 vs. limit=16.7
+2024-08-25 05:02:02,019 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.89 vs. limit=12.14
+2024-08-25 05:02:11,042 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=12373.333333333334, ans=0.17626666666666668
+2024-08-25 05:02:30,767 INFO [train.py:1114] (0/4) Epoch 1, batch 2350, loss[loss=0.3862, simple_loss=0.3915, pruned_loss=0.1382, ctc_loss=0.261, over 19657.00 frames. ], tot_loss[loss=0.3751, simple_loss=0.3735, pruned_loss=0.1356, ctc_loss=0.2596, over 3864591.13 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:04:22,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=12586.666666666666, ans=0.014222222222222226
+2024-08-25 05:04:24,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=12586.666666666666, ans=0.125
+2024-08-25 05:04:32,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.27 vs. limit=16.98
+2024-08-25 05:04:33,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=12640.0, ans=10.0
+2024-08-25 05:04:37,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=12693.333333333334, ans=0.013777777777777778
+2024-08-25 05:04:54,311 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=12693.333333333334, ans=0.008110144927536232
+2024-08-25 05:05:00,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=12746.666666666666, ans=0.4538666666666667
+2024-08-25 05:05:04,909 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.78 vs. limit=17.1
+2024-08-25 05:05:05,283 INFO [train.py:1114] (0/4) Epoch 1, batch 2400, loss[loss=0.4107, simple_loss=0.4054, pruned_loss=0.1504, ctc_loss=0.2882, over 19375.00 frames. ], tot_loss[loss=0.3762, simple_loss=0.3755, pruned_loss=0.1359, ctc_loss=0.2598, over 3860114.29 frames. ], batch size: 67, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:05:08,752 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.948e+02 2.252e+02 2.666e+02 4.870e+02, threshold=4.504e+02, percent-clipped=4.0
+2024-08-25 05:05:10,210 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=12800.0, ans=0.00808695652173913
+2024-08-25 05:05:16,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=12853.333333333334, ans=0.013111111111111108
+2024-08-25 05:05:24,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=12906.666666666666, ans=0.125
+2024-08-25 05:05:30,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=12906.666666666666, ans=0.4482666666666667
+2024-08-25 05:05:31,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=12906.666666666666, ans=0.4482666666666667
+2024-08-25 05:05:34,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=12960.0, ans=0.125
+2024-08-25 05:05:52,659 INFO [train.py:1114] (0/4) Epoch 1, batch 2450, loss[loss=0.5362, simple_loss=0.456, pruned_loss=0.2287, ctc_loss=0.3977, over 13585.00 frames. ], tot_loss[loss=0.3853, simple_loss=0.3807, pruned_loss=0.1408, ctc_loss=0.2682, over 3739887.67 frames. ], batch size: 140, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:06:33,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=13173.333333333334, ans=0.16826666666666668
+2024-08-25 05:06:35,540 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=19.96 vs. limit=17.380000000000003
+2024-08-25 05:06:49,248 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-1.pt
+2024-08-25 05:07:49,747 INFO [train.py:1114] (0/4) Epoch 2, batch 0, loss[loss=0.3392, simple_loss=0.3484, pruned_loss=0.1201, ctc_loss=0.2246, over 19411.00 frames. ], tot_loss[loss=0.3392, simple_loss=0.3484, pruned_loss=0.1201, ctc_loss=0.2246, over 19411.00 frames. ], batch size: 48, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 05:07:49,749 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 05:09:16,713 INFO [train.py:1146] (0/4) Epoch 2, validation: loss=0.2886, simple_loss=0.3508, pruned_loss=0.0823, ctc_loss=0.1542, over 944034.00 frames.
+2024-08-25 05:09:16,713 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13390MB
+2024-08-25 05:09:16,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=13280.0, ans=0.09899494936611666
+2024-08-25 05:09:35,631 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 1.938e+02 2.191e+02 2.677e+02 6.592e+02, threshold=4.382e+02, percent-clipped=7.0
+2024-08-25 05:09:37,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=13333.333333333334, ans=0.125
+2024-08-25 05:09:40,897 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.01 vs. limit=12.52
+2024-08-25 05:09:43,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 05:09:44,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 05:09:53,534 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=13440.0, ans=0.05
+2024-08-25 05:09:57,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=13440.0, ans=0.125
+2024-08-25 05:10:10,709 INFO [train.py:1114] (0/4) Epoch 2, batch 50, loss[loss=0.3199, simple_loss=0.3308, pruned_loss=0.1128, ctc_loss=0.2086, over 19712.00 frames. ], tot_loss[loss=0.3744, simple_loss=0.3768, pruned_loss=0.1349, ctc_loss=0.2557, over 843639.75 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:11:06,010 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=13653.333333333334, ans=0.09899494936611666
+2024-08-25 05:11:17,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=13706.666666666666, ans=0.007889855072463769
+2024-08-25 05:11:23,523 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:11:30,246 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.94 vs. limit=8.44
+2024-08-25 05:11:47,131 INFO [train.py:1114] (0/4) Epoch 2, batch 100, loss[loss=0.3582, simple_loss=0.3642, pruned_loss=0.1279, ctc_loss=0.2412, over 19726.00 frames. ], tot_loss[loss=0.3719, simple_loss=0.3763, pruned_loss=0.1332, ctc_loss=0.2526, over 1499110.32 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:11:50,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=13813.333333333334, ans=0.025
+2024-08-25 05:11:52,096 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=13813.333333333334, ans=0.125
+2024-08-25 05:11:56,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=13866.666666666666, ans=0.025
+2024-08-25 05:11:57,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=13866.666666666666, ans=0.41466666666666674
+2024-08-25 05:12:00,807 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.907e+02 2.167e+02 2.481e+02 4.957e+02, threshold=4.333e+02, percent-clipped=1.0
+2024-08-25 05:12:03,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=13866.666666666666, ans=0.05
+2024-08-25 05:12:26,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=13973.333333333334, ans=0.04949747468305833
+2024-08-25 05:12:50,516 INFO [train.py:1114] (0/4) Epoch 2, batch 150, loss[loss=0.3308, simple_loss=0.3388, pruned_loss=0.1166, ctc_loss=0.224, over 19727.00 frames. ], tot_loss[loss=0.3646, simple_loss=0.3711, pruned_loss=0.1297, ctc_loss=0.2468, over 2027714.20 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:12:56,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=14080.0, ans=0.008
+2024-08-25 05:12:58,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=14080.0, ans=0.125
+2024-08-25 05:13:02,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=14133.333333333334, ans=0.125
+2024-08-25 05:14:14,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=14240.0, ans=0.125
+2024-08-25 05:14:35,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=14240.0, ans=0.025
+2024-08-25 05:14:38,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=14240.0, ans=0.125
+2024-08-25 05:14:39,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=14293.333333333334, ans=0.125
+2024-08-25 05:14:48,020 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.95 vs. limit=8.573333333333334
+2024-08-25 05:14:50,525 INFO [train.py:1114] (0/4) Epoch 2, batch 200, loss[loss=0.3893, simple_loss=0.3929, pruned_loss=0.1404, ctc_loss=0.2622, over 18298.00 frames. ], tot_loss[loss=0.3592, simple_loss=0.3676, pruned_loss=0.127, ctc_loss=0.2419, over 2435029.92 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:15:14,930 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.847e+02 2.110e+02 2.499e+02 4.235e+02, threshold=4.220e+02, percent-clipped=0.0
+2024-08-25 05:15:49,788 INFO [train.py:1114] (0/4) Epoch 2, batch 250, loss[loss=0.3741, simple_loss=0.3798, pruned_loss=0.1312, ctc_loss=0.2648, over 19414.00 frames. ], tot_loss[loss=0.3562, simple_loss=0.3658, pruned_loss=0.1255, ctc_loss=0.239, over 2754771.79 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:16:12,754 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.03 vs. limit=18.46
+2024-08-25 05:16:16,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=14613.333333333334, ans=0.125
+2024-08-25 05:16:30,869 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=14720.0, ans=0.007669565217391304
+2024-08-25 05:16:39,778 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=14773.333333333334, ans=0.125
+2024-08-25 05:19:37,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=14880.0, ans=0.125
+2024-08-25 05:19:37,929 INFO [train.py:1114] (0/4) Epoch 2, batch 300, loss[loss=0.3705, simple_loss=0.3761, pruned_loss=0.1341, ctc_loss=0.2415, over 19540.00 frames. ], tot_loss[loss=0.3542, simple_loss=0.3645, pruned_loss=0.1246, ctc_loss=0.2369, over 2999480.96 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:19:48,217 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=14880.0, ans=0.025
+2024-08-25 05:19:56,623 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.858e+02 2.099e+02 2.398e+02 3.801e+02, threshold=4.198e+02, percent-clipped=0.0
+2024-08-25 05:19:59,081 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.11 vs. limit=13.1
+2024-08-25 05:20:10,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=14986.666666666666, ans=0.125
+2024-08-25 05:20:23,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=15040.0, ans=0.025
+2024-08-25 05:20:54,632 INFO [train.py:1114] (0/4) Epoch 2, batch 350, loss[loss=0.3034, simple_loss=0.3292, pruned_loss=0.1007, ctc_loss=0.1903, over 19747.00 frames. ], tot_loss[loss=0.353, simple_loss=0.3643, pruned_loss=0.1238, ctc_loss=0.2354, over 3189626.37 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:21:01,898 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=15146.666666666666, ans=0.14853333333333335
+2024-08-25 05:21:06,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=15200.0, ans=0.125
+2024-08-25 05:21:13,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=15200.0, ans=0.003333333333333334
+2024-08-25 05:21:14,268 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=9.30 vs. limit=13.2
+2024-08-25 05:21:16,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15253.333333333334, ans=0.14746666666666666
+2024-08-25 05:21:22,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=15253.333333333334, ans=0.125
+2024-08-25 05:21:37,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=15306.666666666666, ans=0.025
+2024-08-25 05:21:51,461 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.42 vs. limit=5.304
+2024-08-25 05:22:06,916 INFO [train.py:1114] (0/4) Epoch 2, batch 400, loss[loss=0.3104, simple_loss=0.3434, pruned_loss=0.1001, ctc_loss=0.1934, over 19505.00 frames. ], tot_loss[loss=0.3491, simple_loss=0.3619, pruned_loss=0.1218, ctc_loss=0.2319, over 3341816.65 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:22:17,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=15466.666666666666, ans=0.125
+2024-08-25 05:22:20,578 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.895e+02 2.189e+02 2.528e+02 4.758e+02, threshold=4.379e+02, percent-clipped=2.0
+2024-08-25 05:22:22,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15466.666666666666, ans=0.125
+2024-08-25 05:22:36,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff2.min_abs, batch_count=15520.0, ans=0.1
+2024-08-25 05:22:39,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=15573.333333333334, ans=0.125
+2024-08-25 05:23:46,264 INFO [train.py:1114] (0/4) Epoch 2, batch 450, loss[loss=0.3295, simple_loss=0.3649, pruned_loss=0.1044, ctc_loss=0.213, over 19618.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3619, pruned_loss=0.122, ctc_loss=0.232, over 3449617.60 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:23:49,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=15680.0, ans=0.125
+2024-08-25 05:23:59,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=15733.333333333334, ans=0.05
+2024-08-25 05:24:04,768 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.72 vs. limit=12.893333333333333
+2024-08-25 05:24:08,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=15786.666666666666, ans=0.0008888888888888904
+2024-08-25 05:24:16,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15840.0, ans=0.1416
+2024-08-25 05:24:22,548 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.58 vs. limit=8.96
+2024-08-25 05:24:23,442 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.13 vs. limit=19.380000000000003
+2024-08-25 05:24:24,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15893.333333333334, ans=0.14106666666666667
+2024-08-25 05:24:28,216 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=15893.333333333334, ans=0.125
+2024-08-25 05:24:37,913 INFO [train.py:1114] (0/4) Epoch 2, batch 500, loss[loss=0.3392, simple_loss=0.3713, pruned_loss=0.1098, ctc_loss=0.2186, over 19670.00 frames. ], tot_loss[loss=0.3454, simple_loss=0.3595, pruned_loss=0.12, ctc_loss=0.2283, over 3545631.43 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:24:38,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=15946.666666666666, ans=0.125
+2024-08-25 05:26:01,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=16000.0, ans=0.07
+2024-08-25 05:26:05,501 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.778e+02 2.035e+02 2.349e+02 4.286e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-25 05:26:09,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=16000.0, ans=0.125
+2024-08-25 05:26:12,410 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=16053.333333333334, ans=0.125
+2024-08-25 05:26:28,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=16106.666666666666, ans=0.125
+2024-08-25 05:26:34,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=16160.0, ans=0.007356521739130435
+2024-08-25 05:26:34,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=16160.0, ans=0.125
+2024-08-25 05:26:51,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=16160.0, ans=0.07
+2024-08-25 05:26:53,739 INFO [train.py:1114] (0/4) Epoch 2, batch 550, loss[loss=0.3739, simple_loss=0.3804, pruned_loss=0.1326, ctc_loss=0.2555, over 19306.00 frames. ], tot_loss[loss=0.3451, simple_loss=0.3594, pruned_loss=0.1199, ctc_loss=0.2277, over 3608461.94 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:27:03,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=16213.333333333334, ans=0.0
+2024-08-25 05:27:06,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=16213.333333333334, ans=0.13786666666666667
+2024-08-25 05:27:12,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=16266.666666666666, ans=0.125
+2024-08-25 05:27:22,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=16320.0, ans=0.125
+2024-08-25 05:28:06,013 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.57 vs. limit=13.66
+2024-08-25 05:28:14,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 05:28:16,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 05:28:19,439 INFO [train.py:1114] (0/4) Epoch 2, batch 600, loss[loss=0.3689, simple_loss=0.3801, pruned_loss=0.1305, ctc_loss=0.242, over 19382.00 frames. ], tot_loss[loss=0.3447, simple_loss=0.3596, pruned_loss=0.1196, ctc_loss=0.2267, over 3666275.06 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:28:26,781 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.02 vs. limit=13.68
+2024-08-25 05:28:30,119 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.91 vs. limit=19.9
+2024-08-25 05:28:31,364 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.88 vs. limit=9.133333333333333
+2024-08-25 05:28:34,467 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.917e+02 2.183e+02 2.770e+02 8.189e+02, threshold=4.366e+02, percent-clipped=5.0
+2024-08-25 05:28:36,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=16533.333333333332, ans=0.125
+2024-08-25 05:28:41,835 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.07 vs. limit=13.72
+2024-08-25 05:29:01,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=16640.0, ans=0.0
+2024-08-25 05:29:10,653 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:29:14,158 INFO [train.py:1114] (0/4) Epoch 2, batch 650, loss[loss=0.3172, simple_loss=0.3526, pruned_loss=0.1014, ctc_loss=0.1971, over 19784.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.3579, pruned_loss=0.1184, ctc_loss=0.2243, over 3716432.68 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:29:14,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=16746.666666666668, ans=0.0
+2024-08-25 05:31:11,018 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.34 vs. limit=13.373333333333335
+2024-08-25 05:31:26,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=16800.0, ans=0.0
+2024-08-25 05:31:59,736 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=16960.0, ans=0.0
+2024-08-25 05:32:01,961 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.44 vs. limit=13.86
+2024-08-25 05:32:02,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=16960.0, ans=0.3064
+2024-08-25 05:32:06,446 INFO [train.py:1114] (0/4) Epoch 2, batch 700, loss[loss=0.3405, simple_loss=0.3549, pruned_loss=0.1198, ctc_loss=0.2163, over 19706.00 frames. ], tot_loss[loss=0.3416, simple_loss=0.3579, pruned_loss=0.1179, ctc_loss=0.2233, over 3747743.13 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:32:36,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=17013.333333333332, ans=0.0
+2024-08-25 05:32:36,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=17013.333333333332, ans=0.30453333333333343
+2024-08-25 05:32:40,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=17013.333333333332, ans=0.0
+2024-08-25 05:32:41,566 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.49 vs. limit=20.259999999999998
+2024-08-25 05:32:47,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=17066.666666666668, ans=0.125
+2024-08-25 05:32:47,846 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.759e+02 2.005e+02 2.359e+02 5.033e+02, threshold=4.011e+02, percent-clipped=2.0
+2024-08-25 05:33:00,043 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:33:00,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=17120.0, ans=0.007147826086956522
+2024-08-25 05:33:04,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=17120.0, ans=0.30080000000000007
+2024-08-25 05:33:27,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=17280.0, ans=0.125
+2024-08-25 05:33:28,055 INFO [train.py:1114] (0/4) Epoch 2, batch 750, loss[loss=0.3213, simple_loss=0.353, pruned_loss=0.1042, ctc_loss=0.2029, over 19521.00 frames. ], tot_loss[loss=0.3401, simple_loss=0.3572, pruned_loss=0.1172, ctc_loss=0.2216, over 3774462.36 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:35:17,688 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.44 vs. limit=13.98
+2024-08-25 05:37:34,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=17493.333333333332, ans=0.1250666666666667
+2024-08-25 05:37:40,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=15.67 vs. limit=14.08
+2024-08-25 05:37:40,887 INFO [train.py:1114] (0/4) Epoch 2, batch 800, loss[loss=0.2964, simple_loss=0.318, pruned_loss=0.09779, ctc_loss=0.1978, over 19396.00 frames. ], tot_loss[loss=0.3391, simple_loss=0.3565, pruned_loss=0.1167, ctc_loss=0.2207, over 3795728.73 frames. ], batch size: 48, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 05:37:43,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=17546.666666666668, ans=0.2858666666666667
+2024-08-25 05:38:03,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=17600.0, ans=0.125
+2024-08-25 05:38:06,533 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.845e+02 2.130e+02 2.517e+02 4.310e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 05:38:10,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=17600.0, ans=0.124
+2024-08-25 05:38:25,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.44 vs. limit=20.740000000000002
+2024-08-25 05:38:48,289 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.33 vs. limit=14.18
+2024-08-25 05:38:48,685 INFO [train.py:1114] (0/4) Epoch 2, batch 850, loss[loss=0.3722, simple_loss=0.3862, pruned_loss=0.1308, ctc_loss=0.2415, over 19654.00 frames. ], tot_loss[loss=0.3377, simple_loss=0.3558, pruned_loss=0.116, ctc_loss=0.219, over 3814410.93 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 16.0
+2024-08-25 05:38:54,785 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=17.15 vs. limit=13.906666666666666
+2024-08-25 05:39:00,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=17813.333333333332, ans=0.125
+2024-08-25 05:39:05,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=17866.666666666668, ans=0.12133333333333332
+2024-08-25 05:39:14,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=17920.0, ans=0.0
+2024-08-25 05:39:22,302 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=17920.0, ans=0.0
+2024-08-25 05:39:41,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=18026.666666666668, ans=0.0
+2024-08-25 05:39:58,483 INFO [train.py:1114] (0/4) Epoch 2, batch 900, loss[loss=0.2924, simple_loss=0.3204, pruned_loss=0.09629, ctc_loss=0.1793, over 19409.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3555, pruned_loss=0.1158, ctc_loss=0.2184, over 3817568.43 frames. ], batch size: 48, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:40:19,546 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.852e+02 2.189e+02 2.703e+02 9.878e+02, threshold=4.378e+02, percent-clipped=3.0
+2024-08-25 05:40:58,429 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=18240.0, ans=0.125
+2024-08-25 05:41:08,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=18293.333333333332, ans=0.0
+2024-08-25 05:41:08,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=18293.333333333332, ans=0.25973333333333337
+2024-08-25 05:41:14,124 INFO [train.py:1114] (0/4) Epoch 2, batch 950, loss[loss=0.2953, simple_loss=0.3183, pruned_loss=0.09982, ctc_loss=0.1817, over 19504.00 frames. ], tot_loss[loss=0.3376, simple_loss=0.3556, pruned_loss=0.1161, ctc_loss=0.2186, over 3819324.98 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:41:17,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=18346.666666666668, ans=0.025
+2024-08-25 05:41:18,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=18346.666666666668, ans=0.11653333333333332
+2024-08-25 05:41:30,301 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.55 vs. limit=21.3
+2024-08-25 05:41:34,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=18453.333333333332, ans=0.125
+2024-08-25 05:41:45,077 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.47 vs. limit=14.440000000000001
+2024-08-25 05:41:53,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=18506.666666666668, ans=0.125
+2024-08-25 05:42:01,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=18560.0, ans=0.006834782608695652
+2024-08-25 05:42:06,452 INFO [train.py:1114] (0/4) Epoch 2, batch 1000, loss[loss=0.3003, simple_loss=0.3288, pruned_loss=0.09994, ctc_loss=0.1799, over 19845.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3556, pruned_loss=0.1159, ctc_loss=0.218, over 3815946.82 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:42:13,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=18613.333333333332, ans=0.125
+2024-08-25 05:42:37,203 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.59 vs. limit=14.5
+2024-08-25 05:42:41,284 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.839e+02 2.030e+02 2.416e+02 3.488e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-25 05:42:57,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=18773.333333333332, ans=0.125
+2024-08-25 05:43:06,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=18826.666666666668, ans=0.125
+2024-08-25 05:43:16,616 INFO [train.py:1114] (0/4) Epoch 2, batch 1050, loss[loss=0.3204, simple_loss=0.3479, pruned_loss=0.1044, ctc_loss=0.21, over 19838.00 frames. ], tot_loss[loss=0.3359, simple_loss=0.3546, pruned_loss=0.1153, ctc_loss=0.2168, over 3823718.17 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:43:17,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=18880.0, ans=0.23919999999999997
+2024-08-25 05:43:58,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18986.666666666668, ans=0.1101333333333333
+2024-08-25 05:44:00,883 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.47 vs. limit=14.64
+2024-08-25 05:44:05,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=19040.0, ans=0.0
+2024-08-25 05:44:14,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=19093.333333333332, ans=0.23173333333333346
+2024-08-25 05:44:23,169 INFO [train.py:1114] (0/4) Epoch 2, batch 1100, loss[loss=0.3094, simple_loss=0.3335, pruned_loss=0.1026, ctc_loss=0.2003, over 19565.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.3537, pruned_loss=0.1146, ctc_loss=0.2156, over 3831878.62 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:44:40,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19146.666666666668, ans=0.10853333333333334
+2024-08-25 05:44:43,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=19200.0, ans=0.10800000000000001
+2024-08-25 05:44:48,508 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.777e+02 2.009e+02 2.448e+02 3.967e+02, threshold=4.019e+02, percent-clipped=0.0
+2024-08-25 05:44:53,049 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.95 vs. limit=21.939999999999998
+2024-08-25 05:44:56,803 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.24 vs. limit=14.719999999999999
+2024-08-25 05:45:16,285 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19360.0, ans=0.10640000000000002
+2024-08-25 05:45:22,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.29 vs. limit=14.780000000000001
+2024-08-25 05:45:31,302 INFO [train.py:1114] (0/4) Epoch 2, batch 1150, loss[loss=0.3358, simple_loss=0.3517, pruned_loss=0.1159, ctc_loss=0.2204, over 19587.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3536, pruned_loss=0.1144, ctc_loss=0.2148, over 3831567.54 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:45:44,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19466.666666666668, ans=0.10533333333333333
+2024-08-25 05:47:34,827 INFO [train.py:1114] (0/4) Epoch 2, batch 1200, loss[loss=0.336, simple_loss=0.3642, pruned_loss=0.111, ctc_loss=0.2145, over 19835.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3545, pruned_loss=0.115, ctc_loss=0.2155, over 3827106.49 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 05:47:49,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=3.60 vs. limit=14.9
+2024-08-25 05:47:50,316 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.798e+02 2.208e+02 2.852e+02 1.698e+03, threshold=4.415e+02, percent-clipped=3.0
+2024-08-25 05:48:07,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=19786.666666666668, ans=0.125
+2024-08-25 05:48:07,693 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=15.21 vs. limit=14.893333333333334
+2024-08-25 05:48:14,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=19840.0, ans=0.2056
+2024-08-25 05:48:31,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=19893.333333333332, ans=0.125
+2024-08-25 05:48:37,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=19893.333333333332, ans=0.1010666666666667
+2024-08-25 05:48:40,334 INFO [train.py:1114] (0/4) Epoch 2, batch 1250, loss[loss=0.3397, simple_loss=0.3645, pruned_loss=0.1136, ctc_loss=0.2193, over 19537.00 frames. ], tot_loss[loss=0.3337, simple_loss=0.354, pruned_loss=0.114, ctc_loss=0.2135, over 3844123.46 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:48:49,492 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:49:02,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.86 vs. limit=15.0
+2024-08-25 05:49:06,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=20000.0, ans=0.125
+2024-08-25 05:49:08,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=20053.333333333332, ans=0.125
+2024-08-25 05:49:14,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=20053.333333333332, ans=0.125
+2024-08-25 05:49:16,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=20053.333333333332, ans=0.0
+2024-08-25 05:49:21,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=20106.666666666668, ans=0.00649855072463768
+2024-08-25 05:49:28,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=20160.0, ans=0.025
+2024-08-25 05:49:37,278 INFO [train.py:1114] (0/4) Epoch 2, batch 1300, loss[loss=0.3624, simple_loss=0.3833, pruned_loss=0.1221, ctc_loss=0.2434, over 18852.00 frames. ], tot_loss[loss=0.3322, simple_loss=0.3531, pruned_loss=0.1132, ctc_loss=0.2125, over 3845391.14 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:49:40,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=20213.333333333332, ans=0.0
+2024-08-25 05:49:42,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=20213.333333333332, ans=0.125
+2024-08-25 05:49:52,762 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.771e+02 1.898e+02 2.175e+02 3.765e+02, threshold=3.796e+02, percent-clipped=0.0
+2024-08-25 05:49:59,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=20320.0, ans=0.125
+2024-08-25 05:50:00,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=20320.0, ans=0.125
+2024-08-25 05:50:19,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=20426.666666666668, ans=0.125
+2024-08-25 05:50:25,289 INFO [train.py:1114] (0/4) Epoch 2, batch 1350, loss[loss=0.3421, simple_loss=0.3597, pruned_loss=0.1176, ctc_loss=0.2234, over 19753.00 frames. ], tot_loss[loss=0.3306, simple_loss=0.3522, pruned_loss=0.1123, ctc_loss=0.211, over 3856086.37 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:50:47,248 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.65 vs. limit=6.0
+2024-08-25 05:50:55,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=20586.666666666668, ans=0.006394202898550725
+2024-08-25 05:51:19,083 INFO [train.py:1114] (0/4) Epoch 2, batch 1400, loss[loss=0.2681, simple_loss=0.2916, pruned_loss=0.0897, ctc_loss=0.1632, over 19657.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3518, pruned_loss=0.1121, ctc_loss=0.2102, over 3863322.05 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:51:32,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=20800.0, ans=0.125
+2024-08-25 05:51:34,333 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.933e+02 2.205e+02 2.519e+02 3.569e+02, threshold=4.410e+02, percent-clipped=0.0
+2024-08-25 05:51:55,216 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=20906.666666666668, ans=0.2
+2024-08-25 05:51:57,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=20906.666666666668, ans=0.0
+2024-08-25 05:51:59,275 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.64 vs. limit=15.0
+2024-08-25 05:52:00,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=20960.0, ans=0.125
+2024-08-25 05:52:09,361 INFO [train.py:1114] (0/4) Epoch 2, batch 1450, loss[loss=0.3389, simple_loss=0.3675, pruned_loss=0.1122, ctc_loss=0.2145, over 19700.00 frames. ], tot_loss[loss=0.3298, simple_loss=0.352, pruned_loss=0.1119, ctc_loss=0.2097, over 3861891.40 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:52:09,805 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.92 vs. limit=15.0
+2024-08-25 05:52:11,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=21013.333333333332, ans=0.2
+2024-08-25 05:52:18,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=21066.666666666668, ans=0.125
+2024-08-25 05:52:19,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=21066.666666666668, ans=0.025
+2024-08-25 05:52:25,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21066.666666666668, ans=0.1
+2024-08-25 05:52:31,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=21120.0, ans=0.125
+2024-08-25 05:52:43,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=21173.333333333332, ans=0.125
+2024-08-25 05:52:43,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=21173.333333333332, ans=0.0
+2024-08-25 05:52:56,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=21280.0, ans=0.1
+2024-08-25 05:52:56,996 INFO [train.py:1114] (0/4) Epoch 2, batch 1500, loss[loss=0.3165, simple_loss=0.3589, pruned_loss=0.09859, ctc_loss=0.1926, over 19567.00 frames. ], tot_loss[loss=0.3287, simple_loss=0.3517, pruned_loss=0.1112, ctc_loss=0.2082, over 3860597.06 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:53:04,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=21280.0, ans=0.0
+2024-08-25 05:53:05,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=21280.0, ans=0.125
+2024-08-25 05:53:05,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=21280.0, ans=0.125
+2024-08-25 05:53:05,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=21280.0, ans=0.125
+2024-08-25 05:53:06,942 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-4000.pt
+2024-08-25 05:53:17,231 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.832e+02 2.087e+02 2.558e+02 5.212e+02, threshold=4.175e+02, percent-clipped=3.0
+2024-08-25 05:53:24,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=21386.666666666668, ans=0.2
+2024-08-25 05:53:25,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=21386.666666666668, ans=0.125
+2024-08-25 05:53:28,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=21386.666666666668, ans=0.125
+2024-08-25 05:53:42,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=21440.0, ans=0.2
+2024-08-25 05:54:05,948 INFO [train.py:1114] (0/4) Epoch 2, batch 1550, loss[loss=0.3657, simple_loss=0.3812, pruned_loss=0.128, ctc_loss=0.2355, over 19590.00 frames. ], tot_loss[loss=0.3287, simple_loss=0.3516, pruned_loss=0.1113, ctc_loss=0.2081, over 3846009.12 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 05:54:07,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=21546.666666666668, ans=0.125
+2024-08-25 05:54:08,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21546.666666666668, ans=0.1
+2024-08-25 05:54:10,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=21546.666666666668, ans=0.006185507246376811
+2024-08-25 05:54:40,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=21653.333333333332, ans=0.125
+2024-08-25 05:54:41,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=21653.333333333332, ans=0.125
+2024-08-25 05:54:47,215 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=21653.333333333332, ans=0.0
+2024-08-25 05:54:49,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=21706.666666666668, ans=0.125
+2024-08-25 05:54:54,450 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=9.78 vs. limit=12.0
+2024-08-25 05:55:11,750 INFO [train.py:1114] (0/4) Epoch 2, batch 1600, loss[loss=0.3436, simple_loss=0.3632, pruned_loss=0.1192, ctc_loss=0.2143, over 19845.00 frames. ], tot_loss[loss=0.3276, simple_loss=0.3508, pruned_loss=0.1108, ctc_loss=0.2074, over 3836526.19 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 05:55:24,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=21866.666666666668, ans=0.2
+2024-08-25 05:55:32,422 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.812e+02 2.122e+02 2.604e+02 4.336e+02, threshold=4.244e+02, percent-clipped=2.0
+2024-08-25 05:55:37,938 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=21920.0, ans=0.006104347826086956
+2024-08-25 05:55:44,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=21920.0, ans=0.125
+2024-08-25 05:56:00,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=21973.333333333332, ans=0.125
+2024-08-25 05:56:01,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=22026.666666666668, ans=0.125
+2024-08-25 05:56:02,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=22026.666666666668, ans=0.1
+2024-08-25 05:56:13,278 INFO [train.py:1114] (0/4) Epoch 2, batch 1650, loss[loss=0.354, simple_loss=0.3733, pruned_loss=0.1219, ctc_loss=0.2273, over 19663.00 frames. ], tot_loss[loss=0.3274, simple_loss=0.3504, pruned_loss=0.1107, ctc_loss=0.2074, over 3833014.04 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 16.0
+2024-08-25 05:56:13,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22080.0, ans=0.1
+2024-08-25 05:56:28,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=22133.333333333332, ans=0.125
+2024-08-25 05:56:38,438 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=10.69 vs. limit=12.0
+2024-08-25 05:56:39,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=22186.666666666668, ans=0.125
+2024-08-25 05:56:51,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=22240.0, ans=0.125
+2024-08-25 05:56:53,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=22240.0, ans=0.125
+2024-08-25 05:57:05,870 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.96 vs. limit=15.0
+2024-08-25 05:57:12,875 INFO [train.py:1114] (0/4) Epoch 2, batch 1700, loss[loss=0.2708, simple_loss=0.3002, pruned_loss=0.08801, ctc_loss=0.1637, over 19663.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3498, pruned_loss=0.1101, ctc_loss=0.2061, over 3847569.20 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:57:20,210 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=22346.666666666668, ans=0.125
+2024-08-25 05:57:29,330 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.791e+02 2.005e+02 2.338e+02 3.555e+02, threshold=4.010e+02, percent-clipped=0.0
+2024-08-25 05:57:33,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22453.333333333332, ans=0.1
+2024-08-25 05:57:36,493 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.03 vs. limit=15.0
+2024-08-25 05:57:41,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22453.333333333332, ans=0.1
+2024-08-25 05:58:34,068 INFO [train.py:1114] (0/4) Epoch 2, batch 1750, loss[loss=0.2548, simple_loss=0.2962, pruned_loss=0.07721, ctc_loss=0.1474, over 19718.00 frames. ], tot_loss[loss=0.324, simple_loss=0.3481, pruned_loss=0.1091, ctc_loss=0.2043, over 3852085.59 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:58:38,819 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=22613.333333333332, ans=0.2
+2024-08-25 05:58:39,046 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.71 vs. limit=22.5
+2024-08-25 05:58:40,540 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=22613.333333333332, ans=0.1
+2024-08-25 05:58:53,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=22720.0, ans=0.0
+2024-08-25 05:59:18,890 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.51 vs. limit=10.0
+2024-08-25 05:59:24,711 INFO [train.py:1114] (0/4) Epoch 2, batch 1800, loss[loss=0.2974, simple_loss=0.3385, pruned_loss=0.09135, ctc_loss=0.1839, over 19612.00 frames. ], tot_loss[loss=0.3237, simple_loss=0.348, pruned_loss=0.1089, ctc_loss=0.204, over 3853623.10 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 05:59:24,979 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=22880.0, ans=0.2
+2024-08-25 05:59:26,148 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.09 vs. limit=15.0
+2024-08-25 05:59:26,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=22880.0, ans=0.005895652173913043
+2024-08-25 05:59:28,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=22880.0, ans=0.125
+2024-08-25 05:59:39,812 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.812e+02 2.002e+02 2.312e+02 3.839e+02, threshold=4.004e+02, percent-clipped=0.0
+2024-08-25 05:59:56,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=23040.0, ans=0.125
+2024-08-25 06:00:03,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23093.333333333332, ans=0.1
+2024-08-25 06:00:10,991 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.89 vs. limit=15.0
+2024-08-25 06:00:12,413 INFO [train.py:1114] (0/4) Epoch 2, batch 1850, loss[loss=0.3537, simple_loss=0.3712, pruned_loss=0.1229, ctc_loss=0.226, over 19567.00 frames. ], tot_loss[loss=0.3222, simple_loss=0.3469, pruned_loss=0.1082, ctc_loss=0.2027, over 3856613.79 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 06:00:40,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=23306.666666666668, ans=0.125
+2024-08-25 06:00:52,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=23360.0, ans=0.005791304347826087
+2024-08-25 06:00:59,790 INFO [train.py:1114] (0/4) Epoch 2, batch 1900, loss[loss=0.3074, simple_loss=0.3504, pruned_loss=0.09434, ctc_loss=0.1893, over 19660.00 frames. ], tot_loss[loss=0.3224, simple_loss=0.3474, pruned_loss=0.1082, ctc_loss=0.2024, over 3861488.72 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 16.0
+2024-08-25 06:01:10,092 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:01:18,236 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=23466.666666666668, ans=0.125
+2024-08-25 06:01:18,891 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.814e+02 2.067e+02 2.451e+02 4.716e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-25 06:01:38,173 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.10 vs. limit=15.0
+2024-08-25 06:01:51,681 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.58 vs. limit=15.0
+2024-08-25 06:01:52,059 INFO [train.py:1114] (0/4) Epoch 2, batch 1950, loss[loss=0.2969, simple_loss=0.3283, pruned_loss=0.09661, ctc_loss=0.1806, over 19592.00 frames. ], tot_loss[loss=0.3219, simple_loss=0.3478, pruned_loss=0.1077, ctc_loss=0.2015, over 3870074.09 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 16.0
+2024-08-25 06:02:17,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=23786.666666666668, ans=0.125
+2024-08-25 06:02:32,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=23893.333333333332, ans=0.2
+2024-08-25 06:02:34,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=23893.333333333332, ans=0.125
+2024-08-25 06:02:40,712 INFO [train.py:1114] (0/4) Epoch 2, batch 2000, loss[loss=0.3001, simple_loss=0.3203, pruned_loss=0.1019, ctc_loss=0.19, over 19656.00 frames. ], tot_loss[loss=0.324, simple_loss=0.3492, pruned_loss=0.1088, ctc_loss=0.2031, over 3855218.47 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 06:02:46,034 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 06:02:57,863 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.781e+02 1.996e+02 2.377e+02 5.355e+02, threshold=3.992e+02, percent-clipped=1.0
+2024-08-25 06:03:00,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=24053.333333333332, ans=0.125
+2024-08-25 06:03:10,107 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.93 vs. limit=15.0
+2024-08-25 06:03:14,386 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=22.70 vs. limit=15.0
+2024-08-25 06:03:18,716 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=24160.0, ans=10.0
+2024-08-25 06:03:29,343 INFO [train.py:1114] (0/4) Epoch 2, batch 2050, loss[loss=0.2627, simple_loss=0.3001, pruned_loss=0.08168, ctc_loss=0.1547, over 19714.00 frames. ], tot_loss[loss=0.3227, simple_loss=0.3481, pruned_loss=0.1082, ctc_loss=0.2023, over 3852140.17 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:03:49,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=24320.0, ans=0.0
+2024-08-25 06:03:54,502 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.46 vs. limit=15.0
+2024-08-25 06:04:13,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=24480.0, ans=0.125
+2024-08-25 06:04:17,643 INFO [train.py:1114] (0/4) Epoch 2, batch 2100, loss[loss=0.3036, simple_loss=0.3348, pruned_loss=0.09944, ctc_loss=0.1838, over 19792.00 frames. ], tot_loss[loss=0.3198, simple_loss=0.3464, pruned_loss=0.1067, ctc_loss=0.1997, over 3859095.84 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:04:33,037 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.823e+02 2.012e+02 2.259e+02 3.531e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-25 06:04:35,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=24586.666666666668, ans=0.1
+2024-08-25 06:04:54,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=24693.333333333332, ans=0.09899494936611666
+2024-08-25 06:04:54,698 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.07 vs. limit=12.0
+2024-08-25 06:05:02,142 INFO [train.py:1114] (0/4) Epoch 2, batch 2150, loss[loss=0.2863, simple_loss=0.3261, pruned_loss=0.09005, ctc_loss=0.1659, over 19582.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3452, pruned_loss=0.106, ctc_loss=0.1982, over 3869528.29 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 06:05:12,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=24746.666666666668, ans=0.025
+2024-08-25 06:05:15,316 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.52 vs. limit=10.0
+2024-08-25 06:05:20,664 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=10.08 vs. limit=15.0
+2024-08-25 06:05:24,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=24853.333333333332, ans=0.1
+2024-08-25 06:05:29,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 06:05:32,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 06:05:43,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:53,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=24960.0, ans=0.125
+2024-08-25 06:06:00,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=25013.333333333332, ans=0.025
+2024-08-25 06:06:01,433 INFO [train.py:1114] (0/4) Epoch 2, batch 2200, loss[loss=0.3222, simple_loss=0.3465, pruned_loss=0.1085, ctc_loss=0.2023, over 19607.00 frames. ], tot_loss[loss=0.3184, simple_loss=0.3454, pruned_loss=0.106, ctc_loss=0.1983, over 3867725.96 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:06:01,656 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=25013.333333333332, ans=0.125
+2024-08-25 06:06:02,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=25013.333333333332, ans=0.125
+2024-08-25 06:06:16,771 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.98 vs. limit=12.0
+2024-08-25 06:06:17,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25066.666666666668, ans=0.1
+2024-08-25 06:06:25,290 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.924e+02 2.286e+02 2.709e+02 6.222e+02, threshold=4.573e+02, percent-clipped=4.0
+2024-08-25 06:06:31,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=25120.0, ans=0.0
+2024-08-25 06:06:54,566 INFO [train.py:1114] (0/4) Epoch 2, batch 2250, loss[loss=0.3259, simple_loss=0.3514, pruned_loss=0.1098, ctc_loss=0.2023, over 19629.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.345, pruned_loss=0.1059, ctc_loss=0.1976, over 3866861.28 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:07:08,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=25333.333333333332, ans=0.0053623188405797105
+2024-08-25 06:07:17,444 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 06:07:19,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=25440.0, ans=0.025
+2024-08-25 06:07:23,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=25440.0, ans=0.125
+2024-08-25 06:07:32,569 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=25493.333333333332, ans=0.005327536231884058
+2024-08-25 06:07:36,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=25493.333333333332, ans=0.2
+2024-08-25 06:07:41,080 INFO [train.py:1114] (0/4) Epoch 2, batch 2300, loss[loss=0.3008, simple_loss=0.3336, pruned_loss=0.09729, ctc_loss=0.1833, over 19494.00 frames. ], tot_loss[loss=0.3164, simple_loss=0.3438, pruned_loss=0.1051, ctc_loss=0.1967, over 3861320.93 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 32.0
+2024-08-25 06:07:54,781 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.85 vs. limit=15.0
+2024-08-25 06:07:57,410 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.59 vs. limit=6.0
+2024-08-25 06:07:58,726 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 1.775e+02 2.049e+02 2.504e+02 6.120e+02, threshold=4.097e+02, percent-clipped=1.0
+2024-08-25 06:08:06,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=25653.333333333332, ans=0.1
+2024-08-25 06:08:21,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=25760.0, ans=0.1
+2024-08-25 06:08:21,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=25760.0, ans=0.125
+2024-08-25 06:08:29,083 INFO [train.py:1114] (0/4) Epoch 2, batch 2350, loss[loss=0.3273, simple_loss=0.3554, pruned_loss=0.1099, ctc_loss=0.1982, over 19671.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3441, pruned_loss=0.1055, ctc_loss=0.1972, over 3864359.88 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:08:30,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=25813.333333333332, ans=0.0
+2024-08-25 06:08:32,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.23 vs. limit=15.0
+2024-08-25 06:08:34,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=25813.333333333332, ans=0.2
+2024-08-25 06:08:35,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=25813.333333333332, ans=0.125
+2024-08-25 06:08:47,815 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.94 vs. limit=15.0
+2024-08-25 06:09:06,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=25973.333333333332, ans=0.005223188405797102
+2024-08-25 06:09:24,570 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.48 vs. limit=12.0
+2024-08-25 06:09:28,455 INFO [train.py:1114] (0/4) Epoch 2, batch 2400, loss[loss=0.3336, simple_loss=0.3599, pruned_loss=0.1116, ctc_loss=0.2105, over 19337.00 frames. ], tot_loss[loss=0.3197, simple_loss=0.3466, pruned_loss=0.1066, ctc_loss=0.1992, over 3858931.03 frames. ], batch size: 67, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:09:43,432 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.803e+02 2.129e+02 2.459e+02 5.388e+02, threshold=4.257e+02, percent-clipped=1.0
+2024-08-25 06:09:48,431 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.64 vs. limit=22.5
+2024-08-25 06:09:55,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=26240.0, ans=0.0
+2024-08-25 06:09:55,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=26240.0, ans=0.025
+2024-08-25 06:09:56,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=26240.0, ans=0.125
+2024-08-25 06:10:14,652 INFO [train.py:1114] (0/4) Epoch 2, batch 2450, loss[loss=0.4238, simple_loss=0.397, pruned_loss=0.1642, ctc_loss=0.3051, over 13904.00 frames. ], tot_loss[loss=0.3307, simple_loss=0.3528, pruned_loss=0.1124, ctc_loss=0.2096, over 3730699.37 frames. ], batch size: 140, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 06:10:27,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=26400.0, ans=0.125
+2024-08-25 06:10:42,869 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=26453.333333333332, ans=0.125
+2024-08-25 06:10:44,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=26453.333333333332, ans=0.125
+2024-08-25 06:10:52,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=26506.666666666668, ans=0.005107246376811594
+2024-08-25 06:10:57,360 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-2.pt
+2024-08-25 06:11:52,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=26554.666666666668, ans=0.0
+2024-08-25 06:11:53,141 INFO [train.py:1114] (0/4) Epoch 3, batch 0, loss[loss=0.3217, simple_loss=0.3386, pruned_loss=0.1107, ctc_loss=0.2086, over 19791.00 frames. ], tot_loss[loss=0.3217, simple_loss=0.3386, pruned_loss=0.1107, ctc_loss=0.2086, over 19791.00 frames. ], batch size: 49, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 06:11:55,992 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-25 06:12:07,824 INFO [train.py:1146] (0/4) Epoch 3, validation: loss=0.2565, simple_loss=0.3309, pruned_loss=0.06653, ctc_loss=0.1228, over 944034.00 frames.
+2024-08-25 06:12:07,825 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13390MB
+2024-08-25 06:13:57,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=26608.0, ans=0.2
+2024-08-25 06:15:15,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=26661.333333333332, ans=0.0
+2024-08-25 06:15:44,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=26661.333333333332, ans=0.005073623188405798
+2024-08-25 06:16:00,086 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.983e+02 2.286e+02 2.644e+02 3.774e+02, threshold=4.572e+02, percent-clipped=0.0
+2024-08-25 06:17:19,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=26714.666666666668, ans=0.025
+2024-08-25 06:17:56,864 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=26714.666666666668, ans=0.2
+2024-08-25 06:17:57,096 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.69 vs. limit=22.5
+2024-08-25 06:20:02,261 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=26768.0, ans=0.125
+2024-08-25 06:22:59,724 INFO [train.py:1114] (0/4) Epoch 3, batch 50, loss[loss=0.2623, simple_loss=0.3014, pruned_loss=0.08004, ctc_loss=0.1579, over 19698.00 frames. ], tot_loss[loss=0.3225, simple_loss=0.3498, pruned_loss=0.1073, ctc_loss=0.2016, over 845269.70 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:29:44,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=26874.666666666668, ans=0.125
+2024-08-25 06:30:17,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26874.666666666668, ans=0.1
+2024-08-25 06:34:48,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=26874.666666666668, ans=0.025
+2024-08-25 06:36:26,387 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.44 vs. limit=15.0
+2024-08-25 06:40:59,024 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:44:24,660 INFO [train.py:1114] (0/4) Epoch 3, batch 100, loss[loss=0.2831, simple_loss=0.3227, pruned_loss=0.08768, ctc_loss=0.1703, over 19727.00 frames. ], tot_loss[loss=0.3218, simple_loss=0.3489, pruned_loss=0.107, ctc_loss=0.2015, over 1500423.89 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:45:53,312 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.99 vs. limit=15.0
+2024-08-25 06:46:28,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=27088.0, ans=10.0
+2024-08-25 06:47:46,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=27194.666666666668, ans=0.1
+2024-08-25 06:48:15,512 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.744e+02 2.032e+02 2.291e+02 1.205e+03, threshold=4.063e+02, percent-clipped=1.0
+2024-08-25 06:50:11,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=27301.333333333332, ans=0.0
+2024-08-25 06:50:28,567 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.02 vs. limit=15.0
+2024-08-25 06:50:43,563 INFO [train.py:1114] (0/4) Epoch 3, batch 150, loss[loss=0.2808, simple_loss=0.3111, pruned_loss=0.09165, ctc_loss=0.1679, over 19745.00 frames. ], tot_loss[loss=0.3163, simple_loss=0.3447, pruned_loss=0.1046, ctc_loss=0.1965, over 2028606.56 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 06:51:33,899 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.13 vs. limit=12.0
+2024-08-25 06:53:56,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=27514.666666666668, ans=0.0
+2024-08-25 06:54:10,654 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.00 vs. limit=15.0
+2024-08-25 06:54:16,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=27568.0, ans=0.125
+2024-08-25 06:54:31,938 INFO [train.py:1114] (0/4) Epoch 3, batch 200, loss[loss=0.3341, simple_loss=0.3491, pruned_loss=0.1157, ctc_loss=0.2195, over 18269.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3419, pruned_loss=0.1031, ctc_loss=0.1938, over 2435586.34 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:56:00,408 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.731e+02 1.977e+02 2.205e+02 3.305e+02, threshold=3.953e+02, percent-clipped=0.0
+2024-08-25 06:56:10,997 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=27781.333333333332, ans=0.025
+2024-08-25 06:56:34,805 INFO [train.py:1114] (0/4) Epoch 3, batch 250, loss[loss=0.3449, simple_loss=0.3686, pruned_loss=0.1177, ctc_loss=0.2146, over 19362.00 frames. ], tot_loss[loss=0.3122, simple_loss=0.3417, pruned_loss=0.1028, ctc_loss=0.1929, over 2754938.96 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:57:32,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27994.666666666668, ans=0.1
+2024-08-25 06:57:53,083 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.18 vs. limit=15.0
+2024-08-25 07:02:46,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=28048.0, ans=0.025
+2024-08-25 07:02:59,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=28101.333333333332, ans=0.004760579710144928
+2024-08-25 07:03:09,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=28101.333333333332, ans=0.004760579710144928
+2024-08-25 07:03:09,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.74 vs. limit=15.0
+2024-08-25 07:03:24,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=28101.333333333332, ans=0.2
+2024-08-25 07:03:29,189 INFO [train.py:1114] (0/4) Epoch 3, batch 300, loss[loss=0.3133, simple_loss=0.3538, pruned_loss=0.09952, ctc_loss=0.1844, over 19553.00 frames. ], tot_loss[loss=0.3102, simple_loss=0.3403, pruned_loss=0.1018, ctc_loss=0.1912, over 2999555.88 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:03:53,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 07:04:14,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=28208.0, ans=0.0
+2024-08-25 07:04:33,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 07:04:34,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 07:04:44,389 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.724e+02 1.968e+02 2.265e+02 3.417e+02, threshold=3.936e+02, percent-clipped=0.0
+2024-08-25 07:05:04,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=28314.666666666668, ans=0.04949747468305833
+2024-08-25 07:05:35,060 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.25 vs. limit=10.0
+2024-08-25 07:05:49,888 INFO [train.py:1114] (0/4) Epoch 3, batch 350, loss[loss=0.2795, simple_loss=0.3131, pruned_loss=0.0893, ctc_loss=0.1684, over 19749.00 frames. ], tot_loss[loss=0.3109, simple_loss=0.341, pruned_loss=0.1021, ctc_loss=0.1917, over 3189804.67 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:05:57,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=28421.333333333332, ans=0.1
+2024-08-25 07:06:09,921 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.98 vs. limit=15.0
+2024-08-25 07:06:19,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=28421.333333333332, ans=0.025
+2024-08-25 07:07:40,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=28581.333333333332, ans=0.125
+2024-08-25 07:07:45,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=28581.333333333332, ans=0.004656231884057971
+2024-08-25 07:07:47,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=28634.666666666668, ans=0.125
+2024-08-25 07:08:01,167 INFO [train.py:1114] (0/4) Epoch 3, batch 400, loss[loss=0.3132, simple_loss=0.3455, pruned_loss=0.1033, ctc_loss=0.1858, over 19501.00 frames. ], tot_loss[loss=0.31, simple_loss=0.3405, pruned_loss=0.1017, ctc_loss=0.1907, over 3342807.24 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 07:08:24,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=28794.666666666668, ans=0.0
+2024-08-25 07:08:42,256 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.762e+02 1.982e+02 2.336e+02 5.420e+02, threshold=3.963e+02, percent-clipped=2.0
+2024-08-25 07:08:46,335 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28848.0, ans=0.1
+2024-08-25 07:09:04,146 INFO [train.py:1114] (0/4) Epoch 3, batch 450, loss[loss=0.2989, simple_loss=0.3439, pruned_loss=0.09183, ctc_loss=0.1757, over 19610.00 frames. ], tot_loss[loss=0.3097, simple_loss=0.3402, pruned_loss=0.1015, ctc_loss=0.1903, over 3452268.07 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:09:13,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=28954.666666666668, ans=0.0
+2024-08-25 07:09:37,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=29114.666666666668, ans=0.125
+2024-08-25 07:09:39,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=29114.666666666668, ans=0.025
+2024-08-25 07:09:56,833 INFO [train.py:1114] (0/4) Epoch 3, batch 500, loss[loss=0.3087, simple_loss=0.3454, pruned_loss=0.09894, ctc_loss=0.1852, over 19636.00 frames. ], tot_loss[loss=0.3081, simple_loss=0.339, pruned_loss=0.1008, ctc_loss=0.189, over 3547304.73 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:10:20,462 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.72 vs. limit=15.0
+2024-08-25 07:10:28,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=29274.666666666668, ans=0.1
+2024-08-25 07:10:34,908 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=6.75 vs. limit=15.0
+2024-08-25 07:10:43,381 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.753e+02 1.966e+02 2.327e+02 4.047e+02, threshold=3.932e+02, percent-clipped=2.0
+2024-08-25 07:11:10,798 INFO [train.py:1114] (0/4) Epoch 3, batch 550, loss[loss=0.3242, simple_loss=0.3526, pruned_loss=0.1073, ctc_loss=0.2032, over 19227.00 frames. ], tot_loss[loss=0.3085, simple_loss=0.3393, pruned_loss=0.101, ctc_loss=0.1892, over 3608971.33 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:11:51,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 07:11:55,662 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 07:12:37,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=29648.0, ans=0.0
+2024-08-25 07:12:45,723 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=29701.333333333332, ans=0.2
+2024-08-25 07:12:46,934 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.34 vs. limit=15.0
+2024-08-25 07:12:53,133 INFO [train.py:1114] (0/4) Epoch 3, batch 600, loss[loss=0.314, simple_loss=0.3532, pruned_loss=0.1006, ctc_loss=0.1844, over 19344.00 frames. ], tot_loss[loss=0.3078, simple_loss=0.339, pruned_loss=0.1006, ctc_loss=0.1885, over 3665528.88 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:12:59,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29754.666666666668, ans=0.1
+2024-08-25 07:13:37,728 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.812e+02 2.009e+02 2.360e+02 5.731e+02, threshold=4.017e+02, percent-clipped=3.0
+2024-08-25 07:13:46,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=29968.0, ans=0.0
+2024-08-25 07:13:54,207 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=29968.0, ans=0.2
+2024-08-25 07:13:57,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=29968.0, ans=0.025
+2024-08-25 07:14:00,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=29968.0, ans=0.004354782608695653
+2024-08-25 07:14:02,714 INFO [train.py:1114] (0/4) Epoch 3, batch 650, loss[loss=0.2891, simple_loss=0.3351, pruned_loss=0.08916, ctc_loss=0.1619, over 19787.00 frames. ], tot_loss[loss=0.3062, simple_loss=0.3378, pruned_loss=0.09991, ctc_loss=0.1872, over 3715665.21 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 16.0
+2024-08-25 07:14:13,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=30074.666666666668, ans=0.07
+2024-08-25 07:14:14,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 07:14:20,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=30074.666666666668, ans=0.025
+2024-08-25 07:14:21,057 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 07:14:34,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=30181.333333333332, ans=0.0043084057971014495
+2024-08-25 07:14:45,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=30234.666666666668, ans=10.0
+2024-08-25 07:14:45,919 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.16 vs. limit=15.0
+2024-08-25 07:14:55,117 INFO [train.py:1114] (0/4) Epoch 3, batch 700, loss[loss=0.2874, simple_loss=0.3224, pruned_loss=0.09123, ctc_loss=0.1747, over 19730.00 frames. ], tot_loss[loss=0.3057, simple_loss=0.3375, pruned_loss=0.09964, ctc_loss=0.1864, over 3746817.77 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:15:03,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=30288.0, ans=0.004285217391304348
+2024-08-25 07:15:07,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=30341.333333333332, ans=0.0
+2024-08-25 07:15:28,451 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.846e+02 1.998e+02 2.505e+02 9.071e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-25 07:15:29,036 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.98 vs. limit=15.0
+2024-08-25 07:15:35,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30448.0, ans=0.125
+2024-08-25 07:15:38,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=30448.0, ans=0.2
+2024-08-25 07:15:43,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30501.333333333332, ans=0.125
+2024-08-25 07:15:48,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=30501.333333333332, ans=0.125
+2024-08-25 07:15:56,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=30554.666666666668, ans=0.125
+2024-08-25 07:15:58,569 INFO [train.py:1114] (0/4) Epoch 3, batch 750, loss[loss=0.2947, simple_loss=0.3356, pruned_loss=0.09184, ctc_loss=0.1754, over 19487.00 frames. ], tot_loss[loss=0.3047, simple_loss=0.3368, pruned_loss=0.09926, ctc_loss=0.1854, over 3772901.44 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:16:07,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30554.666666666668, ans=0.1
+2024-08-25 07:16:26,898 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30608.0, ans=0.1
+2024-08-25 07:16:30,594 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30661.333333333332, ans=0.1
+2024-08-25 07:16:38,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=30661.333333333332, ans=0.2
+2024-08-25 07:17:00,714 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=30714.666666666668, ans=0.025
+2024-08-25 07:24:36,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=30714.666666666668, ans=0.0
+2024-08-25 07:34:01,396 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.07 vs. limit=15.0
+2024-08-25 07:34:42,523 INFO [train.py:1114] (0/4) Epoch 3, batch 800, loss[loss=0.2705, simple_loss=0.309, pruned_loss=0.085, ctc_loss=0.1551, over 19821.00 frames. ], tot_loss[loss=0.3041, simple_loss=0.3363, pruned_loss=0.09898, ctc_loss=0.1849, over 3794536.37 frames. ], batch size: 49, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 07:40:57,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30821.333333333332, ans=0.1
+2024-08-25 08:00:17,471 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.88 vs. limit=22.5
+2024-08-25 08:02:40,818 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.761e+02 1.928e+02 2.233e+02 3.899e+02, threshold=3.856e+02, percent-clipped=0.0
+2024-08-25 08:12:12,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.21 vs. limit=10.0
+2024-08-25 08:12:59,502 INFO [train.py:1114] (0/4) Epoch 3, batch 850, loss[loss=0.3265, simple_loss=0.3565, pruned_loss=0.1074, ctc_loss=0.2041, over 19628.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3367, pruned_loss=0.09917, ctc_loss=0.1852, over 3815463.35 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 08:14:59,482 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=9.93 vs. limit=15.0
+2024-08-25 08:23:27,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=31141.333333333332, ans=0.2
+2024-08-25 08:25:54,625 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.59 vs. limit=15.0
+2024-08-25 08:43:00,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31301.333333333332, ans=0.1
+2024-08-25 08:44:41,568 INFO [train.py:1114] (0/4) Epoch 3, batch 900, loss[loss=0.2716, simple_loss=0.3047, pruned_loss=0.08591, ctc_loss=0.1668, over 19827.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.3374, pruned_loss=0.09958, ctc_loss=0.1859, over 3819825.72 frames. ], batch size: 49, lr: 3.72e-02, grad_scale: 32.0
+2024-08-25 08:50:42,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=31408.0, ans=0.95
+2024-08-25 08:52:09,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=31461.333333333332, ans=0.125
+2024-08-25 08:57:05,263 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.64 vs. limit=10.0
+2024-08-25 08:57:54,732 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.748e+02 1.945e+02 2.250e+02 3.446e+02, threshold=3.889e+02, percent-clipped=0.0
+2024-08-25 08:59:29,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=31514.666666666668, ans=0.125
+2024-08-25 09:02:10,704 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=31568.0, ans=0.0
+2024-08-25 09:04:40,801 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.61 vs. limit=15.0
+2024-08-25 09:05:01,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=31568.0, ans=0.0
+2024-08-25 09:05:03,363 INFO [train.py:1114] (0/4) Epoch 3, batch 950, loss[loss=0.2728, simple_loss=0.3092, pruned_loss=0.0858, ctc_loss=0.1619, over 19499.00 frames. ], tot_loss[loss=0.3065, simple_loss=0.338, pruned_loss=0.1001, ctc_loss=0.187, over 3821606.58 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 32.0
+2024-08-25 09:07:07,750 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.65 vs. limit=15.0
+2024-08-25 09:16:00,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=31728.0, ans=0.125
+2024-08-25 09:22:20,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=31888.0, ans=0.1
+2024-08-25 09:23:03,904 INFO [train.py:1114] (0/4) Epoch 3, batch 1000, loss[loss=0.2564, simple_loss=0.3079, pruned_loss=0.07392, ctc_loss=0.1427, over 19845.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3388, pruned_loss=0.1004, ctc_loss=0.1874, over 3817304.87 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 16.0
+2024-08-25 09:27:50,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=31994.666666666668, ans=0.125
+2024-08-25 09:28:02,887 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=31994.666666666668, ans=0.0
+2024-08-25 09:29:07,850 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.873e+02 2.237e+02 2.628e+02 7.664e+02, threshold=4.475e+02, percent-clipped=6.0
+2024-08-25 09:29:19,256 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.51 vs. limit=15.0
+2024-08-25 09:29:57,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=32048.0, ans=0.003902608695652174
+2024-08-25 09:32:27,621 INFO [train.py:1114] (0/4) Epoch 3, batch 1050, loss[loss=0.323, simple_loss=0.3641, pruned_loss=0.1035, ctc_loss=0.1874, over 19840.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.3379, pruned_loss=0.09998, ctc_loss=0.1867, over 3823854.97 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:32:33,232 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 09:33:25,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=32208.0, ans=0.0
+2024-08-25 09:34:01,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=32208.0, ans=0.0
+2024-08-25 09:34:20,317 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.32 vs. limit=22.5
+2024-08-25 09:35:45,576 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.43 vs. limit=15.0
+2024-08-25 09:36:22,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=32314.666666666668, ans=0.0
+2024-08-25 09:36:30,774 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=32314.666666666668, ans=0.1
+2024-08-25 09:39:11,497 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.66 vs. limit=15.0
+2024-08-25 09:39:16,426 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=32368.0, ans=0.125
+2024-08-25 09:41:10,314 INFO [train.py:1114] (0/4) Epoch 3, batch 1100, loss[loss=0.2928, simple_loss=0.3321, pruned_loss=0.0918, ctc_loss=0.1749, over 19578.00 frames. ], tot_loss[loss=0.3051, simple_loss=0.3373, pruned_loss=0.09936, ctc_loss=0.1854, over 3830357.79 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:42:26,287 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.90 vs. limit=15.0
+2024-08-25 09:42:45,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=32528.0, ans=0.2
+2024-08-25 09:43:12,039 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.45 vs. limit=10.0
+2024-08-25 09:43:23,046 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.681e+02 1.943e+02 2.357e+02 4.515e+02, threshold=3.887e+02, percent-clipped=1.0
+2024-08-25 09:45:15,747 INFO [train.py:1114] (0/4) Epoch 3, batch 1150, loss[loss=0.2711, simple_loss=0.311, pruned_loss=0.08428, ctc_loss=0.1569, over 19566.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3366, pruned_loss=0.09924, ctc_loss=0.1851, over 3828597.53 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 16.0
+2024-08-25 09:54:01,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=32794.666666666664, ans=0.125
+2024-08-25 09:55:14,576 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.40 vs. limit=15.0
+2024-08-25 09:55:29,637 INFO [train.py:1114] (0/4) Epoch 3, batch 1200, loss[loss=0.3001, simple_loss=0.3471, pruned_loss=0.09167, ctc_loss=0.1742, over 19832.00 frames. ], tot_loss[loss=0.3058, simple_loss=0.3378, pruned_loss=0.09971, ctc_loss=0.186, over 3824323.82 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:56:10,350 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.93 vs. limit=12.0
+2024-08-25 09:56:16,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.92 vs. limit=12.0
+2024-08-25 09:56:30,794 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.95 vs. limit=10.0
+2024-08-25 09:56:31,119 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.705e+02 1.941e+02 2.201e+02 4.168e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-25 09:56:48,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=33114.666666666664, ans=0.125
+2024-08-25 09:57:41,496 INFO [train.py:1114] (0/4) Epoch 3, batch 1250, loss[loss=0.3241, simple_loss=0.3536, pruned_loss=0.1068, ctc_loss=0.2024, over 19515.00 frames. ], tot_loss[loss=0.3049, simple_loss=0.3376, pruned_loss=0.09911, ctc_loss=0.1848, over 3843084.15 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:58:01,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=33274.666666666664, ans=0.125
+2024-08-25 09:58:33,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=33328.0, ans=0.1
+2024-08-25 09:59:04,137 INFO [train.py:1114] (0/4) Epoch 3, batch 1300, loss[loss=0.3129, simple_loss=0.343, pruned_loss=0.1021, ctc_loss=0.1968, over 18759.00 frames. ], tot_loss[loss=0.3025, simple_loss=0.3359, pruned_loss=0.09801, ctc_loss=0.1828, over 3847195.98 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 09:59:28,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=33541.333333333336, ans=10.0
+2024-08-25 09:59:48,218 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.674e+02 1.887e+02 2.172e+02 3.368e+02, threshold=3.774e+02, percent-clipped=0.0
+2024-08-25 09:59:57,782 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33648.0, ans=0.1
+2024-08-25 09:59:59,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=33648.0, ans=0.125
+2024-08-25 10:00:05,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33701.333333333336, ans=0.1
+2024-08-25 10:00:09,802 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.44 vs. limit=22.5
+2024-08-25 10:00:22,481 INFO [train.py:1114] (0/4) Epoch 3, batch 1350, loss[loss=0.2958, simple_loss=0.3326, pruned_loss=0.09338, ctc_loss=0.1807, over 19761.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3354, pruned_loss=0.09743, ctc_loss=0.1823, over 3857821.88 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 10:00:22,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33754.666666666664, ans=0.1
+2024-08-25 10:00:33,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=33754.666666666664, ans=0.0
+2024-08-25 10:00:50,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=33808.0, ans=0.0
+2024-08-25 10:00:52,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=33808.0, ans=0.125
+2024-08-25 10:00:52,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=33808.0, ans=0.125
+2024-08-25 10:00:52,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=33808.0, ans=0.0
+2024-08-25 10:00:58,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=33808.0, ans=0.0
+2024-08-25 10:01:13,398 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.73 vs. limit=22.5
+2024-08-25 10:01:20,703 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.17 vs. limit=12.0
+2024-08-25 10:01:33,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=33914.666666666664, ans=0.0
+2024-08-25 10:01:59,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.65 vs. limit=15.0
+2024-08-25 10:02:01,554 INFO [train.py:1114] (0/4) Epoch 3, batch 1400, loss[loss=0.2731, simple_loss=0.3031, pruned_loss=0.08862, ctc_loss=0.1644, over 19694.00 frames. ], tot_loss[loss=0.3007, simple_loss=0.3349, pruned_loss=0.09697, ctc_loss=0.1814, over 3864952.25 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 32.0
+2024-08-25 10:02:21,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=34074.666666666664, ans=0.2
+2024-08-25 10:02:32,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=34128.0, ans=0.125
+2024-08-25 10:02:33,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=34128.0, ans=0.0034504347826086953
+2024-08-25 10:02:45,296 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.896e+02 2.159e+02 2.528e+02 3.857e+02, threshold=4.318e+02, percent-clipped=1.0
+2024-08-25 10:03:04,688 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=34234.666666666664, ans=0.125
+2024-08-25 10:03:12,603 INFO [train.py:1114] (0/4) Epoch 3, batch 1450, loss[loss=0.3314, simple_loss=0.3642, pruned_loss=0.1087, ctc_loss=0.2032, over 19649.00 frames. ], tot_loss[loss=0.3015, simple_loss=0.3355, pruned_loss=0.09736, ctc_loss=0.182, over 3863352.84 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:03:33,253 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.49 vs. limit=15.0
+2024-08-25 10:03:33,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=34341.333333333336, ans=0.0
+2024-08-25 10:04:06,650 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.16 vs. limit=12.0
+2024-08-25 10:04:13,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=34501.333333333336, ans=0.125
+2024-08-25 10:04:21,619 INFO [train.py:1114] (0/4) Epoch 3, batch 1500, loss[loss=0.3255, simple_loss=0.3626, pruned_loss=0.1044, ctc_loss=0.199, over 19595.00 frames. ], tot_loss[loss=0.302, simple_loss=0.336, pruned_loss=0.09755, ctc_loss=0.1824, over 3863083.24 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:04:29,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=34554.666666666664, ans=0.2
+2024-08-25 10:04:31,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=34554.666666666664, ans=0.125
+2024-08-25 10:04:44,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=34661.333333333336, ans=0.125
+2024-08-25 10:04:51,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=34661.333333333336, ans=0.0
+2024-08-25 10:04:56,926 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=34714.666666666664, ans=0.125
+2024-08-25 10:05:09,914 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.778e+02 1.971e+02 2.353e+02 5.678e+02, threshold=3.941e+02, percent-clipped=1.0
+2024-08-25 10:05:10,193 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34714.666666666664, ans=0.1
+2024-08-25 10:05:11,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=34714.666666666664, ans=0.2
+2024-08-25 10:05:25,643 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.07 vs. limit=22.5
+2024-08-25 10:05:26,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=34768.0, ans=0.1
+2024-08-25 10:05:29,603 INFO [train.py:1114] (0/4) Epoch 3, batch 1550, loss[loss=0.3537, simple_loss=0.3763, pruned_loss=0.1231, ctc_loss=0.2122, over 19626.00 frames. ], tot_loss[loss=0.303, simple_loss=0.3363, pruned_loss=0.09822, ctc_loss=0.1833, over 3846218.17 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:05:36,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=34821.333333333336, ans=0.0
+2024-08-25 10:05:36,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=34821.333333333336, ans=0.2
+2024-08-25 10:05:44,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten.whitening_limit, batch_count=34874.666666666664, ans=15.0
+2024-08-25 10:05:58,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=34928.0, ans=0.003276521739130434
+2024-08-25 10:06:07,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=34981.333333333336, ans=0.125
+2024-08-25 10:06:09,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=34981.333333333336, ans=0.125
+2024-08-25 10:06:42,354 INFO [train.py:1114] (0/4) Epoch 3, batch 1600, loss[loss=0.2926, simple_loss=0.3349, pruned_loss=0.09226, ctc_loss=0.1647, over 19836.00 frames. ], tot_loss[loss=0.3022, simple_loss=0.3356, pruned_loss=0.09786, ctc_loss=0.1828, over 3835008.45 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:07:01,892 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=35088.0, ans=0.003241739130434783
+2024-08-25 10:07:28,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=35194.666666666664, ans=0.125
+2024-08-25 10:07:47,938 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 1.752e+02 2.032e+02 2.338e+02 4.104e+02, threshold=4.064e+02, percent-clipped=1.0
+2024-08-25 10:07:52,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35248.0, ans=0.1
+2024-08-25 10:08:06,823 INFO [train.py:1114] (0/4) Epoch 3, batch 1650, loss[loss=0.353, simple_loss=0.3775, pruned_loss=0.1186, ctc_loss=0.2279, over 19661.00 frames. ], tot_loss[loss=0.3023, simple_loss=0.3356, pruned_loss=0.09794, ctc_loss=0.1828, over 3831787.34 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 10:08:06,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=35354.666666666664, ans=0.0031837681159420303
+2024-08-25 10:08:11,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=35354.666666666664, ans=0.125
+2024-08-25 10:08:21,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=35408.0, ans=0.1
+2024-08-25 10:08:21,826 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=35408.0, ans=0.1
+2024-08-25 10:08:41,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=35461.333333333336, ans=0.2
+2024-08-25 10:08:48,059 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.40 vs. limit=15.0
+2024-08-25 10:08:52,022 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.39 vs. limit=15.0
+2024-08-25 10:08:53,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=35514.666666666664, ans=0.015
+2024-08-25 10:08:59,522 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=35568.0, ans=0.0031373913043478262
+2024-08-25 10:09:04,856 INFO [train.py:1114] (0/4) Epoch 3, batch 1700, loss[loss=0.2327, simple_loss=0.2763, pruned_loss=0.06846, ctc_loss=0.1302, over 19671.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.3348, pruned_loss=0.09713, ctc_loss=0.1814, over 3846193.75 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:09:11,485 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:09:22,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=35728.0, ans=0.125
+2024-08-25 10:09:52,811 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.835e+02 2.022e+02 2.484e+02 3.793e+02, threshold=4.043e+02, percent-clipped=0.0
+2024-08-25 10:10:09,480 INFO [train.py:1114] (0/4) Epoch 3, batch 1750, loss[loss=0.2521, simple_loss=0.2845, pruned_loss=0.08083, ctc_loss=0.1451, over 19628.00 frames. ], tot_loss[loss=0.3, simple_loss=0.3341, pruned_loss=0.09683, ctc_loss=0.1807, over 3851012.17 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:10:16,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35888.0, ans=0.1
+2024-08-25 10:10:28,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=35941.333333333336, ans=0.1
+2024-08-25 10:10:39,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35994.666666666664, ans=0.1
+2024-08-25 10:10:42,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=35994.666666666664, ans=0.025
+2024-08-25 10:10:48,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35994.666666666664, ans=0.1
+2024-08-25 10:11:09,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=36101.333333333336, ans=0.003021449275362318
+2024-08-25 10:11:20,666 INFO [train.py:1114] (0/4) Epoch 3, batch 1800, loss[loss=0.2989, simple_loss=0.343, pruned_loss=0.09149, ctc_loss=0.1793, over 19624.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3342, pruned_loss=0.09663, ctc_loss=0.1804, over 3852942.62 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:11:36,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=36208.0, ans=0.0
+2024-08-25 10:11:45,472 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.79 vs. limit=15.0
+2024-08-25 10:11:45,506 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.01 vs. limit=15.0
+2024-08-25 10:11:52,938 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.758e+02 2.042e+02 2.396e+02 4.902e+02, threshold=4.083e+02, percent-clipped=1.0
+2024-08-25 10:11:57,842 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.10 vs. limit=15.0
+2024-08-25 10:12:29,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=36421.333333333336, ans=0.1
+2024-08-25 10:12:33,952 INFO [train.py:1114] (0/4) Epoch 3, batch 1850, loss[loss=0.2964, simple_loss=0.3395, pruned_loss=0.09167, ctc_loss=0.175, over 19590.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.3326, pruned_loss=0.09532, ctc_loss=0.1784, over 3857160.26 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:12:53,515 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.78 vs. limit=15.0
+2024-08-25 10:12:57,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.43 vs. limit=10.0
+2024-08-25 10:13:18,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=36581.333333333336, ans=0.125
+2024-08-25 10:13:23,180 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=36634.666666666664, ans=15.0
+2024-08-25 10:13:31,596 INFO [train.py:1114] (0/4) Epoch 3, batch 1900, loss[loss=0.3181, simple_loss=0.3545, pruned_loss=0.1023, ctc_loss=0.193, over 19648.00 frames. ], tot_loss[loss=0.2976, simple_loss=0.3331, pruned_loss=0.09537, ctc_loss=0.1782, over 3860676.35 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 32.0
+2024-08-25 10:14:26,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=36794.666666666664, ans=0.0028707246376811595
+2024-08-25 10:14:29,194 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.725e+02 1.920e+02 2.285e+02 4.448e+02, threshold=3.841e+02, percent-clipped=1.0
+2024-08-25 10:14:34,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=36848.0, ans=0.125
+2024-08-25 10:14:35,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=36848.0, ans=0.125
+2024-08-25 10:14:47,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36901.333333333336, ans=0.1
+2024-08-25 10:14:52,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=36954.666666666664, ans=0.125
+2024-08-25 10:14:54,742 INFO [train.py:1114] (0/4) Epoch 3, batch 1950, loss[loss=0.2779, simple_loss=0.3186, pruned_loss=0.08505, ctc_loss=0.1676, over 19579.00 frames. ], tot_loss[loss=0.299, simple_loss=0.3348, pruned_loss=0.09583, ctc_loss=0.1792, over 3869319.91 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:15:04,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=36954.666666666664, ans=22.5
+2024-08-25 10:15:08,928 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.49 vs. limit=10.0
+2024-08-25 10:15:14,306 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=37008.0, ans=0.1
+2024-08-25 10:15:17,815 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:15:24,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=37061.333333333336, ans=0.04949747468305833
+2024-08-25 10:15:36,331 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.11 vs. limit=15.0
+2024-08-25 10:15:37,883 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.28 vs. limit=15.0
+2024-08-25 10:15:44,713 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.55 vs. limit=15.0
+2024-08-25 10:15:45,366 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.46 vs. limit=15.0
+2024-08-25 10:15:51,814 INFO [train.py:1114] (0/4) Epoch 3, batch 2000, loss[loss=0.2604, simple_loss=0.2968, pruned_loss=0.08065, ctc_loss=0.1569, over 19645.00 frames. ], tot_loss[loss=0.3, simple_loss=0.3354, pruned_loss=0.09635, ctc_loss=0.1799, over 3854838.15 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:16:10,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=37328.0, ans=0.0
+2024-08-25 10:16:19,093 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.904e+02 2.146e+02 2.566e+02 5.347e+02, threshold=4.293e+02, percent-clipped=2.0
+2024-08-25 10:16:44,027 INFO [train.py:1114] (0/4) Epoch 3, batch 2050, loss[loss=0.2776, simple_loss=0.3094, pruned_loss=0.08873, ctc_loss=0.1708, over 19718.00 frames. ], tot_loss[loss=0.2994, simple_loss=0.3343, pruned_loss=0.09627, ctc_loss=0.1798, over 3852097.82 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 32.0
+2024-08-25 10:16:44,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37488.0, ans=0.125
+2024-08-25 10:16:47,707 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=37488.0, ans=0.025
+2024-08-25 10:17:00,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=37488.0, ans=0.025
+2024-08-25 10:17:21,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=37594.666666666664, ans=0.0
+2024-08-25 10:17:24,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=37594.666666666664, ans=10.0
+2024-08-25 10:17:44,496 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.66 vs. limit=15.0
+2024-08-25 10:17:56,062 INFO [train.py:1114] (0/4) Epoch 3, batch 2100, loss[loss=0.2765, simple_loss=0.3209, pruned_loss=0.08313, ctc_loss=0.1646, over 19755.00 frames. ], tot_loss[loss=0.2984, simple_loss=0.3337, pruned_loss=0.09576, ctc_loss=0.1789, over 3858773.03 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 10:18:06,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=37754.666666666664, ans=0.125
+2024-08-25 10:18:18,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=37754.666666666664, ans=0.2
+2024-08-25 10:18:39,861 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=37808.0, ans=0.04949747468305833
+2024-08-25 10:18:46,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=37808.0, ans=0.0
+2024-08-25 10:18:50,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=37808.0, ans=0.125
+2024-08-25 10:19:08,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=37861.333333333336, ans=0.125
+2024-08-25 10:19:20,785 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.776e+02 1.971e+02 2.246e+02 3.814e+02, threshold=3.941e+02, percent-clipped=0.0
+2024-08-25 10:20:09,497 INFO [train.py:1114] (0/4) Epoch 3, batch 2150, loss[loss=0.258, simple_loss=0.3053, pruned_loss=0.07688, ctc_loss=0.1424, over 19588.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.3324, pruned_loss=0.09501, ctc_loss=0.1772, over 3869168.56 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 10:20:12,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=38021.333333333336, ans=0.125
+2024-08-25 10:20:14,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.55 vs. limit=22.5
+2024-08-25 10:20:50,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38128.0, ans=0.1
+2024-08-25 10:20:51,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=38181.333333333336, ans=0.125
+2024-08-25 10:21:10,828 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.56 vs. limit=15.0
+2024-08-25 10:21:11,134 INFO [train.py:1114] (0/4) Epoch 3, batch 2200, loss[loss=0.3221, simple_loss=0.349, pruned_loss=0.1067, ctc_loss=0.2045, over 19591.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3324, pruned_loss=0.09513, ctc_loss=0.1775, over 3867395.74 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:21:18,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=38288.0, ans=0.0
+2024-08-25 10:21:29,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=38341.333333333336, ans=0.125
+2024-08-25 10:21:30,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=38341.333333333336, ans=0.125
+2024-08-25 10:21:32,206 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.89 vs. limit=6.0
+2024-08-25 10:21:51,396 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:21:56,469 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.750e+02 1.922e+02 2.212e+02 3.187e+02, threshold=3.844e+02, percent-clipped=0.0
+2024-08-25 10:22:09,415 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=38448.0, ans=0.125
+2024-08-25 10:22:21,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38501.333333333336, ans=0.1
+2024-08-25 10:22:28,987 INFO [train.py:1114] (0/4) Epoch 3, batch 2250, loss[loss=0.294, simple_loss=0.3363, pruned_loss=0.09124, ctc_loss=0.173, over 19631.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3324, pruned_loss=0.09505, ctc_loss=0.1771, over 3866860.08 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:22:30,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=38554.666666666664, ans=0.125
+2024-08-25 10:22:55,039 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=38661.333333333336, ans=0.1
+2024-08-25 10:23:01,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=38661.333333333336, ans=0.1
+2024-08-25 10:23:12,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=38714.666666666664, ans=0.125
+2024-08-25 10:23:40,907 INFO [train.py:1114] (0/4) Epoch 3, batch 2300, loss[loss=0.2506, simple_loss=0.295, pruned_loss=0.07343, ctc_loss=0.1485, over 19515.00 frames. ], tot_loss[loss=0.2964, simple_loss=0.3317, pruned_loss=0.09512, ctc_loss=0.1773, over 3860470.99 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:23:42,168 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.90 vs. limit=15.0
+2024-08-25 10:23:52,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=38874.666666666664, ans=0.125
+2024-08-25 10:23:53,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=38874.666666666664, ans=0.125
+2024-08-25 10:23:58,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38928.0, ans=0.1
+2024-08-25 10:24:00,798 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.05 vs. limit=22.5
+2024-08-25 10:24:10,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=38928.0, ans=0.0
+2024-08-25 10:24:12,494 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.13 vs. limit=10.0
+2024-08-25 10:24:13,778 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.820e+02 2.030e+02 2.354e+02 3.970e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-25 10:24:22,688 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=39034.666666666664, ans=15.0
+2024-08-25 10:24:48,808 INFO [train.py:1114] (0/4) Epoch 3, batch 2350, loss[loss=0.3057, simple_loss=0.3386, pruned_loss=0.09911, ctc_loss=0.1865, over 19684.00 frames. ], tot_loss[loss=0.2957, simple_loss=0.3312, pruned_loss=0.09471, ctc_loss=0.1767, over 3863948.62 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:25:00,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=39088.0, ans=0.09899494936611666
+2024-08-25 10:25:06,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=39141.333333333336, ans=0.0
+2024-08-25 10:25:10,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=39141.333333333336, ans=0.125
+2024-08-25 10:25:15,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=39194.666666666664, ans=0.0023489855072463773
+2024-08-25 10:25:19,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=39194.666666666664, ans=0.2
+2024-08-25 10:25:21,714 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.80 vs. limit=15.0
+2024-08-25 10:25:49,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=39301.333333333336, ans=0.0023257971014492744
+2024-08-25 10:25:52,378 INFO [train.py:1114] (0/4) Epoch 3, batch 2400, loss[loss=0.3205, simple_loss=0.3554, pruned_loss=0.1028, ctc_loss=0.1998, over 19526.00 frames. ], tot_loss[loss=0.2988, simple_loss=0.3338, pruned_loss=0.09602, ctc_loss=0.1791, over 3858826.04 frames. ], batch size: 67, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 10:26:03,966 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.81 vs. limit=10.0
+2024-08-25 10:26:04,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=39408.0, ans=0.1
+2024-08-25 10:26:04,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=39408.0, ans=0.125
+2024-08-25 10:26:16,181 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:26:39,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=39514.666666666664, ans=0.125
+2024-08-25 10:26:40,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=9.87 vs. limit=15.0
+2024-08-25 10:26:41,528 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.777e+02 2.047e+02 2.383e+02 4.291e+02, threshold=4.094e+02, percent-clipped=1.0
+2024-08-25 10:26:59,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=39514.666666666664, ans=0.125
+2024-08-25 10:27:14,152 INFO [train.py:1114] (0/4) Epoch 3, batch 2450, loss[loss=0.4108, simple_loss=0.3822, pruned_loss=0.1607, ctc_loss=0.295, over 13134.00 frames. ], tot_loss[loss=0.308, simple_loss=0.3391, pruned_loss=0.1008, ctc_loss=0.1881, over 3733231.49 frames. ], batch size: 141, lr: 3.53e-02, grad_scale: 16.0
+2024-08-25 10:27:15,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=39621.333333333336, ans=0.125
+2024-08-25 10:27:48,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=39781.333333333336, ans=0.09899494936611666
+2024-08-25 10:27:56,280 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-3.pt
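This first log file ends with the end-of-epoch-3 checkpoint save to `exp/epoch-3.pt`. A hedged sketch of what such a per-epoch save might involve (keys and helper are illustrative; the actual icefall helper also stores scheduler, sampler, and grad-scaler state):

```python
import torch

def save_epoch_checkpoint(exp_dir: str, epoch: int, model, optimizer) -> None:
    """Illustrative end-of-epoch checkpointing to exp_dir/epoch-<n>.pt."""
    torch.save(
        {"epoch": epoch,
         "model": model.state_dict(),
         "optimizer": optimizer.state_dict()},
        f"{exp_dir}/epoch-{epoch}.pt",
    )
```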
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-1 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-1
new file mode 100644
index 0000000000000000000000000000000000000000..d4b549fd484eba256d8b2ea4adf45f356bcccc5e
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-1
@@ -0,0 +1,1163 @@
+2024-08-25 03:46:09,310 INFO [train.py:1182] (1/4) Training started
+2024-08-25 03:46:09,311 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-25 03:46:09,373 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
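Several fields of this config dict ('num_encoder_layers', 'downsampling_factor', 'encoder_dim', 'feedforward_dim', ...) encode one value per Zipformer encoder stack as a comma-separated string. A small sketch of parsing them (helper name assumed):

```python
def to_int_tuple(s: str) -> tuple[int, ...]:
    """Parse the comma-separated per-stack specs from the config above."""
    return tuple(int(t) for t in s.split(","))

num_encoder_layers = to_int_tuple("2,2,3,4,3,2")
encoder_dim = to_int_tuple("192,256,384,512,384,256")
downsampling_factor = to_int_tuple("1,2,4,8,4,2")
# Six stacks; e.g. the middle stack has 4 layers at dim 512 and runs
# at 8x temporal downsampling.
assert len(num_encoder_layers) == len(encoder_dim) == len(downsampling_factor) == 6
```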
+2024-08-25 03:46:09,373 INFO [train.py:1212] (1/4) About to create model
+2024-08-25 03:46:10,419 INFO [train.py:1216] (1/4) Number of model parameters: 65805511
+2024-08-25 03:46:10,563 INFO [train.py:1231] (1/4) Using DDP
+2024-08-25 03:46:14,817 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 03:46:14,897 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-25 03:46:16,483 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-25 03:46:16,488 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-25 03:46:16,584 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-25 03:46:16,613 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-25 03:46:16,931 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
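The data-module lines above describe building the train and dev dataloaders from lhotse cut sets with a duration-bucketed sampler, per the `max_duration: 800`, `num_buckets: 30`, and `num_workers: 2` values in the config. A sketch of that setup (the manifest path is assumed):

```python
from lhotse import CutSet
from lhotse.dataset import DynamicBucketingSampler, K2SpeechRecognitionDataset
from torch.utils.data import DataLoader

# Assumed manifest path; the log's shuffled train-clean-100/360 + train-other-500 cuts.
cuts = CutSet.from_file("data/fbank/librispeech_cuts_train-all-shuf.jsonl.gz")
sampler = DynamicBucketingSampler(
    cuts, max_duration=800.0, num_buckets=30, shuffle=True, drop_last=True
)
# The sampler yields whole batches (as CutSets), so batch_size=None.
dataloader = DataLoader(
    K2SpeechRecognitionDataset(), sampler=sampler, batch_size=None, num_workers=2
)
```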
+2024-08-25 03:46:16,931 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 03:50:49,685 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=9.22 vs. limit=3.0
+2024-08-25 03:50:50,515 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12342MB
+2024-08-25 03:50:51,639 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12342MB
+2024-08-25 03:51:20,162 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12342MB
+2024-08-25 03:51:20,928 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=95.89 vs. limit=4.0
+2024-08-25 03:51:21,408 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12342MB
+2024-08-25 03:51:43,054 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12342MB
+2024-08-25 03:51:44,350 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12342MB
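Before epoch 1 the script runs a sanity pass over the largest batch of each duration bucket so that any out-of-memory error surfaces immediately rather than hours into training, logging the peak CUDA allocation after each forward/backward. A hedged sketch of the idea:

```python
import torch

def sanity_check_oom(compute_loss, biggest_batches, device: str = "cuda:1") -> None:
    """Assumed shape of the pre-training OOM check: one forward+backward
    on the largest batch from each bucket, reporting peak memory."""
    for batch in biggest_batches:
        loss = compute_loss(batch)   # forward pass on `device`
        loss.backward()              # backward pass allocates activations/grads
        mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
        print(f"Maximum memory allocated so far is {mb}MB")
```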
+2024-08-25 03:53:10,643 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.67 vs. limit=7.5
+2024-08-25 03:53:11,521 INFO [train.py:1114] (1/4) Epoch 1, batch 0, loss[loss=8.723, simple_loss=7.061, pruned_loss=6.92, ctc_loss=4.845, over 19825.00 frames. ], tot_loss[loss=8.723, simple_loss=7.061, pruned_loss=6.92, ctc_loss=4.845, over 19825.00 frames. ], batch size: 49, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 03:53:11,522 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 03:53:26,568 INFO [train.py:1146] (1/4) Epoch 1, validation: loss=8.842, simple_loss=7.151, pruned_loss=6.961, ctc_loss=4.966, over 944034.00 frames.
+2024-08-25 03:53:26,569 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12342MB
+2024-08-25 03:53:27,118 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.79 vs. limit=7.5
+2024-08-25 03:53:27,986 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.40 vs. limit=7.5
+2024-08-25 03:53:37,744 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.94 vs. limit=7.5
+2024-08-25 03:53:38,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=0.0, ans=0.25
+2024-08-25 03:53:45,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=0.0, ans=0.2
+2024-08-25 03:54:08,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=0.0, ans=0.5
+2024-08-25 03:54:35,061 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.78 vs. limit=7.52
+2024-08-25 03:54:36,707 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.008e+03 4.149e+03 4.360e+03 5.530e+03 5.553e+03, threshold=1.744e+04, percent-clipped=0.0
+2024-08-25 03:54:38,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=53.333333333333336, ans=0.4975
+2024-08-25 03:54:38,355 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=6.43 vs. limit=4.021333333333334
+2024-08-25 03:54:59,972 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=13.39 vs. limit=7.52
+2024-08-25 03:55:14,449 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=499.63 vs. limit=7.58
+2024-08-25 03:55:46,191 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.063e+03 1.598e+03 4.141e+03 5.530e+03 6.572e+03, threshold=1.656e+04, percent-clipped=0.0
+2024-08-25 03:55:46,838 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=94.13 vs. limit=5.026666666666666
+2024-08-25 03:57:05,411 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=503.08 vs. limit=7.58
+2024-08-25 03:57:10,949 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=374.53 vs. limit=7.58
+2024-08-25 03:57:18,436 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=352.40 vs. limit=5.08
+2024-08-25 03:57:21,971 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=232.56 vs. limit=7.56
+2024-08-25 04:00:04,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=213.33333333333334, ans=0.8925333333333334
+2024-08-25 04:00:04,969 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=68.02 vs. limit=7.58
+2024-08-25 04:00:14,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.873e+02 1.048e+03 1.328e+03 4.149e+03 6.572e+03, threshold=5.310e+03, percent-clipped=0.0
+2024-08-25 04:00:17,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=213.33333333333334, ans=0.49
+2024-08-25 04:00:18,637 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=110.58 vs. limit=7.58
+2024-08-25 04:00:19,608 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=360.29 vs. limit=7.58
+2024-08-25 04:00:25,750 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=144.46 vs. limit=7.58
+2024-08-25 04:00:39,839 INFO [train.py:1114] (1/4) Epoch 1, batch 50, loss[loss=1.642, simple_loss=1.082, pruned_loss=1.24, ctc_loss=2.104, over 19710.00 frames. ], tot_loss[loss=3.754, simple_loss=2.917, pruned_loss=2.565, ctc_loss=2.866, over 844643.19 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 04:00:48,159 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=74.31 vs. limit=7.7
+2024-08-25 04:00:50,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=266.6666666666667, ans=0.8906666666666667
+2024-08-25 04:00:54,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=266.6666666666667, ans=0.19
+2024-08-25 04:00:56,121 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=48.50 vs. limit=7.7
+2024-08-25 04:01:20,941 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=8.84 vs. limit=5.08
+2024-08-25 04:01:26,324 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=11.85 vs. limit=4.128
+2024-08-25 04:01:26,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=373.3333333333333, ans=0.4825
+2024-08-25 04:01:41,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=373.3333333333333, ans=0.0916
+2024-08-25 04:02:02,730 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=11.11 vs. limit=5.093333333333334
+2024-08-25 04:02:03,587 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=426.6666666666667, ans=0.29573333333333335
+2024-08-25 04:02:05,507 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=347.88 vs. limit=7.66
+2024-08-25 04:02:37,912 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=161.22 vs. limit=7.68
+2024-08-25 04:02:59,895 INFO [train.py:1114] (1/4) Epoch 1, batch 100, loss[loss=1.395, simple_loss=0.9742, pruned_loss=1.215, ctc_loss=1.341, over 19760.00 frames. ], tot_loss[loss=2.588, simple_loss=1.913, pruned_loss=1.869, ctc_loss=2.355, over 1498832.64 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 04:03:06,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=43.59 vs. limit=7.9
+2024-08-25 04:03:07,089 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.807e+02 4.974e+02 8.674e+02 1.328e+03 6.572e+03, threshold=1.735e+03, percent-clipped=0.0
+2024-08-25 04:03:15,523 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=28.81 vs. limit=5.133333333333334
+2024-08-25 04:03:17,535 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=533.3333333333334, ans=5.333333333333333
+2024-08-25 04:03:19,246 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.42 vs. limit=3.088
+2024-08-25 04:03:19,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=586.6666666666666, ans=0.5
+2024-08-25 04:03:20,185 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=31.12 vs. limit=5.293333333333333
+2024-08-25 04:03:33,766 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=337.16 vs. limit=7.72
+2024-08-25 04:03:33,825 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=19.72 vs. limit=7.72
+2024-08-25 04:03:35,296 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=250.53 vs. limit=7.74
+2024-08-25 04:03:59,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=693.3333333333334, ans=0.4675
+2024-08-25 04:04:02,552 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.48 vs. limit=4.277333333333333
+2024-08-25 04:04:04,509 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=8.47 vs. limit=4.298666666666667
+2024-08-25 04:04:11,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=746.6666666666666, ans=5.466666666666667
+2024-08-25 04:04:22,845 INFO [train.py:1114] (1/4) Epoch 1, batch 150, loss[loss=1.145, simple_loss=0.7924, pruned_loss=0.9977, ctc_loss=1.075, over 19707.00 frames. ], tot_loss[loss=2.053, simple_loss=1.496, pruned_loss=1.573, ctc_loss=1.87, over 2028052.72 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 04:04:25,737 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=800.0, ans=0.872
+2024-08-25 04:04:26,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=18.48 vs. limit=4.32
+2024-08-25 04:04:26,141 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=102.10 vs. limit=7.8
+2024-08-25 04:04:29,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=11.89 vs. limit=4.32
+2024-08-25 04:04:41,196 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=11.89 vs. limit=7.82
+2024-08-25 04:05:10,244 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=35.58 vs. limit=8.18
+2024-08-25 04:05:10,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=906.6666666666666, ans=0.4575
+2024-08-25 04:05:12,312 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.20 vs. limit=8.18
+2024-08-25 04:05:13,657 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=11.48 vs. limit=4.384
+2024-08-25 04:05:17,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=126.47 vs. limit=7.86
+2024-08-25 04:05:40,704 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=76.87 vs. limit=7.88
+2024-08-25 04:05:41,002 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=15.76 vs. limit=5.253333333333333
+2024-08-25 04:05:47,331 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=1013.3333333333334, ans=0.4525
+2024-08-25 04:05:52,904 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.62 vs. limit=8.3
+2024-08-25 04:05:53,934 INFO [train.py:1114] (1/4) Epoch 1, batch 200, loss[loss=1.28, simple_loss=0.881, pruned_loss=1.029, ctc_loss=1.239, over 18088.00 frames. ], tot_loss[loss=1.762, simple_loss=1.267, pruned_loss=1.383, ctc_loss=1.622, over 2435338.20 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 04:05:55,812 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=10.40 vs. limit=4.426666666666667
+2024-08-25 04:05:55,857 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=13.37 vs. limit=7.9
+2024-08-25 04:05:56,680 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=1066.6666666666667, ans=0.7606666666666667
+2024-08-25 04:05:57,467 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 7.117e+01 1.191e+02 1.554e+02 2.219e+02 5.914e+02, threshold=3.108e+02, percent-clipped=0.0
+2024-08-25 04:05:57,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=1066.6666666666667, ans=0.28933333333333333
+2024-08-25 04:06:01,747 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=47.69 vs. limit=7.9
+2024-08-25 04:06:02,960 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=57.47 vs. limit=8.3
+2024-08-25 04:06:03,963 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=142.93 vs. limit=7.9
+2024-08-25 04:06:08,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=1120.0, ans=0.093
+2024-08-25 04:06:12,021 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=42.99 vs. limit=7.92
+2024-08-25 04:06:12,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=1120.0, ans=0.36
+2024-08-25 04:06:19,174 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=39.93 vs. limit=8.38
+2024-08-25 04:06:20,536 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=34.46 vs. limit=7.94
+2024-08-25 04:06:23,561 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=55.55 vs. limit=7.94
+2024-08-25 04:06:28,550 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.27 vs. limit=5.306666666666667
+2024-08-25 04:06:50,076 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=39.86 vs. limit=7.98
+2024-08-25 04:06:55,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=1280.0, ans=0.44
+2024-08-25 04:06:55,471 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.53 vs. limit=7.98
+2024-08-25 04:06:56,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=1333.3333333333333, ans=0.8533333333333334
+2024-08-25 04:06:57,331 INFO [train.py:1114] (1/4) Epoch 1, batch 250, loss[loss=1.245, simple_loss=0.8443, pruned_loss=0.988, ctc_loss=1.217, over 19372.00 frames. ], tot_loss[loss=1.585, simple_loss=1.125, pruned_loss=1.253, ctc_loss=1.479, over 2755780.74 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 04:07:48,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=1386.6666666666667, ans=0.435
+2024-08-25 04:07:49,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=1386.6666666666667, ans=0.8514666666666667
+2024-08-25 04:07:55,757 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=73.00 vs. limit=8.54
+2024-08-25 04:08:01,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=1440.0, ans=0.14600000000000002
+2024-08-25 04:08:04,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=1440.0, ans=0.5
+2024-08-25 04:08:07,315 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=3.482e-01
+2024-08-25 04:08:11,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=35.44 vs. limit=5.746666666666666
+2024-08-25 04:08:17,026 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.85 vs. limit=5.373333333333333
+2024-08-25 04:08:20,709 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=17.70 vs. limit=8.08
+2024-08-25 04:08:25,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=14.41 vs. limit=8.08
+2024-08-25 04:09:05,826 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=1546.6666666666667, ans=0.5
+2024-08-25 04:09:07,238 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.80 vs. limit=8.66
+2024-08-25 04:09:11,363 INFO [train.py:1114] (1/4) Epoch 1, batch 300, loss[loss=1.219, simple_loss=0.8167, pruned_loss=0.9461, ctc_loss=1.206, over 19521.00 frames. ], tot_loss[loss=1.469, simple_loss=1.03, pruned_loss=1.161, ctc_loss=1.388, over 3000944.94 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 04:09:11,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=1600.0, ans=0.5
+2024-08-25 04:09:12,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=1600.0, ans=0.425
+2024-08-25 04:09:14,905 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 8.125e+01 1.367e+02 1.753e+02 2.332e+02 3.681e+02, threshold=3.505e+02, percent-clipped=6.0
+2024-08-25 04:09:15,245 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=1600.0, ans=0.425
+2024-08-25 04:09:16,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=1600.0, ans=0.09000000000000001
+2024-08-25 04:09:16,837 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=48.03 vs. limit=8.1
+2024-08-25 04:10:27,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=1653.3333333333333, ans=0.08966666666666667
+2024-08-25 04:10:37,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=1706.6666666666667, ans=0.0616
+2024-08-25 04:10:45,796 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.68 vs. limit=8.82
+2024-08-25 04:11:07,093 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=40.73 vs. limit=8.18
+2024-08-25 04:11:09,105 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=53.05 vs. limit=5.906666666666666
+2024-08-25 04:11:09,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=1813.3333333333333, ans=0.415
+2024-08-25 04:11:13,112 INFO [train.py:1114] (1/4) Epoch 1, batch 350, loss[loss=1.014, simple_loss=0.6721, pruned_loss=0.7762, ctc_loss=1.002, over 19751.00 frames. ], tot_loss[loss=1.392, simple_loss=0.9654, pruned_loss=1.093, ctc_loss=1.327, over 3190480.39 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 04:11:17,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=1866.6666666666667, ans=0.4125
+2024-08-25 04:11:19,622 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.66 vs. limit=8.9
+2024-08-25 04:11:22,939 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=19.20 vs. limit=8.2
+2024-08-25 04:11:23,144 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=143.86 vs. limit=8.2
+2024-08-25 04:11:25,354 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=43.19 vs. limit=8.22
+2024-08-25 04:11:28,853 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.54 vs. limit=8.94
+2024-08-25 04:11:34,845 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=25.59 vs. limit=8.22
+2024-08-25 04:11:36,958 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.52 vs. limit=8.98
+2024-08-25 04:11:42,983 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=15.36 vs. limit=8.24
+2024-08-25 04:11:51,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=2026.6666666666667, ans=0.08733333333333333
+2024-08-25 04:11:51,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=2026.6666666666667, ans=0.0544
+2024-08-25 04:11:54,386 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.30 vs. limit=9.02
+2024-08-25 04:11:56,692 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=64.11 vs. limit=8.26
+2024-08-25 04:12:01,331 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=20.35 vs. limit=8.28
+2024-08-25 04:12:02,501 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=6.49 vs. limit=4.832
+2024-08-25 04:12:11,704 INFO [train.py:1114] (1/4) Epoch 1, batch 400, loss[loss=1.184, simple_loss=0.7949, pruned_loss=0.855, ctc_loss=1.145, over 19878.00 frames. ], tot_loss[loss=1.331, simple_loss=0.915, pruned_loss=1.034, ctc_loss=1.275, over 3342068.16 frames. ], batch size: 55, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 04:12:13,549 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=64.12 vs. limit=8.3
+2024-08-25 04:12:15,155 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 9.241e+01 1.644e+02 2.144e+02 2.768e+02 4.713e+02, threshold=4.287e+02, percent-clipped=10.0
+2024-08-25 04:12:18,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=6.80 vs. limit=5.0
+2024-08-25 04:12:21,990 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=68.26 vs. limit=8.3
+2024-08-25 04:12:39,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=2240.0, ans=0.11599999999999999
+2024-08-25 04:12:40,176 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=17.84 vs. limit=8.34
+2024-08-25 04:13:02,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=2346.6666666666665, ans=0.8178666666666667
+2024-08-25 04:13:04,882 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.70 vs. limit=8.38
+2024-08-25 04:13:05,941 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.48 vs. limit=8.38
+2024-08-25 04:13:10,427 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.27 vs. limit=9.26
+2024-08-25 04:13:12,050 INFO [train.py:1114] (1/4) Epoch 1, batch 450, loss[loss=1.156, simple_loss=0.7904, pruned_loss=0.7709, ctc_loss=1.104, over 19609.00 frames. ], tot_loss[loss=1.28, simple_loss=0.877, pruned_loss=0.9721, ctc_loss=1.228, over 3450267.38 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 04:14:03,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=2453.3333333333335, ans=0.385
+2024-08-25 04:14:04,167 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.57 vs. limit=8.42
+2024-08-25 04:14:08,576 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=17.55 vs. limit=8.42
+2024-08-25 04:14:08,673 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=23.12 vs. limit=8.42
+2024-08-25 04:14:11,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=2453.3333333333335, ans=0.385
+2024-08-25 04:14:12,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=2506.6666666666665, ans=0.22493333333333335
+2024-08-25 04:14:15,282 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.86 vs. limit=9.379999999999999
+2024-08-25 04:14:17,744 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=34.04 vs. limit=8.44
+2024-08-25 04:14:24,628 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=14.03 vs. limit=5.64
+2024-08-25 04:14:28,181 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.14 vs. limit=9.42
+2024-08-25 04:14:28,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=2560.0, ans=0.2744
+2024-08-25 04:14:30,355 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.82 vs. limit=9.42
+2024-08-25 04:14:38,962 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.42 vs. limit=5.045333333333334
+2024-08-25 04:14:39,975 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.20 vs. limit=9.46
+2024-08-25 04:14:43,274 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.39 vs. limit=8.48
+2024-08-25 04:14:45,733 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=6.74 vs. limit=5.045333333333334
+2024-08-25 04:14:52,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=2666.6666666666665, ans=0.375
+2024-08-25 04:14:53,378 INFO [train.py:1114] (1/4) Epoch 1, batch 500, loss[loss=1.01, simple_loss=0.7024, pruned_loss=0.6173, ctc_loss=0.972, over 19664.00 frames. ], tot_loss[loss=1.217, simple_loss=0.835, pruned_loss=0.8932, ctc_loss=1.169, over 3545445.64 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:14:59,590 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 2.224e+02 2.884e+02 3.405e+02 7.334e+02, threshold=5.768e+02, percent-clipped=15.0
+2024-08-25 04:15:03,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=2666.6666666666665, ans=0.2733333333333333
+2024-08-25 04:15:09,375 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.97 vs. limit=8.52
+2024-08-25 04:15:23,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=2773.3333333333335, ans=0.37
+2024-08-25 04:15:27,365 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=23.12 vs. limit=8.54
+2024-08-25 04:15:28,471 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.52 vs. limit=8.54
+2024-08-25 04:15:38,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=2826.6666666666665, ans=0.7782666666666667
+2024-08-25 04:15:45,609 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.74 vs. limit=8.58
+2024-08-25 04:15:49,133 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=4.33 vs. limit=4.576
+2024-08-25 04:15:50,996 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.26 vs. limit=8.58
+2024-08-25 04:15:52,593 INFO [train.py:1114] (1/4) Epoch 1, batch 550, loss[loss=0.912, simple_loss=0.6418, pruned_loss=0.5179, ctc_loss=0.8917, over 19313.00 frames. ], tot_loss[loss=1.151, simple_loss=0.7928, pruned_loss=0.8137, ctc_loss=1.108, over 3607780.47 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:15:56,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=2933.3333333333335, ans=0.08999999999999998
+2024-08-25 04:16:11,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=8.69 vs. limit=8.620000000000001
+2024-08-25 04:16:18,191 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=15.19 vs. limit=8.64
+2024-08-25 04:16:23,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=3040.0, ans=0.2456
+2024-08-25 04:17:00,419 INFO [train.py:1114] (1/4) Epoch 1, batch 600, loss[loss=0.8352, simple_loss=0.6007, pruned_loss=0.44, ctc_loss=0.8089, over 19416.00 frames. ], tot_loss[loss=1.082, simple_loss=0.7496, pruned_loss=0.7333, ctc_loss=1.041, over 3665856.03 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:17:03,291 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.48 vs. limit=5.8
+2024-08-25 04:17:03,771 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.809e+02 3.766e+02 4.633e+02 8.655e+02, threshold=7.532e+02, percent-clipped=12.0
+2024-08-25 04:17:05,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=3200.0, ans=0.09999999999999998
+2024-08-25 04:17:13,842 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=3253.3333333333335, ans=0.34750000000000003
+2024-08-25 04:17:19,650 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.77 vs. limit=5.8133333333333335
+2024-08-25 04:18:10,248 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.30 vs. limit=10.02
+2024-08-25 04:18:26,514 INFO [train.py:1114] (1/4) Epoch 1, batch 650, loss[loss=0.6971, simple_loss=0.5186, pruned_loss=0.3355, ctc_loss=0.6552, over 19771.00 frames. ], tot_loss[loss=1.008, simple_loss=0.7046, pruned_loss=0.6544, ctc_loss=0.9687, over 3716267.31 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:18:34,364 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=3466.6666666666665, ans=0.022000000000000006
+2024-08-25 04:18:39,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=3520.0, ans=0.0208
+2024-08-25 04:18:51,343 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.22 vs. limit=5.88
+2024-08-25 04:18:55,375 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=11.67 vs. limit=10.18
+2024-08-25 04:18:56,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=3573.3333333333335, ans=0.26426666666666665
+2024-08-25 04:18:58,413 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=3573.3333333333335, ans=0.3325
+2024-08-25 04:20:32,372 INFO [train.py:1114] (1/4) Epoch 1, batch 700, loss[loss=0.6708, simple_loss=0.5004, pruned_loss=0.3244, ctc_loss=0.6167, over 19715.00 frames. ], tot_loss[loss=0.944, simple_loss=0.6662, pruned_loss=0.5867, ctc_loss=0.9031, over 3749077.52 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:20:32,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=3733.3333333333335, ans=0.325
+2024-08-25 04:20:35,545 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.600e+02 3.309e+02 4.487e+02 1.180e+03, threshold=6.619e+02, percent-clipped=3.0
+2024-08-25 04:20:45,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=3786.6666666666665, ans=0.3225
+2024-08-25 04:20:49,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3786.6666666666665, ans=0.3225
+2024-08-25 04:20:55,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=3840.0, ans=0.32
+2024-08-25 04:21:00,179 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.90 vs. limit=10.379999999999999
+2024-08-25 04:21:02,293 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.98 vs. limit=5.536
+2024-08-25 04:21:09,528 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=2.930e-01
+2024-08-25 04:21:15,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=3946.6666666666665, ans=0.315
+2024-08-25 04:21:17,805 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=3.96 vs. limit=5.578666666666667
+2024-08-25 04:21:24,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=3946.6666666666665, ans=0.315
+2024-08-25 04:21:26,550 INFO [train.py:1114] (1/4) Epoch 1, batch 750, loss[loss=0.6553, simple_loss=0.5017, pruned_loss=0.2981, ctc_loss=0.586, over 19495.00 frames. ], tot_loss[loss=0.885, simple_loss=0.6316, pruned_loss=0.5271, ctc_loss=0.8405, over 3775728.42 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:21:28,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4000.0, ans=0.26
+2024-08-25 04:21:32,261 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=4000.0, ans=0.07500000000000001
+2024-08-25 04:21:32,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 04:21:35,658 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.82 vs. limit=10.5
+2024-08-25 04:21:52,182 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.24 vs. limit=9.040000000000001
+2024-08-25 04:22:27,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=4160.0, ans=7.6
+2024-08-25 04:22:29,631 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4213.333333333333, ans=0.2578666666666667
+2024-08-25 04:22:33,238 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.75 vs. limit=10.66
+2024-08-25 04:22:37,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=4213.333333333333, ans=0.7525333333333334
+2024-08-25 04:22:40,637 INFO [train.py:1114] (1/4) Epoch 1, batch 800, loss[loss=0.5599, simple_loss=0.4397, pruned_loss=0.2454, ctc_loss=0.4764, over 19827.00 frames. ], tot_loss[loss=0.8299, simple_loss=0.6, pruned_loss=0.4739, ctc_loss=0.7789, over 3797367.18 frames. ], batch size: 49, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:22:43,863 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.484e+02 3.479e+02 4.307e+02 9.603e+02, threshold=6.957e+02, percent-clipped=4.0
+2024-08-25 04:22:45,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4266.666666666667, ans=0.2573333333333333
+2024-08-25 04:22:46,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4266.666666666667, ans=0.2573333333333333
+2024-08-25 04:23:23,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=4426.666666666667, ans=0.2925
+2024-08-25 04:23:34,667 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.26 vs. limit=10.86
+2024-08-25 04:23:37,967 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.45 vs. limit=6.12
+2024-08-25 04:23:40,877 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=9.898e-01
+2024-08-25 04:23:42,705 INFO [train.py:1114] (1/4) Epoch 1, batch 850, loss[loss=0.593, simple_loss=0.4709, pruned_loss=0.2536, ctc_loss=0.4992, over 19671.00 frames. ], tot_loss[loss=0.7802, simple_loss=0.5721, pruned_loss=0.4275, ctc_loss=0.722, over 3815221.61 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:23:46,439 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.49 vs. limit=10.9
+2024-08-25 04:23:50,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=4533.333333333333, ans=0.2875
+2024-08-25 04:23:51,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1.whitening_limit, batch_count=4533.333333333333, ans=6.133333333333333
+2024-08-25 04:23:55,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4586.666666666667, ans=0.2541333333333333
+2024-08-25 04:24:36,255 INFO [train.py:1114] (1/4) Epoch 1, batch 900, loss[loss=0.5104, simple_loss=0.4095, pruned_loss=0.2147, ctc_loss=0.4231, over 19823.00 frames. ], tot_loss[loss=0.7393, simple_loss=0.5493, pruned_loss=0.3901, ctc_loss=0.674, over 3819074.93 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:24:36,926 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.80 vs. limit=11.1
+2024-08-25 04:24:39,557 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.433e+02 3.203e+02 4.513e+02 7.559e+02, threshold=6.406e+02, percent-clipped=2.0
+2024-08-25 04:24:39,807 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4800.0, ans=0.252
+2024-08-25 04:24:46,707 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.81 vs. limit=7.426666666666666
+2024-08-25 04:25:10,336 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.01 vs. limit=6.24
+2024-08-25 04:25:22,336 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=5013.333333333333, ans=0.265
+2024-08-25 04:25:23,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=5013.333333333333, ans=0.265
+2024-08-25 04:25:24,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=5013.333333333333, ans=0.03433333333333334
+2024-08-25 04:25:25,815 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.54 vs. limit=9.379999999999999
+2024-08-25 04:25:31,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=5066.666666666667, ans=0.025
+2024-08-25 04:25:32,738 INFO [train.py:1114] (1/4) Epoch 1, batch 950, loss[loss=0.5071, simple_loss=0.4155, pruned_loss=0.2039, ctc_loss=0.4135, over 19494.00 frames. ], tot_loss[loss=0.7005, simple_loss=0.5279, pruned_loss=0.3562, ctc_loss=0.6284, over 3820940.71 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:25:41,759 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.43 vs. limit=6.266666666666667
+2024-08-25 04:25:49,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=5120.0, ans=0.0
+2024-08-25 04:25:54,421 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=5173.333333333333, ans=0.04949747468305833
+2024-08-25 04:25:55,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5173.333333333333, ans=0.24826666666666666
+2024-08-25 04:26:12,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=5226.666666666667, ans=0.06733333333333333
+2024-08-25 04:26:13,364 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.28 vs. limit=3.784
+2024-08-25 04:26:20,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5280.0, ans=0.24719999999999998
+2024-08-25 04:26:33,405 INFO [train.py:1114] (1/4) Epoch 1, batch 1000, loss[loss=0.4809, simple_loss=0.4059, pruned_loss=0.1847, ctc_loss=0.3747, over 19853.00 frames. ], tot_loss[loss=0.6694, simple_loss=0.5114, pruned_loss=0.3292, ctc_loss=0.5905, over 3817984.21 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:26:36,701 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.226e+02 2.758e+02 3.479e+02 9.619e+02, threshold=5.516e+02, percent-clipped=3.0
+2024-08-25 04:26:44,546 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=5.82 vs. limit=6.154666666666667
+2024-08-25 04:26:47,278 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=5386.666666666667, ans=0.2475
+2024-08-25 04:26:48,434 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=2.172e-01
+2024-08-25 04:26:57,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=5440.0, ans=0.245
+2024-08-25 04:27:00,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=5440.0, ans=0.025
+2024-08-25 04:27:02,167 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.74 vs. limit=9.54
+2024-08-25 04:27:11,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=5493.333333333333, ans=0.043777777777777784
+2024-08-25 04:27:21,935 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.76 vs. limit=9.58
+2024-08-25 04:27:25,690 INFO [train.py:1114] (1/4) Epoch 1, batch 1050, loss[loss=0.5276, simple_loss=0.4416, pruned_loss=0.2073, ctc_loss=0.4162, over 19861.00 frames. ], tot_loss[loss=0.6378, simple_loss=0.4943, pruned_loss=0.3034, ctc_loss=0.5529, over 3823633.07 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:27:37,477 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.76 vs. limit=11.74
+2024-08-25 04:27:37,689 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.82 vs. limit=11.74
+2024-08-25 04:27:46,055 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.54 vs. limit=11.780000000000001
+2024-08-25 04:28:03,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=5760.0, ans=0.22999999999999998
+2024-08-25 04:28:09,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=5813.333333333333, ans=0.009605797101449275
+2024-08-25 04:28:20,199 INFO [train.py:1114] (1/4) Epoch 1, batch 1100, loss[loss=0.509, simple_loss=0.4281, pruned_loss=0.2009, ctc_loss=0.3954, over 19577.00 frames. ], tot_loss[loss=0.6097, simple_loss=0.4792, pruned_loss=0.2812, ctc_loss=0.5197, over 3830869.71 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:28:21,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5866.666666666667, ans=0.24133333333333332
+2024-08-25 04:28:23,261 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.143e+02 2.593e+02 3.421e+02 4.407e+02, threshold=5.186e+02, percent-clipped=0.0
+2024-08-25 04:28:34,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=5920.0, ans=0.009582608695652174
+2024-08-25 04:28:35,857 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn2.whiten.whitening_limit, batch_count=5920.0, ans=11.940000000000001
+2024-08-25 04:28:46,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=5973.333333333333, ans=0.24026666666666666
+2024-08-25 04:28:46,925 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=5973.333333333333, ans=0.025
+2024-08-25 04:28:54,778 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.44 vs. limit=8.013333333333334
+2024-08-25 04:28:57,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=6026.666666666667, ans=0.21750000000000003
+2024-08-25 04:29:15,933 INFO [train.py:1114] (1/4) Epoch 1, batch 1150, loss[loss=0.4886, simple_loss=0.4181, pruned_loss=0.1847, ctc_loss=0.3834, over 19567.00 frames. ], tot_loss[loss=0.5881, simple_loss=0.4681, pruned_loss=0.264, ctc_loss=0.4934, over 3829809.04 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 04:29:49,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=6240.0, ans=0.20750000000000002
+2024-08-25 04:32:28,005 INFO [train.py:1114] (1/4) Epoch 1, batch 1200, loss[loss=0.4917, simple_loss=0.4253, pruned_loss=0.1873, ctc_loss=0.3707, over 19852.00 frames. ], tot_loss[loss=0.5708, simple_loss=0.4598, pruned_loss=0.2503, ctc_loss=0.4717, over 3825553.02 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:32:31,062 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.077e+02 2.797e+02 3.799e+02 8.339e+02, threshold=5.594e+02, percent-clipped=11.0
+2024-08-25 04:32:32,613 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.07 vs. limit=9.9
+2024-08-25 04:33:01,153 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:01,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:03,570 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.87 vs. limit=12.42
+2024-08-25 04:33:05,626 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.74 vs. limit=6.64
+2024-08-25 04:33:07,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=6560.0, ans=0.2344
+2024-08-25 04:33:07,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:19,310 INFO [train.py:1114] (1/4) Epoch 1, batch 1250, loss[loss=0.5131, simple_loss=0.4353, pruned_loss=0.2055, ctc_loss=0.3889, over 19519.00 frames. ], tot_loss[loss=0.551, simple_loss=0.4502, pruned_loss=0.2357, ctc_loss=0.4474, over 3843501.71 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:33:38,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=6720.0, ans=0.185
+2024-08-25 04:33:44,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=6773.333333333333, ans=0.07
+2024-08-25 04:33:48,920 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.30 vs. limit=10.04
+2024-08-25 04:33:49,838 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.29 vs. limit=10.04
+2024-08-25 04:34:06,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=6880.0, ans=0.1775
+2024-08-25 04:34:12,520 INFO [train.py:1114] (1/4) Epoch 1, batch 1300, loss[loss=0.509, simple_loss=0.4346, pruned_loss=0.2018, ctc_loss=0.3869, over 18874.00 frames. ], tot_loss[loss=0.5313, simple_loss=0.4399, pruned_loss=0.2223, ctc_loss=0.4245, over 3846323.59 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:34:12,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=6933.333333333333, ans=0.175
+2024-08-25 04:34:15,557 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.007e+02 2.492e+02 3.309e+02 5.533e+02, threshold=4.985e+02, percent-clipped=0.0
+2024-08-25 04:34:19,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=6933.333333333333, ans=0.23066666666666666
+2024-08-25 04:34:29,288 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:34:35,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=7040.0, ans=0.037333333333333336
+2024-08-25 04:34:54,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=7093.333333333333, ans=0.16749999999999998
+2024-08-25 04:34:57,344 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.94 vs. limit=12.82
+2024-08-25 04:35:00,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=7146.666666666667, ans=0.2285333333333333
+2024-08-25 04:35:01,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.26 vs. limit=10.18
+2024-08-25 04:35:02,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=7146.666666666667, ans=0.03688888888888889
+2024-08-25 04:35:11,312 INFO [train.py:1114] (1/4) Epoch 1, batch 1350, loss[loss=0.4349, simple_loss=0.3882, pruned_loss=0.1596, ctc_loss=0.3242, over 19760.00 frames. ], tot_loss[loss=0.5147, simple_loss=0.4318, pruned_loss=0.2111, ctc_loss=0.405, over 3858477.95 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:35:16,909 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.72 vs. limit=12.9
+2024-08-25 04:35:23,886 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=7253.333333333333, ans=0.036444444444444446
+2024-08-25 04:35:25,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=7253.333333333333, ans=0.13290666666666667
+2024-08-25 04:35:30,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=7306.666666666667, ans=0.22693333333333332
+2024-08-25 04:35:50,455 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.07 vs. limit=10.26
+2024-08-25 04:35:58,475 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.08 vs. limit=13.059999999999999
+2024-08-25 04:36:02,128 INFO [train.py:1114] (1/4) Epoch 1, batch 1400, loss[loss=0.3964, simple_loss=0.3503, pruned_loss=0.1501, ctc_loss=0.2954, over 19668.00 frames. ], tot_loss[loss=0.5003, simple_loss=0.4246, pruned_loss=0.2019, ctc_loss=0.3882, over 3864290.12 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:36:05,098 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.980e+02 2.233e+02 2.820e+02 5.701e+02, threshold=4.466e+02, percent-clipped=2.0
+2024-08-25 04:36:06,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=7466.666666666667, ans=0.15000000000000002
+2024-08-25 04:36:25,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=7573.333333333333, ans=0.14500000000000002
+2024-08-25 04:36:30,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=7573.333333333333, ans=0.14500000000000002
+2024-08-25 04:36:48,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=7680.0, ans=0.03466666666666667
+2024-08-25 04:36:54,785 INFO [train.py:1114] (1/4) Epoch 1, batch 1450, loss[loss=0.4836, simple_loss=0.4296, pruned_loss=0.1859, ctc_loss=0.3491, over 19686.00 frames. ], tot_loss[loss=0.4908, simple_loss=0.4209, pruned_loss=0.1955, ctc_loss=0.3759, over 3861926.89 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:36:57,323 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.58 vs. limit=10.4
+2024-08-25 04:36:59,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=7733.333333333333, ans=0.22266666666666668
+2024-08-25 04:37:02,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=7733.333333333333, ans=0.1375
+2024-08-25 04:37:03,125 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.83 vs. limit=6.933333333333334
+2024-08-25 04:37:30,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=7893.333333333333, ans=0.03377777777777778
+2024-08-25 04:37:40,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=7946.666666666667, ans=0.025
+2024-08-25 04:37:48,625 INFO [train.py:1114] (1/4) Epoch 1, batch 1500, loss[loss=0.4527, simple_loss=0.417, pruned_loss=0.1647, ctc_loss=0.3228, over 19597.00 frames. ], tot_loss[loss=0.48, simple_loss=0.4161, pruned_loss=0.1888, ctc_loss=0.3636, over 3862190.24 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:37:52,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=8000.0, ans=0.009130434782608696
+2024-08-25 04:37:52,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=8000.0, ans=0.125
+2024-08-25 04:37:52,925 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.09 vs. limit=9.0
+2024-08-25 04:37:54,384 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 1.987e+02 2.351e+02 3.240e+02 5.717e+02, threshold=4.702e+02, percent-clipped=4.0
+2024-08-25 04:37:58,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=8000.0, ans=0.62
+2024-08-25 04:38:01,613 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:38:19,042 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.11 vs. limit=13.58
+2024-08-25 04:38:35,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=8160.0, ans=0.03266666666666667
+2024-08-25 04:38:56,151 INFO [train.py:1114] (1/4) Epoch 1, batch 1550, loss[loss=0.4656, simple_loss=0.418, pruned_loss=0.1754, ctc_loss=0.3485, over 19624.00 frames. ], tot_loss[loss=0.4721, simple_loss=0.4126, pruned_loss=0.1841, ctc_loss=0.3547, over 3846925.98 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:01,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=8266.666666666666, ans=0.125
+2024-08-25 04:39:05,060 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=8320.0, ans=0.6088
+2024-08-25 04:39:05,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=8320.0, ans=0.09899494936611666
+2024-08-25 04:39:39,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=8480.0, ans=0.00902608695652174
+2024-08-25 04:39:39,390 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.18 vs. limit=13.86
+2024-08-25 04:39:46,376 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.01 vs. limit=10.7
+2024-08-25 04:39:47,230 INFO [train.py:1114] (1/4) Epoch 1, batch 1600, loss[loss=0.4418, simple_loss=0.4094, pruned_loss=0.162, ctc_loss=0.3166, over 19831.00 frames. ], tot_loss[loss=0.4627, simple_loss=0.4084, pruned_loss=0.1788, ctc_loss=0.3443, over 3836866.91 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:52,854 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.044e+02 2.368e+02 2.950e+02 6.795e+02, threshold=4.737e+02, percent-clipped=6.0
+2024-08-25 04:40:01,892 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.86 vs. limit=13.94
+2024-08-25 04:40:04,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=8586.666666666666, ans=0.5994666666666667
+2024-08-25 04:40:33,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=8746.666666666666, ans=0.030222222222222227
+2024-08-25 04:40:42,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8800.0, ans=0.212
+2024-08-25 04:40:43,013 INFO [train.py:1114] (1/4) Epoch 1, batch 1650, loss[loss=0.4154, simple_loss=0.3867, pruned_loss=0.1534, ctc_loss=0.2949, over 19652.00 frames. ], tot_loss[loss=0.4535, simple_loss=0.4042, pruned_loss=0.1738, ctc_loss=0.335, over 3831942.97 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:41:50,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=8800.0, ans=0.07
+2024-08-25 04:43:04,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=8906.666666666666, ans=0.125
+2024-08-25 04:43:24,942 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=9013.333333333334, ans=0.0
+2024-08-25 04:43:28,637 INFO [train.py:1114] (1/4) Epoch 1, batch 1700, loss[loss=0.3467, simple_loss=0.3309, pruned_loss=0.124, ctc_loss=0.246, over 19670.00 frames. ], tot_loss[loss=0.4439, simple_loss=0.4002, pruned_loss=0.1684, ctc_loss=0.325, over 3845983.60 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:43:31,592 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.986e+02 2.386e+02 2.791e+02 4.935e+02, threshold=4.772e+02, percent-clipped=1.0
+2024-08-25 04:43:35,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=9066.666666666666, ans=0.20933333333333334
+2024-08-25 04:43:39,889 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.48 vs. limit=10.92
+2024-08-25 04:43:40,785 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.95 vs. limit=10.92
+2024-08-25 04:43:48,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=9173.333333333334, ans=0.125
+2024-08-25 04:43:56,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.94 vs. limit=14.42
+2024-08-25 04:44:00,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=9226.666666666666, ans=0.028222222222222225
+2024-08-25 04:44:03,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 04:44:07,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=9280.0, ans=10.98
+2024-08-25 04:45:26,323 INFO [train.py:1114] (1/4) Epoch 1, batch 1750, loss[loss=0.3455, simple_loss=0.3427, pruned_loss=0.1193, ctc_loss=0.2343, over 19627.00 frames. ], tot_loss[loss=0.4358, simple_loss=0.3967, pruned_loss=0.1643, ctc_loss=0.3167, over 3850913.43 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:45:34,439 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.60 vs. limit=11.0
+2024-08-25 04:46:11,497 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=9546.666666666666, ans=0.025
+2024-08-25 04:46:13,450 INFO [train.py:1114] (1/4) Epoch 1, batch 1800, loss[loss=0.4142, simple_loss=0.3973, pruned_loss=0.1512, ctc_loss=0.2903, over 19608.00 frames. ], tot_loss[loss=0.4305, simple_loss=0.3954, pruned_loss=0.1615, ctc_loss=0.3114, over 3852586.37 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:46:16,186 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.025e+02 2.321e+02 2.784e+02 4.120e+02, threshold=4.643e+02, percent-clipped=0.0
+2024-08-25 04:48:19,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=9813.333333333334, ans=0.07
+2024-08-25 04:48:26,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=9813.333333333334, ans=0.125
+2024-08-25 04:48:28,786 INFO [train.py:1114] (1/4) Epoch 1, batch 1850, loss[loss=0.397, simple_loss=0.3774, pruned_loss=0.1495, ctc_loss=0.2742, over 19578.00 frames. ], tot_loss[loss=0.4229, simple_loss=0.3923, pruned_loss=0.1578, ctc_loss=0.3036, over 3856379.05 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:48:37,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=9866.666666666666, ans=0.008724637681159421
+2024-08-25 04:48:38,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=9920.0, ans=0.0
+2024-08-25 04:48:44,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=9920.0, ans=0.0
+2024-08-25 04:48:49,668 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten.whitening_limit, batch_count=9973.333333333334, ans=11.24
+2024-08-25 04:48:52,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=9973.333333333334, ans=0.125
+2024-08-25 04:48:57,181 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.32 vs. limit=15.02
+2024-08-25 04:49:03,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=10026.666666666666, ans=0.19973333333333332
+2024-08-25 04:49:13,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=10080.0, ans=0.125
+2024-08-25 04:49:15,886 INFO [train.py:1114] (1/4) Epoch 1, batch 1900, loss[loss=0.3993, simple_loss=0.3942, pruned_loss=0.1439, ctc_loss=0.2746, over 19629.00 frames. ], tot_loss[loss=0.4176, simple_loss=0.3909, pruned_loss=0.1552, ctc_loss=0.2984, over 3860757.23 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:49:18,621 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.031e+02 2.370e+02 2.878e+02 5.610e+02, threshold=4.739e+02, percent-clipped=2.0
+2024-08-25 04:49:56,133 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=10186.666666666666, ans=0.125
+2024-08-25 04:50:12,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=10240.0, ans=0.025
+2024-08-25 04:50:12,927 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:50:31,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=10400.0, ans=0.125
+2024-08-25 04:50:31,873 INFO [train.py:1114] (1/4) Epoch 1, batch 1950, loss[loss=0.3744, simple_loss=0.3712, pruned_loss=0.1339, ctc_loss=0.2665, over 19583.00 frames. ], tot_loss[loss=0.4114, simple_loss=0.3893, pruned_loss=0.1522, ctc_loss=0.2926, over 3870139.29 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:50:43,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=10453.333333333334, ans=0.023111111111111107
+2024-08-25 04:50:49,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=10506.666666666666, ans=0.022888888888888893
+2024-08-25 04:50:49,959 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=4.5760000000000005
+2024-08-25 04:50:53,109 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=10506.666666666666, ans=0.0
+2024-08-25 04:50:55,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.70 vs. limit=11.44
+2024-08-25 04:50:55,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10506.666666666666, ans=0.19493333333333335
+2024-08-25 04:51:09,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=10560.0, ans=0.125
+2024-08-25 04:52:05,814 INFO [train.py:1114] (1/4) Epoch 1, batch 2000, loss[loss=0.3488, simple_loss=0.3495, pruned_loss=0.1248, ctc_loss=0.2465, over 19677.00 frames. ], tot_loss[loss=0.4078, simple_loss=0.3884, pruned_loss=0.1507, ctc_loss=0.2899, over 3854520.47 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:52:09,673 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 1.861e+02 2.137e+02 2.685e+02 4.799e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-25 04:52:10,971 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.47 vs. limit=11.5
+2024-08-25 04:53:25,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=10720.0, ans=0.0
+2024-08-25 04:53:38,444 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=10720.0, ans=0.125
+2024-08-25 04:53:46,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10720.0, ans=0.19279999999999997
+2024-08-25 04:54:20,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=10880.0, ans=11.58
+2024-08-25 04:54:32,949 INFO [train.py:1114] (1/4) Epoch 1, batch 2050, loss[loss=0.3572, simple_loss=0.3558, pruned_loss=0.1303, ctc_loss=0.2449, over 19750.00 frames. ], tot_loss[loss=0.4011, simple_loss=0.385, pruned_loss=0.1479, ctc_loss=0.284, over 3850741.77 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:54:39,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=10933.333333333334, ans=0.125
+2024-08-25 04:54:49,417 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.02 vs. limit=4.648
+2024-08-25 04:55:22,206 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.05 vs. limit=10.546666666666667
+2024-08-25 04:55:26,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=11093.333333333334, ans=0.5117333333333334
+2024-08-25 04:55:35,638 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.36 vs. limit=4.672
+2024-08-25 04:55:42,293 INFO [train.py:1114] (1/4) Epoch 1, batch 2100, loss[loss=0.3977, simple_loss=0.3883, pruned_loss=0.1465, ctc_loss=0.2853, over 19772.00 frames. ], tot_loss[loss=0.3935, simple_loss=0.3816, pruned_loss=0.1443, ctc_loss=0.2773, over 3857647.84 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:56:34,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=11200.0, ans=0.188
+2024-08-25 04:56:36,117 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 1.936e+02 2.214e+02 2.535e+02 3.885e+02, threshold=4.428e+02, percent-clipped=0.0
+2024-08-25 04:56:37,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=11200.0, ans=0.508
+2024-08-25 04:56:41,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=11200.0, ans=0.020000000000000004
+2024-08-25 04:56:41,540 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.07 vs. limit=7.8
+2024-08-25 04:56:42,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=11253.333333333334, ans=0.18746666666666667
+2024-08-25 04:57:08,790 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.42 vs. limit=15.98
+2024-08-25 04:57:25,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=11360.0, ans=0.019333333333333338
+2024-08-25 04:57:27,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=11413.333333333334, ans=0.125
+2024-08-25 04:57:30,160 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.80 vs. limit=4.712
+2024-08-25 04:57:35,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=11466.666666666666, ans=0.125
+2024-08-25 04:57:35,994 INFO [train.py:1114] (1/4) Epoch 1, batch 2150, loss[loss=0.3526, simple_loss=0.3625, pruned_loss=0.1232, ctc_loss=0.2405, over 19609.00 frames. ], tot_loss[loss=0.3868, simple_loss=0.3785, pruned_loss=0.141, ctc_loss=0.271, over 3868410.32 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:57:36,220 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=11466.666666666666, ans=0.4986666666666667
+2024-08-25 04:58:57,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=11520.0, ans=0.125
+2024-08-25 04:58:58,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=11520.0, ans=0.125
+2024-08-25 04:59:30,049 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.46 vs. limit=11.879999999999999
+2024-08-25 04:59:34,060 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.30 vs. limit=11.879999999999999
+2024-08-25 04:59:36,680 INFO [train.py:1114] (1/4) Epoch 1, batch 2200, loss[loss=0.3941, simple_loss=0.3932, pruned_loss=0.1427, ctc_loss=0.2739, over 19555.00 frames. ], tot_loss[loss=0.3831, simple_loss=0.3771, pruned_loss=0.1392, ctc_loss=0.2678, over 3866197.78 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:59:40,227 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 1.884e+02 2.153e+02 2.810e+02 4.673e+02, threshold=4.307e+02, percent-clipped=1.0
+2024-08-25 04:59:40,906 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.10 vs. limit=16.3
+2024-08-25 04:59:41,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=11733.333333333334, ans=0.18266666666666664
+2024-08-25 04:59:45,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=11786.666666666666, ans=0.125
+2024-08-25 05:00:10,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=11893.333333333334, ans=0.125
+2024-08-25 05:00:33,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=12000.0, ans=0.48000000000000004
+2024-08-25 05:00:34,243 INFO [train.py:1114] (1/4) Epoch 1, batch 2250, loss[loss=0.3561, simple_loss=0.3674, pruned_loss=0.1236, ctc_loss=0.2439, over 19598.00 frames. ], tot_loss[loss=0.3802, simple_loss=0.3761, pruned_loss=0.1378, ctc_loss=0.2649, over 3866141.34 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:00:37,214 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.50 vs. limit=12.0
+2024-08-25 05:01:04,444 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=12053.333333333334, ans=0.4781333333333333
+2024-08-25 05:01:19,400 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.73 vs. limit=4.816
+2024-08-25 05:01:30,883 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=12160.0, ans=0.016
+2024-08-25 05:01:44,086 INFO [train.py:1114] (1/4) Epoch 1, batch 2300, loss[loss=0.3473, simple_loss=0.3555, pruned_loss=0.1223, ctc_loss=0.2361, over 19507.00 frames. ], tot_loss[loss=0.3767, simple_loss=0.3738, pruned_loss=0.1363, ctc_loss=0.2616, over 3860747.31 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:01:47,653 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.926e+02 2.114e+02 2.507e+02 4.625e+02, threshold=4.228e+02, percent-clipped=3.0
+2024-08-25 05:01:48,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=12266.666666666666, ans=0.015555555555555559
+2024-08-25 05:02:12,252 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.51 vs. limit=16.82
+2024-08-25 05:02:30,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=12533.333333333334, ans=0.125
+2024-08-25 05:02:30,767 INFO [train.py:1114] (1/4) Epoch 1, batch 2350, loss[loss=0.3935, simple_loss=0.3888, pruned_loss=0.1436, ctc_loss=0.2775, over 19700.00 frames. ], tot_loss[loss=0.3747, simple_loss=0.3732, pruned_loss=0.1354, ctc_loss=0.2596, over 3863328.77 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:04:30,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=12640.0, ans=0.125
+2024-08-25 05:04:38,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=12693.333333333334, ans=10.0
+2024-08-25 05:04:44,473 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.57 vs. limit=9.077333333333334
+2024-08-25 05:04:57,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=12746.666666666666, ans=0.025
+2024-08-25 05:05:04,665 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.56 vs. limit=6.5600000000000005
+2024-08-25 05:05:05,278 INFO [train.py:1114] (1/4) Epoch 1, batch 2400, loss[loss=0.4004, simple_loss=0.3957, pruned_loss=0.1467, ctc_loss=0.2791, over 19379.00 frames. ], tot_loss[loss=0.3757, simple_loss=0.3751, pruned_loss=0.1356, ctc_loss=0.2595, over 3858170.43 frames. ], batch size: 67, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:05:08,752 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.948e+02 2.252e+02 2.666e+02 4.870e+02, threshold=4.504e+02, percent-clipped=4.0
+2024-08-25 05:05:11,567 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.05 vs. limit=9.120000000000001
+2024-08-25 05:05:18,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=12853.333333333334, ans=0.00807536231884058
+2024-08-25 05:05:21,625 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.52 vs. limit=4.928
+2024-08-25 05:05:33,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=12960.0, ans=0.125
+2024-08-25 05:05:35,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=12960.0, ans=0.125
+2024-08-25 05:05:44,868 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.99 vs. limit=12.379999999999999
+2024-08-25 05:05:45,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=13013.333333333334, ans=0.3952
+2024-08-25 05:05:52,656 INFO [train.py:1114] (1/4) Epoch 1, batch 2450, loss[loss=0.4947, simple_loss=0.433, pruned_loss=0.2032, ctc_loss=0.3751, over 13653.00 frames. ], tot_loss[loss=0.3853, simple_loss=0.3806, pruned_loss=0.1408, ctc_loss=0.2684, over 3732983.70 frames. ], batch size: 140, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:06:18,971 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.56 vs. limit=8.266666666666666
+2024-08-25 05:06:27,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=13120.0, ans=0.008017391304347827
+2024-08-25 05:06:31,019 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=13173.333333333334, ans=0.125
+2024-08-25 05:07:49,748 INFO [train.py:1114] (1/4) Epoch 2, batch 0, loss[loss=0.3543, simple_loss=0.3605, pruned_loss=0.1275, ctc_loss=0.2328, over 19390.00 frames. ], tot_loss[loss=0.3543, simple_loss=0.3605, pruned_loss=0.1275, ctc_loss=0.2328, over 19390.00 frames. ], batch size: 48, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 05:07:49,749 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 05:09:16,713 INFO [train.py:1146] (1/4) Epoch 2, validation: loss=0.2886, simple_loss=0.3508, pruned_loss=0.0823, ctc_loss=0.1542, over 944034.00 frames.
+2024-08-25 05:09:16,714 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13283MB
+2024-08-25 05:09:16,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13280.0, ans=0.1672
+2024-08-25 05:09:35,626 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 1.938e+02 2.191e+02 2.677e+02 6.592e+02, threshold=4.382e+02, percent-clipped=7.0
+2024-08-25 05:09:36,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=13333.333333333334, ans=0.125
+2024-08-25 05:09:52,528 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13440.0, ans=0.1656
+2024-08-25 05:09:52,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=13440.0, ans=0.125
+2024-08-25 05:10:10,713 INFO [train.py:1114] (1/4) Epoch 2, batch 50, loss[loss=0.2993, simple_loss=0.3185, pruned_loss=0.1009, ctc_loss=0.1963, over 19743.00 frames. ], tot_loss[loss=0.3735, simple_loss=0.3755, pruned_loss=0.1345, ctc_loss=0.2565, over 845814.23 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:11:12,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=13653.333333333334, ans=0.4221333333333333
+2024-08-25 05:11:14,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=13653.333333333334, ans=0.125
+2024-08-25 05:11:46,555 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=13813.333333333334, ans=0.025
+2024-08-25 05:11:47,132 INFO [train.py:1114] (1/4) Epoch 2, batch 100, loss[loss=0.3074, simple_loss=0.3364, pruned_loss=0.09915, ctc_loss=0.2002, over 19718.00 frames. ], tot_loss[loss=0.3733, simple_loss=0.377, pruned_loss=0.1338, ctc_loss=0.2549, over 1499532.21 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:12:00,806 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.907e+02 2.167e+02 2.481e+02 4.957e+02, threshold=4.333e+02, percent-clipped=1.0
+2024-08-25 05:12:01,111 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=13866.666666666666, ans=0.0
+2024-08-25 05:12:14,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=13920.0, ans=0.125
+2024-08-25 05:12:22,084 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=13973.333333333334, ans=0.025
+2024-08-25 05:12:47,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1.whitening_limit, batch_count=14080.0, ans=8.52
+2024-08-25 05:12:50,537 INFO [train.py:1114] (1/4) Epoch 2, batch 150, loss[loss=0.3072, simple_loss=0.3237, pruned_loss=0.1056, ctc_loss=0.1984, over 19716.00 frames. ], tot_loss[loss=0.3652, simple_loss=0.3716, pruned_loss=0.1299, ctc_loss=0.2474, over 2028297.15 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:12:53,009 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.20 vs. limit=12.780000000000001
+2024-08-25 05:12:53,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=14080.0, ans=0.125
+2024-08-25 05:14:14,631 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.19 vs. limit=18.18
+2024-08-25 05:14:33,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=14240.0, ans=0.15760000000000002
+2024-08-25 05:14:50,529 INFO [train.py:1114] (1/4) Epoch 2, batch 200, loss[loss=0.3813, simple_loss=0.3779, pruned_loss=0.1382, ctc_loss=0.2706, over 18347.00 frames. ], tot_loss[loss=0.3586, simple_loss=0.3669, pruned_loss=0.1268, ctc_loss=0.2418, over 2435894.34 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:14:54,116 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.59 vs. limit=12.879999999999999
+2024-08-25 05:15:13,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=14400.0, ans=0.125
+2024-08-25 05:15:14,927 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.847e+02 2.110e+02 2.499e+02 4.235e+02, threshold=4.220e+02, percent-clipped=0.0
+2024-08-25 05:15:49,817 INFO [train.py:1114] (1/4) Epoch 2, batch 250, loss[loss=0.3884, simple_loss=0.3984, pruned_loss=0.1371, ctc_loss=0.2604, over 19410.00 frames. ], tot_loss[loss=0.3558, simple_loss=0.3657, pruned_loss=0.1252, ctc_loss=0.2387, over 2755875.80 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:16:18,440 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.17 vs. limit=18.5
+2024-08-25 05:16:21,449 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.73 vs. limit=9.866666666666667
+2024-08-25 05:16:26,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=14720.0, ans=0.09899494936611666
+2024-08-25 05:16:30,154 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.55 vs. limit=18.54
+2024-08-25 05:16:31,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=14720.0, ans=0.125
+2024-08-25 05:16:35,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=14720.0, ans=0.125
+2024-08-25 05:19:29,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=14826.666666666666, ans=0.125
+2024-08-25 05:19:35,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=14826.666666666666, ans=0.125
+2024-08-25 05:19:37,930 INFO [train.py:1114] (1/4) Epoch 2, batch 300, loss[loss=0.3925, simple_loss=0.3961, pruned_loss=0.1413, ctc_loss=0.2658, over 19532.00 frames. ], tot_loss[loss=0.3534, simple_loss=0.3642, pruned_loss=0.1241, ctc_loss=0.2362, over 3000767.60 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:19:43,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=14880.0, ans=0.1512
+2024-08-25 05:19:49,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=14880.0, ans=0.025
+2024-08-25 05:19:51,774 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.64 vs. limit=13.1
+2024-08-25 05:19:51,815 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.96 vs. limit=8.733333333333334
+2024-08-25 05:19:53,892 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.36 vs. limit=8.733333333333334
+2024-08-25 05:19:56,630 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.858e+02 2.099e+02 2.398e+02 3.801e+02, threshold=4.198e+02, percent-clipped=0.0
+2024-08-25 05:19:58,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=14933.333333333334, ans=0.004444444444444438
+2024-08-25 05:20:13,153 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=14986.666666666666, ans=0.007611594202898551
+2024-08-25 05:20:25,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=15040.0, ans=0.125
+2024-08-25 05:20:51,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=15093.333333333334, ans=0.125
+2024-08-25 05:20:51,296 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.01 vs. limit=18.82
+2024-08-25 05:20:54,653 INFO [train.py:1114] (1/4) Epoch 2, batch 350, loss[loss=0.2976, simple_loss=0.3227, pruned_loss=0.09887, ctc_loss=0.1868, over 19746.00 frames. ], tot_loss[loss=0.3524, simple_loss=0.3638, pruned_loss=0.1234, ctc_loss=0.2352, over 3189874.42 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:20:58,415 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.44 vs. limit=18.86
+2024-08-25 05:21:08,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=15200.0, ans=0.125
+2024-08-25 05:21:21,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=15253.333333333334, ans=0.125
+2024-08-25 05:21:32,667 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.40 vs. limit=13.219999999999999
+2024-08-25 05:22:06,919 INFO [train.py:1114] (1/4) Epoch 2, batch 400, loss[loss=0.3413, simple_loss=0.3606, pruned_loss=0.1165, ctc_loss=0.2227, over 19498.00 frames. ], tot_loss[loss=0.3514, simple_loss=0.3635, pruned_loss=0.1229, ctc_loss=0.2336, over 3341436.16 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:22:08,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=15413.333333333334, ans=0.007518840579710145
+2024-08-25 05:22:10,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=15413.333333333334, ans=0.0
+2024-08-25 05:22:20,586 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.895e+02 2.189e+02 2.528e+02 4.758e+02, threshold=4.379e+02, percent-clipped=2.0
+2024-08-25 05:22:25,727 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:22:28,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=15520.0, ans=0.0020000000000000018
+2024-08-25 05:23:46,286 INFO [train.py:1114] (1/4) Epoch 2, batch 450, loss[loss=0.3499, simple_loss=0.3691, pruned_loss=0.121, ctc_loss=0.2216, over 19607.00 frames. ], tot_loss[loss=0.3498, simple_loss=0.3622, pruned_loss=0.1222, ctc_loss=0.2322, over 3450016.16 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:23:51,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=15680.0, ans=0.025
+2024-08-25 05:23:52,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=15680.0, ans=0.025
+2024-08-25 05:24:07,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=15786.666666666666, ans=0.125
+2024-08-25 05:24:14,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=15840.0, ans=0.0006666666666666696
+2024-08-25 05:24:22,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=15840.0, ans=0.125
+2024-08-25 05:24:25,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=15893.333333333334, ans=0.125
+2024-08-25 05:24:29,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=15893.333333333334, ans=0.007414492753623188
+2024-08-25 05:24:37,929 INFO [train.py:1114] (1/4) Epoch 2, batch 500, loss[loss=0.3542, simple_loss=0.3731, pruned_loss=0.1201, ctc_loss=0.2377, over 19633.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.3603, pruned_loss=0.1205, ctc_loss=0.229, over 3545224.30 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:26:00,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16000.0, ans=0.14
+2024-08-25 05:26:05,506 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.778e+02 2.035e+02 2.349e+02 4.286e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-25 05:26:19,490 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.61 vs. limit=19.54
+2024-08-25 05:26:24,092 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=16106.666666666666, ans=0.035
+2024-08-25 05:26:35,294 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.43 vs. limit=19.619999999999997
+2024-08-25 05:26:35,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=16160.0, ans=0.125
+2024-08-25 05:26:49,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=16160.0, ans=0.125
+2024-08-25 05:26:53,764 INFO [train.py:1114] (1/4) Epoch 2, batch 550, loss[loss=0.3619, simple_loss=0.3702, pruned_loss=0.1265, ctc_loss=0.2518, over 19278.00 frames. ], tot_loss[loss=0.3468, simple_loss=0.3606, pruned_loss=0.1207, ctc_loss=0.229, over 3606845.61 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:27:03,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=16213.333333333334, ans=0.125
+2024-08-25 05:27:07,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.30 vs. limit=13.58
+2024-08-25 05:27:16,190 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.26 vs. limit=19.7
+2024-08-25 05:27:27,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=16320.0, ans=0.125
+2024-08-25 05:27:36,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=16373.333333333334, ans=0.0
+2024-08-25 05:28:02,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=16373.333333333334, ans=0.3269333333333333
+2024-08-25 05:28:02,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=16373.333333333334, ans=0.125
+2024-08-25 05:28:05,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 05:28:15,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=16426.666666666668, ans=0.0
+2024-08-25 05:28:19,445 INFO [train.py:1114] (1/4) Epoch 2, batch 600, loss[loss=0.356, simple_loss=0.3757, pruned_loss=0.1224, ctc_loss=0.2287, over 19448.00 frames. ], tot_loss[loss=0.3456, simple_loss=0.3601, pruned_loss=0.1201, ctc_loss=0.2276, over 3664934.48 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:28:19,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=16480.0, ans=0.125
+2024-08-25 05:28:34,466 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.917e+02 2.183e+02 2.770e+02 8.189e+02, threshold=4.366e+02, percent-clipped=5.0
+2024-08-25 05:28:38,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=16533.333333333332, ans=0.08466666666666667
+2024-08-25 05:28:55,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16640.0, ans=0.13360000000000002
+2024-08-25 05:29:14,185 INFO [train.py:1114] (1/4) Epoch 2, batch 650, loss[loss=0.3069, simple_loss=0.3377, pruned_loss=0.09826, ctc_loss=0.1991, over 19762.00 frames. ], tot_loss[loss=0.3424, simple_loss=0.3581, pruned_loss=0.1184, ctc_loss=0.2248, over 3715274.43 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:31:10,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=16746.666666666668, ans=0.07
+2024-08-25 05:31:10,815 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=16746.666666666668, ans=0.007228985507246377
+2024-08-25 05:31:27,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=16800.0, ans=0.125
+2024-08-25 05:31:38,787 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=16853.333333333332, ans=0.025
+2024-08-25 05:31:45,163 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=16906.666666666668, ans=0.025
+2024-08-25 05:31:56,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=16960.0, ans=10.0
+2024-08-25 05:32:06,451 INFO [train.py:1114] (1/4) Epoch 2, batch 700, loss[loss=0.3208, simple_loss=0.336, pruned_loss=0.1112, ctc_loss=0.2079, over 19719.00 frames. ], tot_loss[loss=0.342, simple_loss=0.358, pruned_loss=0.1182, ctc_loss=0.2242, over 3747259.36 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:32:06,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=17013.333333333332, ans=0.07
+2024-08-25 05:32:44,641 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.00 vs. limit=9.266666666666667
+2024-08-25 05:32:47,855 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.759e+02 2.005e+02 2.359e+02 5.033e+02, threshold=4.011e+02, percent-clipped=2.0
+2024-08-25 05:32:53,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=17066.666666666668, ans=0.0
+2024-08-25 05:33:14,211 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.44 vs. limit=13.94
+2024-08-25 05:33:15,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=17173.333333333332, ans=0.125
+2024-08-25 05:33:28,064 INFO [train.py:1114] (1/4) Epoch 2, batch 750, loss[loss=0.3358, simple_loss=0.3555, pruned_loss=0.1155, ctc_loss=0.2129, over 19500.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.358, pruned_loss=0.1184, ctc_loss=0.224, over 3774238.27 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:35:19,260 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=17280.0, ans=0.125
+2024-08-25 05:37:33,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=17493.333333333332, ans=0.2877333333333334
+2024-08-25 05:37:40,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=17546.666666666668, ans=0.0
+2024-08-25 05:37:40,883 INFO [train.py:1114] (1/4) Epoch 2, batch 800, loss[loss=0.3057, simple_loss=0.3337, pruned_loss=0.101, ctc_loss=0.1893, over 19415.00 frames. ], tot_loss[loss=0.3408, simple_loss=0.3572, pruned_loss=0.1177, ctc_loss=0.2225, over 3796416.54 frames. ], batch size: 48, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 05:37:43,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=17546.666666666668, ans=0.025
+2024-08-25 05:37:44,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=17546.666666666668, ans=0.125
+2024-08-25 05:37:47,170 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.53 vs. limit=20.66
+2024-08-25 05:38:06,533 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.845e+02 2.130e+02 2.517e+02 4.310e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 05:38:21,141 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.47 vs. limit=9.413333333333334
+2024-08-25 05:38:40,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=17760.0, ans=0.0070086956521739135
+2024-08-25 05:38:48,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=17813.333333333332, ans=0.125
+2024-08-25 05:38:48,680 INFO [train.py:1114] (1/4) Epoch 2, batch 850, loss[loss=0.3463, simple_loss=0.3701, pruned_loss=0.1165, ctc_loss=0.2238, over 19672.00 frames. ], tot_loss[loss=0.3388, simple_loss=0.3559, pruned_loss=0.1167, ctc_loss=0.2205, over 3814423.31 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 16.0
+2024-08-25 05:38:59,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=17813.333333333332, ans=0.125
+2024-08-25 05:39:09,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=17866.666666666668, ans=0.0
+2024-08-25 05:39:30,413 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.35 vs. limit=14.24
+2024-08-25 05:39:33,608 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.01 vs. limit=14.24
+2024-08-25 05:39:58,487 INFO [train.py:1114] (1/4) Epoch 2, batch 900, loss[loss=0.3158, simple_loss=0.3344, pruned_loss=0.1082, ctc_loss=0.2018, over 19803.00 frames. ], tot_loss[loss=0.3394, simple_loss=0.3563, pruned_loss=0.1171, ctc_loss=0.2209, over 3818215.99 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:40:09,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.23 vs. limit=14.280000000000001
+2024-08-25 05:40:19,555 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.852e+02 2.189e+02 2.703e+02 9.878e+02, threshold=4.378e+02, percent-clipped=3.0
+2024-08-25 05:40:49,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=18240.0, ans=0.125
+2024-08-25 05:40:49,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=18240.0, ans=0.11760000000000001
+2024-08-25 05:41:14,124 INFO [train.py:1114] (1/4) Epoch 2, batch 950, loss[loss=0.2776, simple_loss=0.3105, pruned_loss=0.08921, ctc_loss=0.1656, over 19517.00 frames. ], tot_loss[loss=0.3389, simple_loss=0.356, pruned_loss=0.1169, ctc_loss=0.2201, over 3819215.99 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:41:23,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=18346.666666666668, ans=0.125
+2024-08-25 05:41:24,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=18400.0, ans=0.125
+2024-08-25 05:41:41,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=18453.333333333332, ans=0.0
+2024-08-25 05:42:02,045 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18560.0, ans=0.1144
+2024-08-25 05:42:03,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=18560.0, ans=0.0
+2024-08-25 05:42:06,446 INFO [train.py:1114] (1/4) Epoch 2, batch 1000, loss[loss=0.3228, simple_loss=0.3443, pruned_loss=0.1091, ctc_loss=0.2074, over 19834.00 frames. ], tot_loss[loss=0.3398, simple_loss=0.3568, pruned_loss=0.1173, ctc_loss=0.2204, over 3814199.32 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:42:21,294 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=18613.333333333332, ans=0.0
+2024-08-25 05:42:38,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=18666.666666666668, ans=0.125
+2024-08-25 05:42:41,293 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.839e+02 2.030e+02 2.416e+02 3.488e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-25 05:42:46,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=18720.0, ans=0.025
+2024-08-25 05:42:51,163 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.94 vs. limit=11.488
+2024-08-25 05:42:59,558 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.03 vs. limit=5.816
+2024-08-25 05:43:02,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=18773.333333333332, ans=0.00678840579710145
+2024-08-25 05:43:03,579 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.77 vs. limit=9.693333333333332
+2024-08-25 05:43:16,642 INFO [train.py:1114] (1/4) Epoch 2, batch 1050, loss[loss=0.3271, simple_loss=0.3547, pruned_loss=0.1091, ctc_loss=0.2035, over 19836.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3553, pruned_loss=0.1161, ctc_loss=0.2179, over 3821203.38 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:43:23,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=18880.0, ans=0.006765217391304348
+2024-08-25 05:43:31,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=18933.333333333332, ans=0.025
+2024-08-25 05:43:53,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=18986.666666666668, ans=0.125
+2024-08-25 05:44:00,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=19040.0, ans=0.48560000000000003
+2024-08-25 05:44:00,691 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=19040.0, ans=0.125
+2024-08-25 05:44:02,522 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=19040.0, ans=0.23360000000000003
+2024-08-25 05:44:04,899 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=3.68 vs. limit=14.64
+2024-08-25 05:44:18,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=19093.333333333332, ans=0.10906666666666667
+2024-08-25 05:44:23,180 INFO [train.py:1114] (1/4) Epoch 2, batch 1100, loss[loss=0.3232, simple_loss=0.3454, pruned_loss=0.1106, ctc_loss=0.1993, over 19593.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3543, pruned_loss=0.1151, ctc_loss=0.2157, over 3829216.55 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:44:39,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=19146.666666666668, ans=0.125
+2024-08-25 05:44:40,110 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.49 vs. limit=14.68
+2024-08-25 05:44:42,028 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=11.07 vs. limit=14.7
+2024-08-25 05:44:48,510 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.777e+02 2.009e+02 2.448e+02 3.967e+02, threshold=4.019e+02, percent-clipped=0.0
+2024-08-25 05:44:57,789 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.80 vs. limit=14.719999999999999
+2024-08-25 05:45:05,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=19306.666666666668, ans=0.10693333333333332
+2024-08-25 05:45:31,303 INFO [train.py:1114] (1/4) Epoch 2, batch 1150, loss[loss=0.3094, simple_loss=0.3356, pruned_loss=0.1023, ctc_loss=0.1965, over 19580.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3535, pruned_loss=0.1145, ctc_loss=0.2147, over 3829591.84 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:45:33,580 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19413.333333333332, ans=0.10586666666666669
+2024-08-25 05:45:45,924 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=19466.666666666668, ans=0.00663768115942029
+2024-08-25 05:45:52,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=19520.0, ans=0.0
+2024-08-25 05:47:24,629 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.06 vs. limit=22.22
+2024-08-25 05:47:24,716 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.07 vs. limit=14.86
+2024-08-25 05:47:34,831 INFO [train.py:1114] (1/4) Epoch 2, batch 1200, loss[loss=0.3091, simple_loss=0.3536, pruned_loss=0.09606, ctc_loss=0.1811, over 19837.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.354, pruned_loss=0.1145, ctc_loss=0.2149, over 3824515.07 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 05:47:44,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=19733.333333333332, ans=0.125
+2024-08-25 05:47:46,160 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.13 vs. limit=22.3
+2024-08-25 05:47:50,321 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.798e+02 2.208e+02 2.852e+02 1.698e+03, threshold=4.415e+02, percent-clipped=3.0
+2024-08-25 05:48:04,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=19733.333333333332, ans=0.006579710144927537
+2024-08-25 05:48:20,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=19840.0, ans=0.0
+2024-08-25 05:48:21,563 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19840.0, ans=0.10160000000000002
+2024-08-25 05:48:40,349 INFO [train.py:1114] (1/4) Epoch 2, batch 1250, loss[loss=0.3444, simple_loss=0.3679, pruned_loss=0.1184, ctc_loss=0.2102, over 19534.00 frames. ], tot_loss[loss=0.3325, simple_loss=0.3532, pruned_loss=0.1133, ctc_loss=0.2129, over 3842435.77 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:48:43,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=19946.666666666668, ans=0.0
+2024-08-25 05:48:50,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=20000.0, ans=0.025
+2024-08-25 05:49:25,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=20106.666666666668, ans=0.125
+2024-08-25 05:49:29,923 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=20160.0, ans=0.125
+2024-08-25 05:49:31,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=20160.0, ans=0.125
+2024-08-25 05:49:32,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=20160.0, ans=0.125
+2024-08-25 05:49:36,921 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.55 vs. limit=10.0
+2024-08-25 05:49:37,279 INFO [train.py:1114] (1/4) Epoch 2, batch 1300, loss[loss=0.3401, simple_loss=0.3644, pruned_loss=0.115, ctc_loss=0.2146, over 18869.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3519, pruned_loss=0.112, ctc_loss=0.2105, over 3846646.55 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:49:48,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=20266.666666666668, ans=0.125
+2024-08-25 05:49:52,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=20266.666666666668, ans=0.2
+2024-08-25 05:49:52,768 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.771e+02 1.898e+02 2.175e+02 3.765e+02, threshold=3.796e+02, percent-clipped=0.0
+2024-08-25 05:50:20,710 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:50:25,312 INFO [train.py:1114] (1/4) Epoch 2, batch 1350, loss[loss=0.3114, simple_loss=0.3389, pruned_loss=0.1031, ctc_loss=0.1942, over 19775.00 frames. ], tot_loss[loss=0.3293, simple_loss=0.3516, pruned_loss=0.1116, ctc_loss=0.2095, over 3858167.57 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:50:42,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=20533.333333333332, ans=0.0
+2024-08-25 05:50:45,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=20533.333333333332, ans=0.025
+2024-08-25 05:50:47,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=20533.333333333332, ans=0.0
+2024-08-25 05:50:53,063 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.11 vs. limit=15.0
+2024-08-25 05:50:57,863 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.91 vs. limit=10.0
+2024-08-25 05:51:04,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=20640.0, ans=0.0
+2024-08-25 05:51:19,094 INFO [train.py:1114] (1/4) Epoch 2, batch 1400, loss[loss=0.2869, simple_loss=0.3116, pruned_loss=0.09494, ctc_loss=0.1809, over 19678.00 frames. ], tot_loss[loss=0.3276, simple_loss=0.3505, pruned_loss=0.1107, ctc_loss=0.208, over 3864535.33 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:51:23,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=20746.666666666668, ans=0.0
+2024-08-25 05:51:25,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=20746.666666666668, ans=0.006359420289855073
+2024-08-25 05:51:30,141 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.68 vs. limit=10.0
+2024-08-25 05:51:34,341 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.933e+02 2.205e+02 2.519e+02 3.569e+02, threshold=4.410e+02, percent-clipped=0.0
+2024-08-25 05:51:40,686 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.12 vs. limit=15.0
+2024-08-25 05:51:48,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=20906.666666666668, ans=0.0
+2024-08-25 05:51:54,269 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20906.666666666668, ans=0.1
+2024-08-25 05:52:01,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=20960.0, ans=0.0
+2024-08-25 05:52:02,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=20960.0, ans=0.0
+2024-08-25 05:52:06,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=20960.0, ans=0.125
+2024-08-25 05:52:09,371 INFO [train.py:1114] (1/4) Epoch 2, batch 1450, loss[loss=0.3436, simple_loss=0.3679, pruned_loss=0.1164, ctc_loss=0.216, over 19723.00 frames. ], tot_loss[loss=0.3281, simple_loss=0.351, pruned_loss=0.111, ctc_loss=0.2082, over 3862719.65 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:52:33,459 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=21120.0, ans=10.0
+2024-08-25 05:52:43,742 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=21173.333333333332, ans=0.025
+2024-08-25 05:52:52,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=21226.666666666668, ans=0.2
+2024-08-25 05:52:57,019 INFO [train.py:1114] (1/4) Epoch 2, batch 1500, loss[loss=0.3221, simple_loss=0.3531, pruned_loss=0.1053, ctc_loss=0.2011, over 19583.00 frames. ], tot_loss[loss=0.3285, simple_loss=0.3515, pruned_loss=0.1111, ctc_loss=0.2084, over 3861872.94 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:53:13,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21333.333333333332, ans=0.1
+2024-08-25 05:53:17,230 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.832e+02 2.087e+02 2.558e+02 5.212e+02, threshold=4.175e+02, percent-clipped=3.0
+2024-08-25 05:53:18,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=21333.333333333332, ans=0.1
+2024-08-25 05:53:33,633 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.27 vs. limit=12.0
+2024-08-25 05:53:36,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21440.0, ans=0.1
+2024-08-25 05:53:45,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=21493.333333333332, ans=0.006197101449275363
+2024-08-25 05:53:47,381 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=21493.333333333332, ans=0.09899494936611666
+2024-08-25 05:53:58,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=21493.333333333332, ans=0.006197101449275363
+2024-08-25 05:54:02,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=21493.333333333332, ans=0.125
+2024-08-25 05:54:03,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=21493.333333333332, ans=0.006197101449275363
+2024-08-25 05:54:05,952 INFO [train.py:1114] (1/4) Epoch 2, batch 1550, loss[loss=0.3891, simple_loss=0.3891, pruned_loss=0.1432, ctc_loss=0.2567, over 19584.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.3517, pruned_loss=0.1116, ctc_loss=0.2091, over 3846711.90 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 05:54:06,608 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=10.06 vs. limit=15.0
+2024-08-25 05:54:18,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.99 vs. limit=15.0
+2024-08-25 05:54:21,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.27 vs. limit=15.0
+2024-08-25 05:54:32,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=21600.0, ans=0.05
+2024-08-25 05:54:35,634 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=21600.0, ans=0.125
+2024-08-25 05:54:37,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=21600.0, ans=0.125
+2024-08-25 05:54:47,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=21653.333333333332, ans=0.1
+2024-08-25 05:54:55,665 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.18 vs. limit=10.0
+2024-08-25 05:55:11,760 INFO [train.py:1114] (1/4) Epoch 2, batch 1600, loss[loss=0.3207, simple_loss=0.3563, pruned_loss=0.1028, ctc_loss=0.1985, over 19837.00 frames. ], tot_loss[loss=0.3285, simple_loss=0.351, pruned_loss=0.1113, ctc_loss=0.2088, over 3835638.61 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
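+
+The `grad_scale` field in the training lines moves between 16.0 and 32.0, the signature of dynamic loss scaling for mixed-precision training: the scale doubles after a stretch of overflow-free steps and halves when gradients overflow. PyTorch's `GradScaler` implements this pattern; the constructor arguments below are illustrative, not taken from this run's configuration:
+
+```python
+# Hedged sketch of the standard AMP loss-scaling loop behind "grad_scale: 16.0/32.0".
+import torch
+
+scaler = torch.cuda.amp.GradScaler(init_scale=16.0, growth_factor=2.0,
+                                   backoff_factor=0.5, growth_interval=2000)
+
+# Inside a training step (model/optimizer/batch assumed defined):
+#   with torch.cuda.amp.autocast():
+#       loss = compute_loss(model, batch)
+#   scaler.scale(loss).backward()
+#   scaler.step(optimizer)       # skipped internally if gradients overflowed
+#   scaler.update()              # grows or backs off the scale
+
+print(scaler.get_scale())  # 16.0 on a CUDA machine, as in "grad_scale: 16.0"
+```
+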
+2024-08-25 05:55:13,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=21813.333333333332, ans=0.0
+2024-08-25 05:55:17,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=21813.333333333332, ans=0.2
+2024-08-25 05:55:27,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=21866.666666666668, ans=0.125
+2024-08-25 05:55:32,429 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.812e+02 2.122e+02 2.604e+02 4.336e+02, threshold=4.244e+02, percent-clipped=2.0
+2024-08-25 05:55:41,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=21920.0, ans=0.125
+2024-08-25 05:55:41,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=21920.0, ans=0.125
+2024-08-25 05:56:05,196 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.42 vs. limit=15.0
+2024-08-25 05:56:09,679 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=22026.666666666668, ans=0.125
+2024-08-25 05:56:13,283 INFO [train.py:1114] (1/4) Epoch 2, batch 1650, loss[loss=0.348, simple_loss=0.3672, pruned_loss=0.1215, ctc_loss=0.2146, over 19641.00 frames. ], tot_loss[loss=0.327, simple_loss=0.3498, pruned_loss=0.1106, ctc_loss=0.2073, over 3833478.71 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 16.0
+2024-08-25 05:56:13,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=22080.0, ans=0.0
+2024-08-25 05:56:37,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=22186.666666666668, ans=0.0
+2024-08-25 05:56:42,097 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=22186.666666666668, ans=0.0
+2024-08-25 05:57:05,965 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=14.39 vs. limit=15.0
+2024-08-25 05:57:11,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=22293.333333333332, ans=0.0
+2024-08-25 05:57:12,900 INFO [train.py:1114] (1/4) Epoch 2, batch 1700, loss[loss=0.2901, simple_loss=0.3142, pruned_loss=0.09545, ctc_loss=0.1877, over 19679.00 frames. ], tot_loss[loss=0.3249, simple_loss=0.3489, pruned_loss=0.1094, ctc_loss=0.2053, over 3848170.98 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:57:29,334 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.791e+02 2.005e+02 2.338e+02 3.555e+02, threshold=4.010e+02, percent-clipped=0.0
+2024-08-25 05:57:46,029 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=17.75 vs. limit=15.0
+2024-08-25 05:58:02,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=22506.666666666668, ans=0.125
+2024-08-25 05:58:27,924 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=22560.0, ans=0.2
+2024-08-25 05:58:34,072 INFO [train.py:1114] (1/4) Epoch 2, batch 1750, loss[loss=0.3049, simple_loss=0.3218, pruned_loss=0.1057, ctc_loss=0.1915, over 19643.00 frames. ], tot_loss[loss=0.3231, simple_loss=0.3476, pruned_loss=0.1086, ctc_loss=0.2037, over 3853458.26 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:58:35,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=22613.333333333332, ans=0.2
+2024-08-25 05:58:35,490 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=27.27 vs. limit=22.5
+2024-08-25 05:58:40,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=22613.333333333332, ans=0.0
+2024-08-25 05:58:43,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=22666.666666666668, ans=0.07
+2024-08-25 05:58:43,533 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.40 vs. limit=6.0
+2024-08-25 05:58:55,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=22720.0, ans=0.125
+2024-08-25 05:59:14,496 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.14 vs. limit=15.0
+2024-08-25 05:59:15,938 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=22826.666666666668, ans=0.125
+2024-08-25 05:59:19,959 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.85 vs. limit=22.5
+2024-08-25 05:59:24,711 INFO [train.py:1114] (1/4) Epoch 2, batch 1800, loss[loss=0.321, simple_loss=0.3529, pruned_loss=0.1045, ctc_loss=0.2002, over 19617.00 frames. ], tot_loss[loss=0.3232, simple_loss=0.3479, pruned_loss=0.1085, ctc_loss=0.2038, over 3854744.74 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 05:59:26,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=22880.0, ans=0.2
+2024-08-25 05:59:26,666 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
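+
+The `scaling.py:1120` WithLoss lines attach an auxiliary penalty to the self-attention weights and log its sum; a sum of exactly 0.000e+00 indicates the penalty was inactive for the batch, i.e. the weights stayed inside their allowed range. A hedged sketch of one such penalty (the limit and the wiring into the loss are assumptions):
+
+```python
+# Hedged sketch: a penalty that is zero while values stay within [-limit, limit],
+# matching the "loss-sum=0.000e+00" entries above when nothing is out of range.
+import torch
+
+def attention_penalty(attn_weights: torch.Tensor, limit: float = 1.0) -> torch.Tensor:
+    excess = (attn_weights.abs() - limit).clamp(min=0.0)
+    loss_sum = excess.sum()  # would be added to the training loss elsewhere
+    print(f"WithLoss: name=self_attn_weights, loss-sum={loss_sum.item():.3e}")
+    return loss_sum
+
+attention_penalty(torch.softmax(torch.randn(4, 16, 16), dim=-1))  # -> 0.000e+00
+```
+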
+2024-08-25 05:59:39,815 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.812e+02 2.002e+02 2.312e+02 3.839e+02, threshold=4.004e+02, percent-clipped=0.0
+2024-08-25 06:00:12,424 INFO [train.py:1114] (1/4) Epoch 2, batch 1850, loss[loss=0.343, simple_loss=0.3622, pruned_loss=0.1179, ctc_loss=0.22, over 19579.00 frames. ], tot_loss[loss=0.3224, simple_loss=0.3475, pruned_loss=0.1081, ctc_loss=0.2028, over 3858462.73 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 06:00:12,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=23146.666666666668, ans=0.125
+2024-08-25 06:00:19,991 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=23146.666666666668, ans=10.0
+2024-08-25 06:00:28,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=23200.0, ans=0.2
+2024-08-25 06:00:54,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=23360.0, ans=0.125
+2024-08-25 06:00:59,781 INFO [train.py:1114] (1/4) Epoch 2, batch 1900, loss[loss=0.3291, simple_loss=0.3591, pruned_loss=0.107, ctc_loss=0.2127, over 19635.00 frames. ], tot_loss[loss=0.3223, simple_loss=0.3478, pruned_loss=0.108, ctc_loss=0.2025, over 3861789.55 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 16.0
+2024-08-25 06:01:02,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=23413.333333333332, ans=0.2
+2024-08-25 06:01:07,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=23413.333333333332, ans=0.95
+2024-08-25 06:01:13,172 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.58 vs. limit=12.0
+2024-08-25 06:01:18,897 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.814e+02 2.067e+02 2.451e+02 4.716e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-25 06:01:41,638 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.82 vs. limit=22.5
+2024-08-25 06:01:52,057 INFO [train.py:1114] (1/4) Epoch 2, batch 1950, loss[loss=0.3302, simple_loss=0.3527, pruned_loss=0.1116, ctc_loss=0.2114, over 19600.00 frames. ], tot_loss[loss=0.3221, simple_loss=0.3482, pruned_loss=0.1076, ctc_loss=0.202, over 3870452.80 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 16.0
+2024-08-25 06:02:03,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=23733.333333333332, ans=0.0
+2024-08-25 06:02:21,767 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff3.min_abs, batch_count=23840.0, ans=0.2
+2024-08-25 06:02:23,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=23840.0, ans=0.125
+2024-08-25 06:02:33,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23893.333333333332, ans=0.1
+2024-08-25 06:02:40,739 INFO [train.py:1114] (1/4) Epoch 2, batch 2000, loss[loss=0.2654, simple_loss=0.3013, pruned_loss=0.08289, ctc_loss=0.1593, over 19636.00 frames. ], tot_loss[loss=0.3229, simple_loss=0.3487, pruned_loss=0.1081, ctc_loss=0.2025, over 3854595.39 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 06:02:45,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 06:02:48,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 06:02:49,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 06:02:52,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=24000.0, ans=0.125
+2024-08-25 06:02:57,876 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.781e+02 1.996e+02 2.377e+02 5.355e+02, threshold=3.992e+02, percent-clipped=1.0
+2024-08-25 06:03:03,857 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.84 vs. limit=15.0
+2024-08-25 06:03:07,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=24053.333333333332, ans=0.2
+2024-08-25 06:03:29,341 INFO [train.py:1114] (1/4) Epoch 2, batch 2050, loss[loss=0.2712, simple_loss=0.306, pruned_loss=0.08526, ctc_loss=0.1644, over 19707.00 frames. ], tot_loss[loss=0.3213, simple_loss=0.3472, pruned_loss=0.1074, ctc_loss=0.2014, over 3851137.52 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:03:56,283 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.84 vs. limit=15.0
+2024-08-25 06:04:12,475 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.78 vs. limit=15.0
+2024-08-25 06:04:17,657 INFO [train.py:1114] (1/4) Epoch 2, batch 2100, loss[loss=0.3176, simple_loss=0.3475, pruned_loss=0.1047, ctc_loss=0.1959, over 19760.00 frames. ], tot_loss[loss=0.3196, simple_loss=0.3459, pruned_loss=0.1066, ctc_loss=0.2, over 3857910.48 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:04:22,791 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.63 vs. limit=15.0
+2024-08-25 06:04:23,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=24480.0, ans=10.0
+2024-08-25 06:04:33,041 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.823e+02 2.012e+02 2.259e+02 3.531e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-25 06:04:41,162 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:04:43,395 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.81 vs. limit=15.0
+2024-08-25 06:04:56,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=24693.333333333332, ans=0.125
+2024-08-25 06:05:02,135 INFO [train.py:1114] (1/4) Epoch 2, batch 2150, loss[loss=0.3065, simple_loss=0.3351, pruned_loss=0.1022, ctc_loss=0.1837, over 19577.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3448, pruned_loss=0.1058, ctc_loss=0.1983, over 3868715.03 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 06:05:12,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=24746.666666666668, ans=0.2
+2024-08-25 06:05:19,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=24800.0, ans=0.5
+2024-08-25 06:05:33,923 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=24906.666666666668, ans=0.005455072463768116
+2024-08-25 06:05:36,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:39,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=24906.666666666668, ans=0.09899494936611666
+2024-08-25 06:05:40,516 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:05:43,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:44,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:44,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=24960.0, ans=0.125
+2024-08-25 06:05:56,450 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.63 vs. limit=10.0
+2024-08-25 06:05:58,411 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.52 vs. limit=15.0
+2024-08-25 06:05:59,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=24960.0, ans=0.2
+2024-08-25 06:06:01,091 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.38 vs. limit=10.0
+2024-08-25 06:06:01,433 INFO [train.py:1114] (1/4) Epoch 2, batch 2200, loss[loss=0.3593, simple_loss=0.3804, pruned_loss=0.1234, ctc_loss=0.2285, over 19601.00 frames. ], tot_loss[loss=0.3188, simple_loss=0.3454, pruned_loss=0.1062, ctc_loss=0.1989, over 3867598.72 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:06:11,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=25013.333333333332, ans=0.125
+2024-08-25 06:06:25,288 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.924e+02 2.286e+02 2.709e+02 6.222e+02, threshold=4.573e+02, percent-clipped=4.0
+2024-08-25 06:06:25,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=25066.666666666668, ans=0.125
+2024-08-25 06:06:34,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=25120.0, ans=0.125
+2024-08-25 06:06:36,008 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=25173.333333333332, ans=0.005397101449275363
+2024-08-25 06:06:52,524 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.73 vs. limit=15.0
+2024-08-25 06:06:54,567 INFO [train.py:1114] (1/4) Epoch 2, batch 2250, loss[loss=0.3134, simple_loss=0.3504, pruned_loss=0.09975, ctc_loss=0.1921, over 19604.00 frames. ], tot_loss[loss=0.3185, simple_loss=0.3454, pruned_loss=0.1061, ctc_loss=0.1985, over 3867623.39 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:06:56,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=25280.0, ans=0.125
+2024-08-25 06:06:59,369 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.92 vs. limit=15.0
+2024-08-25 06:07:03,846 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.35 vs. limit=12.0
+2024-08-25 06:07:07,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=25333.333333333332, ans=0.125
+2024-08-25 06:07:07,450 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.19 vs. limit=15.0
+2024-08-25 06:07:07,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=25333.333333333332, ans=0.125
+2024-08-25 06:07:25,496 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=25440.0, ans=0.0
+2024-08-25 06:07:34,666 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.88 vs. limit=22.5
+2024-08-25 06:07:41,081 INFO [train.py:1114] (1/4) Epoch 2, batch 2300, loss[loss=0.291, simple_loss=0.3237, pruned_loss=0.09445, ctc_loss=0.1736, over 19519.00 frames. ], tot_loss[loss=0.3176, simple_loss=0.3443, pruned_loss=0.1058, ctc_loss=0.198, over 3861882.93 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 32.0
+2024-08-25 06:07:55,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25600.0, ans=0.1
+2024-08-25 06:07:58,722 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 1.775e+02 2.049e+02 2.504e+02 6.120e+02, threshold=4.097e+02, percent-clipped=1.0
+2024-08-25 06:08:05,239 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.70 vs. limit=15.0
+2024-08-25 06:08:11,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=25706.666666666668, ans=0.5
+2024-08-25 06:08:11,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25706.666666666668, ans=0.1
+2024-08-25 06:08:21,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=25760.0, ans=0.025
+2024-08-25 06:08:25,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=25760.0, ans=0.0
+2024-08-25 06:08:29,078 INFO [train.py:1114] (1/4) Epoch 2, batch 2350, loss[loss=0.3296, simple_loss=0.3598, pruned_loss=0.1087, ctc_loss=0.2051, over 19672.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3445, pruned_loss=0.1062, ctc_loss=0.1983, over 3864853.50 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:08:30,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=25813.333333333332, ans=0.125
+2024-08-25 06:08:33,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=25813.333333333332, ans=0.1
+2024-08-25 06:08:39,773 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.21 vs. limit=15.0
+2024-08-25 06:08:42,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=25866.666666666668, ans=0.0
+2024-08-25 06:08:51,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=25920.0, ans=0.2
+2024-08-25 06:09:05,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=25973.333333333332, ans=0.2
+2024-08-25 06:09:22,748 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=20.15 vs. limit=22.5
+2024-08-25 06:09:23,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=26026.666666666668, ans=0.125
+2024-08-25 06:09:26,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=26026.666666666668, ans=0.125
+2024-08-25 06:09:28,452 INFO [train.py:1114] (1/4) Epoch 2, batch 2400, loss[loss=0.3595, simple_loss=0.3727, pruned_loss=0.1264, ctc_loss=0.234, over 19187.00 frames. ], tot_loss[loss=0.3193, simple_loss=0.3462, pruned_loss=0.1065, ctc_loss=0.1988, over 3859043.96 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:09:37,832 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.99 vs. limit=10.0
+2024-08-25 06:09:41,329 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.35 vs. limit=12.0
+2024-08-25 06:09:43,437 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.803e+02 2.129e+02 2.459e+02 5.388e+02, threshold=4.257e+02, percent-clipped=1.0
+2024-08-25 06:09:46,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26186.666666666668, ans=0.1
+2024-08-25 06:10:13,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=26293.333333333332, ans=0.2
+2024-08-25 06:10:14,670 INFO [train.py:1114] (1/4) Epoch 2, batch 2450, loss[loss=0.412, simple_loss=0.3875, pruned_loss=0.1588, ctc_loss=0.297, over 13465.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3523, pruned_loss=0.112, ctc_loss=0.209, over 3732884.53 frames. ], batch size: 141, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 06:10:17,016 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.68 vs. limit=15.0
+2024-08-25 06:10:25,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=26400.0, ans=0.125
+2024-08-25 06:10:28,029 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=25.56 vs. limit=22.5
+2024-08-25 06:10:33,913 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.14 vs. limit=22.5
+2024-08-25 06:10:42,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=26453.333333333332, ans=0.005118840579710145
+2024-08-25 06:10:43,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=11.05 vs. limit=12.0
+2024-08-25 06:11:53,152 INFO [train.py:1114] (1/4) Epoch 3, batch 0, loss[loss=0.3058, simple_loss=0.3247, pruned_loss=0.1041, ctc_loss=0.1964, over 19823.00 frames. ], tot_loss[loss=0.3058, simple_loss=0.3247, pruned_loss=0.1041, ctc_loss=0.1964, over 19823.00 frames. ], batch size: 49, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 06:11:53,508 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-25 06:12:07,831 INFO [train.py:1146] (1/4) Epoch 3, validation: loss=0.2565, simple_loss=0.3309, pruned_loss=0.06653, ctc_loss=0.1228, over 944034.00 frames.
+2024-08-25 06:12:07,832 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 13283MB
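+
+The three `train.py` lines above bracket a validation pass: a marker that validation is starting, the aggregated dev-set loss over 944034 frames, and the peak GPU memory so far. A hedged sketch of such a pass (the model is assumed to return a frame-weighted loss sum; all names are hypothetical):
+
+```python
+# Hedged sketch of the validation loop logged at train.py:1137/1146/1147.
+import torch
+
+def compute_validation_loss(model, valid_loader, device):
+    model.eval()
+    loss_sum, frames = 0.0, 0.0
+    with torch.no_grad():  # no gradients, so memory stays below the training peak
+        for batch in valid_loader:
+            batch_loss, batch_frames = model(batch)  # assumed: (sum of losses, frames)
+            loss_sum += batch_loss.item()
+            frames += batch_frames
+    model.train()
+    print(f"validation: loss={loss_sum / frames:.4f}, over {frames:.0f} frames.")
+    print(f"Maximum memory allocated so far is "
+          f"{torch.cuda.max_memory_allocated(device) // 2**20}MB")
+```
+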
+2024-08-25 06:12:09,930 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=26554.666666666668, ans=0.125
+2024-08-25 06:12:18,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=26554.666666666668, ans=0.125
+2024-08-25 06:13:57,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=26608.0, ans=0.125
+2024-08-25 06:15:44,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=26661.333333333332, ans=0.005073623188405798
+2024-08-25 06:16:00,091 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.983e+02 2.286e+02 2.644e+02 3.774e+02, threshold=4.572e+02, percent-clipped=0.0
+2024-08-25 06:17:19,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=26714.666666666668, ans=0.025
+2024-08-25 06:17:19,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=26714.666666666668, ans=0.125
+2024-08-25 06:18:48,674 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=26768.0, ans=0.125
+2024-08-25 06:20:22,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=26768.0, ans=0.125
+2024-08-25 06:22:59,718 INFO [train.py:1114] (1/4) Epoch 3, batch 50, loss[loss=0.2482, simple_loss=0.2913, pruned_loss=0.07413, ctc_loss=0.1419, over 19667.00 frames. ], tot_loss[loss=0.3207, simple_loss=0.347, pruned_loss=0.1069, ctc_loss=0.2014, over 844347.37 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
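+
+Across these lines the learning rate decays smoothly with batch count inside an epoch (4.15e-02 down to 4.03e-02 during epoch 2) and steps down again at the epoch boundary (3.83e-02 from epoch 3, batch 0). That shape is consistent with an Eden-style schedule that discounts the base rate on both a batch counter and an epoch counter; the constants below are illustrative assumptions, not recovered from this run:
+
+```python
+# Hedged sketch of an Eden-style learning-rate schedule: two smooth
+# inverse-power factors, one in the batch count and one in the epoch count.
+def eden_lr(base_lr, batch, epoch, lr_batches=5000.0, lr_epochs=3.5):
+    return (
+        base_lr
+        * ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+        * ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    )
+
+print(eden_lr(0.045, batch=26500, epoch=3))  # same order as the logged lr values
+```
+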
+2024-08-25 06:23:30,650 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.97 vs. limit=15.0
+2024-08-25 06:23:38,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=26821.333333333332, ans=0.125
+2024-08-25 06:29:45,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=26874.666666666668, ans=0.2
+2024-08-25 06:31:29,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=26874.666666666668, ans=0.125
+2024-08-25 06:34:49,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=26874.666666666668, ans=0.2
+2024-08-25 06:40:51,612 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.12 vs. limit=15.0
+2024-08-25 06:44:24,665 INFO [train.py:1114] (1/4) Epoch 3, batch 100, loss[loss=0.2845, simple_loss=0.3255, pruned_loss=0.08814, ctc_loss=0.1681, over 19702.00 frames. ], tot_loss[loss=0.3216, simple_loss=0.3485, pruned_loss=0.107, ctc_loss=0.2016, over 1499014.27 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:46:48,791 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=27141.333333333332, ans=0.125
+2024-08-25 06:46:48,953 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.20 vs. limit=22.5
+2024-08-25 06:46:59,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=27141.333333333332, ans=0.125
+2024-08-25 06:47:46,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=27194.666666666668, ans=0.125
+2024-08-25 06:47:49,839 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.02 vs. limit=22.5
+2024-08-25 06:47:50,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=27194.666666666668, ans=0.2
+2024-08-25 06:48:13,340 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.53 vs. limit=15.0
+2024-08-25 06:48:15,480 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.744e+02 2.032e+02 2.291e+02 1.205e+03, threshold=4.063e+02, percent-clipped=1.0
+2024-08-25 06:50:29,662 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.13 vs. limit=22.5
+2024-08-25 06:50:33,595 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.60 vs. limit=15.0
+2024-08-25 06:50:41,243 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.29 vs. limit=15.0
+2024-08-25 06:50:43,552 INFO [train.py:1114] (1/4) Epoch 3, batch 150, loss[loss=0.3103, simple_loss=0.3288, pruned_loss=0.1072, ctc_loss=0.1936, over 19725.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3453, pruned_loss=0.1051, ctc_loss=0.1977, over 2028246.68 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 06:51:34,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 06:51:51,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 06:53:48,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=27514.666666666668, ans=0.004888115942028985
+2024-08-25 06:54:02,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=27514.666666666668, ans=0.125
+2024-08-25 06:54:15,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=27568.0, ans=0.125
+2024-08-25 06:54:31,940 INFO [train.py:1114] (1/4) Epoch 3, batch 200, loss[loss=0.361, simple_loss=0.3786, pruned_loss=0.1255, ctc_loss=0.2311, over 18329.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3424, pruned_loss=0.1035, ctc_loss=0.1945, over 2436541.20 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:54:49,884 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.20 vs. limit=6.0
+2024-08-25 06:56:00,392 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.731e+02 1.977e+02 2.205e+02 3.305e+02, threshold=3.953e+02, percent-clipped=0.0
+2024-08-25 06:56:10,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=27781.333333333332, ans=0.125
+2024-08-25 06:56:34,804 INFO [train.py:1114] (1/4) Epoch 3, batch 250, loss[loss=0.3446, simple_loss=0.3719, pruned_loss=0.1159, ctc_loss=0.2138, over 19373.00 frames. ], tot_loss[loss=0.3117, simple_loss=0.3414, pruned_loss=0.1025, ctc_loss=0.1924, over 2755941.42 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:56:44,853 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.62 vs. limit=15.0
+2024-08-25 06:57:29,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 06:57:30,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=27941.333333333332, ans=0.0
+2024-08-25 07:00:43,499 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.14 vs. limit=22.5
+2024-08-25 07:02:40,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=28048.0, ans=0.004772173913043478
+2024-08-25 07:03:05,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=28101.333333333332, ans=0.125
+2024-08-25 07:03:23,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=28101.333333333332, ans=0.0
+2024-08-25 07:03:29,188 INFO [train.py:1114] (1/4) Epoch 3, batch 300, loss[loss=0.3242, simple_loss=0.3482, pruned_loss=0.1103, ctc_loss=0.1989, over 19525.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.3406, pruned_loss=0.102, ctc_loss=0.1912, over 3000471.32 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:03:33,887 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.25 vs. limit=8.0
+2024-08-25 07:03:53,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28154.666666666668, ans=0.125
+2024-08-25 07:04:32,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 07:04:33,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 07:04:35,015 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.53 vs. limit=22.5
+2024-08-25 07:04:40,405 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.83 vs. limit=15.0
+2024-08-25 07:04:43,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 07:04:44,395 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.724e+02 1.968e+02 2.265e+02 3.417e+02, threshold=3.936e+02, percent-clipped=0.0
+2024-08-25 07:05:03,200 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 07:05:03,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 07:05:03,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=28314.666666666668, ans=0.04949747468305833
+2024-08-25 07:05:49,910 INFO [train.py:1114] (1/4) Epoch 3, batch 350, loss[loss=0.2857, simple_loss=0.3188, pruned_loss=0.09105, ctc_loss=0.1762, over 19763.00 frames. ], tot_loss[loss=0.3102, simple_loss=0.3403, pruned_loss=0.1018, ctc_loss=0.191, over 3190566.06 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:05:56,789 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=28421.333333333332, ans=0.0
+2024-08-25 07:07:33,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=28528.0, ans=0.004667826086956522
+2024-08-25 07:07:40,983 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.60 vs. limit=15.0
+2024-08-25 07:07:45,528 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=28581.333333333332, ans=0.004656231884057971
+2024-08-25 07:07:52,983 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.65 vs. limit=22.5
+2024-08-25 07:07:53,680 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28634.666666666668, ans=0.1
+2024-08-25 07:08:01,163 INFO [train.py:1114] (1/4) Epoch 3, batch 400, loss[loss=0.3057, simple_loss=0.3412, pruned_loss=0.09683, ctc_loss=0.1917, over 19530.00 frames. ], tot_loss[loss=0.3088, simple_loss=0.3396, pruned_loss=0.1011, ctc_loss=0.1897, over 3342662.72 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 07:08:21,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=28741.333333333332, ans=0.025
+2024-08-25 07:08:24,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=28794.666666666668, ans=0.0
+2024-08-25 07:08:29,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=28794.666666666668, ans=0.125
+2024-08-25 07:08:42,254 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.762e+02 1.982e+02 2.336e+02 5.420e+02, threshold=3.963e+02, percent-clipped=2.0
+2024-08-25 07:08:56,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=28901.333333333332, ans=0.025
+2024-08-25 07:08:57,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=28901.333333333332, ans=0.025
+2024-08-25 07:08:59,038 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.93 vs. limit=12.0
+2024-08-25 07:09:04,147 INFO [train.py:1114] (1/4) Epoch 3, batch 450, loss[loss=0.313, simple_loss=0.3449, pruned_loss=0.1027, ctc_loss=0.1891, over 19611.00 frames. ], tot_loss[loss=0.3088, simple_loss=0.3396, pruned_loss=0.1011, ctc_loss=0.1894, over 3450195.22 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:09:09,498 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.29 vs. limit=15.0
+2024-08-25 07:09:15,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=28954.666666666668, ans=0.2
+2024-08-25 07:09:23,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=29008.0, ans=0.125
+2024-08-25 07:09:37,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=29114.666666666668, ans=0.125
+2024-08-25 07:09:39,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=29114.666666666668, ans=0.0
+2024-08-25 07:09:56,832 INFO [train.py:1114] (1/4) Epoch 3, batch 500, loss[loss=0.318, simple_loss=0.3548, pruned_loss=0.1019, ctc_loss=0.1936, over 19675.00 frames. ], tot_loss[loss=0.3075, simple_loss=0.3388, pruned_loss=0.1004, ctc_loss=0.1882, over 3545219.68 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:10:43,384 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.753e+02 1.966e+02 2.327e+02 4.047e+02, threshold=3.932e+02, percent-clipped=2.0
+2024-08-25 07:10:46,756 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.06 vs. limit=15.0
+2024-08-25 07:11:02,989 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.23 vs. limit=22.5
+2024-08-25 07:11:10,801 INFO [train.py:1114] (1/4) Epoch 3, batch 550, loss[loss=0.3332, simple_loss=0.3587, pruned_loss=0.1116, ctc_loss=0.2114, over 19305.00 frames. ], tot_loss[loss=0.3083, simple_loss=0.3394, pruned_loss=0.1008, ctc_loss=0.1887, over 3607695.08 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:11:41,188 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=29488.0, ans=0.125
+2024-08-25 07:11:51,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 07:11:51,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 07:12:46,937 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.68 vs. limit=15.0
+2024-08-25 07:12:53,131 INFO [train.py:1114] (1/4) Epoch 3, batch 600, loss[loss=0.3215, simple_loss=0.3518, pruned_loss=0.1072, ctc_loss=0.192, over 19375.00 frames. ], tot_loss[loss=0.3072, simple_loss=0.3388, pruned_loss=0.1002, ctc_loss=0.1877, over 3665416.30 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:12:59,840 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=29754.666666666668, ans=0.1
+2024-08-25 07:13:37,725 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.812e+02 2.009e+02 2.360e+02 5.731e+02, threshold=4.017e+02, percent-clipped=3.0
+2024-08-25 07:13:42,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=29914.666666666668, ans=0.1
+2024-08-25 07:13:59,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29968.0, ans=0.1
+2024-08-25 07:14:02,707 INFO [train.py:1114] (1/4) Epoch 3, batch 650, loss[loss=0.2962, simple_loss=0.3363, pruned_loss=0.0931, ctc_loss=0.1746, over 19774.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.3375, pruned_loss=0.09941, ctc_loss=0.1862, over 3716429.58 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 16.0
+2024-08-25 07:14:11,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=30021.333333333332, ans=0.0
+2024-08-25 07:14:16,713 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.05 vs. limit=15.0
+2024-08-25 07:14:19,409 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.68 vs. limit=15.0
+2024-08-25 07:14:20,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=30074.666666666668, ans=0.004331594202898551
+2024-08-25 07:14:23,212 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.08 vs. limit=6.0
+2024-08-25 07:14:33,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=30128.0, ans=0.125
+2024-08-25 07:14:35,921 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=30181.333333333332, ans=0.125
+2024-08-25 07:14:44,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=30234.666666666668, ans=0.0
+2024-08-25 07:14:55,118 INFO [train.py:1114] (1/4) Epoch 3, batch 700, loss[loss=0.3249, simple_loss=0.3412, pruned_loss=0.1139, ctc_loss=0.2018, over 19739.00 frames. ], tot_loss[loss=0.3066, simple_loss=0.3385, pruned_loss=0.09992, ctc_loss=0.1871, over 3749289.09 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:15:02,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=30288.0, ans=0.1
+2024-08-25 07:15:03,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=30288.0, ans=0.05
+2024-08-25 07:15:22,029 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30394.666666666668, ans=0.1
+2024-08-25 07:15:28,448 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.846e+02 1.998e+02 2.505e+02 9.071e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-25 07:15:34,802 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.23 vs. limit=10.0
+2024-08-25 07:15:38,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=30448.0, ans=0.0
+2024-08-25 07:15:42,400 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 07:15:47,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30501.333333333332, ans=0.1
+2024-08-25 07:15:47,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=30501.333333333332, ans=0.125
+2024-08-25 07:15:56,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=30554.666666666668, ans=0.0
+2024-08-25 07:15:58,573 INFO [train.py:1114] (1/4) Epoch 3, batch 750, loss[loss=0.2859, simple_loss=0.3354, pruned_loss=0.08464, ctc_loss=0.1681, over 19487.00 frames. ], tot_loss[loss=0.3057, simple_loss=0.3376, pruned_loss=0.09965, ctc_loss=0.1865, over 3775271.28 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:16:06,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=30554.666666666668, ans=0.125
+2024-08-25 07:16:26,684 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=30608.0, ans=0.125
+2024-08-25 07:16:51,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=30714.666666666668, ans=0.004192463768115941
+2024-08-25 07:34:42,534 INFO [train.py:1114] (1/4) Epoch 3, batch 800, loss[loss=0.3075, simple_loss=0.329, pruned_loss=0.1044, ctc_loss=0.193, over 19807.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3371, pruned_loss=0.09947, ctc_loss=0.1862, over 3796174.33 frames. ], batch size: 49, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 07:39:39,828 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.35 vs. limit=15.0
+2024-08-25 07:39:50,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=30821.333333333332, ans=0.2
+2024-08-25 08:02:40,829 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.761e+02 1.928e+02 2.233e+02 3.899e+02, threshold=3.856e+02, percent-clipped=0.0
+2024-08-25 08:12:59,519 INFO [train.py:1114] (1/4) Epoch 3, batch 850, loss[loss=0.3026, simple_loss=0.3424, pruned_loss=0.09531, ctc_loss=0.1803, over 19655.00 frames. ], tot_loss[loss=0.3039, simple_loss=0.3359, pruned_loss=0.09888, ctc_loss=0.1852, over 3815868.27 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 08:23:27,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=31141.333333333332, ans=0.0
+2024-08-25 08:42:44,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=31301.333333333332, ans=0.125
+2024-08-25 08:44:41,569 INFO [train.py:1114] (1/4) Epoch 3, batch 900, loss[loss=0.249, simple_loss=0.2939, pruned_loss=0.07432, ctc_loss=0.1385, over 19422.00 frames. ], tot_loss[loss=0.3047, simple_loss=0.3365, pruned_loss=0.09926, ctc_loss=0.1859, over 3819560.69 frames. ], batch size: 48, lr: 3.72e-02, grad_scale: 32.0
+2024-08-25 08:47:09,525 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=31354.666666666668, ans=0.1
+2024-08-25 08:48:49,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=31408.0, ans=0.035
+2024-08-25 08:51:48,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31461.333333333332, ans=0.1
+2024-08-25 08:51:48,537 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.03 vs. limit=15.0
+2024-08-25 08:52:10,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=31461.333333333332, ans=0.2
+2024-08-25 08:57:54,734 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.748e+02 1.945e+02 2.250e+02 3.446e+02, threshold=3.889e+02, percent-clipped=0.0
+2024-08-25 08:57:55,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=31514.666666666668, ans=0.004018550724637681
+2024-08-25 09:00:10,866 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.84 vs. limit=22.5
+2024-08-25 09:02:23,560 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=31568.0, ans=0.125
+2024-08-25 09:03:26,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=31568.0, ans=0.0
+2024-08-25 09:05:01,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=31568.0, ans=0.125
+2024-08-25 09:05:03,363 INFO [train.py:1114] (1/4) Epoch 3, batch 950, loss[loss=0.2791, simple_loss=0.3135, pruned_loss=0.08868, ctc_loss=0.1683, over 19491.00 frames. ], tot_loss[loss=0.3051, simple_loss=0.3367, pruned_loss=0.09949, ctc_loss=0.1865, over 3821438.83 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 32.0
+2024-08-25 09:16:38,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=31781.333333333332, ans=0.125
+2024-08-25 09:23:03,904 INFO [train.py:1114] (1/4) Epoch 3, batch 1000, loss[loss=0.2785, simple_loss=0.3204, pruned_loss=0.0853, ctc_loss=0.1652, over 19850.00 frames. ], tot_loss[loss=0.307, simple_loss=0.3383, pruned_loss=0.1003, ctc_loss=0.1875, over 3817613.11 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 16.0
+2024-08-25 09:29:07,851 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.873e+02 2.237e+02 2.628e+02 7.664e+02, threshold=4.475e+02, percent-clipped=6.0
+2024-08-25 09:29:25,000 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=32048.0, ans=0.1
+2024-08-25 09:29:25,211 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.86 vs. limit=15.0
+2024-08-25 09:30:17,283 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.14 vs. limit=22.5
+2024-08-25 09:32:27,624 INFO [train.py:1114] (1/4) Epoch 3, batch 1050, loss[loss=0.2998, simple_loss=0.3344, pruned_loss=0.09626, ctc_loss=0.1817, over 19827.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3369, pruned_loss=0.0995, ctc_loss=0.1861, over 3824558.96 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:32:31,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 09:33:39,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=32208.0, ans=0.07
+2024-08-25 09:41:10,308 INFO [train.py:1114] (1/4) Epoch 3, batch 1100, loss[loss=0.3072, simple_loss=0.3371, pruned_loss=0.102, ctc_loss=0.1836, over 19577.00 frames. ], tot_loss[loss=0.3044, simple_loss=0.3366, pruned_loss=0.09907, ctc_loss=0.1854, over 3831467.53 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:41:42,415 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.66 vs. limit=6.0
+2024-08-25 09:41:52,770 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.79 vs. limit=22.5
+2024-08-25 09:42:46,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=32528.0, ans=0.0
+2024-08-25 09:43:23,046 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.681e+02 1.943e+02 2.357e+02 4.515e+02, threshold=3.887e+02, percent-clipped=1.0
+2024-08-25 09:45:14,600 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=14.86 vs. limit=15.0
+2024-08-25 09:45:15,742 INFO [train.py:1114] (1/4) Epoch 3, batch 1150, loss[loss=0.2932, simple_loss=0.3346, pruned_loss=0.09042, ctc_loss=0.1772, over 19585.00 frames. ], tot_loss[loss=0.3053, simple_loss=0.3372, pruned_loss=0.09953, ctc_loss=0.1861, over 3831042.27 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 16.0
+2024-08-25 09:53:24,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=32741.333333333332, ans=0.125
+2024-08-25 09:54:01,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=32794.666666666664, ans=0.0
+2024-08-25 09:55:14,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=32901.333333333336, ans=0.07
+2024-08-25 09:55:29,633 INFO [train.py:1114] (1/4) Epoch 3, batch 1200, loss[loss=0.3252, simple_loss=0.359, pruned_loss=0.1073, ctc_loss=0.1917, over 19842.00 frames. ], tot_loss[loss=0.3064, simple_loss=0.3385, pruned_loss=0.09978, ctc_loss=0.1867, over 3825539.56 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:55:39,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=32954.666666666664, ans=0.2
+2024-08-25 09:56:31,125 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.705e+02 1.941e+02 2.201e+02 4.168e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-25 09:56:31,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=33114.666666666664, ans=0.2
+2024-08-25 09:57:41,523 INFO [train.py:1114] (1/4) Epoch 3, batch 1250, loss[loss=0.3266, simple_loss=0.3593, pruned_loss=0.1072, ctc_loss=0.1992, over 19541.00 frames. ], tot_loss[loss=0.3064, simple_loss=0.3389, pruned_loss=0.09973, ctc_loss=0.1863, over 3843396.04 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:58:04,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=33274.666666666664, ans=0.0
+2024-08-25 09:58:48,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=33434.666666666664, ans=0.07
+2024-08-25 09:59:01,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=33434.666666666664, ans=0.125
+2024-08-25 09:59:01,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=33434.666666666664, ans=0.125
+2024-08-25 09:59:04,140 INFO [train.py:1114] (1/4) Epoch 3, batch 1300, loss[loss=0.2947, simple_loss=0.3391, pruned_loss=0.09068, ctc_loss=0.1724, over 18793.00 frames. ], tot_loss[loss=0.3045, simple_loss=0.3372, pruned_loss=0.09893, ctc_loss=0.1847, over 3847174.36 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 09:59:31,742 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=33541.333333333336, ans=0.125
+2024-08-25 09:59:48,218 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.674e+02 1.887e+02 2.172e+02 3.368e+02, threshold=3.774e+02, percent-clipped=0.0
+2024-08-25 09:59:58,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=33648.0, ans=0.125
+2024-08-25 10:00:13,470 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=33701.333333333336, ans=0.025
+2024-08-25 10:00:14,455 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=33701.333333333336, ans=0.2
+2024-08-25 10:00:20,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=33701.333333333336, ans=0.125
+2024-08-25 10:00:22,480 INFO [train.py:1114] (1/4) Epoch 3, batch 1350, loss[loss=0.3168, simple_loss=0.3513, pruned_loss=0.1035, ctc_loss=0.1883, over 19771.00 frames. ], tot_loss[loss=0.3026, simple_loss=0.3361, pruned_loss=0.09794, ctc_loss=0.1831, over 3856257.20 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 10:00:33,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=33754.666666666664, ans=0.0
+2024-08-25 10:00:52,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=33808.0, ans=0.125
+2024-08-25 10:00:52,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=33808.0, ans=0.125
+2024-08-25 10:00:58,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33808.0, ans=0.1
+2024-08-25 10:01:32,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=33914.666666666664, ans=0.04949747468305833
+2024-08-25 10:01:46,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=33914.666666666664, ans=0.125
+2024-08-25 10:02:01,542 INFO [train.py:1114] (1/4) Epoch 3, batch 1400, loss[loss=0.2894, simple_loss=0.3166, pruned_loss=0.09536, ctc_loss=0.1788, over 19699.00 frames. ], tot_loss[loss=0.3022, simple_loss=0.3355, pruned_loss=0.09791, ctc_loss=0.1827, over 3864232.26 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 32.0
+2024-08-25 10:02:20,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=34074.666666666664, ans=0.2
+2024-08-25 10:02:25,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=34074.666666666664, ans=0.025
+2024-08-25 10:02:33,791 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.84 vs. limit=15.0
+2024-08-25 10:02:45,304 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.896e+02 2.159e+02 2.528e+02 3.857e+02, threshold=4.318e+02, percent-clipped=1.0
+2024-08-25 10:02:54,690 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=34181.333333333336, ans=0.0
+2024-08-25 10:03:06,782 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.22 vs. limit=22.5
+2024-08-25 10:03:07,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=34234.666666666664, ans=0.2
+2024-08-25 10:03:12,625 INFO [train.py:1114] (1/4) Epoch 3, batch 1450, loss[loss=0.3199, simple_loss=0.3524, pruned_loss=0.1047, ctc_loss=0.1953, over 19654.00 frames. ], tot_loss[loss=0.303, simple_loss=0.3363, pruned_loss=0.09816, ctc_loss=0.1832, over 3862401.94 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:03:30,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=34341.333333333336, ans=0.2
+2024-08-25 10:04:19,346 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.42 vs. limit=15.0
+2024-08-25 10:04:21,623 INFO [train.py:1114] (1/4) Epoch 3, batch 1500, loss[loss=0.334, simple_loss=0.3629, pruned_loss=0.1115, ctc_loss=0.2053, over 19575.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.3366, pruned_loss=0.09802, ctc_loss=0.183, over 3862253.01 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:04:25,049 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.09 vs. limit=15.0
+2024-08-25 10:04:28,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=34554.666666666664, ans=0.0033576811594202907
+2024-08-25 10:04:36,570 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.11 vs. limit=15.0
+2024-08-25 10:04:52,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=34661.333333333336, ans=0.0
+2024-08-25 10:04:55,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=34714.666666666664, ans=0.125
+2024-08-25 10:05:09,913 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.778e+02 1.971e+02 2.353e+02 5.678e+02, threshold=3.941e+02, percent-clipped=1.0
+2024-08-25 10:05:10,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34714.666666666664, ans=0.1
+2024-08-25 10:05:11,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=34714.666666666664, ans=0.125
+2024-08-25 10:05:11,888 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34714.666666666664, ans=0.1
+2024-08-25 10:05:29,598 INFO [train.py:1114] (1/4) Epoch 3, batch 1550, loss[loss=0.3476, simple_loss=0.3666, pruned_loss=0.1208, ctc_loss=0.2174, over 19587.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3368, pruned_loss=0.09841, ctc_loss=0.1836, over 3847577.24 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:05:37,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=34821.333333333336, ans=0.125
+2024-08-25 10:05:37,287 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=34821.333333333336, ans=0.125
+2024-08-25 10:06:03,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34981.333333333336, ans=0.1
+2024-08-25 10:06:08,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34981.333333333336, ans=0.1
+2024-08-25 10:06:10,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=34981.333333333336, ans=0.125
+2024-08-25 10:06:22,169 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.38 vs. limit=15.0
+2024-08-25 10:06:42,357 INFO [train.py:1114] (1/4) Epoch 3, batch 1600, loss[loss=0.3407, simple_loss=0.3688, pruned_loss=0.1143, ctc_loss=0.2102, over 19839.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3367, pruned_loss=0.09871, ctc_loss=0.1844, over 3835820.56 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:07:05,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=35141.333333333336, ans=0.0032301449275362317
+2024-08-25 10:07:26,109 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=35194.666666666664, ans=0.025
+2024-08-25 10:07:47,933 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 1.752e+02 2.032e+02 2.338e+02 4.104e+02, threshold=4.064e+02, percent-clipped=1.0
+2024-08-25 10:07:55,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=35301.333333333336, ans=0.125
+2024-08-25 10:08:06,817 INFO [train.py:1114] (1/4) Epoch 3, batch 1650, loss[loss=0.3008, simple_loss=0.3383, pruned_loss=0.09706, ctc_loss=0.1733, over 19638.00 frames. ], tot_loss[loss=0.303, simple_loss=0.336, pruned_loss=0.0983, ctc_loss=0.1836, over 3831935.54 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 10:08:08,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=35354.666666666664, ans=0.0031837681159420303
+2024-08-25 10:08:12,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten.whitening_limit, batch_count=35354.666666666664, ans=15.0
+2024-08-25 10:08:14,647 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=35354.666666666664, ans=0.125
+2024-08-25 10:08:19,628 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.22 vs. limit=6.0
+2024-08-25 10:08:22,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=35408.0, ans=0.125
+2024-08-25 10:08:46,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=35514.666666666664, ans=0.0
+2024-08-25 10:08:54,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=35514.666666666664, ans=0.5
+2024-08-25 10:08:55,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=35568.0, ans=0.0
+2024-08-25 10:09:00,404 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:09:00,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=35568.0, ans=0.125
+2024-08-25 10:09:04,855 INFO [train.py:1114] (1/4) Epoch 3, batch 1700, loss[loss=0.2859, simple_loss=0.3133, pruned_loss=0.09377, ctc_loss=0.1775, over 19649.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3352, pruned_loss=0.09733, ctc_loss=0.182, over 3846531.43 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:09:15,284 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.44 vs. limit=22.5
+2024-08-25 10:09:21,142 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.31 vs. limit=15.0
+2024-08-25 10:09:22,542 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=35728.0, ans=0.125
+2024-08-25 10:09:52,817 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.835e+02 2.022e+02 2.484e+02 3.793e+02, threshold=4.043e+02, percent-clipped=0.0
+2024-08-25 10:10:09,478 INFO [train.py:1114] (1/4) Epoch 3, batch 1750, loss[loss=0.2663, simple_loss=0.3009, pruned_loss=0.08336, ctc_loss=0.1623, over 19681.00 frames. ], tot_loss[loss=0.3, simple_loss=0.3337, pruned_loss=0.09691, ctc_loss=0.1809, over 3851205.57 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:10:28,682 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=8.55 vs. limit=12.0
+2024-08-25 10:10:37,651 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=35994.666666666664, ans=0.125
+2024-08-25 10:11:02,520 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.87 vs. limit=10.0
+2024-08-25 10:11:20,661 INFO [train.py:1114] (1/4) Epoch 3, batch 1800, loss[loss=0.2628, simple_loss=0.3119, pruned_loss=0.07763, ctc_loss=0.1459, over 19609.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3341, pruned_loss=0.09707, ctc_loss=0.1812, over 3853268.25 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:11:27,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=36154.666666666664, ans=0.125
+2024-08-25 10:11:28,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=36154.666666666664, ans=0.125
+2024-08-25 10:11:29,539 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.60 vs. limit=15.0
+2024-08-25 10:11:52,946 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.758e+02 2.042e+02 2.396e+02 4.902e+02, threshold=4.083e+02, percent-clipped=1.0
+2024-08-25 10:12:24,015 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=36368.0, ans=0.125
+2024-08-25 10:12:33,954 INFO [train.py:1114] (1/4) Epoch 3, batch 1850, loss[loss=0.3174, simple_loss=0.3477, pruned_loss=0.1051, ctc_loss=0.1925, over 19608.00 frames. ], tot_loss[loss=0.2991, simple_loss=0.3331, pruned_loss=0.09657, ctc_loss=0.1801, over 3856041.44 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:12:34,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=36421.333333333336, ans=0.125
+2024-08-25 10:13:07,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=36528.0, ans=0.125
+2024-08-25 10:13:08,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36528.0, ans=0.1
+2024-08-25 10:13:12,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=36581.333333333336, ans=0.035
+2024-08-25 10:13:31,591 INFO [train.py:1114] (1/4) Epoch 3, batch 1900, loss[loss=0.3041, simple_loss=0.3456, pruned_loss=0.09431, ctc_loss=0.1851, over 19659.00 frames. ], tot_loss[loss=0.2993, simple_loss=0.3335, pruned_loss=0.0965, ctc_loss=0.18, over 3861295.52 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 32.0
+2024-08-25 10:14:04,863 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.09 vs. limit=15.0
+2024-08-25 10:14:09,008 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=36741.333333333336, ans=0.125
+2024-08-25 10:14:14,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=36741.333333333336, ans=0.125
+2024-08-25 10:14:24,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 10:14:26,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=36794.666666666664, ans=0.04949747468305833
+2024-08-25 10:14:27,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=36848.0, ans=0.125
+2024-08-25 10:14:29,229 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.725e+02 1.920e+02 2.285e+02 4.448e+02, threshold=3.841e+02, percent-clipped=1.0
+2024-08-25 10:14:32,802 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.88 vs. limit=15.0
+2024-08-25 10:14:47,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=36901.333333333336, ans=0.0
+2024-08-25 10:14:54,735 INFO [train.py:1114] (1/4) Epoch 3, batch 1950, loss[loss=0.2683, simple_loss=0.3082, pruned_loss=0.08376, ctc_loss=0.1523, over 19591.00 frames. ], tot_loss[loss=0.2993, simple_loss=0.3342, pruned_loss=0.09629, ctc_loss=0.1796, over 3870141.84 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:15:02,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=36954.666666666664, ans=10.0
+2024-08-25 10:15:51,810 INFO [train.py:1114] (1/4) Epoch 3, batch 2000, loss[loss=0.2595, simple_loss=0.2964, pruned_loss=0.08001, ctc_loss=0.1565, over 19661.00 frames. ], tot_loss[loss=0.3001, simple_loss=0.3348, pruned_loss=0.09662, ctc_loss=0.1802, over 3854345.99 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:16:11,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=37328.0, ans=0.002754782608695652
+2024-08-25 10:16:19,104 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.904e+02 2.146e+02 2.566e+02 5.347e+02, threshold=4.293e+02, percent-clipped=2.0
+2024-08-25 10:16:44,037 INFO [train.py:1114] (1/4) Epoch 3, batch 2050, loss[loss=0.2662, simple_loss=0.3062, pruned_loss=0.08184, ctc_loss=0.1565, over 19725.00 frames. ], tot_loss[loss=0.2989, simple_loss=0.3336, pruned_loss=0.09615, ctc_loss=0.1795, over 3850800.98 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 32.0
+2024-08-25 10:16:44,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=37488.0, ans=0.125
+2024-08-25 10:16:57,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=37488.0, ans=0.125
+2024-08-25 10:17:08,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=37541.333333333336, ans=0.2
+2024-08-25 10:17:09,329 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.09 vs. limit=10.0
+2024-08-25 10:17:21,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=37594.666666666664, ans=0.0
+2024-08-25 10:17:27,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=37648.0, ans=0.07
+2024-08-25 10:17:56,058 INFO [train.py:1114] (1/4) Epoch 3, batch 2100, loss[loss=0.287, simple_loss=0.3255, pruned_loss=0.0907, ctc_loss=0.1679, over 19758.00 frames. ], tot_loss[loss=0.2979, simple_loss=0.3331, pruned_loss=0.09565, ctc_loss=0.1785, over 3857825.56 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 10:18:18,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=37754.666666666664, ans=0.5
+2024-08-25 10:18:19,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=37754.666666666664, ans=0.0
+2024-08-25 10:18:35,498 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=37754.666666666664, ans=0.125
+2024-08-25 10:18:39,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=37808.0, ans=0.025
+2024-08-25 10:18:41,042 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.76 vs. limit=22.5
+2024-08-25 10:18:46,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=37808.0, ans=0.125
+2024-08-25 10:19:20,760 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.776e+02 1.971e+02 2.246e+02 3.814e+02, threshold=3.941e+02, percent-clipped=0.0
+2024-08-25 10:20:09,497 INFO [train.py:1114] (1/4) Epoch 3, batch 2150, loss[loss=0.2869, simple_loss=0.3267, pruned_loss=0.08966, ctc_loss=0.1695, over 19594.00 frames. ], tot_loss[loss=0.296, simple_loss=0.3314, pruned_loss=0.09481, ctc_loss=0.1772, over 3868883.36 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 10:20:51,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=38181.333333333336, ans=0.125
+2024-08-25 10:20:53,698 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=19.55 vs. limit=22.5
+2024-08-25 10:20:56,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=38181.333333333336, ans=0.0
+2024-08-25 10:21:09,810 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.30 vs. limit=15.0
+2024-08-25 10:21:10,424 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:21:11,133 INFO [train.py:1114] (1/4) Epoch 3, batch 2200, loss[loss=0.294, simple_loss=0.3365, pruned_loss=0.0914, ctc_loss=0.1716, over 19580.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.332, pruned_loss=0.09483, ctc_loss=0.1773, over 3868042.63 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:21:18,277 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38288.0, ans=0.1
+2024-08-25 10:21:30,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=38341.333333333336, ans=0.09899494936611666
+2024-08-25 10:21:56,464 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.750e+02 1.922e+02 2.212e+02 3.187e+02, threshold=3.844e+02, percent-clipped=0.0
+2024-08-25 10:22:21,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=38501.333333333336, ans=0.0024997101449275357
+2024-08-25 10:22:28,991 INFO [train.py:1114] (1/4) Epoch 3, batch 2250, loss[loss=0.292, simple_loss=0.3338, pruned_loss=0.09075, ctc_loss=0.1716, over 19603.00 frames. ], tot_loss[loss=0.2964, simple_loss=0.3322, pruned_loss=0.09487, ctc_loss=0.1771, over 3868149.79 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:22:30,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=38554.666666666664, ans=0.002488115942028987
+2024-08-25 10:22:31,143 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.56 vs. limit=12.0
+2024-08-25 10:22:55,359 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.34 vs. limit=6.0
+2024-08-25 10:23:01,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=38661.333333333336, ans=0.002464927536231884
+2024-08-25 10:23:17,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=38714.666666666664, ans=0.125
+2024-08-25 10:23:29,346 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:23:40,914 INFO [train.py:1114] (1/4) Epoch 3, batch 2300, loss[loss=0.2613, simple_loss=0.3004, pruned_loss=0.08032, ctc_loss=0.1538, over 19527.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.3309, pruned_loss=0.09465, ctc_loss=0.1769, over 3862623.77 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:23:54,493 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=38874.666666666664, ans=0.125
+2024-08-25 10:24:03,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=38928.0, ans=0.0
+2024-08-25 10:24:03,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=38928.0, ans=0.125
+2024-08-25 10:24:06,274 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:24:13,779 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.820e+02 2.030e+02 2.354e+02 3.970e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-25 10:24:13,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=38981.333333333336, ans=0.09899494936611666
+2024-08-25 10:24:14,934 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=16.46 vs. limit=15.0
+2024-08-25 10:24:43,020 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=39034.666666666664, ans=0.125
+2024-08-25 10:24:48,819 INFO [train.py:1114] (1/4) Epoch 3, batch 2350, loss[loss=0.329, simple_loss=0.3609, pruned_loss=0.108, ctc_loss=0.2025, over 19678.00 frames. ], tot_loss[loss=0.2952, simple_loss=0.3306, pruned_loss=0.09454, ctc_loss=0.1769, over 3864981.09 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:25:01,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=39088.0, ans=0.1
+2024-08-25 10:25:06,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=39141.333333333336, ans=0.125
+2024-08-25 10:25:20,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=39194.666666666664, ans=0.1
+2024-08-25 10:25:23,277 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.87 vs. limit=15.0
+2024-08-25 10:25:38,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.90 vs. limit=15.0
+2024-08-25 10:25:52,398 INFO [train.py:1114] (1/4) Epoch 3, batch 2400, loss[loss=0.3264, simple_loss=0.3583, pruned_loss=0.1062, ctc_loss=0.2052, over 19316.00 frames. ], tot_loss[loss=0.2986, simple_loss=0.3335, pruned_loss=0.09599, ctc_loss=0.1792, over 3859318.23 frames. ], batch size: 71, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 10:25:55,449 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.31 vs. limit=12.0
+2024-08-25 10:26:02,264 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.40 vs. limit=15.0
+2024-08-25 10:26:04,649 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=39408.0, ans=0.025
+2024-08-25 10:26:06,706 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.27 vs. limit=22.5
+2024-08-25 10:26:11,983 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.02 vs. limit=15.0
+2024-08-25 10:26:40,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=39514.666666666664, ans=0.125
+2024-08-25 10:26:41,539 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.777e+02 2.047e+02 2.383e+02 4.291e+02, threshold=4.094e+02, percent-clipped=1.0
+2024-08-25 10:27:07,784 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.69 vs. limit=15.0
+2024-08-25 10:27:14,171 INFO [train.py:1114] (1/4) Epoch 3, batch 2450, loss[loss=0.3815, simple_loss=0.3729, pruned_loss=0.1421, ctc_loss=0.2651, over 13715.00 frames. ], tot_loss[loss=0.3091, simple_loss=0.3395, pruned_loss=0.1015, ctc_loss=0.1891, over 3731134.73 frames. ], batch size: 140, lr: 3.53e-02, grad_scale: 16.0
+2024-08-25 10:27:47,548 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.23 vs. limit=15.0
+2024-08-25 10:27:48,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=39781.333333333336, ans=0.0022214492753623175
+2024-08-25 10:39:24,826 INFO [train.py:1050] (1/4) Caught exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=89707, OpType=ALLREDUCE, NumelIn=745, NumelOut=745, Timeout(ms)=600000) ran for 600003 milliseconds before timing out..
+2024-08-25 10:39:24,827 INFO [checkpoint.py:75] (1/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/bad-model-1.pt
+2024-08-25 10:39:39,763 INFO [train.py:1413] (1/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/batch-41f60be0-7cef-6aa3-6aed-cf4a4599a084.pt
+2024-08-25 10:39:39,825 INFO [train.py:1419] (1/4) features shape: torch.Size([48, 1633, 80])
+2024-08-25 10:39:39,828 INFO [train.py:1423] (1/4) num tokens: 3940
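The log above ends with rank 1 catching a NCCL all-reduce watchdog timeout, after which train.py saves a `bad-model-1.pt` checkpoint and dumps the in-flight batch (with its feature shape and token count) for post-mortem debugging. The sketch below illustrates that failure-handling pattern in a generic PyTorch loop; it is a hedged reconstruction, not icefall's actual `train.py` API — the helper name `save_bad_model_and_batch`, the `"inputs"` batch key, and the UUID-based batch filename are all assumptions made for illustration.

```python
import logging
import uuid
from pathlib import Path

import torch


def save_bad_model_and_batch(model: torch.nn.Module, batch: dict,
                             exp_dir: Path, rank: int) -> None:
    # Save the current weights so the hanging/diverging state can be inspected.
    ckpt = exp_dir / f"bad-model-{rank}.pt"
    logging.info("Saving checkpoint to %s", ckpt)
    torch.save({"model": model.state_dict()}, ckpt)

    # Dump the batch that was in flight; a fresh UUID keeps repeated dumps distinct.
    batch_path = exp_dir / f"batch-{uuid.uuid4()}.pt"
    logging.info("Saving batch to %s", batch_path)
    torch.save(batch, batch_path)

    # "inputs" is an assumed key; the log above reports (N, T, 80) fbank features.
    logging.info("features shape: %s", batch["inputs"].shape)


def train_one_batch(model: torch.nn.Module, batch: dict,
                    exp_dir: Path, rank: int) -> None:
    try:
        loss = model(batch["inputs"]).sum()  # stand-in for the real CTC/transducer loss
        loss.backward()
    except RuntimeError:
        # e.g. "Watchdog caught collective operation timeout" surfaced via NCCL
        save_bad_model_and_batch(model, batch, exp_dir, rank)
        raise
```

Dumping the offending batch alongside the weights lets the failure be replayed offline on a single device, which is usually far cheaper than reproducing a timeout inside a 4-GPU DDP run.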
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-2 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-2
new file mode 100644
index 0000000000000000000000000000000000000000..74b60af8c4fc998f110faf34da8919250ba12a6e
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-2
@@ -0,0 +1,1156 @@
+2024-08-25 03:46:09,313 INFO [train.py:1182] (2/4) Training started
+2024-08-25 03:46:09,314 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-25 03:46:09,373 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-25 03:46:09,373 INFO [train.py:1212] (2/4) About to create model
+2024-08-25 03:46:10,428 INFO [train.py:1216] (2/4) Number of model parameters: 65805511
+2024-08-25 03:46:10,531 INFO [train.py:1231] (2/4) Using DDP
+2024-08-25 03:46:14,820 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-25 03:46:16,505 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-25 03:46:16,507 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-25 03:46:16,584 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-25 03:46:16,613 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-25 03:46:16,935 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-25 03:46:16,935 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 03:50:49,731 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=44.40 vs. limit=7.5
+2024-08-25 03:50:50,510 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 11561MB
+2024-08-25 03:50:51,648 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 11561MB
+2024-08-25 03:51:20,162 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 11561MB
+2024-08-25 03:51:21,412 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 11561MB
+2024-08-25 03:51:43,055 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 11561MB
+2024-08-25 03:51:44,348 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 11561MB
+2024-08-25 03:53:11,522 INFO [train.py:1114] (2/4) Epoch 1, batch 0, loss[loss=8.684, simple_loss=7.024, pruned_loss=6.921, ctc_loss=4.834, over 19418.00 frames. ], tot_loss[loss=8.684, simple_loss=7.024, pruned_loss=6.921, ctc_loss=4.834, over 19418.00 frames. ], batch size: 48, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 03:53:11,523 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 03:53:26,557 INFO [train.py:1146] (2/4) Epoch 1, validation: loss=8.842, simple_loss=7.151, pruned_loss=6.961, ctc_loss=4.966, over 944034.00 frames.
+2024-08-25 03:53:26,558 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 11601MB
+2024-08-25 03:53:28,034 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.47 vs. limit=7.5
+2024-08-25 03:53:33,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=0.0, ans=0.3
+2024-08-25 03:53:38,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=0.0, ans=0.5
+2024-08-25 03:53:42,753 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=26.54 vs. limit=7.5
+2024-08-25 03:53:47,306 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.09 vs. limit=7.5
+2024-08-25 03:54:10,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=53.333333333333336, ans=0.0988
+2024-08-25 03:54:11,400 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.16 vs. limit=7.52
+2024-08-25 03:54:36,709 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.008e+03 4.149e+03 4.360e+03 5.530e+03 5.553e+03, threshold=1.744e+04, percent-clipped=0.0
+2024-08-25 03:54:56,955 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=465.31 vs. limit=7.54
+2024-08-25 03:54:57,280 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=11.64 vs. limit=5.026666666666666
+2024-08-25 03:54:59,603 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=492.37 vs. limit=5.026666666666666
+2024-08-25 03:55:15,617 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=503.85 vs. limit=5.053333333333334
+2024-08-25 03:55:20,859 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=12.69 vs. limit=7.54
+2024-08-25 03:55:46,189 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.063e+03 1.598e+03 4.141e+03 5.530e+03 6.572e+03, threshold=1.656e+04, percent-clipped=0.0
+2024-08-25 03:55:46,701 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=169.61 vs. limit=5.053333333333334
+2024-08-25 03:57:12,400 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=180.11 vs. limit=4.032
+2024-08-25 03:57:30,445 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=110.75 vs. limit=5.08
+2024-08-25 04:00:13,980 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213.33333333333334, ans=0.49
+2024-08-25 04:00:14,872 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.873e+02 1.048e+03 1.328e+03 4.149e+03 6.572e+03, threshold=5.310e+03, percent-clipped=0.0
+2024-08-25 04:00:18,248 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=17.67 vs. limit=7.58
+2024-08-25 04:00:19,559 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=29.40 vs. limit=7.66
+2024-08-25 04:00:39,836 INFO [train.py:1114] (2/4) Epoch 1, batch 50, loss[loss=1.627, simple_loss=1.075, pruned_loss=1.224, ctc_loss=2.078, over 19747.00 frames. ], tot_loss[loss=3.747, simple_loss=2.908, pruned_loss=2.556, ctc_loss=2.881, over 844203.64 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 04:00:50,923 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=13.14 vs. limit=4.1066666666666665
+2024-08-25 04:00:50,929 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=21.42 vs. limit=7.6
+2024-08-25 04:00:59,157 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=25.18 vs. limit=5.066666666666666
+2024-08-25 04:01:03,710 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=320.0, ans=5.2
+2024-08-25 04:01:03,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=320.0, ans=0.049
+2024-08-25 04:01:27,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=373.3333333333333, ans=0.2962666666666667
+2024-08-25 04:01:50,089 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=30.89 vs. limit=7.78
+2024-08-25 04:01:59,704 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.25 vs. limit=5.093333333333334
+2024-08-25 04:02:05,521 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=49.87 vs. limit=7.66
+2024-08-25 04:02:10,698 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=488.91 vs. limit=7.66
+2024-08-25 04:02:31,898 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=57.70 vs. limit=7.86
+2024-08-25 04:02:39,854 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=480.0, ans=0.4775
+2024-08-25 04:02:40,364 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=138.84 vs. limit=7.68
+2024-08-25 04:02:42,635 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=480.0, ans=0.0892
+2024-08-25 04:02:48,080 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=46.53 vs. limit=7.86
+2024-08-25 04:02:49,181 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=25.47 vs. limit=5.24
+2024-08-25 04:02:49,290 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=31.95 vs. limit=7.68
+2024-08-25 04:02:53,773 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=24.61 vs. limit=7.68
+2024-08-25 04:02:59,370 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=66.53 vs. limit=7.7
+2024-08-25 04:02:59,897 INFO [train.py:1114] (2/4) Epoch 1, batch 100, loss[loss=1.423, simple_loss=1.005, pruned_loss=1.26, ctc_loss=1.3, over 19704.00 frames. ], tot_loss[loss=2.582, simple_loss=1.909, pruned_loss=1.864, ctc_loss=2.351, over 1496570.41 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 04:03:07,085 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.807e+02 4.974e+02 8.674e+02 1.328e+03 6.572e+03, threshold=1.735e+03, percent-clipped=0.0
+2024-08-25 04:03:18,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.whiten.whitening_limit, batch_count=586.6666666666666, ans=4.234666666666667
+2024-08-25 04:03:24,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=586.6666666666666, ans=0.2941333333333333
+2024-08-25 04:03:25,853 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=121.70 vs. limit=7.72
+2024-08-25 04:03:32,163 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=172.86 vs. limit=7.72
+2024-08-25 04:03:36,620 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=23.15 vs. limit=5.16
+2024-08-25 04:03:40,394 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=97.97 vs. limit=7.74
+2024-08-25 04:03:58,602 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=92.62 vs. limit=7.76
+2024-08-25 04:04:02,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=693.3333333333334, ans=0.4675
+2024-08-25 04:04:11,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=746.6666666666666, ans=0.46499999999999997
+2024-08-25 04:04:11,557 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=104.81 vs. limit=7.78
+2024-08-25 04:04:22,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=800.0, ans=0.0475
+2024-08-25 04:04:22,866 INFO [train.py:1114] (2/4) Epoch 1, batch 150, loss[loss=1.175, simple_loss=0.8141, pruned_loss=1.03, ctc_loss=1.093, over 19716.00 frames. ], tot_loss[loss=2.05, simple_loss=1.493, pruned_loss=1.571, ctc_loss=1.869, over 2026976.04 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 04:04:27,006 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=78.11 vs. limit=7.8
+2024-08-25 04:04:30,407 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=800.0, ans=0.0475
+2024-08-25 04:04:34,799 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=35.26 vs. limit=8.14
+2024-08-25 04:04:38,956 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=29.15 vs. limit=8.14
+2024-08-25 04:04:43,504 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=68.16 vs. limit=7.82
+2024-08-25 04:04:51,264 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=49.55 vs. limit=7.84
+2024-08-25 04:05:10,414 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=47.26 vs. limit=7.84
+2024-08-25 04:05:12,459 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=9.80 vs. limit=5.226666666666667
+2024-08-25 04:05:19,481 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=17.04 vs. limit=5.48
+2024-08-25 04:05:44,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=1013.3333333333334, ans=0.4525
+2024-08-25 04:05:46,317 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=1013.3333333333334, ans=0.5
+2024-08-25 04:05:50,434 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=12.76 vs. limit=5.253333333333333
+2024-08-25 04:05:53,924 INFO [train.py:1114] (2/4) Epoch 1, batch 200, loss[loss=1.256, simple_loss=0.8652, pruned_loss=1.01, ctc_loss=1.211, over 18042.00 frames. ], tot_loss[loss=1.76, simple_loss=1.265, pruned_loss=1.381, ctc_loss=1.621, over 2434889.36 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 04:05:55,897 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.53 vs. limit=4.426666666666667
+2024-08-25 04:05:56,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=1066.6666666666667, ans=0.7606666666666667
+2024-08-25 04:05:57,468 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 7.117e+01 1.191e+02 1.554e+02 2.219e+02 5.914e+02, threshold=3.108e+02, percent-clipped=0.0
+2024-08-25 04:06:00,431 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=26.30 vs. limit=7.9
+2024-08-25 04:06:05,126 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=160.47 vs. limit=7.92
+2024-08-25 04:06:08,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=1120.0, ans=0.8608
+2024-08-25 04:06:08,211 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=54.79 vs. limit=7.92
+2024-08-25 04:06:08,525 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=145.49 vs. limit=7.92
+2024-08-25 04:06:15,460 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.43 vs. limit=8.34
+2024-08-25 04:06:16,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1173.3333333333333, ans=0.28826666666666667
+2024-08-25 04:06:16,802 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=15.83 vs. limit=5.586666666666667
+2024-08-25 04:06:18,884 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten.whitening_limit, batch_count=1173.3333333333333, ans=7.94
+2024-08-25 04:06:20,084 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.21 vs. limit=5.293333333333333
+2024-08-25 04:06:26,016 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.61 vs. limit=8.38
+2024-08-25 04:06:40,185 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.50 vs. limit=8.46
+2024-08-25 04:06:57,328 INFO [train.py:1114] (2/4) Epoch 1, batch 250, loss[loss=1.321, simple_loss=0.8957, pruned_loss=1.043, ctc_loss=1.295, over 19449.00 frames. ], tot_loss[loss=1.586, simple_loss=1.126, pruned_loss=1.254, ctc_loss=1.481, over 2755846.84 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 04:07:38,382 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=29.26 vs. limit=8.0
+2024-08-25 04:07:39,638 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=76.05 vs. limit=8.0
+2024-08-25 04:07:39,778 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=27.83 vs. limit=5.666666666666667
+2024-08-25 04:07:39,892 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=35.35 vs. limit=8.0
+2024-08-25 04:08:00,006 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1440.0, ans=0.28559999999999997
+2024-08-25 04:08:02,940 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=70.33 vs. limit=8.58
+2024-08-25 04:08:02,941 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=1440.0, ans=8.58
+2024-08-25 04:08:03,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=1440.0, ans=0.4325
+2024-08-25 04:08:25,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=1546.6666666666667, ans=0.4275
+2024-08-25 04:08:26,153 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=1546.6666666666667, ans=0.142
+2024-08-25 04:09:08,481 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.52 vs. limit=4.618666666666667
+2024-08-25 04:09:08,592 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=36.82 vs. limit=8.08
+2024-08-25 04:09:11,358 INFO [train.py:1114] (2/4) Epoch 1, batch 300, loss[loss=1.247, simple_loss=0.8373, pruned_loss=0.9629, ctc_loss=1.228, over 19531.00 frames. ], tot_loss[loss=1.471, simple_loss=1.032, pruned_loss=1.162, ctc_loss=1.39, over 2999758.88 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 04:09:12,910 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=1600.0, ans=0.425
+2024-08-25 04:09:14,915 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 8.125e+01 1.367e+02 1.753e+02 2.332e+02 3.681e+02, threshold=3.505e+02, percent-clipped=6.0
+2024-08-25 04:10:23,607 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=95.50 vs. limit=8.12
+2024-08-25 04:10:25,774 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=19.56 vs. limit=8.12
+2024-08-25 04:10:35,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=1706.6666666666667, ans=0.2866666666666666
+2024-08-25 04:10:42,358 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=37.93 vs. limit=8.14
+2024-08-25 04:10:50,869 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.13 vs. limit=8.82
+2024-08-25 04:11:08,004 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=16.15 vs. limit=8.18
+2024-08-25 04:11:11,350 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=43.30 vs. limit=8.18
+2024-08-25 04:11:13,103 INFO [train.py:1114] (2/4) Epoch 1, batch 350, loss[loss=1.102, simple_loss=0.7333, pruned_loss=0.8468, ctc_loss=1.072, over 19758.00 frames. ], tot_loss[loss=1.393, simple_loss=0.9663, pruned_loss=1.094, ctc_loss=1.329, over 3190055.67 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 04:11:19,275 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1866.6666666666667, ans=0.2813333333333333
+2024-08-25 04:11:21,503 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1866.6666666666667, ans=0.2813333333333333
+2024-08-25 04:11:27,548 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.86 vs. limit=4.768
+2024-08-25 04:11:30,919 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.26 vs. limit=8.94
+2024-08-25 04:11:35,571 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=21.77 vs. limit=5.986666666666666
+2024-08-25 04:11:45,928 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=11.12 vs. limit=8.24
+2024-08-25 04:11:49,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 04:11:49,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=2026.6666666666667, ans=6.266666666666667
+2024-08-25 04:11:51,849 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=37.12 vs. limit=8.26
+2024-08-25 04:11:52,067 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.21 vs. limit=9.02
+2024-08-25 04:11:53,055 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=20.95 vs. limit=8.26
+2024-08-25 04:11:54,281 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=32.29 vs. limit=8.26
+2024-08-25 04:12:02,564 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.95 vs. limit=8.28
+2024-08-25 04:12:11,676 INFO [train.py:1114] (2/4) Epoch 1, batch 400, loss[loss=1.131, simple_loss=0.7571, pruned_loss=0.8203, ctc_loss=1.101, over 19484.00 frames. ], tot_loss[loss=1.333, simple_loss=0.9162, pruned_loss=1.035, ctc_loss=1.277, over 3342861.84 frames. ], batch size: 54, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 04:12:15,149 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 9.241e+01 1.644e+02 2.144e+02 2.768e+02 4.713e+02, threshold=4.287e+02, percent-clipped=10.0
+2024-08-25 04:12:19,625 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=24.27 vs. limit=9.1
+2024-08-25 04:12:23,812 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=2186.6666666666665, ans=0.22666666666666668
+2024-08-25 04:12:33,277 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.15 vs. limit=9.14
+2024-08-25 04:12:36,371 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=1.165e+01
+2024-08-25 04:12:40,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.21 vs. limit=9.18
+2024-08-25 04:12:49,727 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.43 vs. limit=9.22
+2024-08-25 04:12:58,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=2293.3333333333335, ans=0.08566666666666667
+2024-08-25 04:13:01,198 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=17.20 vs. limit=6.173333333333333
+2024-08-25 04:13:05,891 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=16.56 vs. limit=5.586666666666667
+2024-08-25 04:13:11,466 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.27 vs. limit=9.3
+2024-08-25 04:13:12,048 INFO [train.py:1114] (2/4) Epoch 1, batch 450, loss[loss=1.105, simple_loss=0.7551, pruned_loss=0.7328, ctc_loss=1.065, over 19621.00 frames. ], tot_loss[loss=1.282, simple_loss=0.8783, pruned_loss=0.9737, ctc_loss=1.232, over 3451125.70 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 04:13:14,564 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=2400.0, ans=0.3875
+2024-08-25 04:13:16,910 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=26.17 vs. limit=6.2
+2024-08-25 04:14:10,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=2453.3333333333335, ans=0.8141333333333334
+2024-08-25 04:14:12,033 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=18.33 vs. limit=8.42
+2024-08-25 04:14:12,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=2506.6666666666665, ans=0.1866666666666667
+2024-08-25 04:14:14,212 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=11.30 vs. limit=8.44
+2024-08-25 04:14:26,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=2560.0, ans=0.104
+2024-08-25 04:14:28,962 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.49 vs. limit=8.46
+2024-08-25 04:14:44,408 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.80 vs. limit=8.48
+2024-08-25 04:14:47,703 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.52 vs. limit=8.48
+2024-08-25 04:14:51,709 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.57 vs. limit=9.46
+2024-08-25 04:14:53,362 INFO [train.py:1114] (2/4) Epoch 1, batch 500, loss[loss=0.9956, simple_loss=0.6927, pruned_loss=0.6055, ctc_loss=0.9628, over 19683.00 frames. ], tot_loss[loss=1.218, simple_loss=0.8351, pruned_loss=0.8938, ctc_loss=1.171, over 3546755.56 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:14:58,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=2666.6666666666665, ans=0.375
+2024-08-25 04:14:59,588 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 2.224e+02 2.884e+02 3.405e+02 7.334e+02, threshold=5.768e+02, percent-clipped=15.0
+2024-08-25 04:14:59,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=2666.6666666666665, ans=0.24
+2024-08-25 04:15:03,406 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.81 vs. limit=8.5
+2024-08-25 04:15:06,089 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.22 vs. limit=9.5
+2024-08-25 04:15:12,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=2720.0, ans=0.3725
+2024-08-25 04:15:29,490 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.73 vs. limit=8.56
+2024-08-25 04:15:47,755 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.03 vs. limit=8.58
+2024-08-25 04:15:48,562 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=2880.0, ans=0.088
+2024-08-25 04:15:52,586 INFO [train.py:1114] (2/4) Epoch 1, batch 550, loss[loss=0.9125, simple_loss=0.641, pruned_loss=0.5192, ctc_loss=0.8955, over 19210.00 frames. ], tot_loss[loss=1.152, simple_loss=0.7932, pruned_loss=0.8142, ctc_loss=1.11, over 3608352.33 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:15:52,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=2933.3333333333335, ans=0.03399999999999999
+2024-08-25 04:16:02,302 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.08 vs. limit=8.6
+2024-08-25 04:16:03,340 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.41 vs. limit=8.620000000000001
+2024-08-25 04:16:04,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=2986.6666666666665, ans=0.36
+2024-08-25 04:16:16,779 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.35 vs. limit=9.78
+2024-08-25 04:16:19,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=3040.0, ans=9.78
+2024-08-25 04:16:19,236 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.95 vs. limit=9.78
+2024-08-25 04:16:20,126 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3040.0, ans=0.2696
+2024-08-25 04:16:42,978 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=3093.3333333333335, ans=0.355
+2024-08-25 04:17:00,431 INFO [train.py:1114] (2/4) Epoch 1, batch 600, loss[loss=0.8499, simple_loss=0.6123, pruned_loss=0.4579, ctc_loss=0.7999, over 19434.00 frames. ], tot_loss[loss=1.082, simple_loss=0.7496, pruned_loss=0.7335, ctc_loss=1.042, over 3666673.28 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:17:00,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=3200.0, ans=0.08
+2024-08-25 04:17:01,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=3200.0, ans=0.09899494936611666
+2024-08-25 04:17:03,439 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.22 vs. limit=5.8
+2024-08-25 04:17:03,764 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.809e+02 3.766e+02 4.633e+02 8.655e+02, threshold=7.532e+02, percent-clipped=12.0
+2024-08-25 04:17:10,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_na.min_abs, batch_count=3253.3333333333335, ans=0.01701333333333333
+2024-08-25 04:17:15,387 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.09 vs. limit=9.94
+2024-08-25 04:17:20,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=3253.3333333333335, ans=0.34750000000000003
+2024-08-25 04:18:07,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=3360.0, ans=0.07400000000000001
+2024-08-25 04:18:26,484 INFO [train.py:1114] (2/4) Epoch 1, batch 650, loss[loss=0.7524, simple_loss=0.551, pruned_loss=0.3768, ctc_loss=0.7152, over 19734.00 frames. ], tot_loss[loss=1.008, simple_loss=0.7044, pruned_loss=0.6542, ctc_loss=0.9685, over 3716831.64 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:18:32,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=3466.6666666666665, ans=0.3375
+2024-08-25 04:18:40,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3520.0, ans=0.2648
+2024-08-25 04:19:11,726 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.30 vs. limit=8.86
+2024-08-25 04:19:13,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=3626.6666666666665, ans=0.32999999999999996
+2024-08-25 04:19:21,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 04:19:25,312 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=3733.3333333333335, ans=0.04000000000000001
+2024-08-25 04:20:32,366 INFO [train.py:1114] (2/4) Epoch 1, batch 700, loss[loss=0.6746, simple_loss=0.4988, pruned_loss=0.3284, ctc_loss=0.6337, over 19719.00 frames. ], tot_loss[loss=0.9438, simple_loss=0.6662, pruned_loss=0.5864, ctc_loss=0.9029, over 3748642.13 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:20:34,805 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=3733.3333333333335, ans=0.04000000000000001
+2024-08-25 04:20:35,545 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.600e+02 3.309e+02 4.487e+02 1.180e+03, threshold=6.619e+02, percent-clipped=3.0
+2024-08-25 04:20:43,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.min_positive, batch_count=3786.6666666666665, ans=0.21213333333333334
+2024-08-25 04:20:47,598 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3786.6666666666665, ans=0.26213333333333333
+2024-08-25 04:20:53,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=3840.0, ans=0.32
+2024-08-25 04:21:11,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3893.3333333333335, ans=0.26106666666666667
+2024-08-25 04:21:19,702 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.85 vs. limit=5.986666666666666
+2024-08-25 04:21:25,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 04:21:26,550 INFO [train.py:1114] (2/4) Epoch 1, batch 750, loss[loss=0.6431, simple_loss=0.4956, pruned_loss=0.2889, ctc_loss=0.5701, over 19497.00 frames. ], tot_loss[loss=0.8839, simple_loss=0.631, pruned_loss=0.5263, ctc_loss=0.8389, over 3774549.71 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:21:33,569 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.42 vs. limit=6.0
+2024-08-25 04:21:38,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=4053.3333333333335, ans=0.07466666666666667
+2024-08-25 04:22:07,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=4160.0, ans=0.2624
+2024-08-25 04:22:09,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=4160.0, ans=0.305
+2024-08-25 04:22:33,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=4213.333333333333, ans=0.07
+2024-08-25 04:22:40,630 INFO [train.py:1114] (2/4) Epoch 1, batch 800, loss[loss=0.6197, simple_loss=0.4744, pruned_loss=0.2869, ctc_loss=0.5413, over 19408.00 frames. ], tot_loss[loss=0.8308, simple_loss=0.6008, pruned_loss=0.4744, ctc_loss=0.7793, over 3794397.83 frames. ], batch size: 48, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:22:42,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=4266.666666666667, ans=10.7
+2024-08-25 04:22:43,241 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.47 vs. limit=9.1
+2024-08-25 04:22:43,860 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.484e+02 3.479e+02 4.307e+02 9.603e+02, threshold=6.957e+02, percent-clipped=4.0
+2024-08-25 04:22:48,499 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=4266.666666666667, ans=0.2573333333333333
+2024-08-25 04:23:12,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=4373.333333333333, ans=6.093333333333334
+2024-08-25 04:23:18,940 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.49 vs. limit=10.82
+2024-08-25 04:23:21,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=4426.666666666667, ans=0.025
+2024-08-25 04:23:21,806 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=4426.666666666667, ans=0.09899494936611666
+2024-08-25 04:23:22,008 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.59 vs. limit=10.82
+2024-08-25 04:23:30,208 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.03 vs. limit=7.213333333333333
+2024-08-25 04:23:42,670 INFO [train.py:1114] (2/4) Epoch 1, batch 850, loss[loss=0.6227, simple_loss=0.4924, pruned_loss=0.2633, ctc_loss=0.5395, over 19636.00 frames. ], tot_loss[loss=0.7803, simple_loss=0.5722, pruned_loss=0.4275, ctc_loss=0.7217, over 3813762.31 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:23:52,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=4586.666666666667, ans=0.009872463768115942
+2024-08-25 04:23:52,806 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.09 vs. limit=9.22
+2024-08-25 04:23:58,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=4586.666666666667, ans=0.7394666666666667
+2024-08-25 04:24:22,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=4693.333333333333, ans=0.28
+2024-08-25 04:24:36,249 INFO [train.py:1114] (2/4) Epoch 1, batch 900, loss[loss=0.5051, simple_loss=0.4094, pruned_loss=0.2066, ctc_loss=0.4173, over 19400.00 frames. ], tot_loss[loss=0.7398, simple_loss=0.5499, pruned_loss=0.3902, ctc_loss=0.6744, over 3818394.56 frames. ], batch size: 48, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:24:39,553 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.433e+02 3.203e+02 4.513e+02 7.559e+02, threshold=6.406e+02, percent-clipped=2.0
+2024-08-25 04:24:53,596 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4853.333333333333, ans=0.25146666666666667
+2024-08-25 04:25:01,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=4906.666666666667, ans=0.27
+2024-08-25 04:25:05,973 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.92 vs. limit=9.34
+2024-08-25 04:25:20,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=4960.0, ans=0.7264
+2024-08-25 04:25:22,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=5013.333333333333, ans=0.06866666666666668
+2024-08-25 04:25:32,734 INFO [train.py:1114] (2/4) Epoch 1, batch 950, loss[loss=0.5119, simple_loss=0.4161, pruned_loss=0.2089, ctc_loss=0.4225, over 19500.00 frames. ], tot_loss[loss=0.7028, simple_loss=0.5296, pruned_loss=0.3574, ctc_loss=0.6305, over 3819361.22 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:25:34,143 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.32 vs. limit=6.266666666666667
+2024-08-25 04:25:51,301 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=5120.0, ans=0.009756521739130435
+2024-08-25 04:25:53,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=5173.333333333333, ans=0.009744927536231884
+2024-08-25 04:26:01,130 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.15 vs. limit=9.44
+2024-08-25 04:26:10,284 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.66 vs. limit=9.46
+2024-08-25 04:26:18,067 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=5280.0, ans=0.009721739130434783
+2024-08-25 04:26:33,400 INFO [train.py:1114] (2/4) Epoch 1, batch 1000, loss[loss=0.4833, simple_loss=0.4035, pruned_loss=0.1882, ctc_loss=0.3857, over 19850.00 frames. ], tot_loss[loss=0.6715, simple_loss=0.5128, pruned_loss=0.3303, ctc_loss=0.5924, over 3815620.21 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:26:36,696 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.226e+02 2.758e+02 3.479e+02 9.619e+02, threshold=5.516e+02, percent-clipped=3.0
+2024-08-25 04:26:39,027 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=5333.333333333333, ans=0.25
+2024-08-25 04:26:42,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=5333.333333333333, ans=11.5
+2024-08-25 04:26:52,477 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=5.902e-02
+2024-08-25 04:27:05,561 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.81 vs. limit=11.620000000000001
+2024-08-25 04:27:09,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=5493.333333333333, ans=0.2824
+2024-08-25 04:27:20,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=5546.666666666667, ans=0.24
+2024-08-25 04:27:21,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=5546.666666666667, ans=0.24
+2024-08-25 04:27:25,682 INFO [train.py:1114] (2/4) Epoch 1, batch 1050, loss[loss=0.5239, simple_loss=0.4401, pruned_loss=0.2044, ctc_loss=0.4116, over 19854.00 frames. ], tot_loss[loss=0.6382, simple_loss=0.4947, pruned_loss=0.3034, ctc_loss=0.5534, over 3822603.09 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:27:26,268 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.75 vs. limit=9.6
+2024-08-25 04:27:28,108 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=5600.0, ans=7.8
+2024-08-25 04:27:29,302 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.20 vs. limit=9.6
+2024-08-25 04:27:53,408 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.70 vs. limit=9.64
+2024-08-25 04:27:54,277 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.82 vs. limit=3.856
+2024-08-25 04:28:01,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=5760.0, ans=0.22999999999999998
+2024-08-25 04:28:05,371 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.68 vs. limit=11.82
+2024-08-25 04:28:20,176 INFO [train.py:1114] (2/4) Epoch 1, batch 1100, loss[loss=0.5003, simple_loss=0.4165, pruned_loss=0.1995, ctc_loss=0.3981, over 19580.00 frames. ], tot_loss[loss=0.6104, simple_loss=0.4799, pruned_loss=0.2814, ctc_loss=0.5201, over 3829879.19 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:28:23,245 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.143e+02 2.593e+02 3.421e+02 4.407e+02, threshold=5.186e+02, percent-clipped=0.0
+2024-08-25 04:28:25,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=5866.666666666667, ans=0.09899494936611666
+2024-08-25 04:28:40,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=5973.333333333333, ans=0.03133333333333334
+2024-08-25 04:28:40,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=5973.333333333333, ans=0.21999999999999997
+2024-08-25 04:28:52,182 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:28:56,423 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=6026.666666666667, ans=0.041555555555555554
+2024-08-25 04:28:57,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=6026.666666666667, ans=0.0
+2024-08-25 04:28:58,608 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=6026.666666666667, ans=0.031166666666666665
+2024-08-25 04:29:10,255 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.69 vs. limit=12.059999999999999
+2024-08-25 04:29:14,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=6080.0, ans=0.21500000000000002
+2024-08-25 04:29:15,901 INFO [train.py:1114] (2/4) Epoch 1, batch 1150, loss[loss=0.4637, simple_loss=0.4001, pruned_loss=0.1752, ctc_loss=0.3526, over 19591.00 frames. ], tot_loss[loss=0.5881, simple_loss=0.4684, pruned_loss=0.264, ctc_loss=0.4927, over 3828919.76 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 04:29:49,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=6240.0, ans=0.04066666666666667
+2024-08-25 04:32:06,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=6293.333333333333, ans=0.07
+2024-08-25 04:32:19,855 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.14 vs. limit=12.26
+2024-08-25 04:32:25,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=6346.666666666667, ans=0.2025
+2024-08-25 04:32:27,982 INFO [train.py:1114] (2/4) Epoch 1, batch 1200, loss[loss=0.4879, simple_loss=0.4246, pruned_loss=0.181, ctc_loss=0.3744, over 19843.00 frames. ], tot_loss[loss=0.5703, simple_loss=0.4597, pruned_loss=0.2501, ctc_loss=0.4702, over 3823874.27 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:32:31,078 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.077e+02 2.797e+02 3.799e+02 8.339e+02, threshold=5.594e+02, percent-clipped=11.0
+2024-08-25 04:32:48,179 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.85 vs. limit=12.379999999999999
+2024-08-25 04:32:55,473 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.22 vs. limit=12.379999999999999
+2024-08-25 04:32:59,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:04,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=6560.0, ans=0.2344
+2024-08-25 04:33:06,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=6560.0, ans=0.025
+2024-08-25 04:33:09,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=6613.333333333333, ans=0.07
+2024-08-25 04:33:10,330 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=6613.333333333333, ans=0.6685333333333334
+2024-08-25 04:33:10,414 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=6613.333333333333, ans=0.23386666666666667
+2024-08-25 04:33:19,307 INFO [train.py:1114] (2/4) Epoch 1, batch 1250, loss[loss=0.5084, simple_loss=0.436, pruned_loss=0.1967, ctc_loss=0.3919, over 19546.00 frames. ], tot_loss[loss=0.55, simple_loss=0.4498, pruned_loss=0.235, ctc_loss=0.4458, over 3841753.42 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:33:28,058 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.14 vs. limit=12.5
+2024-08-25 04:33:36,675 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.51 vs. limit=10.02
+2024-08-25 04:33:41,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=6773.333333333333, ans=0.0
+2024-08-25 04:33:43,330 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=6773.333333333333, ans=0.009397101449275363
+2024-08-25 04:33:47,333 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=6773.333333333333, ans=0.0
+2024-08-25 04:34:12,500 INFO [train.py:1114] (2/4) Epoch 1, batch 1300, loss[loss=0.5032, simple_loss=0.4364, pruned_loss=0.1964, ctc_loss=0.3706, over 18845.00 frames. ], tot_loss[loss=0.5321, simple_loss=0.4407, pruned_loss=0.2227, ctc_loss=0.4244, over 3845178.19 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:34:15,554 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.007e+02 2.492e+02 3.309e+02 5.533e+02, threshold=4.985e+02, percent-clipped=0.0
+2024-08-25 04:34:35,598 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=14.45 vs. limit=12.780000000000001
+2024-08-25 04:34:38,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=7040.0, ans=0.13488
+2024-08-25 04:35:04,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=7146.666666666667, ans=0.03688888888888889
+2024-08-25 04:35:10,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=7200.0, ans=0.648
+2024-08-25 04:35:11,274 INFO [train.py:1114] (2/4) Epoch 1, batch 1350, loss[loss=0.4704, simple_loss=0.4118, pruned_loss=0.181, ctc_loss=0.348, over 19763.00 frames. ], tot_loss[loss=0.516, simple_loss=0.433, pruned_loss=0.2117, ctc_loss=0.4052, over 3856291.44 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:35:16,838 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.93 vs. limit=12.9
+2024-08-25 04:35:24,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=7253.333333333333, ans=0.15999999999999998
+2024-08-25 04:35:26,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=7253.333333333333, ans=0.15999999999999998
+2024-08-25 04:35:33,783 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:36:02,099 INFO [train.py:1114] (2/4) Epoch 1, batch 1400, loss[loss=0.419, simple_loss=0.3718, pruned_loss=0.1603, ctc_loss=0.3016, over 19658.00 frames. ], tot_loss[loss=0.5021, simple_loss=0.4261, pruned_loss=0.2027, ctc_loss=0.3892, over 3863215.76 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:36:05,097 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.980e+02 2.233e+02 2.820e+02 5.701e+02, threshold=4.466e+02, percent-clipped=2.0
+2024-08-25 04:36:06,472 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=7466.666666666667, ans=0.6386666666666667
+2024-08-25 04:36:30,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=7573.333333333333, ans=0.14500000000000002
+2024-08-25 04:36:50,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=7680.0, ans=0.14
+2024-08-25 04:36:51,358 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.16 vs. limit=4.152
+2024-08-25 04:36:54,782 INFO [train.py:1114] (2/4) Epoch 1, batch 1450, loss[loss=0.4705, simple_loss=0.4243, pruned_loss=0.1779, ctc_loss=0.3307, over 19655.00 frames. ], tot_loss[loss=0.4916, simple_loss=0.4215, pruned_loss=0.1959, ctc_loss=0.3764, over 3861439.54 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:36:59,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=7733.333333333333, ans=0.1375
+2024-08-25 04:37:08,716 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=7786.666666666667, ans=0.009176811594202899
+2024-08-25 04:37:10,776 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=7786.666666666667, ans=0.0
+2024-08-25 04:37:12,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=7786.666666666667, ans=0.6274666666666666
+2024-08-25 04:37:29,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=7893.333333333333, ans=0.025
+2024-08-25 04:37:39,170 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=7946.666666666667, ans=0.033555555555555554
+2024-08-25 04:37:39,345 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.58 vs. limit=13.46
+2024-08-25 04:37:44,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=7946.666666666667, ans=0.1275
+2024-08-25 04:37:48,626 INFO [train.py:1114] (2/4) Epoch 1, batch 1500, loss[loss=0.4525, simple_loss=0.4162, pruned_loss=0.1662, ctc_loss=0.3177, over 19592.00 frames. ], tot_loss[loss=0.4807, simple_loss=0.4168, pruned_loss=0.189, ctc_loss=0.3639, over 3861829.57 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:37:52,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=8000.0, ans=0.03333333333333334
+2024-08-25 04:37:54,385 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 1.987e+02 2.351e+02 3.240e+02 5.717e+02, threshold=4.702e+02, percent-clipped=4.0
+2024-08-25 04:38:08,890 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.01 vs. limit=7.013333333333334
+2024-08-25 04:38:19,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=8106.666666666667, ans=0.025
+2024-08-25 04:38:56,150 INFO [train.py:1114] (2/4) Epoch 1, batch 1550, loss[loss=0.4752, simple_loss=0.4209, pruned_loss=0.1826, ctc_loss=0.3594, over 19606.00 frames. ], tot_loss[loss=0.4722, simple_loss=0.4128, pruned_loss=0.1841, ctc_loss=0.3546, over 3847991.69 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:38:59,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=8266.666666666666, ans=0.125
+2024-08-25 04:39:03,056 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=8266.666666666666, ans=0.125
+2024-08-25 04:39:09,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=8320.0, ans=0.05
+2024-08-25 04:39:47,206 INFO [train.py:1114] (2/4) Epoch 1, batch 1600, loss[loss=0.4587, simple_loss=0.4223, pruned_loss=0.1709, ctc_loss=0.3261, over 19855.00 frames. ], tot_loss[loss=0.4637, simple_loss=0.4091, pruned_loss=0.1794, ctc_loss=0.345, over 3835807.66 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:52,844 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.044e+02 2.368e+02 2.950e+02 6.795e+02, threshold=4.737e+02, percent-clipped=6.0
+2024-08-25 04:40:03,531 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:40:30,342 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=8693.333333333334, ans=0.21306666666666665
+2024-08-25 04:40:42,984 INFO [train.py:1114] (2/4) Epoch 1, batch 1650, loss[loss=0.4572, simple_loss=0.4145, pruned_loss=0.1727, ctc_loss=0.3412, over 19650.00 frames. ], tot_loss[loss=0.4549, simple_loss=0.4052, pruned_loss=0.1745, ctc_loss=0.3358, over 3832121.91 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:40:44,378 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=8800.0, ans=0.04949747468305833
+2024-08-25 04:42:03,351 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.00 vs. limit=7.213333333333333
+2024-08-25 04:42:48,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=8906.666666666666, ans=0.21093333333333333
+2024-08-25 04:43:21,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=9013.333333333334, ans=0.02911111111111111
+2024-08-25 04:43:24,860 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:43:28,136 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.40 vs. limit=10.9
+2024-08-25 04:43:28,616 INFO [train.py:1114] (2/4) Epoch 1, batch 1700, loss[loss=0.3433, simple_loss=0.3344, pruned_loss=0.1191, ctc_loss=0.2401, over 19698.00 frames. ], tot_loss[loss=0.4456, simple_loss=0.4011, pruned_loss=0.1694, ctc_loss=0.3263, over 3846604.51 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:43:31,593 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.986e+02 2.386e+02 2.791e+02 4.935e+02, threshold=4.772e+02, percent-clipped=1.0
+2024-08-25 04:43:55,437 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=9173.333333333334, ans=0.5789333333333333
+2024-08-25 04:43:56,513 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=9226.666666666666, ans=0.07
+2024-08-25 04:44:02,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 04:44:12,692 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.92 vs. limit=14.46
+2024-08-25 04:45:23,911 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.61 vs. limit=10.98
+2024-08-25 04:45:26,293 INFO [train.py:1114] (2/4) Epoch 1, batch 1750, loss[loss=0.3472, simple_loss=0.3354, pruned_loss=0.1257, ctc_loss=0.2351, over 19669.00 frames. ], tot_loss[loss=0.4367, simple_loss=0.3973, pruned_loss=0.1648, ctc_loss=0.3172, over 3851010.48 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:45:27,624 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.54 vs. limit=11.0
+2024-08-25 04:45:32,413 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.83 vs. limit=7.733333333333333
+2024-08-25 04:45:36,234 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.78 vs. limit=9.693333333333332
+2024-08-25 04:45:36,796 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=9386.666666666666, ans=0.5714666666666668
+2024-08-25 04:45:38,010 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.18 vs. limit=7.346666666666666
+2024-08-25 04:45:44,742 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.49 vs. limit=14.58
+2024-08-25 04:46:01,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=9493.333333333334, ans=0.05
+2024-08-25 04:46:13,428 INFO [train.py:1114] (2/4) Epoch 1, batch 1800, loss[loss=0.4117, simple_loss=0.4007, pruned_loss=0.1472, ctc_loss=0.287, over 19630.00 frames. ], tot_loss[loss=0.4314, simple_loss=0.3958, pruned_loss=0.1621, ctc_loss=0.3116, over 3851386.34 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:46:16,181 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.025e+02 2.321e+02 2.784e+02 4.120e+02, threshold=4.643e+02, percent-clipped=0.0
+2024-08-25 04:46:17,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=9600.0, ans=0.125
+2024-08-25 04:46:17,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=9600.0, ans=0.02666666666666667
+2024-08-25 04:46:41,336 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.82 vs. limit=7.904
+2024-08-25 04:46:45,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=9760.0, ans=0.125
+2024-08-25 04:48:18,868 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.15 vs. limit=14.86
+2024-08-25 04:48:23,599 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.98 vs. limit=11.18
+2024-08-25 04:48:28,775 INFO [train.py:1114] (2/4) Epoch 1, batch 1850, loss[loss=0.4105, simple_loss=0.3955, pruned_loss=0.15, ctc_loss=0.2904, over 19577.00 frames. ], tot_loss[loss=0.423, simple_loss=0.3921, pruned_loss=0.1581, ctc_loss=0.3039, over 3855035.66 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:48:29,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=9866.666666666666, ans=0.025
+2024-08-25 04:48:30,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=9866.666666666666, ans=0.5546666666666666
+2024-08-25 04:48:30,967 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.35 vs. limit=4.48
+2024-08-25 04:48:32,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=9866.666666666666, ans=0.125
+2024-08-25 04:48:44,161 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.27 vs. limit=7.48
+2024-08-25 04:48:44,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=9920.0, ans=0.125
+2024-08-25 04:48:52,556 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.65 vs. limit=11.24
+2024-08-25 04:49:05,133 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:49:07,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=10080.0, ans=0.125
+2024-08-25 04:49:10,967 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.52 vs. limit=8.032
+2024-08-25 04:49:15,872 INFO [train.py:1114] (2/4) Epoch 1, batch 1900, loss[loss=0.3972, simple_loss=0.3927, pruned_loss=0.1435, ctc_loss=0.27, over 19635.00 frames. ], tot_loss[loss=0.4177, simple_loss=0.3906, pruned_loss=0.1555, ctc_loss=0.2987, over 3860409.62 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:49:18,609 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.031e+02 2.370e+02 2.878e+02 5.610e+02, threshold=4.739e+02, percent-clipped=2.0
+2024-08-25 04:49:24,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=10186.666666666666, ans=0.025
+2024-08-25 04:49:53,414 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=10186.666666666666, ans=0.008655072463768116
+2024-08-25 04:50:06,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=10240.0, ans=0.125
+2024-08-25 04:50:19,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.whiten.whitening_limit, batch_count=10293.333333333334, ans=8.117333333333335
+2024-08-25 04:50:25,426 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=10346.666666666666, ans=0.035
+2024-08-25 04:50:25,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=10346.666666666666, ans=0.0
+2024-08-25 04:50:31,846 INFO [train.py:1114] (2/4) Epoch 1, batch 1950, loss[loss=0.3603, simple_loss=0.3565, pruned_loss=0.1307, ctc_loss=0.249, over 19602.00 frames. ], tot_loss[loss=0.4123, simple_loss=0.3895, pruned_loss=0.1527, ctc_loss=0.2937, over 3870016.29 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:50:44,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=10453.333333333334, ans=0.125
+2024-08-25 04:50:55,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=10506.666666666666, ans=0.125
+2024-08-25 04:51:16,354 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=10560.0, ans=0.125
+2024-08-25 04:52:05,800 INFO [train.py:1114] (2/4) Epoch 1, batch 2000, loss[loss=0.3076, simple_loss=0.3223, pruned_loss=0.1047, ctc_loss=0.2086, over 19660.00 frames. ], tot_loss[loss=0.4082, simple_loss=0.3883, pruned_loss=0.151, ctc_loss=0.2905, over 3853565.15 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:52:09,665 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 1.861e+02 2.137e+02 2.685e+02 4.799e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-25 04:52:13,523 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=10666.666666666666, ans=0.5266666666666667
+2024-08-25 04:53:22,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=10666.666666666666, ans=0.0
+2024-08-25 04:53:38,520 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=10720.0, ans=0.008539130434782608
+2024-08-25 04:53:39,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=10720.0, ans=0.125
+2024-08-25 04:53:41,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=10720.0, ans=0.5248
+2024-08-25 04:53:55,034 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.37 vs. limit=15.58
+2024-08-25 04:54:13,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10826.666666666666, ans=0.19173333333333334
+2024-08-25 04:54:17,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=10826.666666666666, ans=0.5210666666666668
+2024-08-25 04:54:28,810 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:54:32,958 INFO [train.py:1114] (2/4) Epoch 1, batch 2050, loss[loss=0.3706, simple_loss=0.3624, pruned_loss=0.1366, ctc_loss=0.2637, over 19688.00 frames. ], tot_loss[loss=0.402, simple_loss=0.3855, pruned_loss=0.1484, ctc_loss=0.2851, over 3851076.60 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:54:34,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=10933.333333333334, ans=0.02111111111111111
+2024-08-25 04:54:34,524 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.16 vs. limit=8.373333333333335
+2024-08-25 04:54:44,537 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.77 vs. limit=11.6
+2024-08-25 04:54:49,326 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=11.27 vs. limit=10.493333333333332
+2024-08-25 04:54:53,125 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.79 vs. limit=15.74
+2024-08-25 04:55:22,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11093.333333333334, ans=0.18906666666666666
+2024-08-25 04:55:33,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=11146.666666666666, ans=0.125
+2024-08-25 04:55:33,326 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.47 vs. limit=11.68
+2024-08-25 04:55:41,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=11200.0, ans=0.008434782608695653
+2024-08-25 04:55:42,297 INFO [train.py:1114] (2/4) Epoch 1, batch 2100, loss[loss=0.3676, simple_loss=0.379, pruned_loss=0.1274, ctc_loss=0.2537, over 19755.00 frames. ], tot_loss[loss=0.3955, simple_loss=0.3826, pruned_loss=0.1453, ctc_loss=0.2793, over 3858190.80 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:56:30,746 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=11200.0, ans=0.125
+2024-08-25 04:56:35,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=11200.0, ans=0.125
+2024-08-25 04:56:36,118 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 1.936e+02 2.214e+02 2.535e+02 3.885e+02, threshold=4.428e+02, percent-clipped=0.0
+2024-08-25 04:56:37,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11200.0, ans=0.188
+2024-08-25 04:56:38,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=11200.0, ans=0.025
+2024-08-25 04:56:39,031 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.97 vs. limit=11.7
+2024-08-25 04:56:59,108 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=11253.333333333334, ans=0.125
+2024-08-25 04:57:07,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11306.666666666666, ans=0.18693333333333334
+2024-08-25 04:57:09,767 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.08 vs. limit=15.98
+2024-08-25 04:57:16,738 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:57:22,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=11360.0, ans=0.019333333333333338
+2024-08-25 04:57:33,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=11413.333333333334, ans=0.0
+2024-08-25 04:57:35,995 INFO [train.py:1114] (2/4) Epoch 1, batch 2150, loss[loss=0.3265, simple_loss=0.3487, pruned_loss=0.1097, ctc_loss=0.212, over 19853.00 frames. ], tot_loss[loss=0.3905, simple_loss=0.3806, pruned_loss=0.1429, ctc_loss=0.2747, over 3868217.85 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:58:14,116 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.07 vs. limit=16.1
+2024-08-25 04:58:45,897 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.72 vs. limit=7.866666666666666
+2024-08-25 04:59:00,098 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=11520.0, ans=0.07
+2024-08-25 04:59:36,667 INFO [train.py:1114] (2/4) Epoch 1, batch 2200, loss[loss=0.3692, simple_loss=0.3797, pruned_loss=0.1292, ctc_loss=0.2504, over 19608.00 frames. ], tot_loss[loss=0.386, simple_loss=0.3788, pruned_loss=0.1407, ctc_loss=0.2703, over 3866449.55 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:59:37,711 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=11733.333333333334, ans=0.125
+2024-08-25 04:59:40,225 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 1.884e+02 2.153e+02 2.810e+02 4.673e+02, threshold=4.307e+02, percent-clipped=1.0
+2024-08-25 04:59:48,898 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.01 vs. limit=16.34
+2024-08-25 04:59:58,750 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.50 vs. limit=16.380000000000003
+2024-08-25 05:00:09,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=11893.333333333334, ans=0.125
+2024-08-25 05:00:30,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=11946.666666666666, ans=0.025
+2024-08-25 05:00:32,500 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=11946.666666666666, ans=0.4818666666666667
+2024-08-25 05:00:34,224 INFO [train.py:1114] (2/4) Epoch 1, batch 2250, loss[loss=0.3553, simple_loss=0.3716, pruned_loss=0.1219, ctc_loss=0.2381, over 19622.00 frames. ], tot_loss[loss=0.3828, simple_loss=0.3778, pruned_loss=0.1391, ctc_loss=0.2671, over 3867289.53 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:00:37,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=12000.0, ans=0.125
+2024-08-25 05:01:30,698 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=12160.0, ans=0.016
+2024-08-25 05:01:44,084 INFO [train.py:1114] (2/4) Epoch 1, batch 2300, loss[loss=0.3489, simple_loss=0.3572, pruned_loss=0.1216, ctc_loss=0.2436, over 19513.00 frames. ], tot_loss[loss=0.3786, simple_loss=0.3753, pruned_loss=0.1372, ctc_loss=0.263, over 3861513.50 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:01:47,657 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.926e+02 2.114e+02 2.507e+02 4.625e+02, threshold=4.228e+02, percent-clipped=3.0
+2024-08-25 05:02:17,311 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=12426.666666666666, ans=0.125
+2024-08-25 05:02:30,774 INFO [train.py:1114] (2/4) Epoch 1, batch 2350, loss[loss=0.3903, simple_loss=0.3946, pruned_loss=0.1403, ctc_loss=0.2637, over 19677.00 frames. ], tot_loss[loss=0.3764, simple_loss=0.3746, pruned_loss=0.1362, ctc_loss=0.2605, over 3863663.06 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:02:31,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=12533.333333333334, ans=0.0
+2024-08-25 05:02:36,236 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:04:31,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=12640.0, ans=0.17359999999999998
+2024-08-25 05:04:43,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=12693.333333333334, ans=17.02
+2024-08-25 05:04:56,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=12746.666666666666, ans=0.125
+2024-08-25 05:04:59,812 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=12746.666666666666, ans=0.125
+2024-08-25 05:05:05,284 INFO [train.py:1114] (2/4) Epoch 1, batch 2400, loss[loss=0.3825, simple_loss=0.3882, pruned_loss=0.1363, ctc_loss=0.2604, over 19247.00 frames. ], tot_loss[loss=0.3759, simple_loss=0.3755, pruned_loss=0.1356, ctc_loss=0.2596, over 3858080.96 frames. ], batch size: 71, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:05:05,513 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=12800.0, ans=0.125
+2024-08-25 05:05:06,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12800.0, ans=0.172
+2024-08-25 05:05:08,755 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.948e+02 2.252e+02 2.666e+02 4.870e+02, threshold=4.504e+02, percent-clipped=4.0
+2024-08-25 05:05:15,021 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.71 vs. limit=12.32
+2024-08-25 05:05:19,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=12853.333333333334, ans=0.125
+2024-08-25 05:05:22,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=12853.333333333334, ans=0.013111111111111108
+2024-08-25 05:05:34,891 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=12960.0, ans=0.09899494936611666
+2024-08-25 05:05:40,657 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=12960.0, ans=0.012666666666666666
+2024-08-25 05:05:46,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=13013.333333333334, ans=0.44453333333333334
+2024-08-25 05:05:51,089 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=13013.333333333334, ans=0.125
+2024-08-25 05:05:52,655 INFO [train.py:1114] (2/4) Epoch 1, batch 2450, loss[loss=0.4987, simple_loss=0.4325, pruned_loss=0.2063, ctc_loss=0.3804, over 13508.00 frames. ], tot_loss[loss=0.3858, simple_loss=0.381, pruned_loss=0.141, ctc_loss=0.2687, over 3731381.37 frames. ], batch size: 141, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:06:17,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=13066.666666666666, ans=0.125
+2024-08-25 05:06:28,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=13120.0, ans=0.012000000000000004
+2024-08-25 05:06:31,958 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.09 vs. limit=4.976
+2024-08-25 05:06:35,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=13173.333333333334, ans=0.025
+2024-08-25 05:07:49,749 INFO [train.py:1114] (2/4) Epoch 2, batch 0, loss[loss=0.342, simple_loss=0.3499, pruned_loss=0.1212, ctc_loss=0.2294, over 19423.00 frames. ], tot_loss[loss=0.342, simple_loss=0.3499, pruned_loss=0.1212, ctc_loss=0.2294, over 19423.00 frames. ], batch size: 48, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 05:07:49,750 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 05:09:14,179 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.4.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([2.6524, 4.0124, 3.8649, 3.8429], device='cuda:2')
+2024-08-25 05:09:16,712 INFO [train.py:1146] (2/4) Epoch 2, validation: loss=0.2886, simple_loss=0.3508, pruned_loss=0.0823, ctc_loss=0.1542, over 944034.00 frames.
+2024-08-25 05:09:16,712 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13259MB
+2024-08-25 05:09:17,007 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=13280.0, ans=0.1672
+2024-08-25 05:09:24,227 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.98 vs. limit=8.32
+2024-08-25 05:09:35,624 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 1.938e+02 2.191e+02 2.677e+02 6.592e+02, threshold=4.382e+02, percent-clipped=7.0
+2024-08-25 05:09:38,348 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=5.87 vs. limit=6.666666666666667
+2024-08-25 05:09:43,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=13386.666666666666, ans=0.4314666666666667
+2024-08-25 05:09:43,610 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 05:09:44,544 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 05:10:10,146 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.27 vs. limit=8.386666666666667
+2024-08-25 05:10:10,708 INFO [train.py:1114] (2/4) Epoch 2, batch 50, loss[loss=0.282, simple_loss=0.3032, pruned_loss=0.09346, ctc_loss=0.185, over 19707.00 frames. ], tot_loss[loss=0.3688, simple_loss=0.3726, pruned_loss=0.132, ctc_loss=0.2523, over 845808.55 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:10:33,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=13546.666666666666, ans=0.125
+2024-08-25 05:10:45,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=13600.0, ans=0.09899494936611666
+2024-08-25 05:11:07,156 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.67 vs. limit=12.620000000000001
+2024-08-25 05:11:12,853 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=13653.333333333334, ans=0.009777777777777774
+2024-08-25 05:11:14,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=13653.333333333334, ans=0.125
+2024-08-25 05:11:20,665 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:11:47,135 INFO [train.py:1114] (2/4) Epoch 2, batch 100, loss[loss=0.3215, simple_loss=0.3371, pruned_loss=0.1111, ctc_loss=0.2094, over 19733.00 frames. ], tot_loss[loss=0.3686, simple_loss=0.374, pruned_loss=0.1314, ctc_loss=0.2509, over 1500191.62 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:11:49,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=13813.333333333334, ans=0.125
+2024-08-25 05:11:53,164 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=13813.333333333334, ans=0.009111111111111105
+2024-08-25 05:12:00,809 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.907e+02 2.167e+02 2.481e+02 4.957e+02, threshold=4.333e+02, percent-clipped=1.0
+2024-08-25 05:12:01,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=13866.666666666666, ans=0.125
+2024-08-25 05:12:02,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=13866.666666666666, ans=0.0
+2024-08-25 05:12:13,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=13920.0, ans=0.125
+2024-08-25 05:12:15,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=13973.333333333334, ans=0.0
+2024-08-25 05:12:23,036 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=13973.333333333334, ans=0.125
+2024-08-25 05:12:50,515 INFO [train.py:1114] (2/4) Epoch 2, batch 150, loss[loss=0.2924, simple_loss=0.3177, pruned_loss=0.09527, ctc_loss=0.1912, over 19700.00 frames. ], tot_loss[loss=0.363, simple_loss=0.3698, pruned_loss=0.1289, ctc_loss=0.2458, over 2029517.07 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:12:52,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=14080.0, ans=0.007808695652173913
+2024-08-25 05:13:21,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=14186.666666666666, ans=0.125
+2024-08-25 05:13:26,047 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=14186.666666666666, ans=0.125
+2024-08-25 05:14:15,329 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.05 vs. limit=8.56
+2024-08-25 05:14:47,772 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=14293.333333333334, ans=10.0
+2024-08-25 05:14:50,535 INFO [train.py:1114] (2/4) Epoch 2, batch 200, loss[loss=0.3685, simple_loss=0.3686, pruned_loss=0.1338, ctc_loss=0.2521, over 18357.00 frames. ], tot_loss[loss=0.3571, simple_loss=0.3661, pruned_loss=0.126, ctc_loss=0.2402, over 2437654.93 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:15:13,301 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=14400.0, ans=0.156
+2024-08-25 05:15:14,931 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.847e+02 2.110e+02 2.499e+02 4.235e+02, threshold=4.220e+02, percent-clipped=0.0
+2024-08-25 05:15:22,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=14453.333333333334, ans=0.125
+2024-08-25 05:15:45,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=14560.0, ans=0.06532000000000002
+2024-08-25 05:15:49,792 INFO [train.py:1114] (2/4) Epoch 2, batch 250, loss[loss=0.3627, simple_loss=0.3765, pruned_loss=0.1258, ctc_loss=0.2431, over 19354.00 frames. ], tot_loss[loss=0.3556, simple_loss=0.3659, pruned_loss=0.125, ctc_loss=0.2379, over 2756627.41 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:16:06,427 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=14613.333333333334, ans=0.3885333333333333
+2024-08-25 05:16:26,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=14720.0, ans=0.38480000000000003
+2024-08-25 05:16:29,743 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:16:35,348 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=14720.0, ans=13.02
+2024-08-25 05:16:45,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=14773.333333333334, ans=0.005111111111111108
+2024-08-25 05:19:29,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=14826.666666666666, ans=0.125
+2024-08-25 05:19:37,924 INFO [train.py:1114] (2/4) Epoch 2, batch 300, loss[loss=0.3541, simple_loss=0.3747, pruned_loss=0.1223, ctc_loss=0.2225, over 19528.00 frames. ], tot_loss[loss=0.3535, simple_loss=0.3644, pruned_loss=0.1241, ctc_loss=0.2359, over 3001049.19 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:19:49,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=14880.0, ans=0.007634782608695653
+2024-08-25 05:19:56,629 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.858e+02 2.099e+02 2.398e+02 3.801e+02, threshold=4.198e+02, percent-clipped=0.0
+2024-08-25 05:20:13,097 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=14986.666666666666, ans=0.007611594202898551
+2024-08-25 05:20:51,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=15093.333333333334, ans=0.125
+2024-08-25 05:20:54,634 INFO [train.py:1114] (2/4) Epoch 2, batch 350, loss[loss=0.2934, simple_loss=0.326, pruned_loss=0.09399, ctc_loss=0.1823, over 19768.00 frames. ], tot_loss[loss=0.3519, simple_loss=0.3636, pruned_loss=0.1233, ctc_loss=0.2343, over 3190772.48 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:20:59,201 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=15146.666666666666, ans=0.125
+2024-08-25 05:20:59,226 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=15146.666666666666, ans=0.125
+2024-08-25 05:21:09,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=15200.0, ans=0.025
+2024-08-25 05:21:16,482 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=15253.333333333334, ans=0.125
+2024-08-25 05:21:44,113 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=15360.0, ans=0.125
+2024-08-25 05:21:50,980 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=15360.0, ans=0.36240000000000006
+2024-08-25 05:22:06,916 INFO [train.py:1114] (2/4) Epoch 2, batch 400, loss[loss=0.3478, simple_loss=0.3664, pruned_loss=0.1195, ctc_loss=0.2253, over 19488.00 frames. ], tot_loss[loss=0.3513, simple_loss=0.3632, pruned_loss=0.123, ctc_loss=0.2333, over 3343165.54 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:22:10,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=15413.333333333334, ans=0.3605333333333334
+2024-08-25 05:22:13,367 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.03 vs. limit=13.280000000000001
+2024-08-25 05:22:20,574 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.895e+02 2.189e+02 2.528e+02 4.758e+02, threshold=4.379e+02, percent-clipped=2.0
+2024-08-25 05:22:26,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=15520.0, ans=0.3568
+2024-08-25 05:22:35,306 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.39 vs. limit=13.32
+2024-08-25 05:22:40,373 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.16 vs. limit=10.229333333333333
+2024-08-25 05:22:57,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=15626.666666666666, ans=0.09899494936611666
+2024-08-25 05:23:46,273 INFO [train.py:1114] (2/4) Epoch 2, batch 450, loss[loss=0.3573, simple_loss=0.3789, pruned_loss=0.1219, ctc_loss=0.2297, over 19608.00 frames. ], tot_loss[loss=0.3493, simple_loss=0.362, pruned_loss=0.122, ctc_loss=0.2313, over 3449550.98 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:23:50,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=15680.0, ans=0.35120000000000007
+2024-08-25 05:23:55,601 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.59 vs. limit=13.4
+2024-08-25 05:23:59,184 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.22 vs. limit=13.4
+2024-08-25 05:23:59,819 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=15733.333333333334, ans=0.05446666666666666
+2024-08-25 05:24:06,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15786.666666666666, ans=0.14213333333333333
+2024-08-25 05:24:07,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=15786.666666666666, ans=0.00743768115942029
+2024-08-25 05:24:15,288 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=15840.0, ans=0.1416
+2024-08-25 05:24:33,004 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=15893.333333333334, ans=0.007414492753623188
+2024-08-25 05:24:37,913 INFO [train.py:1114] (2/4) Epoch 2, batch 500, loss[loss=0.3674, simple_loss=0.3812, pruned_loss=0.1283, ctc_loss=0.2421, over 19707.00 frames. ], tot_loss[loss=0.3469, simple_loss=0.3608, pruned_loss=0.1207, ctc_loss=0.2292, over 3545511.59 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:24:38,167 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=15946.666666666666, ans=0.00022222222222222088
+2024-08-25 05:25:45,407 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=15946.666666666666, ans=0.125
+2024-08-25 05:26:05,497 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.778e+02 2.035e+02 2.349e+02 4.286e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-25 05:26:09,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=16000.0, ans=0.33999999999999997
+2024-08-25 05:26:24,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=16106.666666666666, ans=0.125
+2024-08-25 05:26:29,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=16160.0, ans=0.0
+2024-08-25 05:26:33,527 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.94 vs. limit=13.559999999999999
+2024-08-25 05:26:35,946 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=16160.0, ans=0.0
+2024-08-25 05:26:49,257 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=16160.0, ans=0.125
+2024-08-25 05:26:49,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=16160.0, ans=0.33440000000000003
+2024-08-25 05:26:53,745 INFO [train.py:1114] (2/4) Epoch 2, batch 550, loss[loss=0.3568, simple_loss=0.373, pruned_loss=0.123, ctc_loss=0.2365, over 19245.00 frames. ], tot_loss[loss=0.3468, simple_loss=0.3607, pruned_loss=0.1206, ctc_loss=0.2288, over 3607977.97 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:27:02,534 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.11 vs. limit=13.58
+2024-08-25 05:27:04,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=16213.333333333334, ans=0.025
+2024-08-25 05:27:04,216 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16213.333333333334, ans=0.13786666666666667
+2024-08-25 05:27:28,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=16320.0, ans=0.125
+2024-08-25 05:27:36,112 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=16373.333333333334, ans=0.3269333333333333
+2024-08-25 05:28:05,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 05:28:14,723 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:28:19,446 INFO [train.py:1114] (2/4) Epoch 2, batch 600, loss[loss=0.3703, simple_loss=0.384, pruned_loss=0.1296, ctc_loss=0.2436, over 19326.00 frames. ], tot_loss[loss=0.3462, simple_loss=0.3607, pruned_loss=0.1203, ctc_loss=0.2276, over 3665666.24 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:28:21,000 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.87 vs. limit=19.86
+2024-08-25 05:28:28,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=16480.0, ans=0.125
+2024-08-25 05:28:34,469 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.917e+02 2.183e+02 2.770e+02 8.189e+02, threshold=4.366e+02, percent-clipped=5.0
+2024-08-25 05:28:54,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=16640.0, ans=0.0
+2024-08-25 05:29:14,160 INFO [train.py:1114] (2/4) Epoch 2, batch 650, loss[loss=0.3229, simple_loss=0.3458, pruned_loss=0.1074, ctc_loss=0.2129, over 19772.00 frames. ], tot_loss[loss=0.3428, simple_loss=0.3585, pruned_loss=0.1186, ctc_loss=0.2246, over 3716440.71 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:29:20,629 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.39 vs. limit=13.780000000000001
+2024-08-25 05:31:12,981 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=16746.666666666668, ans=0.125
+2024-08-25 05:31:32,645 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=16853.333333333332, ans=0.125
+2024-08-25 05:31:51,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=16906.666666666668, ans=0.0
+2024-08-25 05:31:54,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=16906.666666666668, ans=0.125
+2024-08-25 05:32:01,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=16960.0, ans=0.125
+2024-08-25 05:32:02,873 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.64 vs. limit=13.86
+2024-08-25 05:32:06,448 INFO [train.py:1114] (2/4) Epoch 2, batch 700, loss[loss=0.3142, simple_loss=0.3367, pruned_loss=0.1049, ctc_loss=0.2048, over 19727.00 frames. ], tot_loss[loss=0.3428, simple_loss=0.3588, pruned_loss=0.1185, ctc_loss=0.2243, over 3748435.88 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:32:47,851 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.759e+02 2.005e+02 2.359e+02 5.033e+02, threshold=4.011e+02, percent-clipped=2.0
+2024-08-25 05:32:57,179 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=17120.0, ans=0.30080000000000007
+2024-08-25 05:33:09,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=17173.333333333332, ans=0.1282666666666667
+2024-08-25 05:33:21,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=17226.666666666668, ans=0.125
+2024-08-25 05:33:28,051 INFO [train.py:1114] (2/4) Epoch 2, batch 750, loss[loss=0.3284, simple_loss=0.3625, pruned_loss=0.1065, ctc_loss=0.2029, over 19498.00 frames. ], tot_loss[loss=0.3411, simple_loss=0.3575, pruned_loss=0.1178, ctc_loss=0.2226, over 3774949.39 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:35:14,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=17280.0, ans=0.0
+2024-08-25 05:36:53,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=17440.0, ans=0.125
+2024-08-25 05:36:59,230 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.74 vs. limit=14.04
+2024-08-25 05:37:37,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=17493.333333333332, ans=0.125
+2024-08-25 05:37:40,887 INFO [train.py:1114] (2/4) Epoch 2, batch 800, loss[loss=0.3183, simple_loss=0.337, pruned_loss=0.1107, ctc_loss=0.1954, over 19417.00 frames. ], tot_loss[loss=0.3396, simple_loss=0.3566, pruned_loss=0.1171, ctc_loss=0.2213, over 3796406.38 frames. ], batch size: 48, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 05:37:55,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=17546.666666666668, ans=20.66
+2024-08-25 05:38:04,839 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=17600.0, ans=0.28400000000000003
+2024-08-25 05:38:06,528 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.845e+02 2.130e+02 2.517e+02 4.310e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 05:38:15,799 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.49 vs. limit=14.120000000000001
+2024-08-25 05:38:46,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=17760.0, ans=0.0070086956521739135
+2024-08-25 05:38:48,687 INFO [train.py:1114] (2/4) Epoch 2, batch 850, loss[loss=0.3508, simple_loss=0.3651, pruned_loss=0.1236, ctc_loss=0.2233, over 19649.00 frames. ], tot_loss[loss=0.338, simple_loss=0.3554, pruned_loss=0.1163, ctc_loss=0.2197, over 3815173.93 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 16.0
+2024-08-25 05:38:49,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=17813.333333333332, ans=0.006997101449275362
+2024-08-25 05:38:53,604 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=17813.333333333332, ans=0.2765333333333334
+2024-08-25 05:39:00,457 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:39:58,479 INFO [train.py:1114] (2/4) Epoch 2, batch 900, loss[loss=0.2929, simple_loss=0.3148, pruned_loss=0.09927, ctc_loss=0.1811, over 19420.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3548, pruned_loss=0.116, ctc_loss=0.2189, over 3818259.72 frames. ], batch size: 48, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:40:01,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=18080.0, ans=0.025
+2024-08-25 05:40:08,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=18080.0, ans=0.006939130434782609
+2024-08-25 05:40:19,556 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.852e+02 2.189e+02 2.703e+02 9.878e+02, threshold=4.378e+02, percent-clipped=3.0
+2024-08-25 05:40:33,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=18240.0, ans=0.125
+2024-08-25 05:40:55,555 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=18240.0, ans=0.125
+2024-08-25 05:40:55,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=18240.0, ans=0.11760000000000001
+2024-08-25 05:41:14,127 INFO [train.py:1114] (2/4) Epoch 2, batch 950, loss[loss=0.3012, simple_loss=0.3334, pruned_loss=0.09728, ctc_loss=0.1862, over 19501.00 frames. ], tot_loss[loss=0.3375, simple_loss=0.3553, pruned_loss=0.1161, ctc_loss=0.2189, over 3818663.54 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:41:51,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=18506.666666666668, ans=0.1149333333333333
+2024-08-25 05:41:57,554 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.67 vs. limit=21.42
+2024-08-25 05:42:06,454 INFO [train.py:1114] (2/4) Epoch 2, batch 1000, loss[loss=0.2808, simple_loss=0.3207, pruned_loss=0.08747, ctc_loss=0.165, over 19851.00 frames. ], tot_loss[loss=0.3378, simple_loss=0.3558, pruned_loss=0.1161, ctc_loss=0.2188, over 3815174.09 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:42:21,841 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.72 vs. limit=5.0
+2024-08-25 05:42:26,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=18666.666666666668, ans=0.025
+2024-08-25 05:42:41,290 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.839e+02 2.030e+02 2.416e+02 3.488e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-25 05:42:44,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=18720.0, ans=0.125
+2024-08-25 05:42:49,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=18720.0, ans=0.125
+2024-08-25 05:42:52,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=18720.0, ans=10.0
+2024-08-25 05:42:53,575 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=18720.0, ans=0.125
+2024-08-25 05:42:58,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=18773.333333333332, ans=0.125
+2024-08-25 05:43:05,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=18773.333333333332, ans=0.125
+2024-08-25 05:43:06,435 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=5.29 vs. limit=11.530666666666667
+2024-08-25 05:43:12,223 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.21 vs. limit=14.559999999999999
+2024-08-25 05:43:16,659 INFO [train.py:1114] (2/4) Epoch 2, batch 1050, loss[loss=0.3101, simple_loss=0.3499, pruned_loss=0.09718, ctc_loss=0.1899, over 19847.00 frames. ], tot_loss[loss=0.336, simple_loss=0.3546, pruned_loss=0.1152, ctc_loss=0.2174, over 3821771.57 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:43:52,708 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=18986.666666666668, ans=0.125
+2024-08-25 05:44:04,289 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=19040.0, ans=0.006730434782608695
+2024-08-25 05:44:22,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff3.min_abs, batch_count=19146.666666666668, ans=0.2
+2024-08-25 05:44:23,169 INFO [train.py:1114] (2/4) Epoch 2, batch 1100, loss[loss=0.2902, simple_loss=0.3323, pruned_loss=0.09012, ctc_loss=0.1699, over 19590.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.353, pruned_loss=0.1137, ctc_loss=0.2147, over 3831001.47 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:44:23,637 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.85 vs. limit=14.68
+2024-08-25 05:44:25,326 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=19146.666666666668, ans=0.05
+2024-08-25 05:44:26,443 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.73 vs. limit=9.786666666666667
+2024-08-25 05:44:48,513 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.777e+02 2.009e+02 2.448e+02 3.967e+02, threshold=4.019e+02, percent-clipped=0.0
+2024-08-25 05:45:07,337 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.44 vs. limit=14.74
+2024-08-25 05:45:31,302 INFO [train.py:1114] (2/4) Epoch 2, batch 1150, loss[loss=0.3135, simple_loss=0.3415, pruned_loss=0.1044, ctc_loss=0.1917, over 19605.00 frames. ], tot_loss[loss=0.3323, simple_loss=0.3524, pruned_loss=0.1133, ctc_loss=0.2138, over 3830964.62 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:45:31,466 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=19413.333333333332, ans=0.0
+2024-08-25 05:45:54,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=19520.0, ans=0.025
+2024-08-25 05:46:06,326 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=19573.333333333332, ans=0.125
+2024-08-25 05:47:34,835 INFO [train.py:1114] (2/4) Epoch 2, batch 1200, loss[loss=0.3216, simple_loss=0.3522, pruned_loss=0.1061, ctc_loss=0.1966, over 19853.00 frames. ], tot_loss[loss=0.3341, simple_loss=0.3541, pruned_loss=0.1141, ctc_loss=0.2147, over 3825842.79 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 05:47:43,835 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=19733.333333333332, ans=0.0
+2024-08-25 05:47:50,318 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.798e+02 2.208e+02 2.852e+02 1.698e+03, threshold=4.415e+02, percent-clipped=3.0
+2024-08-25 05:48:05,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=19786.666666666668, ans=0.125
+2024-08-25 05:48:22,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=19840.0, ans=0.006556521739130435
+2024-08-25 05:48:31,468 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.55 vs. limit=14.96
+2024-08-25 05:48:40,335 INFO [train.py:1114] (2/4) Epoch 2, batch 1250, loss[loss=0.3176, simple_loss=0.3476, pruned_loss=0.104, ctc_loss=0.199, over 19508.00 frames. ], tot_loss[loss=0.3325, simple_loss=0.3537, pruned_loss=0.1131, ctc_loss=0.2128, over 3843963.19 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:48:50,507 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=20000.0, ans=0.125
+2024-08-25 05:48:57,120 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=20000.0, ans=0.025
+2024-08-25 05:49:00,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=20000.0, ans=0.125
+2024-08-25 05:49:07,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20053.333333333332, ans=0.1
+2024-08-25 05:49:19,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=20106.666666666668, ans=0.125
+2024-08-25 05:49:27,150 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=20160.0, ans=0.125
+2024-08-25 05:49:34,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=20160.0, ans=0.0
+2024-08-25 05:49:36,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=20213.333333333332, ans=0.125
+2024-08-25 05:49:37,284 INFO [train.py:1114] (2/4) Epoch 2, batch 1300, loss[loss=0.3737, simple_loss=0.379, pruned_loss=0.1336, ctc_loss=0.2534, over 18856.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3523, pruned_loss=0.1121, ctc_loss=0.2105, over 3848656.50 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:49:42,452 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.75 vs. limit=10.0
+2024-08-25 05:49:52,768 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.771e+02 1.898e+02 2.175e+02 3.765e+02, threshold=3.796e+02, percent-clipped=0.0
+2024-08-25 05:49:53,034 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:50:12,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=20373.333333333332, ans=0.2
+2024-08-25 05:50:25,297 INFO [train.py:1114] (2/4) Epoch 2, batch 1350, loss[loss=0.3151, simple_loss=0.3473, pruned_loss=0.1025, ctc_loss=0.195, over 19769.00 frames. ], tot_loss[loss=0.3288, simple_loss=0.3514, pruned_loss=0.1113, ctc_loss=0.209, over 3859181.43 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:50:32,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=20480.0, ans=0.125
+2024-08-25 05:50:52,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=20586.666666666668, ans=0.125
+2024-08-25 05:50:53,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=20586.666666666668, ans=0.0
+2024-08-25 05:51:04,578 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.34 vs. limit=15.0
+2024-08-25 05:51:17,450 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=20693.333333333332, ans=0.0
+2024-08-25 05:51:19,088 INFO [train.py:1114] (2/4) Epoch 2, batch 1400, loss[loss=0.2717, simple_loss=0.2979, pruned_loss=0.08991, ctc_loss=0.1642, over 19692.00 frames. ], tot_loss[loss=0.3283, simple_loss=0.3511, pruned_loss=0.1111, ctc_loss=0.2084, over 3865934.75 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:51:26,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=20746.666666666668, ans=0.125
+2024-08-25 05:51:29,215 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.97 vs. limit=12.0
+2024-08-25 05:51:34,336 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.933e+02 2.205e+02 2.519e+02 3.569e+02, threshold=4.410e+02, percent-clipped=0.0
+2024-08-25 05:51:36,477 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=20800.0, ans=0.0
+2024-08-25 05:51:44,079 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=20853.333333333332, ans=0.006336231884057971
+2024-08-25 05:51:46,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=20853.333333333332, ans=0.0
+2024-08-25 05:52:02,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=20960.0, ans=0.1
+2024-08-25 05:52:05,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=20960.0, ans=0.1
+2024-08-25 05:52:06,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=20960.0, ans=0.125
+2024-08-25 05:52:09,363 INFO [train.py:1114] (2/4) Epoch 2, batch 1450, loss[loss=0.3385, simple_loss=0.3651, pruned_loss=0.1131, ctc_loss=0.2144, over 19650.00 frames. ], tot_loss[loss=0.3281, simple_loss=0.3511, pruned_loss=0.111, ctc_loss=0.2078, over 3864306.48 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:52:10,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=21013.333333333332, ans=0.0
+2024-08-25 05:52:11,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=21013.333333333332, ans=0.05
+2024-08-25 05:52:27,913 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.18 vs. limit=15.0
+2024-08-25 05:52:28,731 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21120.0, ans=0.1
+2024-08-25 05:52:33,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=21120.0, ans=0.125
+2024-08-25 05:52:35,287 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=21120.0, ans=0.125
+2024-08-25 05:52:45,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=21173.333333333332, ans=0.0
+2024-08-25 05:52:53,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=21226.666666666668, ans=0.125
+2024-08-25 05:52:57,000 INFO [train.py:1114] (2/4) Epoch 2, batch 1500, loss[loss=0.3251, simple_loss=0.3481, pruned_loss=0.1095, ctc_loss=0.2079, over 19595.00 frames. ], tot_loss[loss=0.3278, simple_loss=0.3511, pruned_loss=0.1108, ctc_loss=0.2072, over 3863794.10 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:53:14,527 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=21333.333333333332, ans=0.04949747468305833
+2024-08-25 05:53:15,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=21333.333333333332, ans=0.0
+2024-08-25 05:53:17,224 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.832e+02 2.087e+02 2.558e+02 5.212e+02, threshold=4.175e+02, percent-clipped=3.0
+2024-08-25 05:53:32,356 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=21386.666666666668, ans=0.2
+2024-08-25 05:53:37,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=21440.0, ans=0.0
+2024-08-25 05:53:47,820 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.47 vs. limit=22.5
+2024-08-25 05:53:48,239 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=21493.333333333332, ans=10.0
+2024-08-25 05:54:04,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=21493.333333333332, ans=0.125
+2024-08-25 05:54:05,953 INFO [train.py:1114] (2/4) Epoch 2, batch 1550, loss[loss=0.3418, simple_loss=0.3667, pruned_loss=0.1159, ctc_loss=0.2129, over 19601.00 frames. ], tot_loss[loss=0.3282, simple_loss=0.3513, pruned_loss=0.1111, ctc_loss=0.2077, over 3847595.28 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 05:54:12,051 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.75 vs. limit=15.0
+2024-08-25 05:54:20,565 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=25.10 vs. limit=22.5
+2024-08-25 05:54:21,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21600.0, ans=0.1
+2024-08-25 05:54:36,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=21600.0, ans=0.0061739130434782605
+2024-08-25 05:54:47,209 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=21653.333333333332, ans=0.125
+2024-08-25 05:54:49,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=21706.666666666668, ans=0.125
+2024-08-25 05:55:11,748 INFO [train.py:1114] (2/4) Epoch 2, batch 1600, loss[loss=0.3288, simple_loss=0.3572, pruned_loss=0.1088, ctc_loss=0.2068, over 19842.00 frames. ], tot_loss[loss=0.3278, simple_loss=0.3508, pruned_loss=0.1109, ctc_loss=0.2075, over 3836408.51 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 05:55:14,050 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=21813.333333333332, ans=0.0
+2024-08-25 05:55:17,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=21813.333333333332, ans=0.5
+2024-08-25 05:55:21,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=21866.666666666668, ans=0.0
+2024-08-25 05:55:23,213 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.96 vs. limit=12.0
+2024-08-25 05:55:24,848 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:55:28,919 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.30 vs. limit=15.0
+2024-08-25 05:55:32,422 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.812e+02 2.122e+02 2.604e+02 4.336e+02, threshold=4.244e+02, percent-clipped=2.0
+2024-08-25 05:55:42,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=21920.0, ans=0.125
+2024-08-25 05:55:42,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21920.0, ans=0.1
+2024-08-25 05:56:00,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21973.333333333332, ans=0.1
+2024-08-25 05:56:13,288 INFO [train.py:1114] (2/4) Epoch 2, batch 1650, loss[loss=0.3657, simple_loss=0.3876, pruned_loss=0.1249, ctc_loss=0.2345, over 19633.00 frames. ], tot_loss[loss=0.3276, simple_loss=0.3506, pruned_loss=0.1108, ctc_loss=0.2075, over 3832415.29 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 16.0
+2024-08-25 05:56:14,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22080.0, ans=0.1
+2024-08-25 05:56:14,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=22080.0, ans=0.07
+2024-08-25 05:56:26,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=22133.333333333332, ans=0.07
+2024-08-25 05:56:46,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22240.0, ans=0.1
+2024-08-25 05:57:07,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=22293.333333333332, ans=0.0
+2024-08-25 05:57:11,972 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=22346.666666666668, ans=0.0
+2024-08-25 05:57:12,872 INFO [train.py:1114] (2/4) Epoch 2, batch 1700, loss[loss=0.2912, simple_loss=0.3173, pruned_loss=0.0958, ctc_loss=0.1836, over 19693.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3495, pruned_loss=0.1096, ctc_loss=0.2056, over 3846676.64 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:57:29,326 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.791e+02 2.005e+02 2.338e+02 3.555e+02, threshold=4.010e+02, percent-clipped=0.0
+2024-08-25 05:57:46,086 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.24 vs. limit=12.0
+2024-08-25 05:58:05,050 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.26 vs. limit=22.5
+2024-08-25 05:58:11,238 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=22560.0, ans=0.005965217391304348
+2024-08-25 05:58:30,043 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.53 vs. limit=15.0
+2024-08-25 05:58:34,080 INFO [train.py:1114] (2/4) Epoch 2, batch 1750, loss[loss=0.2778, simple_loss=0.311, pruned_loss=0.08884, ctc_loss=0.1672, over 19650.00 frames. ], tot_loss[loss=0.3237, simple_loss=0.348, pruned_loss=0.1089, ctc_loss=0.204, over 3850482.56 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:58:35,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22613.333333333332, ans=0.1
+2024-08-25 05:58:51,153 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=7.70 vs. limit=12.0
+2024-08-25 05:58:52,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=22720.0, ans=0.0
+2024-08-25 05:58:56,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=22720.0, ans=0.125
+2024-08-25 05:59:07,238 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.22 vs. limit=15.0
+2024-08-25 05:59:07,309 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.82 vs. limit=15.0
+2024-08-25 05:59:13,639 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=10.34 vs. limit=15.0
+2024-08-25 05:59:14,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=22773.333333333332, ans=0.125
+2024-08-25 05:59:14,514 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.89 vs. limit=12.0
+2024-08-25 05:59:15,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=22826.666666666668, ans=0.125
+2024-08-25 05:59:24,718 INFO [train.py:1114] (2/4) Epoch 2, batch 1800, loss[loss=0.3311, simple_loss=0.3582, pruned_loss=0.1111, ctc_loss=0.2043, over 19615.00 frames. ], tot_loss[loss=0.3238, simple_loss=0.3481, pruned_loss=0.1089, ctc_loss=0.2043, over 3853017.99 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 05:59:26,723 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=22880.0, ans=0.125
+2024-08-25 05:59:27,869 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.03 vs. limit=15.0
+2024-08-25 05:59:32,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=22880.0, ans=0.125
+2024-08-25 05:59:39,810 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.812e+02 2.002e+02 2.312e+02 3.839e+02, threshold=4.004e+02, percent-clipped=0.0
+2024-08-25 05:59:49,791 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=22986.666666666668, ans=0.0
+2024-08-25 06:00:08,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=23093.333333333332, ans=0.025
+2024-08-25 06:00:12,427 INFO [train.py:1114] (2/4) Epoch 2, batch 1850, loss[loss=0.3455, simple_loss=0.368, pruned_loss=0.1163, ctc_loss=0.2264, over 19588.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.3475, pruned_loss=0.1083, ctc_loss=0.2029, over 3854273.72 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 06:00:20,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23200.0, ans=0.1
+2024-08-25 06:00:29,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=23253.333333333332, ans=0.1
+2024-08-25 06:00:54,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=23360.0, ans=0.125
+2024-08-25 06:00:55,392 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:00:59,798 INFO [train.py:1114] (2/4) Epoch 2, batch 1900, loss[loss=0.3452, simple_loss=0.3651, pruned_loss=0.1183, ctc_loss=0.2215, over 19637.00 frames. ], tot_loss[loss=0.3233, simple_loss=0.3482, pruned_loss=0.1085, ctc_loss=0.2035, over 3859639.00 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 16.0
+2024-08-25 06:01:08,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23413.333333333332, ans=0.1
+2024-08-25 06:01:18,893 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.814e+02 2.067e+02 2.451e+02 4.716e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-25 06:01:34,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.73 vs. limit=15.0
+2024-08-25 06:01:52,057 INFO [train.py:1114] (2/4) Epoch 2, batch 1950, loss[loss=0.3023, simple_loss=0.3369, pruned_loss=0.0987, ctc_loss=0.1756, over 19582.00 frames. ], tot_loss[loss=0.3232, simple_loss=0.3488, pruned_loss=0.1082, ctc_loss=0.2029, over 3868670.13 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 16.0
+2024-08-25 06:02:03,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=23733.333333333332, ans=0.125
+2024-08-25 06:02:22,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=23840.0, ans=0.0
+2024-08-25 06:02:24,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=23840.0, ans=0.125
+2024-08-25 06:02:40,716 INFO [train.py:1114] (2/4) Epoch 2, batch 2000, loss[loss=0.2752, simple_loss=0.3036, pruned_loss=0.08907, ctc_loss=0.1716, over 19678.00 frames. ], tot_loss[loss=0.3236, simple_loss=0.3491, pruned_loss=0.1084, ctc_loss=0.2034, over 3854662.64 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 06:02:40,879 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 06:02:43,472 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.48 vs. limit=22.5
+2024-08-25 06:02:46,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=23946.666666666668, ans=0.0
+2024-08-25 06:02:49,855 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=23946.666666666668, ans=0.07
+2024-08-25 06:02:53,534 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=24000.0, ans=0.125
+2024-08-25 06:02:57,868 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.781e+02 1.996e+02 2.377e+02 5.355e+02, threshold=3.992e+02, percent-clipped=1.0
+2024-08-25 06:03:26,917 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.83 vs. limit=10.0
+2024-08-25 06:03:29,347 INFO [train.py:1114] (2/4) Epoch 2, batch 2050, loss[loss=0.2615, simple_loss=0.2948, pruned_loss=0.08284, ctc_loss=0.1562, over 19714.00 frames. ], tot_loss[loss=0.3216, simple_loss=0.3472, pruned_loss=0.1076, ctc_loss=0.2019, over 3850224.89 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:03:38,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=24266.666666666668, ans=0.0
+2024-08-25 06:04:03,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=24373.333333333332, ans=0.035
+2024-08-25 06:04:13,114 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=24480.0, ans=0.2
+2024-08-25 06:04:17,646 INFO [train.py:1114] (2/4) Epoch 2, batch 2100, loss[loss=0.3017, simple_loss=0.3379, pruned_loss=0.09692, ctc_loss=0.1791, over 19778.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3455, pruned_loss=0.1063, ctc_loss=0.1996, over 3858887.10 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:04:24,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=24480.0, ans=0.125
+2024-08-25 06:04:33,038 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.823e+02 2.012e+02 2.259e+02 3.531e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-25 06:04:34,396 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.95 vs. limit=15.0
+2024-08-25 06:04:42,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=24586.666666666668, ans=0.125
+2024-08-25 06:04:48,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=24640.0, ans=0.125
+2024-08-25 06:04:56,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=24693.333333333332, ans=0.0
+2024-08-25 06:05:02,134 INFO [train.py:1114] (2/4) Epoch 2, batch 2150, loss[loss=0.2801, simple_loss=0.3242, pruned_loss=0.0844, ctc_loss=0.1679, over 19844.00 frames. ], tot_loss[loss=0.3166, simple_loss=0.3439, pruned_loss=0.1051, ctc_loss=0.1974, over 3869343.48 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 06:05:06,742 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=24746.666666666668, ans=0.0
+2024-08-25 06:05:29,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=24853.333333333332, ans=0.125
+2024-08-25 06:05:33,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=24906.666666666668, ans=0.005455072463768116
+2024-08-25 06:05:36,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:39,523 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=24906.666666666668, ans=0.09899494936611666
+2024-08-25 06:05:40,523 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:05:43,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:44,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=24906.666666666668, ans=0.125
+2024-08-25 06:05:44,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=24960.0, ans=0.125
+2024-08-25 06:05:56,433 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.62 vs. limit=10.0
+2024-08-25 06:05:58,445 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.38 vs. limit=15.0
+2024-08-25 06:05:59,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=24960.0, ans=0.125
+2024-08-25 06:05:59,320 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.72 vs. limit=22.5
+2024-08-25 06:06:01,074 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=11.44 vs. limit=15.0
+2024-08-25 06:06:01,432 INFO [train.py:1114] (2/4) Epoch 2, batch 2200, loss[loss=0.3075, simple_loss=0.3474, pruned_loss=0.0974, ctc_loss=0.1823, over 19581.00 frames. ], tot_loss[loss=0.3166, simple_loss=0.344, pruned_loss=0.1051, ctc_loss=0.1971, over 3867352.30 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:06:02,789 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.42 vs. limit=22.5
+2024-08-25 06:06:06,712 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25013.333333333332, ans=0.1
+2024-08-25 06:06:11,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25013.333333333332, ans=0.1
+2024-08-25 06:06:12,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=25013.333333333332, ans=0.005431884057971015
+2024-08-25 06:06:12,334 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.07 vs. limit=15.0
+2024-08-25 06:06:13,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=25066.666666666668, ans=0.125
+2024-08-25 06:06:15,516 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=25066.666666666668, ans=0.09899494936611666
+2024-08-25 06:06:16,717 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=31.20 vs. limit=22.5
+2024-08-25 06:06:19,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25066.666666666668, ans=0.1
+2024-08-25 06:06:25,284 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.924e+02 2.286e+02 2.709e+02 6.222e+02, threshold=4.573e+02, percent-clipped=4.0
+2024-08-25 06:06:34,249 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=25120.0, ans=0.2
+2024-08-25 06:06:37,801 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:06:54,562 INFO [train.py:1114] (2/4) Epoch 2, batch 2250, loss[loss=0.3233, simple_loss=0.3563, pruned_loss=0.1056, ctc_loss=0.1977, over 19623.00 frames. ], tot_loss[loss=0.3168, simple_loss=0.3443, pruned_loss=0.1052, ctc_loss=0.1972, over 3866805.21 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:07:06,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=25333.333333333332, ans=0.04949747468305833
+2024-08-25 06:07:06,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25333.333333333332, ans=0.1
+2024-08-25 06:07:08,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=25333.333333333332, ans=0.025
+2024-08-25 06:07:10,802 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.02 vs. limit=15.0
+2024-08-25 06:07:12,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 06:07:33,667 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.81 vs. limit=22.5
+2024-08-25 06:07:40,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=25546.666666666668, ans=0.125
+2024-08-25 06:07:41,088 INFO [train.py:1114] (2/4) Epoch 2, batch 2300, loss[loss=0.2961, simple_loss=0.3224, pruned_loss=0.09704, ctc_loss=0.1893, over 19496.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3437, pruned_loss=0.1054, ctc_loss=0.1973, over 3860956.39 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 32.0
+2024-08-25 06:07:58,725 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 1.775e+02 2.049e+02 2.504e+02 6.120e+02, threshold=4.097e+02, percent-clipped=1.0
+2024-08-25 06:08:02,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=25653.333333333332, ans=0.005292753623188406
+2024-08-25 06:08:06,027 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.98 vs. limit=15.0
+2024-08-25 06:08:10,361 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=25653.333333333332, ans=15.0
+2024-08-25 06:08:10,986 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25706.666666666668, ans=0.1
+2024-08-25 06:08:20,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=25760.0, ans=0.125
+2024-08-25 06:08:20,978 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.47 vs. limit=15.0
+2024-08-25 06:08:29,085 INFO [train.py:1114] (2/4) Epoch 2, batch 2350, loss[loss=0.3297, simple_loss=0.3521, pruned_loss=0.1123, ctc_loss=0.207, over 19700.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3438, pruned_loss=0.1057, ctc_loss=0.1977, over 3863339.85 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:08:36,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=25813.333333333332, ans=0.1
+2024-08-25 06:09:03,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=25973.333333333332, ans=0.125
+2024-08-25 06:09:28,459 INFO [train.py:1114] (2/4) Epoch 2, batch 2400, loss[loss=0.3512, simple_loss=0.3654, pruned_loss=0.1216, ctc_loss=0.2344, over 19256.00 frames. ], tot_loss[loss=0.3192, simple_loss=0.3459, pruned_loss=0.1064, ctc_loss=0.1991, over 3857805.16 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:09:43,436 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.803e+02 2.129e+02 2.459e+02 5.388e+02, threshold=4.257e+02, percent-clipped=1.0
+2024-08-25 06:09:44,635 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=26133.333333333332, ans=0.125
+2024-08-25 06:09:52,139 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=10.51 vs. limit=10.0
+2024-08-25 06:10:02,090 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=18.06 vs. limit=15.0
+2024-08-25 06:10:03,230 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.35 vs. limit=15.0
+2024-08-25 06:10:12,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=26293.333333333332, ans=0.0
+2024-08-25 06:10:14,654 INFO [train.py:1114] (2/4) Epoch 2, batch 2450, loss[loss=0.4336, simple_loss=0.4037, pruned_loss=0.1681, ctc_loss=0.3184, over 12860.00 frames. ], tot_loss[loss=0.3297, simple_loss=0.3521, pruned_loss=0.1118, ctc_loss=0.2091, over 3730769.54 frames. ], batch size: 141, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 06:10:22,536 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.78 vs. limit=15.0
+2024-08-25 06:10:23,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=26400.0, ans=0.05
+2024-08-25 06:10:33,929 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.49 vs. limit=22.5
+2024-08-25 06:10:38,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=26453.333333333332, ans=0.125
+2024-08-25 06:11:35,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=26554.666666666668, ans=0.07
+2024-08-25 06:11:53,137 INFO [train.py:1114] (2/4) Epoch 3, batch 0, loss[loss=0.3167, simple_loss=0.3391, pruned_loss=0.1057, ctc_loss=0.2076, over 19414.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3391, pruned_loss=0.1057, ctc_loss=0.2076, over 19414.00 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 06:11:53,508 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-25 06:12:07,835 INFO [train.py:1146] (2/4) Epoch 3, validation: loss=0.2565, simple_loss=0.3309, pruned_loss=0.06653, ctc_loss=0.1228, over 944034.00 frames.
+2024-08-25 06:12:07,837 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 13259MB
+2024-08-25 06:12:09,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=26554.666666666668, ans=0.2
+2024-08-25 06:12:18,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=26554.666666666668, ans=0.2
+2024-08-25 06:12:39,512 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.20 vs. limit=22.5
+2024-08-25 06:12:49,110 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.86 vs. limit=15.0
+2024-08-25 06:13:57,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=26608.0, ans=0.125
+2024-08-25 06:15:43,327 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=26661.333333333332, ans=0.05
+2024-08-25 06:16:00,080 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.983e+02 2.286e+02 2.644e+02 3.774e+02, threshold=4.572e+02, percent-clipped=0.0
+2024-08-25 06:17:18,318 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=26714.666666666668, ans=0.025
+2024-08-25 06:17:18,357 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=26714.666666666668, ans=0.125
+2024-08-25 06:18:46,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=26768.0, ans=0.0
+2024-08-25 06:22:59,721 INFO [train.py:1114] (2/4) Epoch 3, batch 50, loss[loss=0.2665, simple_loss=0.3024, pruned_loss=0.08399, ctc_loss=0.1566, over 19698.00 frames. ], tot_loss[loss=0.3235, simple_loss=0.349, pruned_loss=0.1082, ctc_loss=0.2039, over 844960.58 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:23:30,436 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:29:36,917 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.52 vs. limit=15.0
+2024-08-25 06:30:18,382 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=26874.666666666668, ans=0.125
+2024-08-25 06:30:23,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=26874.666666666668, ans=0.5
+2024-08-25 06:44:24,666 INFO [train.py:1114] (2/4) Epoch 3, batch 100, loss[loss=0.2764, simple_loss=0.3186, pruned_loss=0.08518, ctc_loss=0.1598, over 19726.00 frames. ], tot_loss[loss=0.321, simple_loss=0.3483, pruned_loss=0.1067, ctc_loss=0.2008, over 1500110.56 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:47:32,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=27141.333333333332, ans=22.5
+2024-08-25 06:47:48,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=27194.666666666668, ans=0.00495768115942029
+2024-08-25 06:48:13,158 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.88 vs. limit=22.5
+2024-08-25 06:48:15,486 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.744e+02 2.032e+02 2.291e+02 1.205e+03, threshold=4.063e+02, percent-clipped=1.0
+2024-08-25 06:50:43,549 INFO [train.py:1114] (2/4) Epoch 3, batch 150, loss[loss=0.2966, simple_loss=0.3201, pruned_loss=0.09999, ctc_loss=0.1827, over 19704.00 frames. ], tot_loss[loss=0.3166, simple_loss=0.3454, pruned_loss=0.1045, ctc_loss=0.1969, over 2028676.42 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 06:51:50,560 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.38 vs. limit=15.0
+2024-08-25 06:53:48,859 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.07 vs. limit=22.5
+2024-08-25 06:54:31,942 INFO [train.py:1114] (2/4) Epoch 3, batch 200, loss[loss=0.3397, simple_loss=0.3568, pruned_loss=0.118, ctc_loss=0.2162, over 18180.00 frames. ], tot_loss[loss=0.3137, simple_loss=0.3426, pruned_loss=0.1035, ctc_loss=0.1947, over 2435563.59 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:54:47,686 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=27621.333333333332, ans=0.025
+2024-08-25 06:55:04,020 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.69 vs. limit=15.0
+2024-08-25 06:56:00,392 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.731e+02 1.977e+02 2.205e+02 3.305e+02, threshold=3.953e+02, percent-clipped=0.0
+2024-08-25 06:56:20,576 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:56:34,801 INFO [train.py:1114] (2/4) Epoch 3, batch 250, loss[loss=0.3128, simple_loss=0.3443, pruned_loss=0.1012, ctc_loss=0.1969, over 19405.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.3419, pruned_loss=0.1024, ctc_loss=0.1925, over 2755297.25 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:57:28,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 06:57:32,473 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27994.666666666668, ans=0.1
+2024-08-25 07:00:43,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=28048.0, ans=0.1
+2024-08-25 07:02:41,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=28048.0, ans=0.125
+2024-08-25 07:02:46,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=28048.0, ans=0.2
+2024-08-25 07:02:49,403 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 07:03:29,188 INFO [train.py:1114] (2/4) Epoch 3, batch 300, loss[loss=0.2881, simple_loss=0.3347, pruned_loss=0.08668, ctc_loss=0.1705, over 19532.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.3411, pruned_loss=0.1019, ctc_loss=0.191, over 2999990.84 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:04:04,897 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=28208.0, ans=0.2
+2024-08-25 07:04:13,298 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.53 vs. limit=22.5
+2024-08-25 07:04:27,097 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=28208.0, ans=0.1
+2024-08-25 07:04:44,395 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.724e+02 1.968e+02 2.265e+02 3.417e+02, threshold=3.936e+02, percent-clipped=0.0
+2024-08-25 07:04:44,928 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys.whitening_limit, batch_count=28314.666666666668, ans=6.0
+2024-08-25 07:05:18,322 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.93 vs. limit=15.0
+2024-08-25 07:05:36,011 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.49 vs. limit=12.0
+2024-08-25 07:05:49,898 INFO [train.py:1114] (2/4) Epoch 3, batch 350, loss[loss=0.2745, simple_loss=0.3075, pruned_loss=0.08835, ctc_loss=0.1621, over 19709.00 frames. ], tot_loss[loss=0.3099, simple_loss=0.3407, pruned_loss=0.1015, ctc_loss=0.1903, over 3190487.02 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:05:51,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=28421.333333333332, ans=0.125
+2024-08-25 07:05:57,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=28421.333333333332, ans=0.125
+2024-08-25 07:07:29,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=28528.0, ans=0.2
+2024-08-25 07:08:01,163 INFO [train.py:1114] (2/4) Epoch 3, batch 400, loss[loss=0.307, simple_loss=0.3411, pruned_loss=0.0981, ctc_loss=0.192, over 19501.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3399, pruned_loss=0.1011, ctc_loss=0.1894, over 3341906.26 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 07:08:01,863 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.54 vs. limit=12.0
+2024-08-25 07:08:15,058 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28741.333333333332, ans=0.1
+2024-08-25 07:08:20,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=28741.333333333332, ans=0.0
+2024-08-25 07:08:24,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=28794.666666666668, ans=0.1
+2024-08-25 07:08:42,255 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.762e+02 1.982e+02 2.336e+02 5.420e+02, threshold=3.963e+02, percent-clipped=2.0
+2024-08-25 07:08:54,966 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=28901.333333333332, ans=0.025
+2024-08-25 07:09:04,150 INFO [train.py:1114] (2/4) Epoch 3, batch 450, loss[loss=0.2905, simple_loss=0.3334, pruned_loss=0.09053, ctc_loss=0.1664, over 19616.00 frames. ], tot_loss[loss=0.3087, simple_loss=0.3397, pruned_loss=0.101, ctc_loss=0.1892, over 3450576.39 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:09:08,221 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=28954.666666666668, ans=0.05
+2024-08-25 07:09:27,085 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.21 vs. limit=22.5
+2024-08-25 07:09:35,494 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.72 vs. limit=15.0
+2024-08-25 07:09:42,223 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.33 vs. limit=15.0
+2024-08-25 07:09:56,838 INFO [train.py:1114] (2/4) Epoch 3, batch 500, loss[loss=0.3002, simple_loss=0.3425, pruned_loss=0.09416, ctc_loss=0.1737, over 19718.00 frames. ], tot_loss[loss=0.3072, simple_loss=0.3386, pruned_loss=0.1003, ctc_loss=0.1879, over 3546365.53 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:10:16,724 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.39 vs. limit=22.5
+2024-08-25 07:10:24,023 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=29274.666666666668, ans=0.1
+2024-08-25 07:10:43,377 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.753e+02 1.966e+02 2.327e+02 4.047e+02, threshold=3.932e+02, percent-clipped=2.0
+2024-08-25 07:11:10,797 INFO [train.py:1114] (2/4) Epoch 3, batch 550, loss[loss=0.3457, simple_loss=0.3663, pruned_loss=0.1173, ctc_loss=0.2262, over 19277.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3394, pruned_loss=0.1014, ctc_loss=0.1896, over 3609812.95 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:11:37,166 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=29488.0, ans=0.125
+2024-08-25 07:11:39,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=29488.0, ans=0.125
+2024-08-25 07:11:40,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=29488.0, ans=0.125
+2024-08-25 07:12:01,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=29541.333333333332, ans=0.125
+2024-08-25 07:12:01,710 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.60 vs. limit=15.0
+2024-08-25 07:12:14,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=29648.0, ans=0.125
+2024-08-25 07:12:40,858 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=29701.333333333332, ans=0.125
+2024-08-25 07:12:40,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=29701.333333333332, ans=0.0
+2024-08-25 07:12:53,138 INFO [train.py:1114] (2/4) Epoch 3, batch 600, loss[loss=0.3417, simple_loss=0.367, pruned_loss=0.1162, ctc_loss=0.2098, over 19378.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3387, pruned_loss=0.1006, ctc_loss=0.1882, over 3667534.10 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:12:53,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=29754.666666666668, ans=0.1
+2024-08-25 07:13:11,072 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.09 vs. limit=6.0
+2024-08-25 07:13:31,908 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.85 vs. limit=15.0
+2024-08-25 07:13:37,725 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.812e+02 2.009e+02 2.360e+02 5.731e+02, threshold=4.017e+02, percent-clipped=3.0
+2024-08-25 07:13:38,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=29914.666666666668, ans=0.004366376811594202
+2024-08-25 07:13:53,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=29968.0, ans=0.125
+2024-08-25 07:13:54,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=29968.0, ans=0.125
+2024-08-25 07:14:02,709 INFO [train.py:1114] (2/4) Epoch 3, batch 650, loss[loss=0.2939, simple_loss=0.3292, pruned_loss=0.09426, ctc_loss=0.1751, over 19773.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3368, pruned_loss=0.09938, ctc_loss=0.1861, over 3717508.31 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 16.0
+2024-08-25 07:14:07,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=30021.333333333332, ans=0.035
+2024-08-25 07:14:07,771 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 07:14:10,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 07:14:15,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=30074.666666666668, ans=0.025
+2024-08-25 07:14:16,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=30074.666666666668, ans=0.125
+2024-08-25 07:14:26,735 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=30128.0, ans=0.125
+2024-08-25 07:14:39,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=30181.333333333332, ans=0.0
+2024-08-25 07:14:42,695 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=30181.333333333332, ans=0.125
+2024-08-25 07:14:55,113 INFO [train.py:1114] (2/4) Epoch 3, batch 700, loss[loss=0.2639, simple_loss=0.3106, pruned_loss=0.0776, ctc_loss=0.1549, over 19726.00 frames. ], tot_loss[loss=0.306, simple_loss=0.3377, pruned_loss=0.09983, ctc_loss=0.1868, over 3749069.74 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:14:57,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=30288.0, ans=0.004285217391304348
+2024-08-25 07:14:59,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=30288.0, ans=0.0
+2024-08-25 07:15:02,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=30288.0, ans=0.125
+2024-08-25 07:15:03,070 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=30288.0, ans=0.125
+2024-08-25 07:15:18,916 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.17 vs. limit=6.0
+2024-08-25 07:15:22,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=30394.666666666668, ans=0.2
+2024-08-25 07:15:28,453 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.846e+02 1.998e+02 2.505e+02 9.071e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-25 07:15:29,972 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=30448.0, ans=0.125
+2024-08-25 07:15:37,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=30448.0, ans=0.0
+2024-08-25 07:15:58,577 INFO [train.py:1114] (2/4) Epoch 3, batch 750, loss[loss=0.3048, simple_loss=0.3415, pruned_loss=0.09795, ctc_loss=0.1804, over 19501.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.3373, pruned_loss=0.09959, ctc_loss=0.1863, over 3774217.50 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:16:09,166 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.55 vs. limit=12.0
+2024-08-25 07:16:22,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=30608.0, ans=0.125
+2024-08-25 07:16:29,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=30608.0, ans=0.0
+2024-08-25 07:16:31,072 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.36 vs. limit=15.0
+2024-08-25 07:16:46,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=30661.333333333332, ans=0.125
+2024-08-25 07:19:10,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30714.666666666668, ans=0.1
+2024-08-25 07:21:16,435 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.63 vs. limit=22.5
+2024-08-25 07:34:42,530 INFO [train.py:1114] (2/4) Epoch 3, batch 800, loss[loss=0.2981, simple_loss=0.3287, pruned_loss=0.09765, ctc_loss=0.1807, over 19404.00 frames. ], tot_loss[loss=0.305, simple_loss=0.337, pruned_loss=0.09931, ctc_loss=0.1858, over 3795776.34 frames. ], batch size: 48, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 08:02:14,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=30928.0, ans=0.125
+2024-08-25 08:02:40,819 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.761e+02 1.928e+02 2.233e+02 3.899e+02, threshold=3.856e+02, percent-clipped=0.0
+2024-08-25 08:12:59,505 INFO [train.py:1114] (2/4) Epoch 3, batch 850, loss[loss=0.3225, simple_loss=0.3588, pruned_loss=0.1034, ctc_loss=0.1983, over 19648.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3363, pruned_loss=0.09882, ctc_loss=0.185, over 3815617.43 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 08:27:21,828 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.21 vs. limit=10.0
+2024-08-25 08:41:05,512 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=31301.333333333332, ans=0.125
+2024-08-25 08:44:41,572 INFO [train.py:1114] (2/4) Epoch 3, batch 900, loss[loss=0.2713, simple_loss=0.3109, pruned_loss=0.08433, ctc_loss=0.1577, over 19410.00 frames. ], tot_loss[loss=0.305, simple_loss=0.337, pruned_loss=0.09936, ctc_loss=0.1858, over 3818244.52 frames. ], batch size: 48, lr: 3.72e-02, grad_scale: 32.0
+2024-08-25 08:48:07,062 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=31354.666666666668, ans=0.125
+2024-08-25 08:49:30,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=31408.0, ans=0.0
+2024-08-25 08:51:42,119 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 08:57:47,299 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.05 vs. limit=15.0
+2024-08-25 08:57:54,734 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.748e+02 1.945e+02 2.250e+02 3.446e+02, threshold=3.889e+02, percent-clipped=0.0
+2024-08-25 09:02:00,037 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=31514.666666666668, ans=0.0
+2024-08-25 09:02:10,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=31568.0, ans=0.125
+2024-08-25 09:04:09,972 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=31568.0, ans=0.025
+2024-08-25 09:05:03,363 INFO [train.py:1114] (2/4) Epoch 3, batch 950, loss[loss=0.2828, simple_loss=0.3224, pruned_loss=0.08785, ctc_loss=0.169, over 19489.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.3372, pruned_loss=0.09957, ctc_loss=0.1862, over 3820235.48 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 32.0
+2024-08-25 09:12:46,453 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.77 vs. limit=22.5
+2024-08-25 09:16:15,316 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=31728.0, ans=0.07
+2024-08-25 09:20:47,855 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.11 vs. limit=10.0
+2024-08-25 09:23:03,906 INFO [train.py:1114] (2/4) Epoch 3, batch 1000, loss[loss=0.2808, simple_loss=0.3189, pruned_loss=0.08767, ctc_loss=0.1682, over 19853.00 frames. ], tot_loss[loss=0.3071, simple_loss=0.3384, pruned_loss=0.1004, ctc_loss=0.1874, over 3815699.50 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 16.0
+2024-08-25 09:23:58,121 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.43 vs. limit=10.0
+2024-08-25 09:25:53,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=31941.333333333332, ans=0.125
+2024-08-25 09:27:51,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=31994.666666666668, ans=0.0039142028985507255
+2024-08-25 09:28:26,222 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.83 vs. limit=22.5
+2024-08-25 09:29:07,852 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.873e+02 2.237e+02 2.628e+02 7.664e+02, threshold=4.475e+02, percent-clipped=6.0
+2024-08-25 09:32:11,568 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.39 vs. limit=15.0
+2024-08-25 09:32:27,623 INFO [train.py:1114] (2/4) Epoch 3, batch 1050, loss[loss=0.3104, simple_loss=0.3565, pruned_loss=0.09611, ctc_loss=0.1805, over 19842.00 frames. ], tot_loss[loss=0.3059, simple_loss=0.3374, pruned_loss=0.09986, ctc_loss=0.1867, over 3821755.97 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:32:27,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=32154.666666666668, ans=0.125
+2024-08-25 09:32:33,680 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.93 vs. limit=6.0
+2024-08-25 09:32:49,469 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.91 vs. limit=12.0
+2024-08-25 09:33:24,311 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.65 vs. limit=15.0
+2024-08-25 09:33:26,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=32208.0, ans=0.125
+2024-08-25 09:33:26,781 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.25 vs. limit=22.5
+2024-08-25 09:36:56,745 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.23 vs. limit=12.0
+2024-08-25 09:41:10,309 INFO [train.py:1114] (2/4) Epoch 3, batch 1100, loss[loss=0.2756, simple_loss=0.3191, pruned_loss=0.08411, ctc_loss=0.16, over 19593.00 frames. ], tot_loss[loss=0.3048, simple_loss=0.3368, pruned_loss=0.09921, ctc_loss=0.1858, over 3828945.65 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:41:42,535 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.93 vs. limit=12.0
+2024-08-25 09:42:35,187 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=32528.0, ans=0.0
+2024-08-25 09:42:47,757 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.41 vs. limit=22.5
+2024-08-25 09:43:23,047 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.681e+02 1.943e+02 2.357e+02 4.515e+02, threshold=3.887e+02, percent-clipped=1.0
+2024-08-25 09:45:13,178 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=32634.666666666668, ans=0.2
+2024-08-25 09:45:15,743 INFO [train.py:1114] (2/4) Epoch 3, batch 1150, loss[loss=0.2753, simple_loss=0.319, pruned_loss=0.0836, ctc_loss=0.161, over 19602.00 frames. ], tot_loss[loss=0.3043, simple_loss=0.3362, pruned_loss=0.09913, ctc_loss=0.1855, over 3828331.19 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 16.0
+2024-08-25 09:52:14,803 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=32741.333333333332, ans=0.2
+2024-08-25 09:52:17,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=32741.333333333332, ans=0.003751884057971014
+2024-08-25 09:54:46,768 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.44 vs. limit=15.0
+2024-08-25 09:55:21,116 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.11 vs. limit=15.0
+2024-08-25 09:55:21,205 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=13.25 vs. limit=15.0
+2024-08-25 09:55:29,632 INFO [train.py:1114] (2/4) Epoch 3, batch 1200, loss[loss=0.3172, simple_loss=0.3477, pruned_loss=0.1037, ctc_loss=0.1984, over 19838.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.3373, pruned_loss=0.09953, ctc_loss=0.1862, over 3824201.59 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:55:30,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=32954.666666666664, ans=0.025
+2024-08-25 09:55:41,838 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=32954.666666666664, ans=0.1
+2024-08-25 09:56:31,120 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.705e+02 1.941e+02 2.201e+02 4.168e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-25 09:57:41,499 INFO [train.py:1114] (2/4) Epoch 3, batch 1250, loss[loss=0.3428, simple_loss=0.3651, pruned_loss=0.1179, ctc_loss=0.212, over 19526.00 frames. ], tot_loss[loss=0.3043, simple_loss=0.337, pruned_loss=0.0988, ctc_loss=0.1848, over 3841897.57 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:58:19,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33328.0, ans=0.1
+2024-08-25 09:58:20,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=33328.0, ans=0.125
+2024-08-25 09:58:26,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=33328.0, ans=0.0036243478260869558
+2024-08-25 09:58:42,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=33381.333333333336, ans=0.125
+2024-08-25 09:58:56,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33434.666666666664, ans=0.1
+2024-08-25 09:58:56,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=33434.666666666664, ans=0.2
+2024-08-25 09:59:04,141 INFO [train.py:1114] (2/4) Epoch 3, batch 1300, loss[loss=0.3289, simple_loss=0.3536, pruned_loss=0.1106, ctc_loss=0.2074, over 18863.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3362, pruned_loss=0.09844, ctc_loss=0.1839, over 3846381.76 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 09:59:23,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=33541.333333333336, ans=0.0035779710144927535
+2024-08-25 09:59:46,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=33648.0, ans=0.0
+2024-08-25 09:59:47,448 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=33648.0, ans=0.125
+2024-08-25 09:59:48,221 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.674e+02 1.887e+02 2.172e+02 3.368e+02, threshold=3.774e+02, percent-clipped=0.0
+2024-08-25 10:00:05,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=33701.333333333336, ans=0.125
+2024-08-25 10:00:09,477 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=33701.333333333336, ans=0.125
+2024-08-25 10:00:14,398 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=33701.333333333336, ans=0.125
+2024-08-25 10:00:19,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=33701.333333333336, ans=0.003543188405797101
+2024-08-25 10:00:22,488 INFO [train.py:1114] (2/4) Epoch 3, batch 1350, loss[loss=0.2857, simple_loss=0.3324, pruned_loss=0.08732, ctc_loss=0.1607, over 19784.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3349, pruned_loss=0.09742, ctc_loss=0.1823, over 3857682.71 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 10:00:36,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33754.666666666664, ans=0.1
+2024-08-25 10:00:39,467 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33754.666666666664, ans=0.1
+2024-08-25 10:00:45,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=33754.666666666664, ans=0.1
+2024-08-25 10:01:34,073 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33914.666666666664, ans=0.1
+2024-08-25 10:02:01,550 INFO [train.py:1114] (2/4) Epoch 3, batch 1400, loss[loss=0.2555, simple_loss=0.2931, pruned_loss=0.08046, ctc_loss=0.1425, over 19666.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3347, pruned_loss=0.09731, ctc_loss=0.1818, over 3864933.31 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 32.0
+2024-08-25 10:02:15,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34074.666666666664, ans=0.1
+2024-08-25 10:02:16,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=34074.666666666664, ans=0.125
+2024-08-25 10:02:27,788 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=34074.666666666664, ans=0.125
+2024-08-25 10:02:28,753 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=34128.0, ans=0.0
+2024-08-25 10:02:31,936 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.91 vs. limit=15.0
+2024-08-25 10:02:33,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.09 vs. limit=15.0
+2024-08-25 10:02:40,372 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=19.87 vs. limit=22.5
+2024-08-25 10:02:45,294 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.896e+02 2.159e+02 2.528e+02 3.857e+02, threshold=4.318e+02, percent-clipped=1.0
+2024-08-25 10:03:00,970 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=34181.333333333336, ans=0.003438840579710144
+2024-08-25 10:03:12,609 INFO [train.py:1114] (2/4) Epoch 3, batch 1450, loss[loss=0.3395, simple_loss=0.3694, pruned_loss=0.1126, ctc_loss=0.2112, over 19668.00 frames. ], tot_loss[loss=0.3015, simple_loss=0.3352, pruned_loss=0.0974, ctc_loss=0.1822, over 3863076.85 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:03:20,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=34288.0, ans=0.0
+2024-08-25 10:03:23,590 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.96 vs. limit=6.0
+2024-08-25 10:04:06,585 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.35 vs. limit=15.0
+2024-08-25 10:04:21,626 INFO [train.py:1114] (2/4) Epoch 3, batch 1500, loss[loss=0.2966, simple_loss=0.3417, pruned_loss=0.09247, ctc_loss=0.1664, over 19571.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3354, pruned_loss=0.09744, ctc_loss=0.1822, over 3863439.76 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:04:24,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=34554.666666666664, ans=0.0033576811594202907
+2024-08-25 10:04:25,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34554.666666666664, ans=0.1
+2024-08-25 10:04:50,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34661.333333333336, ans=0.1
+2024-08-25 10:04:53,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34661.333333333336, ans=0.1
+2024-08-25 10:05:09,919 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.778e+02 1.971e+02 2.353e+02 5.678e+02, threshold=3.941e+02, percent-clipped=1.0
+2024-08-25 10:05:19,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.88 vs. limit=12.0
+2024-08-25 10:05:29,603 INFO [train.py:1114] (2/4) Epoch 3, batch 1550, loss[loss=0.3454, simple_loss=0.3692, pruned_loss=0.1183, ctc_loss=0.2128, over 19608.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.3352, pruned_loss=0.09776, ctc_loss=0.1826, over 3847786.25 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:05:32,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=34821.333333333336, ans=0.125
+2024-08-25 10:05:55,872 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=34928.0, ans=0.125
+2024-08-25 10:06:06,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34981.333333333336, ans=0.1
+2024-08-25 10:06:15,346 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:06:42,358 INFO [train.py:1114] (2/4) Epoch 3, batch 1600, loss[loss=0.3042, simple_loss=0.3351, pruned_loss=0.09798, ctc_loss=0.1936, over 19818.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3345, pruned_loss=0.09755, ctc_loss=0.1824, over 3836873.53 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:06:44,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=35088.0, ans=0.125
+2024-08-25 10:07:47,931 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 1.752e+02 2.032e+02 2.338e+02 4.104e+02, threshold=4.064e+02, percent-clipped=1.0
+2024-08-25 10:07:49,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=35248.0, ans=0.2
+2024-08-25 10:08:06,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=35354.666666666664, ans=0.07
+2024-08-25 10:08:06,825 INFO [train.py:1114] (2/4) Epoch 3, batch 1650, loss[loss=0.3226, simple_loss=0.3538, pruned_loss=0.1052, ctc_loss=0.2023, over 19662.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3344, pruned_loss=0.0973, ctc_loss=0.182, over 3832747.85 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 10:08:16,581 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=35408.0, ans=0.003172173913043479
+2024-08-25 10:08:20,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=35408.0, ans=0.2
+2024-08-25 10:08:29,313 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.84 vs. limit=6.0
+2024-08-25 10:08:48,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=35514.666666666664, ans=0.5
+2024-08-25 10:08:50,733 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=35514.666666666664, ans=0.125
+2024-08-25 10:08:56,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=35568.0, ans=0.0031373913043478262
+2024-08-25 10:09:04,853 INFO [train.py:1114] (2/4) Epoch 3, batch 1700, loss[loss=0.2609, simple_loss=0.2898, pruned_loss=0.08397, ctc_loss=0.1601, over 19677.00 frames. ], tot_loss[loss=0.2992, simple_loss=0.3335, pruned_loss=0.09638, ctc_loss=0.1806, over 3846372.42 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:09:19,733 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.61 vs. limit=15.0
+2024-08-25 10:09:52,818 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.835e+02 2.022e+02 2.484e+02 3.793e+02, threshold=4.043e+02, percent-clipped=0.0
+2024-08-25 10:10:09,478 INFO [train.py:1114] (2/4) Epoch 3, batch 1750, loss[loss=0.2934, simple_loss=0.323, pruned_loss=0.09797, ctc_loss=0.1699, over 19688.00 frames. ], tot_loss[loss=0.2988, simple_loss=0.3331, pruned_loss=0.09622, ctc_loss=0.1801, over 3849674.85 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:10:25,812 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=35941.333333333336, ans=0.125
+2024-08-25 10:10:26,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=35941.333333333336, ans=0.125
+2024-08-25 10:10:27,042 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.14 vs. limit=22.5
+2024-08-25 10:10:27,107 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.10 vs. limit=10.0
+2024-08-25 10:10:40,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=35994.666666666664, ans=0.125
+2024-08-25 10:11:14,077 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.22 vs. limit=15.0
+2024-08-25 10:11:15,195 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.71 vs. limit=10.0
+2024-08-25 10:11:20,669 INFO [train.py:1114] (2/4) Epoch 3, batch 1800, loss[loss=0.2817, simple_loss=0.3363, pruned_loss=0.08143, ctc_loss=0.1607, over 19622.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3333, pruned_loss=0.09605, ctc_loss=0.1801, over 3851657.31 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:11:24,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=36154.666666666664, ans=0.0
+2024-08-25 10:11:24,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=36154.666666666664, ans=0.0
+2024-08-25 10:11:52,949 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.758e+02 2.042e+02 2.396e+02 4.902e+02, threshold=4.083e+02, percent-clipped=1.0
+2024-08-25 10:12:02,546 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=36368.0, ans=0.002963478260869565
+2024-08-25 10:12:25,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=36368.0, ans=0.002963478260869565
+2024-08-25 10:12:33,957 INFO [train.py:1114] (2/4) Epoch 3, batch 1850, loss[loss=0.3028, simple_loss=0.3431, pruned_loss=0.09531, ctc_loss=0.1795, over 19591.00 frames. ], tot_loss[loss=0.2984, simple_loss=0.3331, pruned_loss=0.09586, ctc_loss=0.1797, over 3855442.08 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:12:44,795 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.41 vs. limit=15.0
+2024-08-25 10:13:00,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=36528.0, ans=0.125
+2024-08-25 10:13:01,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=36528.0, ans=0.1
+2024-08-25 10:13:09,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=36528.0, ans=0.0
+2024-08-25 10:13:10,443 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=36528.0, ans=0.2
+2024-08-25 10:13:24,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=36634.666666666664, ans=0.125
+2024-08-25 10:13:31,593 INFO [train.py:1114] (2/4) Epoch 3, batch 1900, loss[loss=0.3199, simple_loss=0.3555, pruned_loss=0.1026, ctc_loss=0.1978, over 19664.00 frames. ], tot_loss[loss=0.2995, simple_loss=0.3343, pruned_loss=0.0963, ctc_loss=0.1803, over 3860442.41 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 32.0
+2024-08-25 10:13:55,382 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.84 vs. limit=15.0
+2024-08-25 10:14:07,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36741.333333333336, ans=0.1
+2024-08-25 10:14:09,336 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.50 vs. limit=15.0
+2024-08-25 10:14:15,172 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 10:14:21,545 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:14:25,001 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 10:14:29,198 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.725e+02 1.920e+02 2.285e+02 4.448e+02, threshold=3.841e+02, percent-clipped=1.0
+2024-08-25 10:14:33,375 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=36848.0, ans=0.125
+2024-08-25 10:14:38,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=36901.333333333336, ans=0.2
+2024-08-25 10:14:54,740 INFO [train.py:1114] (2/4) Epoch 3, batch 1950, loss[loss=0.2622, simple_loss=0.308, pruned_loss=0.07936, ctc_loss=0.1443, over 19572.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3355, pruned_loss=0.09658, ctc_loss=0.1804, over 3869401.96 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:15:03,472 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=36954.666666666664, ans=0.125
+2024-08-25 10:15:22,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=37008.0, ans=0.2
+2024-08-25 10:15:29,927 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:15:51,815 INFO [train.py:1114] (2/4) Epoch 3, batch 2000, loss[loss=0.2591, simple_loss=0.293, pruned_loss=0.08259, ctc_loss=0.1498, over 19680.00 frames. ], tot_loss[loss=0.3025, simple_loss=0.3368, pruned_loss=0.09769, ctc_loss=0.1823, over 3853775.13 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:16:00,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=37274.666666666664, ans=0.125
+2024-08-25 10:16:01,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37274.666666666664, ans=0.1
+2024-08-25 10:16:01,896 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=37274.666666666664, ans=0.0
+2024-08-25 10:16:04,531 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.03 vs. limit=12.0
+2024-08-25 10:16:06,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=37274.666666666664, ans=0.95
+2024-08-25 10:16:08,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=37328.0, ans=0.0
+2024-08-25 10:16:17,906 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.12 vs. limit=15.0
+2024-08-25 10:16:19,098 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.904e+02 2.146e+02 2.566e+02 5.347e+02, threshold=4.293e+02, percent-clipped=2.0
+2024-08-25 10:16:21,353 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.49 vs. limit=15.0
+2024-08-25 10:16:42,412 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=37434.666666666664, ans=0.125
+2024-08-25 10:16:42,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=37434.666666666664, ans=0.04949747468305833
+2024-08-25 10:16:44,032 INFO [train.py:1114] (2/4) Epoch 3, batch 2050, loss[loss=0.2715, simple_loss=0.3095, pruned_loss=0.08432, ctc_loss=0.1618, over 19733.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3357, pruned_loss=0.09757, ctc_loss=0.1818, over 3850119.84 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 32.0
+2024-08-25 10:16:47,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=37488.0, ans=0.125
+2024-08-25 10:17:00,356 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=37488.0, ans=0.2
+2024-08-25 10:17:27,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=37648.0, ans=0.125
+2024-08-25 10:17:43,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=37701.333333333336, ans=0.125
+2024-08-25 10:17:53,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=37701.333333333336, ans=0.2
+2024-08-25 10:17:56,061 INFO [train.py:1114] (2/4) Epoch 3, batch 2100, loss[loss=0.311, simple_loss=0.3474, pruned_loss=0.09896, ctc_loss=0.1916, over 19774.00 frames. ], tot_loss[loss=0.2993, simple_loss=0.334, pruned_loss=0.09631, ctc_loss=0.1797, over 3857857.51 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 10:18:06,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37754.666666666664, ans=0.1
+2024-08-25 10:18:17,436 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.89 vs. limit=10.0
+2024-08-25 10:18:39,858 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=37808.0, ans=0.125
+2024-08-25 10:18:41,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=37808.0, ans=0.2
+2024-08-25 10:19:08,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=37861.333333333336, ans=0.0
+2024-08-25 10:19:12,578 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.87 vs. limit=15.0
+2024-08-25 10:19:20,764 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.776e+02 1.971e+02 2.246e+02 3.814e+02, threshold=3.941e+02, percent-clipped=0.0
+2024-08-25 10:19:40,467 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=37914.666666666664, ans=0.5
+2024-08-25 10:20:09,494 INFO [train.py:1114] (2/4) Epoch 3, batch 2150, loss[loss=0.2844, simple_loss=0.3242, pruned_loss=0.08904, ctc_loss=0.1664, over 19867.00 frames. ], tot_loss[loss=0.2977, simple_loss=0.3328, pruned_loss=0.09563, ctc_loss=0.1783, over 3869079.99 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 10:20:14,058 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=38021.333333333336, ans=0.125
+2024-08-25 10:20:54,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=38181.333333333336, ans=0.125
+2024-08-25 10:21:11,141 INFO [train.py:1114] (2/4) Epoch 3, batch 2200, loss[loss=0.3043, simple_loss=0.3417, pruned_loss=0.09691, ctc_loss=0.1825, over 19555.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3329, pruned_loss=0.09542, ctc_loss=0.1782, over 3867006.86 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:21:42,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=38341.333333333336, ans=0.125
+2024-08-25 10:21:42,604 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=38341.333333333336, ans=0.0
+2024-08-25 10:21:56,467 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.750e+02 1.922e+02 2.212e+02 3.187e+02, threshold=3.844e+02, percent-clipped=0.0
+2024-08-25 10:22:12,039 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=38448.0, ans=0.125
+2024-08-25 10:22:28,993 INFO [train.py:1114] (2/4) Epoch 3, batch 2250, loss[loss=0.2701, simple_loss=0.3114, pruned_loss=0.08298, ctc_loss=0.157, over 19614.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3324, pruned_loss=0.09505, ctc_loss=0.1775, over 3866779.90 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:22:29,134 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=38554.666666666664, ans=0.0
+2024-08-25 10:22:32,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=38554.666666666664, ans=0.002488115942028987
+2024-08-25 10:22:33,378 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38554.666666666664, ans=0.1
+2024-08-25 10:23:08,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=38714.666666666664, ans=0.125
+2024-08-25 10:23:08,939 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=38714.666666666664, ans=0.0
+2024-08-25 10:23:09,714 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=38714.666666666664, ans=0.2
+2024-08-25 10:23:19,677 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=38714.666666666664, ans=0.2
+2024-08-25 10:23:32,593 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38768.0, ans=0.1
+2024-08-25 10:23:40,906 INFO [train.py:1114] (2/4) Epoch 3, batch 2300, loss[loss=0.2764, simple_loss=0.3146, pruned_loss=0.08597, ctc_loss=0.1657, over 19505.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.3314, pruned_loss=0.09501, ctc_loss=0.1775, over 3860084.14 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:24:04,493 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=38928.0, ans=0.125
+2024-08-25 10:24:07,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=38928.0, ans=0.125
+2024-08-25 10:24:09,721 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=38928.0, ans=0.0024069565217391302
+2024-08-25 10:24:13,778 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.820e+02 2.030e+02 2.354e+02 3.970e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-25 10:24:46,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=39034.666666666664, ans=0.0
+2024-08-25 10:24:48,813 INFO [train.py:1114] (2/4) Epoch 3, batch 2350, loss[loss=0.3344, simple_loss=0.3625, pruned_loss=0.1119, ctc_loss=0.2062, over 19681.00 frames. ], tot_loss[loss=0.2959, simple_loss=0.3312, pruned_loss=0.09485, ctc_loss=0.1771, over 3862694.85 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:24:58,012 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.00 vs. limit=22.5
+2024-08-25 10:25:10,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=39141.333333333336, ans=0.002360579710144927
+2024-08-25 10:25:23,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=39194.666666666664, ans=0.125
+2024-08-25 10:25:34,672 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.09 vs. limit=15.0
+2024-08-25 10:25:52,380 INFO [train.py:1114] (2/4) Epoch 3, batch 2400, loss[loss=0.3279, simple_loss=0.3617, pruned_loss=0.1068, ctc_loss=0.2012, over 19295.00 frames. ], tot_loss[loss=0.2986, simple_loss=0.3337, pruned_loss=0.09594, ctc_loss=0.1791, over 3858731.18 frames. ], batch size: 71, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 10:25:55,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=39354.666666666664, ans=0.125
+2024-08-25 10:26:04,126 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.58 vs. limit=15.0
+2024-08-25 10:26:17,365 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.17 vs. limit=22.5
+2024-08-25 10:26:41,533 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.777e+02 2.047e+02 2.383e+02 4.291e+02, threshold=4.094e+02, percent-clipped=1.0
+2024-08-25 10:27:11,342 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.68 vs. limit=6.0
+2024-08-25 10:27:14,153 INFO [train.py:1114] (2/4) Epoch 3, batch 2450, loss[loss=0.4046, simple_loss=0.3818, pruned_loss=0.1548, ctc_loss=0.2945, over 12663.00 frames. ], tot_loss[loss=0.3087, simple_loss=0.3395, pruned_loss=0.1012, ctc_loss=0.1887, over 3731513.20 frames. ], batch size: 140, lr: 3.53e-02, grad_scale: 16.0
+2024-08-25 10:27:22,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=39621.333333333336, ans=0.125
+2024-08-25 10:27:25,897 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.61 vs. limit=6.0
+2024-08-25 10:27:47,360 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=7.19 vs. limit=12.0
+2024-08-25 10:27:52,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=39781.333333333336, ans=0.125
+2024-08-25 10:39:24,826 INFO [train.py:1050] (2/4) Caught exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=89707, OpType=ALLREDUCE, NumelIn=745, NumelOut=745, Timeout(ms)=600000) ran for 600005 milliseconds before timing out..
+2024-08-25 10:39:24,827 INFO [checkpoint.py:75] (2/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/bad-model-2.pt
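
The run above ended with an NCCL all-reduce hitting the 600000 ms collective timeout, after which the training script saved a `bad-model-2.pt` checkpoint for post-mortem inspection. When a run is expected to stall that long on some rank (slow storage, rank-local validation, and the like), one mitigation is to raise the watchdog timeout when the process group is created. A minimal sketch with PyTorch's distributed API; the 30-minute value is an illustrative choice, not taken from this run:

```python
# Sketch: raise the DDP/NCCL watchdog timeout above the 600000 ms seen in
# the log. Assumes the usual torchrun environment variables (MASTER_ADDR,
# RANK, WORLD_SIZE, ...) are set; the 30-minute value is an assumption.
from datetime import timedelta

import torch.distributed as dist

dist.init_process_group(
    backend="nccl",
    timeout=timedelta(minutes=30),
)
```
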
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-3 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-3
new file mode 100644
index 0000000000000000000000000000000000000000..f46c1060cd83d0acf2584e325356bee8d4dc542e
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-25-03-46-09-3
@@ -0,0 +1,1186 @@
+2024-08-25 03:46:09,309 INFO [train.py:1182] (3/4) Training started
+2024-08-25 03:46:09,310 INFO [train.py:1192] (3/4) Device: cuda:3
+2024-08-25 03:46:09,373 INFO [train.py:1210] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2649.int.cedar.computecanada.ca', 'IP address': '172.16.146.86'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
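
Since the configuration above sets 'use_fp16': True, the `grad_scale` values in the batch lines (moving in powers of two, from 1.0 down to 0.25 and later up to 32.0) come from dynamic loss scaling. A minimal self-contained sketch of the mechanism with `torch.cuda.amp.GradScaler`; the toy model and hyperparameters here are illustrative, and icefall's actual scaler configuration may differ:

```python
import torch

# Toy model/optimizer so the sketch runs end to end (needs a CUDA device).
device = "cuda"
model = torch.nn.Linear(10, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(
    growth_factor=2.0,     # double the scale after growth_interval clean steps
    backoff_factor=0.5,    # halve it whenever inf/nan gradients appear
    growth_interval=2000,  # PyTorch's default
)

for _ in range(3):
    x = torch.randn(8, 10, device=device)
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(x).square().mean()
    scaler.scale(loss).backward()  # backprop on the scaled loss
    scaler.step(optimizer)         # skips the step if gradients overflowed
    scaler.update()                # grows or backs off the scale
print(scaler.get_scale())
```
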
+2024-08-25 03:46:09,373 INFO [train.py:1212] (3/4) About to create model
+2024-08-25 03:46:10,409 INFO [train.py:1216] (3/4) Number of model parameters: 65805511
+2024-08-25 03:46:10,554 INFO [train.py:1231] (3/4) Using DDP
+2024-08-25 03:46:14,820 INFO [asr_datamodule.py:894] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:696] (3/4) Disable MUSAN
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:714] (3/4) Enable SpecAugment
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:715] (3/4) Time warp factor: 80
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:725] (3/4) Num frame mask: 10
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:738] (3/4) About to create train dataset
+2024-08-25 03:46:14,898 INFO [asr_datamodule.py:765] (3/4) Using DynamicBucketingSampler.
+2024-08-25 03:46:16,490 INFO [asr_datamodule.py:782] (3/4) About to create train dataloader
+2024-08-25 03:46:16,492 INFO [asr_datamodule.py:911] (3/4) About to get dev-clean cuts
+2024-08-25 03:46:16,584 INFO [asr_datamodule.py:918] (3/4) About to get dev-other cuts
+2024-08-25 03:46:16,612 INFO [asr_datamodule.py:814] (3/4) About to create dev dataset
+2024-08-25 03:46:16,952 INFO [asr_datamodule.py:831] (3/4) About to create dev dataloader
+2024-08-25 03:46:16,952 INFO [train.py:1435] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-25 03:50:49,730 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=512, metric=49.34 vs. limit=7.5
+2024-08-25 03:50:50,511 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 11590MB
+2024-08-25 03:50:51,641 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 11590MB
+2024-08-25 03:51:20,158 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 11590MB
+2024-08-25 03:51:21,404 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 11590MB
+2024-08-25 03:51:42,535 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=75.11 vs. limit=7.5
+2024-08-25 03:51:43,057 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 11590MB
+2024-08-25 03:51:43,892 INFO [scaling.py:1024] (3/4) Whitening: name=None, num_groups=1, num_channels=384, metric=90.53 vs. limit=4.0
+2024-08-25 03:51:44,351 INFO [train.py:1463] (3/4) Maximum memory allocated so far is 11590MB
+2024-08-25 03:53:11,522 INFO [train.py:1114] (3/4) Epoch 1, batch 0, loss[loss=8.668, simple_loss=7.015, pruned_loss=6.859, ctc_loss=4.827, over 19817.00 frames. ], tot_loss[loss=8.668, simple_loss=7.015, pruned_loss=6.859, ctc_loss=4.827, over 19817.00 frames. ], batch size: 49, lr: 2.25e-02, grad_scale: 1.0
+2024-08-25 03:53:11,522 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 03:53:26,569 INFO [train.py:1146] (3/4) Epoch 1, validation: loss=8.842, simple_loss=7.151, pruned_loss=6.961, ctc_loss=4.966, over 944034.00 frames.
+2024-08-25 03:53:26,569 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 11984MB
+2024-08-25 03:53:28,139 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.15 vs. limit=3.0
+2024-08-25 03:53:32,584 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=0.0, ans=0.5
+2024-08-25 03:53:38,716 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=0.0, ans=0.1
+2024-08-25 03:53:38,966 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.07 vs. limit=7.5
+2024-08-25 03:53:42,421 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=0.0, ans=0.2
+2024-08-25 03:53:42,734 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.59 vs. limit=7.5
+2024-08-25 03:54:08,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=0.0, ans=0.25
+2024-08-25 03:54:20,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53.333333333333336, ans=0.49333333333333335
+2024-08-25 03:54:36,706 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.008e+03 4.149e+03 4.360e+03 5.530e+03 5.553e+03, threshold=1.744e+04, percent-clipped=0.0
+2024-08-25 03:54:37,100 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=53.333333333333336, ans=5.033333333333333
+2024-08-25 03:54:50,793 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=252.23 vs. limit=7.52
+2024-08-25 03:55:20,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=106.66666666666667, ans=0.495
+2024-08-25 03:55:46,187 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.063e+03 1.598e+03 4.141e+03 5.530e+03 6.572e+03, threshold=1.656e+04, percent-clipped=0.0
+2024-08-25 03:55:46,764 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=44.43 vs. limit=7.54
+2024-08-25 03:55:55,734 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=202.79 vs. limit=7.54
+2024-08-25 03:57:06,789 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=86.83 vs. limit=7.54
+2024-08-25 03:57:07,911 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=108.49 vs. limit=7.54
+2024-08-25 03:57:29,188 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=27.35 vs. limit=5.04
+2024-08-25 04:00:12,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=213.33333333333334, ans=0.192
+2024-08-25 04:00:14,864 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.873e+02 1.048e+03 1.328e+03 4.149e+03 6.572e+03, threshold=5.310e+03, percent-clipped=0.0
+2024-08-25 04:00:16,947 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=213.33333333333334, ans=0.49
+2024-08-25 04:00:24,536 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=8.93 vs. limit=4.085333333333334
+2024-08-25 04:00:34,972 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=56.57 vs. limit=7.58
+2024-08-25 04:00:38,874 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=266.6666666666667, ans=0.235
+2024-08-25 04:00:39,862 INFO [train.py:1114] (3/4) Epoch 1, batch 50, loss[loss=1.633, simple_loss=1.088, pruned_loss=1.246, ctc_loss=2.033, over 19710.00 frames. ], tot_loss[loss=3.75, simple_loss=2.912, pruned_loss=2.557, ctc_loss=2.878, over 845595.77 frames. ], batch size: 47, lr: 2.48e-02, grad_scale: 0.25
+2024-08-25 04:00:54,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=266.6666666666667, ans=0.4875
+2024-08-25 04:00:58,236 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=173.92 vs. limit=7.6
+2024-08-25 04:01:12,731 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=215.51 vs. limit=7.62
+2024-08-25 04:01:20,796 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=75.41 vs. limit=7.74
+2024-08-25 04:01:20,984 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=179.53 vs. limit=5.16
+2024-08-25 04:01:26,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=373.3333333333333, ans=0.0916
+2024-08-25 04:02:00,788 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=36.11 vs. limit=7.78
+2024-08-25 04:02:03,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=426.6666666666667, ans=0.29573333333333335
+2024-08-25 04:02:05,239 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=426.6666666666667, ans=0.48
+2024-08-25 04:02:06,584 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=426.6666666666667, ans=0.48
+2024-08-25 04:02:21,706 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=39.56 vs. limit=7.66
+2024-08-25 04:02:33,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=480.0, ans=7.86
+2024-08-25 04:02:39,869 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=480.0, ans=0.182
+2024-08-25 04:02:59,891 INFO [train.py:1114] (3/4) Epoch 1, batch 100, loss[loss=1.369, simple_loss=0.9627, pruned_loss=1.205, ctc_loss=1.279, over 19721.00 frames. ], tot_loss[loss=2.588, simple_loss=1.913, pruned_loss=1.868, ctc_loss=2.357, over 1499372.19 frames. ], batch size: 51, lr: 2.70e-02, grad_scale: 0.5
+2024-08-25 04:03:04,900 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=33.18 vs. limit=7.7
+2024-08-25 04:03:07,084 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.807e+02 4.974e+02 8.674e+02 1.328e+03 6.572e+03, threshold=1.735e+03, percent-clipped=0.0
+2024-08-25 04:03:07,697 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=32.98 vs. limit=7.9
+2024-08-25 04:03:13,868 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=20.80 vs. limit=7.7
+2024-08-25 04:03:27,354 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=25.45 vs. limit=7.94
+2024-08-25 04:03:38,788 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=55.16 vs. limit=7.74
+2024-08-25 04:03:52,057 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=255.81 vs. limit=7.76
+2024-08-25 04:03:58,743 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=34.12 vs. limit=8.02
+2024-08-25 04:04:01,126 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=693.3333333333334, ans=0.4675
+2024-08-25 04:04:12,260 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=746.6666666666666, ans=5.466666666666667
+2024-08-25 04:04:22,881 INFO [train.py:1114] (3/4) Epoch 1, batch 150, loss[loss=1.176, simple_loss=0.8152, pruned_loss=1.03, ctc_loss=1.092, over 19706.00 frames. ], tot_loss[loss=2.052, simple_loss=1.495, pruned_loss=1.571, ctc_loss=1.871, over 2028131.03 frames. ], batch size: 47, lr: 2.93e-02, grad_scale: 0.5
+2024-08-25 04:04:29,736 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=182.82 vs. limit=5.4
+2024-08-25 04:04:39,980 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.85 vs. limit=5.213333333333333
+2024-08-25 04:04:42,591 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=219.15 vs. limit=7.82
+2024-08-25 04:04:49,211 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=29.12 vs. limit=8.18
+2024-08-25 04:05:07,692 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=12.94 vs. limit=7.84
+2024-08-25 04:05:08,814 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=34.56 vs. limit=8.18
+2024-08-25 04:05:08,924 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=117.77 vs. limit=7.84
+2024-08-25 04:05:08,934 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=122.51 vs. limit=7.84
+2024-08-25 04:05:08,992 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=55.42 vs. limit=7.84
+2024-08-25 04:05:14,608 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=44.55 vs. limit=7.86
+2024-08-25 04:05:17,335 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=7.33 vs. limit=4.384
+2024-08-25 04:05:18,539 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=59.87 vs. limit=8.22
+2024-08-25 04:05:40,433 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=1013.3333333333334, ans=0.162
+2024-08-25 04:05:46,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=1013.3333333333334, ans=0.4525
+2024-08-25 04:05:49,411 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=12.35 vs. limit=7.88
+2024-08-25 04:05:51,603 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.55 vs. limit=5.253333333333333
+2024-08-25 04:05:51,810 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=38.07 vs. limit=7.88
+2024-08-25 04:05:53,001 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=96.93 vs. limit=7.9
+2024-08-25 04:05:53,923 INFO [train.py:1114] (3/4) Epoch 1, batch 200, loss[loss=1.247, simple_loss=0.8591, pruned_loss=1.003, ctc_loss=1.201, over 18388.00 frames. ], tot_loss[loss=1.762, simple_loss=1.267, pruned_loss=1.382, ctc_loss=1.624, over 2436267.97 frames. ], batch size: 85, lr: 3.15e-02, grad_scale: 1.0
+2024-08-25 04:05:56,500 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=1066.6666666666667, ans=0.16
+2024-08-25 04:05:57,468 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 7.117e+01 1.191e+02 1.554e+02 2.219e+02 5.914e+02, threshold=3.108e+02, percent-clipped=0.0
+2024-08-25 04:05:59,063 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=57.21 vs. limit=7.9
+2024-08-25 04:05:59,133 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=26.51 vs. limit=8.3
+2024-08-25 04:06:07,941 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=1120.0, ans=0.4475
+2024-08-25 04:06:15,274 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.58 vs. limit=5.28
+2024-08-25 04:06:17,972 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=40.03 vs. limit=8.38
+2024-08-25 04:06:24,920 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=20.40 vs. limit=7.94
+2024-08-25 04:06:30,609 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=92.29 vs. limit=7.96
+2024-08-25 04:06:35,530 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=8.95 vs. limit=5.306666666666667
+2024-08-25 04:06:39,309 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=9.34 vs. limit=8.42
+2024-08-25 04:06:42,707 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=13.00 vs. limit=7.98
+2024-08-25 04:06:54,287 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=27.51 vs. limit=8.46
+2024-08-25 04:06:54,295 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.whiten.whitening_limit, batch_count=1280.0, ans=4.5120000000000005
+2024-08-25 04:06:57,356 INFO [train.py:1114] (3/4) Epoch 1, batch 250, loss[loss=1.205, simple_loss=0.8165, pruned_loss=0.9521, ctc_loss=1.187, over 19410.00 frames. ], tot_loss[loss=1.587, simple_loss=1.126, pruned_loss=1.254, ctc_loss=1.482, over 2756308.20 frames. ], batch size: 67, lr: 3.38e-02, grad_scale: 1.0
+2024-08-25 04:06:57,848 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=63.71 vs. limit=8.0
+2024-08-25 04:07:36,857 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=43.25 vs. limit=8.0
+2024-08-25 04:07:37,067 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.41 vs. limit=8.5
+2024-08-25 04:07:37,316 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=99.86 vs. limit=5.666666666666667
+2024-08-25 04:07:40,961 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=12.72 vs. limit=8.0
+2024-08-25 04:07:41,749 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1386.6666666666667, ans=0.28613333333333335
+2024-08-25 04:08:00,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1440.0, ans=0.28559999999999997
+2024-08-25 04:08:02,620 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=1440.0, ans=0.4325
+2024-08-25 04:08:02,625 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=1440.0, ans=0.4325
+2024-08-25 04:08:02,814 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=28.20 vs. limit=8.58
+2024-08-25 04:08:03,037 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.92 vs. limit=8.04
+2024-08-25 04:08:04,101 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.60 vs. limit=4.576
+2024-08-25 04:08:04,394 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=17.77 vs. limit=5.0
+2024-08-25 04:08:10,102 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.71 vs. limit=5.373333333333333
+2024-08-25 04:08:19,456 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=14.26 vs. limit=5.373333333333333
+2024-08-25 04:08:59,660 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=1546.6666666666667, ans=0.2845333333333333
+2024-08-25 04:09:07,400 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=63.08 vs. limit=8.08
+2024-08-25 04:09:10,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=1600.0, ans=8.1
+2024-08-25 04:09:11,351 INFO [train.py:1114] (3/4) Epoch 1, batch 300, loss[loss=1.262, simple_loss=0.8488, pruned_loss=0.9775, ctc_loss=1.233, over 19532.00 frames. ], tot_loss[loss=1.471, simple_loss=1.032, pruned_loss=1.162, ctc_loss=1.39, over 3001332.80 frames. ], batch size: 61, lr: 3.60e-02, grad_scale: 2.0
+2024-08-25 04:09:14,916 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 8.125e+01 1.367e+02 1.753e+02 2.332e+02 3.681e+02, threshold=3.505e+02, percent-clipped=6.0
+2024-08-25 04:09:20,255 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=51.23 vs. limit=8.1
+2024-08-25 04:09:33,497 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=35.28 vs. limit=8.1
+2024-08-25 04:10:25,431 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=1653.3333333333333, ans=0.8421333333333334
+2024-08-25 04:10:34,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten.whitening_limit, batch_count=1706.6666666666667, ans=8.14
+2024-08-25 04:10:36,389 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=12.93 vs. limit=8.14
+2024-08-25 04:11:02,054 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=50.35 vs. limit=8.18
+2024-08-25 04:11:06,917 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=103.77 vs. limit=8.18
+2024-08-25 04:11:07,549 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=1813.3333333333333, ans=0.0592
+2024-08-25 04:11:11,489 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=8.29 vs. limit=5.906666666666666
+2024-08-25 04:11:12,117 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=1866.6666666666667, ans=0.13
+2024-08-25 04:11:13,109 INFO [train.py:1114] (3/4) Epoch 1, batch 350, loss[loss=1.102, simple_loss=0.7286, pruned_loss=0.8398, ctc_loss=1.103, over 19760.00 frames. ], tot_loss[loss=1.393, simple_loss=0.9665, pruned_loss=1.094, ctc_loss=1.329, over 3191955.11 frames. ], batch size: 48, lr: 3.83e-02, grad_scale: 2.0
+2024-08-25 04:11:17,472 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=19.16 vs. limit=8.9
+2024-08-25 04:11:18,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1866.6666666666667, ans=0.2813333333333333
+2024-08-25 04:11:20,399 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1866.6666666666667, ans=0.2813333333333333
+2024-08-25 04:11:31,210 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=135.82 vs. limit=8.22
+2024-08-25 04:11:35,990 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=11.98 vs. limit=8.24
+2024-08-25 04:11:38,108 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=29.68 vs. limit=8.24
+2024-08-25 04:11:39,462 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=50.13 vs. limit=8.24
+2024-08-25 04:11:48,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=2026.6666666666667, ans=0.8290666666666667
+2024-08-25 04:11:48,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 04:11:56,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 04:11:57,616 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=2026.6666666666667, ans=0.405
+2024-08-25 04:11:59,076 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=45.84 vs. limit=8.28
+2024-08-25 04:12:11,684 INFO [train.py:1114] (3/4) Epoch 1, batch 400, loss[loss=1.132, simple_loss=0.7609, pruned_loss=0.8206, ctc_loss=1.088, over 19503.00 frames. ], tot_loss[loss=1.33, simple_loss=0.9148, pruned_loss=1.033, ctc_loss=1.276, over 3344538.75 frames. ], batch size: 54, lr: 4.05e-02, grad_scale: 4.0
+2024-08-25 04:12:15,148 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 9.241e+01 1.644e+02 2.144e+02 2.768e+02 4.713e+02, threshold=4.287e+02, percent-clipped=10.0
+2024-08-25 04:12:15,697 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=16.68 vs. limit=8.3
+2024-08-25 04:12:22,976 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=15.47 vs. limit=8.32
+2024-08-25 04:12:25,311 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.04 vs. limit=9.14
+2024-08-25 04:12:28,709 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=25.27 vs. limit=8.32
+2024-08-25 04:12:30,946 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=55.61 vs. limit=8.32
+2024-08-25 04:12:39,255 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=28.42 vs. limit=8.34
+2024-08-25 04:12:48,576 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.52 vs. limit=8.36
+2024-08-25 04:12:48,746 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=14.18 vs. limit=8.36
+2024-08-25 04:12:54,493 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.95 vs. limit=9.22
+2024-08-25 04:12:57,959 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=34.86 vs. limit=8.36
+2024-08-25 04:13:12,078 INFO [train.py:1114] (3/4) Epoch 1, batch 450, loss[loss=1.123, simple_loss=0.7652, pruned_loss=0.7465, ctc_loss=1.089, over 19616.00 frames. ], tot_loss[loss=1.28, simple_loss=0.8767, pruned_loss=0.972, ctc_loss=1.229, over 3453240.13 frames. ], batch size: 55, lr: 4.28e-02, grad_scale: 4.0
+2024-08-25 04:13:13,435 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=2400.0, ans=0.11
+2024-08-25 04:13:17,262 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=20.88 vs. limit=5.6
+2024-08-25 04:14:11,670 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=2453.3333333333335, ans=0.1933333333333333
+2024-08-25 04:14:25,337 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=2560.0, ans=0.2744
+2024-08-25 04:14:36,317 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=2560.0, ans=0.5
+2024-08-25 04:14:38,930 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.70 vs. limit=8.48
+2024-08-25 04:14:45,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=2613.3333333333335, ans=0.3775
+2024-08-25 04:14:53,356 INFO [train.py:1114] (3/4) Epoch 1, batch 500, loss[loss=1.037, simple_loss=0.7217, pruned_loss=0.6281, ctc_loss=1.005, over 19686.00 frames. ], tot_loss[loss=1.217, simple_loss=0.8349, pruned_loss=0.8934, ctc_loss=1.169, over 3547250.01 frames. ], batch size: 63, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:14:53,697 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=2666.6666666666665, ans=0.375
+2024-08-25 04:14:59,593 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.247e+02 2.224e+02 2.884e+02 3.405e+02 7.334e+02, threshold=5.768e+02, percent-clipped=15.0
+2024-08-25 04:15:04,759 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.74 vs. limit=8.5
+2024-08-25 04:15:11,180 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=2720.0, ans=0.8048000000000001
+2024-08-25 04:15:13,951 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=10.17 vs. limit=8.52
+2024-08-25 04:15:19,005 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=2773.3333333333335, ans=0.37
+2024-08-25 04:15:23,990 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.77 vs. limit=9.58
+2024-08-25 04:15:45,536 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.69 vs. limit=8.58
+2024-08-25 04:15:52,597 INFO [train.py:1114] (3/4) Epoch 1, batch 550, loss[loss=0.9838, simple_loss=0.6925, pruned_loss=0.5695, ctc_loss=0.9427, over 19216.00 frames. ], tot_loss[loss=1.152, simple_loss=0.7933, pruned_loss=0.8145, ctc_loss=1.109, over 3608044.28 frames. ], batch size: 71, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:15:53,314 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.25 vs. limit=9.7
+2024-08-25 04:16:01,240 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.53 vs. limit=5.733333333333333
+2024-08-25 04:16:02,953 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=2986.6666666666665, ans=0.27013333333333334
+2024-08-25 04:16:08,912 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=6.60 vs. limit=5.1946666666666665
+2024-08-25 04:16:18,077 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.56 vs. limit=8.64
+2024-08-25 04:16:20,297 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.38 vs. limit=8.64
+2024-08-25 04:16:35,649 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.48 vs. limit=9.82
+2024-08-25 04:16:37,826 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.87 vs. limit=8.66
+2024-08-25 04:16:58,223 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=3146.6666666666665, ans=0.7898666666666667
+2024-08-25 04:17:00,427 INFO [train.py:1114] (3/4) Epoch 1, batch 600, loss[loss=0.8782, simple_loss=0.6307, pruned_loss=0.4667, ctc_loss=0.8471, over 19385.00 frames. ], tot_loss[loss=1.082, simple_loss=0.75, pruned_loss=0.7338, ctc_loss=1.042, over 3666354.70 frames. ], batch size: 67, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:17:03,774 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.809e+02 3.766e+02 4.633e+02 8.655e+02, threshold=7.532e+02, percent-clipped=12.0
+2024-08-25 04:17:08,451 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=3200.0, ans=7.0
+2024-08-25 04:17:10,902 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.72 vs. limit=5.8133333333333335
+2024-08-25 04:17:17,567 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=3253.3333333333335, ans=9.94
+2024-08-25 04:17:18,449 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.14 vs. limit=9.94
+2024-08-25 04:18:05,754 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=3360.0, ans=0.07400000000000001
+2024-08-25 04:18:09,361 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=8.94 vs. limit=5.84
+2024-08-25 04:18:10,341 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.47 vs. limit=3.504
+2024-08-25 04:18:26,491 INFO [train.py:1114] (3/4) Epoch 1, batch 650, loss[loss=0.7101, simple_loss=0.5224, pruned_loss=0.3516, ctc_loss=0.6728, over 19771.00 frames. ], tot_loss[loss=1.009, simple_loss=0.7054, pruned_loss=0.6553, ctc_loss=0.9697, over 3716874.53 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:18:28,990 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=3466.6666666666665, ans=0.7786666666666667
+2024-08-25 04:18:31,345 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=3466.6666666666665, ans=0.3375
+2024-08-25 04:18:32,812 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.69 vs. limit=8.8
+2024-08-25 04:18:35,379 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=3466.6666666666665, ans=0.3375
+2024-08-25 04:18:39,040 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.73 vs. limit=5.88
+2024-08-25 04:18:47,924 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=3520.0, ans=0.7768
+2024-08-25 04:18:58,824 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.35 vs. limit=8.84
+2024-08-25 04:19:07,555 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.37 vs. limit=5.906666666666666
+2024-08-25 04:19:21,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=3680.0, ans=0.3275
+2024-08-25 04:19:23,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=3680.0, ans=0.2552
+2024-08-25 04:20:32,376 INFO [train.py:1114] (3/4) Epoch 1, batch 700, loss[loss=0.7132, simple_loss=0.5258, pruned_loss=0.3529, ctc_loss=0.6647, over 19732.00 frames. ], tot_loss[loss=0.9466, simple_loss=0.6682, pruned_loss=0.5882, ctc_loss=0.9051, over 3748893.67 frames. ], batch size: 51, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:20:32,703 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=3733.3333333333335, ans=0.325
+2024-08-25 04:20:35,549 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.600e+02 3.309e+02 4.487e+02 1.180e+03, threshold=6.619e+02, percent-clipped=3.0
+2024-08-25 04:20:40,485 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.32 vs. limit=5.933333333333334
+2024-08-25 04:20:40,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=3733.3333333333335, ans=0.033333333333333326
+2024-08-25 04:20:41,501 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.30 vs. limit=8.9
+2024-08-25 04:20:45,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=3786.6666666666665, ans=0.3225
+2024-08-25 04:20:51,385 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.03 vs. limit=5.946666666666666
+2024-08-25 04:20:55,953 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.19 vs. limit=6.92
+2024-08-25 04:20:59,942 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.58 vs. limit=8.94
+2024-08-25 04:21:06,653 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.81 vs. limit=6.946666666666667
+2024-08-25 04:21:08,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3893.3333333333335, ans=0.26106666666666667
+2024-08-25 04:21:14,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=3893.3333333333335, ans=0.7637333333333334
+2024-08-25 04:21:19,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=3946.6666666666665, ans=0.26053333333333334
+2024-08-25 04:21:23,593 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=3946.6666666666665, ans=0.315
+2024-08-25 04:21:26,582 INFO [train.py:1114] (3/4) Epoch 1, batch 750, loss[loss=0.6701, simple_loss=0.5169, pruned_loss=0.3015, ctc_loss=0.5909, over 19497.00 frames. ], tot_loss[loss=0.8862, simple_loss=0.6326, pruned_loss=0.5279, ctc_loss=0.8411, over 3776162.02 frames. ], batch size: 54, lr: 4.49e-02, grad_scale: 8.0
+2024-08-25 04:21:26,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=4000.0, ans=0.3125
+2024-08-25 04:21:50,001 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.24 vs. limit=10.58
+2024-08-25 04:21:55,698 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=10.85 vs. limit=10.58
+2024-08-25 04:22:04,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=4160.0, ans=0.025
+2024-08-25 04:22:06,464 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=4160.0, ans=0.04933333333333333
+2024-08-25 04:22:27,445 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=4160.0, ans=0.037000000000000005
+2024-08-25 04:22:34,294 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=4213.333333333333, ans=10.66
+2024-08-25 04:22:36,417 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.33 vs. limit=6.053333333333333
+2024-08-25 04:22:40,639 INFO [train.py:1114] (3/4) Epoch 1, batch 800, loss[loss=0.5806, simple_loss=0.4515, pruned_loss=0.2559, ctc_loss=0.5086, over 19823.00 frames. ], tot_loss[loss=0.8308, simple_loss=0.6007, pruned_loss=0.4745, ctc_loss=0.7796, over 3797711.47 frames. ], batch size: 49, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:22:40,915 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=4266.666666666667, ans=0.7506666666666667
+2024-08-25 04:22:43,871 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.484e+02 3.479e+02 4.307e+02 9.603e+02, threshold=6.957e+02, percent-clipped=4.0
+2024-08-25 04:22:45,390 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=4266.666666666667, ans=7.666666666666667
+2024-08-25 04:22:51,475 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.49 vs. limit=10.74
+2024-08-25 04:22:52,474 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.02 vs. limit=9.120000000000001
+2024-08-25 04:22:56,427 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4320.0, ans=0.2568
+2024-08-25 04:23:11,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=4373.333333333333, ans=0.29500000000000004
+2024-08-25 04:23:16,678 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=4426.666666666667, ans=0.04822222222222222
+2024-08-25 04:23:17,591 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4426.666666666667, ans=0.2557333333333333
+2024-08-25 04:23:18,700 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=4426.666666666667, ans=0.035
+2024-08-25 04:23:34,665 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.96 vs. limit=10.86
+2024-08-25 04:23:34,831 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.42 vs. limit=9.18
+2024-08-25 04:23:42,673 INFO [train.py:1114] (3/4) Epoch 1, batch 850, loss[loss=0.6173, simple_loss=0.4879, pruned_loss=0.2674, ctc_loss=0.5202, over 19649.00 frames. ], tot_loss[loss=0.7804, simple_loss=0.5723, pruned_loss=0.4276, ctc_loss=0.7218, over 3815877.14 frames. ], batch size: 59, lr: 4.49e-02, grad_scale: 16.0
+2024-08-25 04:23:45,165 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=4533.333333333333, ans=0.2875
+2024-08-25 04:23:46,522 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1.whitening_limit, batch_count=4533.333333333333, ans=6.133333333333333
+2024-08-25 04:23:50,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4533.333333333333, ans=0.25466666666666665
+2024-08-25 04:23:53,003 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.29 vs. limit=9.22
+2024-08-25 04:23:56,735 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=4586.666666666667, ans=0.28500000000000003
+2024-08-25 04:24:02,024 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=4586.666666666667, ans=0.04755555555555556
+2024-08-25 04:24:21,270 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=4693.333333333333, ans=0.009849275362318841
+2024-08-25 04:24:31,591 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.66 vs. limit=11.06
+2024-08-25 04:24:34,371 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=4746.666666666667, ans=0.27749999999999997
+2024-08-25 04:24:36,277 INFO [train.py:1114] (3/4) Epoch 1, batch 900, loss[loss=0.5298, simple_loss=0.4242, pruned_loss=0.2272, ctc_loss=0.4317, over 19807.00 frames. ], tot_loss[loss=0.739, simple_loss=0.5492, pruned_loss=0.3901, ctc_loss=0.6732, over 3820209.74 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:24:39,550 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.433e+02 3.203e+02 4.513e+02 7.559e+02, threshold=6.406e+02, percent-clipped=2.0
+2024-08-25 04:24:50,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=4853.333333333333, ans=0.00981449275362319
+2024-08-25 04:25:17,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=4960.0, ans=0.26749999999999996
+2024-08-25 04:25:19,264 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=4960.0, ans=8.1
+2024-08-25 04:25:32,739 INFO [train.py:1114] (3/4) Epoch 1, batch 950, loss[loss=0.5095, simple_loss=0.416, pruned_loss=0.2052, ctc_loss=0.4199, over 19512.00 frames. ], tot_loss[loss=0.7016, simple_loss=0.5286, pruned_loss=0.3571, ctc_loss=0.6292, over 3821989.98 frames. ], batch size: 49, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:25:37,262 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=5066.666666666667, ans=0.2625
+2024-08-25 04:25:40,902 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.03 vs. limit=6.266666666666667
+2024-08-25 04:25:50,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=5120.0, ans=0.26
+2024-08-25 04:26:16,161 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=5226.666666666667, ans=0.7170666666666667
+2024-08-25 04:26:26,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=5280.0, ans=0.009721739130434783
+2024-08-25 04:26:32,796 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.18 vs. limit=5.0
+2024-08-25 04:26:33,421 INFO [train.py:1114] (3/4) Epoch 1, batch 1000, loss[loss=0.5013, simple_loss=0.4178, pruned_loss=0.1953, ctc_loss=0.4027, over 19852.00 frames. ], tot_loss[loss=0.6711, simple_loss=0.5122, pruned_loss=0.3305, ctc_loss=0.5921, over 3818261.56 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:26:36,694 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.226e+02 2.758e+02 3.479e+02 9.619e+02, threshold=5.516e+02, percent-clipped=3.0
+2024-08-25 04:26:54,709 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=5440.0, ans=0.24559999999999998
+2024-08-25 04:26:58,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=5440.0, ans=0.7096
+2024-08-25 04:27:04,397 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.20 vs. limit=9.56
+2024-08-25 04:27:09,149 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=5493.333333333333, ans=0.2425
+2024-08-25 04:27:21,648 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=5546.666666666667, ans=0.24
+2024-08-25 04:27:25,679 INFO [train.py:1114] (3/4) Epoch 1, batch 1050, loss[loss=0.5558, simple_loss=0.464, pruned_loss=0.2216, ctc_loss=0.4334, over 19830.00 frames. ], tot_loss[loss=0.6378, simple_loss=0.4942, pruned_loss=0.3035, ctc_loss=0.553, over 3824917.30 frames. ], batch size: 57, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:27:35,341 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=5653.333333333333, ans=0.7021333333333334
+2024-08-25 04:28:07,789 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.56 vs. limit=6.453333333333333
+2024-08-25 04:28:20,176 INFO [train.py:1114] (3/4) Epoch 1, batch 1100, loss[loss=0.4597, simple_loss=0.3909, pruned_loss=0.1744, ctc_loss=0.3624, over 19577.00 frames. ], tot_loss[loss=0.6097, simple_loss=0.4793, pruned_loss=0.2813, ctc_loss=0.5195, over 3832258.35 frames. ], batch size: 52, lr: 4.48e-02, grad_scale: 16.0
+2024-08-25 04:28:23,253 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.143e+02 2.593e+02 3.421e+02 4.407e+02, threshold=5.186e+02, percent-clipped=0.0
+2024-08-25 04:28:24,595 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=5866.666666666667, ans=0.22499999999999998
+2024-08-25 04:28:37,495 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=5920.0, ans=0.009582608695652174
+2024-08-25 04:28:44,870 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=5973.333333333333, ans=0.04177777777777778
+2024-08-25 04:28:50,186 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=5973.333333333333, ans=0.6909333333333334
+2024-08-25 04:28:50,235 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=5973.333333333333, ans=0.19026666666666667
+2024-08-25 04:29:02,076 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=6080.0, ans=0.21500000000000002
+2024-08-25 04:29:13,481 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.77 vs. limit=8.04
+2024-08-25 04:29:15,900 INFO [train.py:1114] (3/4) Epoch 1, batch 1150, loss[loss=0.4715, simple_loss=0.4058, pruned_loss=0.1792, ctc_loss=0.3588, over 19574.00 frames. ], tot_loss[loss=0.588, simple_loss=0.4681, pruned_loss=0.2642, ctc_loss=0.4929, over 3829761.48 frames. ], batch size: 52, lr: 4.47e-02, grad_scale: 16.0
+2024-08-25 04:29:21,970 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.28 vs. limit=9.8
+2024-08-25 04:29:25,103 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.48 vs. limit=12.1
+2024-08-25 04:29:30,725 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.45 vs. limit=6.546666666666667
+2024-08-25 04:29:51,747 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=6240.0, ans=0.6816
+2024-08-25 04:32:27,984 INFO [train.py:1114] (3/4) Epoch 1, batch 1200, loss[loss=0.5099, simple_loss=0.4397, pruned_loss=0.193, ctc_loss=0.3933, over 19835.00 frames. ], tot_loss[loss=0.57, simple_loss=0.4593, pruned_loss=0.2499, ctc_loss=0.4705, over 3825141.66 frames. ], batch size: 57, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:32:31,069 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.077e+02 2.797e+02 3.799e+02 8.339e+02, threshold=5.594e+02, percent-clipped=11.0
+2024-08-25 04:32:49,130 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.49 vs. limit=9.94
+2024-08-25 04:32:53,459 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.72 vs. limit=9.94
+2024-08-25 04:32:56,598 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.96 vs. limit=8.253333333333334
+2024-08-25 04:33:01,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:06,310 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=6560.0, ans=0.1925
+2024-08-25 04:33:08,408 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=6613.333333333333, ans=0.6685333333333334
+2024-08-25 04:33:11,438 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=6613.333333333333, ans=0.19
+2024-08-25 04:33:11,524 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=6613.333333333333, ans=0.03911111111111111
+2024-08-25 04:33:17,955 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.27 vs. limit=12.46
+2024-08-25 04:33:19,312 INFO [train.py:1114] (3/4) Epoch 1, batch 1250, loss[loss=0.5128, simple_loss=0.4377, pruned_loss=0.2007, ctc_loss=0.3948, over 19546.00 frames. ], tot_loss[loss=0.55, simple_loss=0.4496, pruned_loss=0.2352, ctc_loss=0.4465, over 3843098.91 frames. ], batch size: 61, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:33:44,373 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=6773.333333333333, ans=0.009397101449275363
+2024-08-25 04:33:49,482 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=6773.333333333333, ans=0.1825
+2024-08-25 04:33:51,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=6826.666666666667, ans=0.23173333333333335
+2024-08-25 04:33:52,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=6826.666666666667, ans=0.009385507246376813
+2024-08-25 04:33:59,005 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.66 vs. limit=10.06
+2024-08-25 04:34:10,783 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=6880.0, ans=0.1775
+2024-08-25 04:34:10,807 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=6880.0, ans=9.3
+2024-08-25 04:34:12,497 INFO [train.py:1114] (3/4) Epoch 1, batch 1300, loss[loss=0.5159, simple_loss=0.4397, pruned_loss=0.2059, ctc_loss=0.39, over 18922.00 frames. ], tot_loss[loss=0.5331, simple_loss=0.441, pruned_loss=0.2234, ctc_loss=0.4262, over 3847791.71 frames. ], batch size: 76, lr: 4.47e-02, grad_scale: 32.0
+2024-08-25 04:34:15,550 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.007e+02 2.492e+02 3.309e+02 5.533e+02, threshold=4.985e+02, percent-clipped=0.0
+2024-08-25 04:34:16,717 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=6933.333333333333, ans=0.175
+2024-08-25 04:34:26,177 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=6986.666666666667, ans=0.23013333333333333
+2024-08-25 04:34:32,263 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=6986.666666666667, ans=0.1725
+2024-08-25 04:34:39,851 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=12.33 vs. limit=12.780000000000001
+2024-08-25 04:34:58,128 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=7093.333333333333, ans=0.16749999999999998
+2024-08-25 04:35:05,438 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=7146.666666666667, ans=0.16499999999999998
+2024-08-25 04:35:11,275 INFO [train.py:1114] (3/4) Epoch 1, batch 1350, loss[loss=0.412, simple_loss=0.3799, pruned_loss=0.1437, ctc_loss=0.2934, over 19754.00 frames. ], tot_loss[loss=0.5169, simple_loss=0.4332, pruned_loss=0.2124, ctc_loss=0.407, over 3858533.61 frames. ], batch size: 54, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:35:11,474 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=7200.0, ans=0.03666666666666667
+2024-08-25 04:35:17,675 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=7200.0, ans=0.22799999999999998
+2024-08-25 04:35:25,710 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=7253.333333333333, ans=0.15999999999999998
+2024-08-25 04:35:46,922 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=7360.0, ans=0.036000000000000004
+2024-08-25 04:36:02,099 INFO [train.py:1114] (3/4) Epoch 1, batch 1400, loss[loss=0.3892, simple_loss=0.3531, pruned_loss=0.1417, ctc_loss=0.281, over 19661.00 frames. ], tot_loss[loss=0.5029, simple_loss=0.4262, pruned_loss=0.2034, ctc_loss=0.3908, over 3865756.22 frames. ], batch size: 46, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:36:02,533 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.12 vs. limit=8.733333333333334
+2024-08-25 04:36:05,091 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 1.980e+02 2.233e+02 2.820e+02 5.701e+02, threshold=4.466e+02, percent-clipped=2.0
+2024-08-25 04:36:19,906 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.43 vs. limit=13.14
+2024-08-25 04:36:25,623 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=7573.333333333333, ans=0.14500000000000002
+2024-08-25 04:36:30,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=7573.333333333333, ans=0.14500000000000002
+2024-08-25 04:36:48,994 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=7680.0, ans=0.052000000000000005
+2024-08-25 04:36:51,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=7680.0, ans=0.2232
+2024-08-25 04:36:54,784 INFO [train.py:1114] (3/4) Epoch 1, batch 1450, loss[loss=0.5256, simple_loss=0.4513, pruned_loss=0.2125, ctc_loss=0.3899, over 19644.00 frames. ], tot_loss[loss=0.4917, simple_loss=0.4211, pruned_loss=0.1963, ctc_loss=0.3775, over 3863304.18 frames. ], batch size: 63, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:37:11,911 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=7786.666666666667, ans=0.0
+2024-08-25 04:37:31,088 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=7893.333333333333, ans=0.13
+2024-08-25 04:37:41,315 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=7946.666666666667, ans=0.1275
+2024-08-25 04:37:47,883 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=8000.0, ans=0.0
+2024-08-25 04:37:48,162 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.00 vs. limit=13.5
+2024-08-25 04:37:48,637 INFO [train.py:1114] (3/4) Epoch 1, batch 1500, loss[loss=0.506, simple_loss=0.4425, pruned_loss=0.1991, ctc_loss=0.3761, over 19599.00 frames. ], tot_loss[loss=0.4828, simple_loss=0.4177, pruned_loss=0.1904, ctc_loss=0.3664, over 3863668.54 frames. ], batch size: 57, lr: 4.46e-02, grad_scale: 32.0
+2024-08-25 04:37:54,382 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 1.987e+02 2.351e+02 3.240e+02 5.717e+02, threshold=4.702e+02, percent-clipped=4.0
+2024-08-25 04:37:59,852 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.68 vs. limit=10.5
+2024-08-25 04:38:01,856 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.13 vs. limit=10.52
+2024-08-25 04:38:04,925 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.09 vs. limit=13.54
+2024-08-25 04:38:10,740 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=8106.666666666667, ans=0.125
+2024-08-25 04:38:36,719 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=8160.0, ans=0.125
+2024-08-25 04:38:50,450 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=8213.333333333334, ans=0.125
+2024-08-25 04:38:52,816 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.57 vs. limit=10.58
+2024-08-25 04:38:56,144 INFO [train.py:1114] (3/4) Epoch 1, batch 1550, loss[loss=0.4848, simple_loss=0.4358, pruned_loss=0.1861, ctc_loss=0.3473, over 19615.00 frames. ], tot_loss[loss=0.4729, simple_loss=0.4132, pruned_loss=0.1846, ctc_loss=0.3553, over 3847809.76 frames. ], batch size: 60, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:05,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=8320.0, ans=0.2168
+2024-08-25 04:39:24,185 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:39:28,403 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=8426.666666666666, ans=0.03155555555555556
+2024-08-25 04:39:47,208 INFO [train.py:1114] (3/4) Epoch 1, batch 1600, loss[loss=0.4384, simple_loss=0.4059, pruned_loss=0.1619, ctc_loss=0.3109, over 19840.00 frames. ], tot_loss[loss=0.4642, simple_loss=0.4091, pruned_loss=0.1799, ctc_loss=0.3456, over 3837198.14 frames. ], batch size: 57, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:39:48,773 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.13 vs. limit=4.28
+2024-08-25 04:39:52,848 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.044e+02 2.368e+02 2.950e+02 6.795e+02, threshold=4.737e+02, percent-clipped=6.0
+2024-08-25 04:40:03,654 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=8586.666666666666, ans=0.0
+2024-08-25 04:40:08,724 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=8586.666666666666, ans=0.030888888888888893
+2024-08-25 04:40:15,172 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.99 vs. limit=4.296
+2024-08-25 04:40:17,920 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=8640.0, ans=0.025
+2024-08-25 04:40:31,699 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.27 vs. limit=10.76
+2024-08-25 04:40:42,979 INFO [train.py:1114] (3/4) Epoch 1, batch 1650, loss[loss=0.454, simple_loss=0.42, pruned_loss=0.1683, ctc_loss=0.3272, over 19670.00 frames. ], tot_loss[loss=0.4545, simple_loss=0.405, pruned_loss=0.1743, ctc_loss=0.3358, over 3834030.75 frames. ], batch size: 59, lr: 4.45e-02, grad_scale: 32.0
+2024-08-25 04:42:03,039 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8853.333333333334, ans=0.21146666666666664
+2024-08-25 04:43:06,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=8906.666666666666, ans=0.125
+2024-08-25 04:43:23,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=9013.333333333334, ans=0.125
+2024-08-25 04:43:28,615 INFO [train.py:1114] (3/4) Epoch 1, batch 1700, loss[loss=0.3536, simple_loss=0.3393, pruned_loss=0.1259, ctc_loss=0.2481, over 19677.00 frames. ], tot_loss[loss=0.4454, simple_loss=0.4013, pruned_loss=0.1692, ctc_loss=0.3262, over 3848340.67 frames. ], batch size: 46, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:43:31,591 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.986e+02 2.386e+02 2.791e+02 4.935e+02, threshold=4.772e+02, percent-clipped=1.0
+2024-08-25 04:43:57,351 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 04:44:04,774 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=9226.666666666666, ans=0.125
+2024-08-25 04:45:26,300 INFO [train.py:1114] (3/4) Epoch 1, batch 1750, loss[loss=0.3672, simple_loss=0.3488, pruned_loss=0.1351, ctc_loss=0.2553, over 19672.00 frames. ], tot_loss[loss=0.4368, simple_loss=0.3973, pruned_loss=0.1649, ctc_loss=0.3179, over 3853438.43 frames. ], batch size: 45, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:45:28,420 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=9333.333333333334, ans=0.125
+2024-08-25 04:45:32,979 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=9333.333333333334, ans=0.20666666666666667
+2024-08-25 04:45:58,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=9493.333333333334, ans=0.20506666666666665
+2024-08-25 04:46:02,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=9493.333333333334, ans=0.125
+2024-08-25 04:46:10,653 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.34 vs. limit=11.08
+2024-08-25 04:46:13,427 INFO [train.py:1114] (3/4) Epoch 1, batch 1800, loss[loss=0.4291, simple_loss=0.4106, pruned_loss=0.1584, ctc_loss=0.2955, over 19613.00 frames. ], tot_loss[loss=0.4303, simple_loss=0.3948, pruned_loss=0.1616, ctc_loss=0.3112, over 3854221.20 frames. ], batch size: 55, lr: 4.44e-02, grad_scale: 32.0
+2024-08-25 04:46:16,175 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.025e+02 2.321e+02 2.784e+02 4.120e+02, threshold=4.643e+02, percent-clipped=0.0
+2024-08-25 04:46:21,574 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.76 vs. limit=7.4
+2024-08-25 04:46:44,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=9760.0, ans=0.04949747468305833
+2024-08-25 04:48:28,770 INFO [train.py:1114] (3/4) Epoch 1, batch 1850, loss[loss=0.4141, simple_loss=0.3927, pruned_loss=0.1543, ctc_loss=0.2957, over 19592.00 frames. ], tot_loss[loss=0.4217, simple_loss=0.3912, pruned_loss=0.1573, ctc_loss=0.3028, over 3859030.19 frames. ], batch size: 57, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:48:32,572 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=9866.666666666666, ans=0.125
+2024-08-25 04:48:40,093 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.97 vs. limit=7.48
+2024-08-25 04:48:53,355 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.99 vs. limit=11.24
+2024-08-25 04:49:08,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=10080.0, ans=0.125
+2024-08-25 04:49:15,870 INFO [train.py:1114] (3/4) Epoch 1, batch 1900, loss[loss=0.4109, simple_loss=0.3971, pruned_loss=0.1506, ctc_loss=0.2932, over 19655.00 frames. ], tot_loss[loss=0.4163, simple_loss=0.3897, pruned_loss=0.1548, ctc_loss=0.2975, over 3863302.01 frames. ], batch size: 59, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:49:18,611 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.031e+02 2.370e+02 2.878e+02 5.610e+02, threshold=4.739e+02, percent-clipped=2.0
+2024-08-25 04:49:22,386 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=10133.333333333334, ans=0.125
+2024-08-25 04:49:53,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=10186.666666666666, ans=0.008655072463768116
+2024-08-25 04:50:03,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=10240.0, ans=0.125
+2024-08-25 04:50:05,809 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.61 vs. limit=15.18
+2024-08-25 04:50:07,419 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=10240.0, ans=0.125
+2024-08-25 04:50:15,558 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=10293.333333333334, ans=0.023777777777777773
+2024-08-25 04:50:18,747 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.78 vs. limit=8.117333333333335
+2024-08-25 04:50:19,995 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=10293.333333333334, ans=0.125
+2024-08-25 04:50:22,169 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=14.72 vs. limit=15.26
+2024-08-25 04:50:31,839 INFO [train.py:1114] (3/4) Epoch 1, batch 1950, loss[loss=0.3722, simple_loss=0.3695, pruned_loss=0.1348, ctc_loss=0.2557, over 19608.00 frames. ], tot_loss[loss=0.4117, simple_loss=0.389, pruned_loss=0.1525, ctc_loss=0.293, over 3871559.95 frames. ], batch size: 52, lr: 4.43e-02, grad_scale: 32.0
+2024-08-25 04:50:49,462 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10506.666666666666, ans=0.19493333333333335
+2024-08-25 04:50:52,176 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=10506.666666666666, ans=0.04949747468305833
+2024-08-25 04:50:56,087 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.19 vs. limit=11.44
+2024-08-25 04:50:56,618 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=10506.666666666666, ans=0.025
+2024-08-25 04:51:06,762 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10560.0, ans=0.1944
+2024-08-25 04:51:20,871 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.34 vs. limit=15.42
+2024-08-25 04:51:21,802 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.10 vs. limit=15.42
+2024-08-25 04:52:05,789 INFO [train.py:1114] (3/4) Epoch 1, batch 2000, loss[loss=0.3341, simple_loss=0.3361, pruned_loss=0.1202, ctc_loss=0.2296, over 19619.00 frames. ], tot_loss[loss=0.4082, simple_loss=0.3884, pruned_loss=0.1511, ctc_loss=0.29, over 3857002.57 frames. ], batch size: 45, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:52:09,669 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 1.861e+02 2.137e+02 2.685e+02 4.799e+02, threshold=4.274e+02, percent-clipped=1.0
+2024-08-25 04:52:14,278 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=10666.666666666666, ans=0.022222222222222227
+2024-08-25 04:53:21,764 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=10666.666666666666, ans=0.0
+2024-08-25 04:53:39,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10720.0, ans=0.19279999999999997
+2024-08-25 04:53:41,418 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=10720.0, ans=0.125
+2024-08-25 04:54:13,818 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=10826.666666666666, ans=0.19173333333333334
+2024-08-25 04:54:28,351 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.89 vs. limit=15.66
+2024-08-25 04:54:32,627 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.10 vs. limit=4.64
+2024-08-25 04:54:32,954 INFO [train.py:1114] (3/4) Epoch 1, batch 2050, loss[loss=0.3395, simple_loss=0.3342, pruned_loss=0.1253, ctc_loss=0.2358, over 19731.00 frames. ], tot_loss[loss=0.4017, simple_loss=0.3853, pruned_loss=0.1483, ctc_loss=0.2844, over 3852270.95 frames. ], batch size: 47, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:54:33,154 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=10933.333333333334, ans=0.125
+2024-08-25 04:54:42,072 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.86 vs. limit=11.6
+2024-08-25 04:54:48,393 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.38 vs. limit=11.620000000000001
+2024-08-25 04:54:49,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10986.666666666666, ans=0.19013333333333332
+2024-08-25 04:55:20,410 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11093.333333333334, ans=0.18906666666666666
+2024-08-25 04:55:38,876 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=11146.666666666666, ans=0.5098666666666667
+2024-08-25 04:55:41,626 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=11200.0, ans=0.125
+2024-08-25 04:55:42,291 INFO [train.py:1114] (3/4) Epoch 1, batch 2100, loss[loss=0.3761, simple_loss=0.3774, pruned_loss=0.1339, ctc_loss=0.2672, over 19783.00 frames. ], tot_loss[loss=0.3949, simple_loss=0.3821, pruned_loss=0.1452, ctc_loss=0.2787, over 3858651.34 frames. ], batch size: 54, lr: 4.42e-02, grad_scale: 32.0
+2024-08-25 04:56:33,673 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11200.0, ans=0.188
+2024-08-25 04:56:36,111 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 1.936e+02 2.214e+02 2.535e+02 3.885e+02, threshold=4.428e+02, percent-clipped=0.0
+2024-08-25 04:56:38,924 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.20 vs. limit=11.7
+2024-08-25 04:56:43,263 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=11253.333333333334, ans=0.125
+2024-08-25 04:57:03,759 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11253.333333333334, ans=0.18746666666666667
+2024-08-25 04:57:16,708 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 04:57:18,830 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=11360.0, ans=0.05
+2024-08-25 04:57:18,877 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=11360.0, ans=0.125
+2024-08-25 04:57:22,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11360.0, ans=0.18639999999999998
+2024-08-25 04:57:28,040 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=11413.333333333334, ans=0.125
+2024-08-25 04:57:35,988 INFO [train.py:1114] (3/4) Epoch 1, batch 2150, loss[loss=0.3606, simple_loss=0.3719, pruned_loss=0.1253, ctc_loss=0.247, over 19848.00 frames. ], tot_loss[loss=0.3893, simple_loss=0.3799, pruned_loss=0.1424, ctc_loss=0.2731, over 3870799.03 frames. ], batch size: 52, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:58:49,692 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=11466.666666666666, ans=0.125
+2024-08-25 04:58:55,508 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.48 vs. limit=4.728
+2024-08-25 04:58:57,174 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=11520.0, ans=0.125
+2024-08-25 04:59:32,841 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=11680.0, ans=0.125
+2024-08-25 04:59:34,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=11680.0, ans=0.125
+2024-08-25 04:59:36,674 INFO [train.py:1114] (3/4) Epoch 1, batch 2200, loss[loss=0.4068, simple_loss=0.4019, pruned_loss=0.15, ctc_loss=0.2796, over 19589.00 frames. ], tot_loss[loss=0.3859, simple_loss=0.3786, pruned_loss=0.1408, ctc_loss=0.2702, over 3868780.41 frames. ], batch size: 57, lr: 4.41e-02, grad_scale: 32.0
+2024-08-25 04:59:40,226 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 1.884e+02 2.153e+02 2.810e+02 4.673e+02, threshold=4.307e+02, percent-clipped=1.0
+2024-08-25 05:00:04,886 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=11893.333333333334, ans=0.48373333333333335
+2024-08-25 05:00:06,530 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:00:32,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=11946.666666666666, ans=16.46
+2024-08-25 05:00:34,228 INFO [train.py:1114] (3/4) Epoch 1, batch 2250, loss[loss=0.407, simple_loss=0.4059, pruned_loss=0.1478, ctc_loss=0.281, over 19623.00 frames. ], tot_loss[loss=0.383, simple_loss=0.378, pruned_loss=0.1393, ctc_loss=0.2669, over 3868154.38 frames. ], batch size: 55, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:00:40,517 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=12000.0, ans=0.0
+2024-08-25 05:01:20,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=12106.666666666666, ans=0.125
+2024-08-25 05:01:36,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=12213.333333333334, ans=0.125
+2024-08-25 05:01:39,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=12213.333333333334, ans=0.17786666666666667
+2024-08-25 05:01:44,088 INFO [train.py:1114] (3/4) Epoch 1, batch 2300, loss[loss=0.3385, simple_loss=0.348, pruned_loss=0.1182, ctc_loss=0.2314, over 19494.00 frames. ], tot_loss[loss=0.3794, simple_loss=0.3756, pruned_loss=0.1378, ctc_loss=0.2636, over 3861800.96 frames. ], batch size: 49, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:01:47,655 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 1.926e+02 2.114e+02 2.507e+02 4.625e+02, threshold=4.228e+02, percent-clipped=3.0
+2024-08-25 05:02:19,030 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=12426.666666666666, ans=0.025
+2024-08-25 05:02:25,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=12480.0, ans=0.1752
+2024-08-25 05:02:31,685 INFO [train.py:1114] (3/4) Epoch 1, batch 2350, loss[loss=0.4219, simple_loss=0.4085, pruned_loss=0.1582, ctc_loss=0.2968, over 19678.00 frames. ], tot_loss[loss=0.3772, simple_loss=0.3748, pruned_loss=0.1367, ctc_loss=0.2613, over 3863961.48 frames. ], batch size: 63, lr: 4.40e-02, grad_scale: 32.0
+2024-08-25 05:04:25,712 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=12586.666666666666, ans=0.125
+2024-08-25 05:04:25,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=12586.666666666666, ans=0.125
+2024-08-25 05:04:34,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=12640.0, ans=0.3896
+2024-08-25 05:04:37,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=12693.333333333334, ans=0.125
+2024-08-25 05:04:42,309 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=12693.333333333334, ans=0.025
+2024-08-25 05:04:43,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12693.333333333334, ans=0.17306666666666667
+2024-08-25 05:04:54,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=12693.333333333334, ans=0.125
+2024-08-25 05:04:59,750 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=12746.666666666666, ans=0.125
+2024-08-25 05:05:00,664 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=12746.666666666666, ans=0.17253333333333334
+2024-08-25 05:05:05,283 INFO [train.py:1114] (3/4) Epoch 1, batch 2400, loss[loss=0.4001, simple_loss=0.3968, pruned_loss=0.1457, ctc_loss=0.2803, over 19283.00 frames. ], tot_loss[loss=0.3785, simple_loss=0.377, pruned_loss=0.137, ctc_loss=0.2618, over 3858589.54 frames. ], batch size: 71, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:05:08,758 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.948e+02 2.252e+02 2.666e+02 4.870e+02, threshold=4.504e+02, percent-clipped=4.0
+2024-08-25 05:05:13,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=12800.0, ans=0.013333333333333336
+2024-08-25 05:05:14,096 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.11 vs. limit=8.2
+2024-08-25 05:05:16,571 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=12853.333333333334, ans=0.07
+2024-08-25 05:05:29,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=12906.666666666666, ans=0.125
+2024-08-25 05:05:33,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=12960.0, ans=0.125
+2024-08-25 05:05:41,856 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=20.22 vs. limit=12.36
+2024-08-25 05:05:49,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=13013.333333333334, ans=0.025
+2024-08-25 05:05:51,900 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=13066.666666666666, ans=0.125
+2024-08-25 05:05:52,664 INFO [train.py:1114] (3/4) Epoch 1, batch 2450, loss[loss=0.4692, simple_loss=0.4166, pruned_loss=0.1899, ctc_loss=0.355, over 13283.00 frames. ], tot_loss[loss=0.3874, simple_loss=0.382, pruned_loss=0.1418, ctc_loss=0.2701, over 3730669.53 frames. ], batch size: 141, lr: 4.39e-02, grad_scale: 32.0
+2024-08-25 05:06:26,238 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13120.0, ans=0.16879999999999998
+2024-08-25 05:06:29,094 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=13120.0, ans=0.125
+2024-08-25 05:06:31,302 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.20 vs. limit=12.440000000000001
+2024-08-25 05:07:49,749 INFO [train.py:1114] (3/4) Epoch 2, batch 0, loss[loss=0.3595, simple_loss=0.3603, pruned_loss=0.1297, ctc_loss=0.2484, over 19819.00 frames. ], tot_loss[loss=0.3595, simple_loss=0.3603, pruned_loss=0.1297, ctc_loss=0.2484, over 19819.00 frames. ], batch size: 49, lr: 4.30e-02, grad_scale: 32.0
+2024-08-25 05:07:49,750 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 05:09:14,200 INFO [zipformer.py:1858] (3/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.8633, 5.1646, 5.4777, 5.3215], device='cuda:3')
+2024-08-25 05:09:16,707 INFO [train.py:1146] (3/4) Epoch 2, validation: loss=0.2886, simple_loss=0.3508, pruned_loss=0.0823, ctc_loss=0.1542, over 944034.00 frames.
+2024-08-25 05:09:16,708 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13361MB
+2024-08-25 05:09:35,637 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 1.938e+02 2.191e+02 2.677e+02 6.592e+02, threshold=4.382e+02, percent-clipped=7.0
+2024-08-25 05:09:37,074 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=13333.333333333334, ans=0.43333333333333335
+2024-08-25 05:09:44,533 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13386.666666666666, ans=0.16613333333333333
+2024-08-25 05:09:44,663 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 05:09:48,575 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=13386.666666666666, ans=0.125
+2024-08-25 05:09:53,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=13440.0, ans=0.010666666666666672
+2024-08-25 05:10:10,726 INFO [train.py:1114] (3/4) Epoch 2, batch 50, loss[loss=0.3325, simple_loss=0.3427, pruned_loss=0.118, ctc_loss=0.2158, over 19694.00 frames. ], tot_loss[loss=0.3724, simple_loss=0.3755, pruned_loss=0.1336, ctc_loss=0.2548, over 844976.60 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:10:34,494 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=13546.666666666666, ans=0.125
+2024-08-25 05:10:36,822 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.54 vs. limit=12.58
+2024-08-25 05:11:14,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=13653.333333333334, ans=0.00790144927536232
+2024-08-25 05:11:16,897 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=13706.666666666666, ans=0.125
+2024-08-25 05:11:42,625 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.22 vs. limit=9.504
+2024-08-25 05:11:47,147 INFO [train.py:1114] (3/4) Epoch 2, batch 100, loss[loss=0.3359, simple_loss=0.3499, pruned_loss=0.1183, ctc_loss=0.2136, over 19724.00 frames. ], tot_loss[loss=0.37, simple_loss=0.3751, pruned_loss=0.1324, ctc_loss=0.2505, over 1499358.22 frames. ], batch size: 51, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:11:50,414 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=13813.333333333334, ans=0.125
+2024-08-25 05:11:53,461 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.35 vs. limit=12.68
+2024-08-25 05:11:53,535 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.07 vs. limit=12.68
+2024-08-25 05:11:56,255 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=13866.666666666666, ans=0.125
+2024-08-25 05:12:00,812 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.439e+02 1.907e+02 2.167e+02 2.481e+02 4.957e+02, threshold=4.333e+02, percent-clipped=1.0
+2024-08-25 05:12:15,340 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=13973.333333333334, ans=0.125
+2024-08-25 05:12:17,307 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=13973.333333333334, ans=0.007831884057971014
+2024-08-25 05:12:28,852 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:12:30,895 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:12:50,512 INFO [train.py:1114] (3/4) Epoch 2, batch 150, loss[loss=0.3317, simple_loss=0.338, pruned_loss=0.1182, ctc_loss=0.2226, over 19711.00 frames. ], tot_loss[loss=0.3632, simple_loss=0.3706, pruned_loss=0.129, ctc_loss=0.2446, over 2027441.50 frames. ], batch size: 47, lr: 4.29e-02, grad_scale: 32.0
+2024-08-25 05:12:54,012 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.93 vs. limit=12.780000000000001
+2024-08-25 05:12:57,592 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=14080.0, ans=0.008
+2024-08-25 05:14:13,123 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=14240.0, ans=0.125
+2024-08-25 05:14:37,622 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=14240.0, ans=0.4016
+2024-08-25 05:14:50,524 INFO [train.py:1114] (3/4) Epoch 2, batch 200, loss[loss=0.3849, simple_loss=0.3851, pruned_loss=0.1399, ctc_loss=0.2625, over 18255.00 frames. ], tot_loss[loss=0.3579, simple_loss=0.3668, pruned_loss=0.1264, ctc_loss=0.2402, over 2435304.42 frames. ], batch size: 85, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:14:52,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=14346.666666666666, ans=0.125
+2024-08-25 05:15:14,932 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.847e+02 2.110e+02 2.499e+02 4.235e+02, threshold=4.220e+02, percent-clipped=0.0
+2024-08-25 05:15:27,551 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=14453.333333333334, ans=0.007727536231884058
+2024-08-25 05:15:44,892 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=14560.0, ans=0.125
+2024-08-25 05:15:49,795 INFO [train.py:1114] (3/4) Epoch 2, batch 250, loss[loss=0.3681, simple_loss=0.3802, pruned_loss=0.1289, ctc_loss=0.2453, over 19406.00 frames. ], tot_loss[loss=0.3554, simple_loss=0.3656, pruned_loss=0.1251, ctc_loss=0.2376, over 2754696.94 frames. ], batch size: 67, lr: 4.28e-02, grad_scale: 32.0
+2024-08-25 05:16:15,243 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=14613.333333333334, ans=0.125
+2024-08-25 05:16:36,759 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=14773.333333333334, ans=0.005111111111111108
+2024-08-25 05:16:38,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=14773.333333333334, ans=0.125
+2024-08-25 05:16:41,706 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=14773.333333333334, ans=0.125
+2024-08-25 05:19:35,295 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14826.666666666666, ans=0.15173333333333333
+2024-08-25 05:19:37,926 INFO [train.py:1114] (3/4) Epoch 2, batch 300, loss[loss=0.3512, simple_loss=0.3683, pruned_loss=0.1201, ctc_loss=0.2345, over 19500.00 frames. ], tot_loss[loss=0.3525, simple_loss=0.3639, pruned_loss=0.1235, ctc_loss=0.2349, over 2999500.08 frames. ], batch size: 61, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:19:51,461 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=14933.333333333334, ans=0.125
+2024-08-25 05:19:51,703 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.96 vs. limit=13.1
+2024-08-25 05:19:56,630 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 1.858e+02 2.099e+02 2.398e+02 3.801e+02, threshold=4.198e+02, percent-clipped=0.0
+2024-08-25 05:20:09,137 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=14986.666666666666, ans=0.004222222222222224
+2024-08-25 05:20:37,805 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=15093.333333333334, ans=0.37173333333333336
+2024-08-25 05:20:54,632 INFO [train.py:1114] (3/4) Epoch 2, batch 350, loss[loss=0.2961, simple_loss=0.3254, pruned_loss=0.09564, ctc_loss=0.189, over 19780.00 frames. ], tot_loss[loss=0.3515, simple_loss=0.3637, pruned_loss=0.123, ctc_loss=0.2334, over 3190079.29 frames. ], batch size: 48, lr: 4.27e-02, grad_scale: 32.0
+2024-08-25 05:21:01,068 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=15146.666666666666, ans=0.09853333333333333
+2024-08-25 05:21:05,368 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.58 vs. limit=10.058666666666667
+2024-08-25 05:21:05,968 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=15200.0, ans=0.003333333333333334
+2024-08-25 05:21:08,273 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.33 vs. limit=13.2
+2024-08-25 05:21:37,257 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=15306.666666666666, ans=0.125
+2024-08-25 05:21:40,461 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.66 vs. limit=13.24
+2024-08-25 05:21:42,296 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.66 vs. limit=5.295999999999999
+2024-08-25 05:21:50,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=15360.0, ans=0.125
+2024-08-25 05:22:06,934 INFO [train.py:1114] (3/4) Epoch 2, batch 400, loss[loss=0.3416, simple_loss=0.3729, pruned_loss=0.1126, ctc_loss=0.2124, over 19496.00 frames. ], tot_loss[loss=0.3498, simple_loss=0.3626, pruned_loss=0.1221, ctc_loss=0.2319, over 3342521.02 frames. ], batch size: 54, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:22:16,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=15466.666666666666, ans=0.14533333333333334
+2024-08-25 05:22:18,865 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=15466.666666666666, ans=0.125
+2024-08-25 05:22:20,585 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.895e+02 2.189e+02 2.528e+02 4.758e+02, threshold=4.379e+02, percent-clipped=2.0
+2024-08-25 05:22:23,720 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15466.666666666666, ans=0.14533333333333334
+2024-08-25 05:22:36,959 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=15520.0, ans=0.05
+2024-08-25 05:22:39,352 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=15573.333333333334, ans=0.125
+2024-08-25 05:23:46,271 INFO [train.py:1114] (3/4) Epoch 2, batch 450, loss[loss=0.3208, simple_loss=0.3474, pruned_loss=0.106, ctc_loss=0.2051, over 19623.00 frames. ], tot_loss[loss=0.3492, simple_loss=0.3624, pruned_loss=0.1217, ctc_loss=0.2311, over 3449587.75 frames. ], batch size: 55, lr: 4.26e-02, grad_scale: 32.0
+2024-08-25 05:23:49,306 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=15680.0, ans=0.09899494936611666
+2024-08-25 05:23:57,393 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.89 vs. limit=13.4
+2024-08-25 05:23:59,000 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=15733.333333333334, ans=0.007449275362318841
+2024-08-25 05:24:00,041 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=15733.333333333334, ans=0.025
+2024-08-25 05:24:02,940 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.69 vs. limit=13.4
+2024-08-25 05:24:07,667 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=15786.666666666666, ans=0.00743768115942029
+2024-08-25 05:24:14,507 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.19 vs. limit=8.96
+2024-08-25 05:24:23,130 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=15840.0, ans=0.025
+2024-08-25 05:24:27,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=15893.333333333334, ans=0.14106666666666667
+2024-08-25 05:24:32,878 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=15893.333333333334, ans=0.035
+2024-08-25 05:24:37,906 INFO [train.py:1114] (3/4) Epoch 2, batch 500, loss[loss=0.3454, simple_loss=0.3657, pruned_loss=0.1177, ctc_loss=0.2243, over 19682.00 frames. ], tot_loss[loss=0.346, simple_loss=0.3602, pruned_loss=0.1202, ctc_loss=0.2285, over 3545523.35 frames. ], batch size: 63, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:25:49,880 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.19 vs. limit=12.973333333333333
+2024-08-25 05:25:56,669 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=15946.666666666666, ans=0.125
+2024-08-25 05:26:05,504 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.383e+02 1.778e+02 2.035e+02 2.349e+02 4.286e+02, threshold=4.071e+02, percent-clipped=0.0
+2024-08-25 05:26:07,737 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=16000.0, ans=0.44
+2024-08-25 05:26:26,454 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten.whitening_limit, batch_count=16106.666666666666, ans=19.58
+2024-08-25 05:26:30,096 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=16160.0, ans=0.025
+2024-08-25 05:26:33,107 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=16160.0, ans=0.125
+2024-08-25 05:26:53,245 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.81 vs. limit=19.66
+2024-08-25 05:26:53,741 INFO [train.py:1114] (3/4) Epoch 2, batch 550, loss[loss=0.3834, simple_loss=0.3954, pruned_loss=0.1349, ctc_loss=0.2542, over 19287.00 frames. ], tot_loss[loss=0.346, simple_loss=0.3602, pruned_loss=0.1202, ctc_loss=0.2284, over 3607067.87 frames. ], batch size: 71, lr: 4.25e-02, grad_scale: 32.0
+2024-08-25 05:26:53,911 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=16213.333333333334, ans=0.125
+2024-08-25 05:27:29,674 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=16320.0, ans=0.4448
+2024-08-25 05:28:12,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 05:28:12,868 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=16426.666666666668, ans=0.125
+2024-08-25 05:28:19,023 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.23 vs. limit=10.591999999999999
+2024-08-25 05:28:19,453 INFO [train.py:1114] (3/4) Epoch 2, batch 600, loss[loss=0.3863, simple_loss=0.3903, pruned_loss=0.1387, ctc_loss=0.262, over 19377.00 frames. ], tot_loss[loss=0.3456, simple_loss=0.36, pruned_loss=0.1201, ctc_loss=0.2275, over 3665159.69 frames. ], batch size: 67, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:28:29,262 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.99 vs. limit=13.68
+2024-08-25 05:28:34,468 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.917e+02 2.183e+02 2.770e+02 8.189e+02, threshold=4.366e+02, percent-clipped=5.0
+2024-08-25 05:28:36,557 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16533.333333333332, ans=0.13466666666666668
+2024-08-25 05:28:36,939 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.08 vs. limit=19.9
+2024-08-25 05:28:55,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=16640.0, ans=0.0
+2024-08-25 05:29:00,371 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.42 vs. limit=13.74
+2024-08-25 05:29:02,132 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=19.35 vs. limit=19.98
+2024-08-25 05:29:13,564 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=16746.666666666668, ans=0.007228985507246377
+2024-08-25 05:29:14,161 INFO [train.py:1114] (3/4) Epoch 2, batch 650, loss[loss=0.3065, simple_loss=0.3374, pruned_loss=0.1004, ctc_loss=0.187, over 19757.00 frames. ], tot_loss[loss=0.3425, simple_loss=0.3581, pruned_loss=0.1186, ctc_loss=0.2246, over 3715297.44 frames. ], batch size: 54, lr: 4.24e-02, grad_scale: 32.0
+2024-08-25 05:31:20,352 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16800.0, ans=0.132
+2024-08-25 05:31:25,143 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=16800.0, ans=0.0
+2024-08-25 05:31:25,153 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=16800.0, ans=0.125
+2024-08-25 05:32:01,713 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=16960.0, ans=0.3064
+2024-08-25 05:32:06,461 INFO [train.py:1114] (3/4) Epoch 2, batch 700, loss[loss=0.3402, simple_loss=0.3554, pruned_loss=0.1182, ctc_loss=0.2214, over 19719.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.3582, pruned_loss=0.1183, ctc_loss=0.2237, over 3747694.77 frames. ], batch size: 51, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:32:34,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=17013.333333333332, ans=0.125
+2024-08-25 05:32:39,195 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=17013.333333333332, ans=0.125
+2024-08-25 05:32:39,756 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=14.51 vs. limit=13.879999999999999
+2024-08-25 05:32:43,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=17066.666666666668, ans=0.125
+2024-08-25 05:32:45,339 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=17066.666666666668, ans=0.0
+2024-08-25 05:32:47,852 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.759e+02 2.005e+02 2.359e+02 5.033e+02, threshold=4.011e+02, percent-clipped=2.0
+2024-08-25 05:32:59,493 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.41 vs. limit=20.34
+2024-08-25 05:33:00,088 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=17120.0, ans=0.09899494936611666
+2024-08-25 05:33:03,987 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=17120.0, ans=0.007147826086956522
+2024-08-25 05:33:18,484 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=17226.666666666668, ans=0.1277333333333333
+2024-08-25 05:33:25,179 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=17226.666666666668, ans=0.1277333333333333
+2024-08-25 05:33:28,048 INFO [train.py:1114] (3/4) Epoch 2, batch 750, loss[loss=0.3149, simple_loss=0.3499, pruned_loss=0.1019, ctc_loss=0.1904, over 19518.00 frames. ], tot_loss[loss=0.3402, simple_loss=0.357, pruned_loss=0.1173, ctc_loss=0.222, over 3772835.56 frames. ], batch size: 54, lr: 4.23e-02, grad_scale: 32.0
+2024-08-25 05:33:28,344 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=17280.0, ans=0.125
+2024-08-25 05:36:53,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=17440.0, ans=0.125
+2024-08-25 05:37:24,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=17440.0, ans=0.28959999999999997
+2024-08-25 05:37:34,240 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=17493.333333333332, ans=0.0381866666666667
+2024-08-25 05:37:40,887 INFO [train.py:1114] (3/4) Epoch 2, batch 800, loss[loss=0.3255, simple_loss=0.3392, pruned_loss=0.1155, ctc_loss=0.2024, over 19833.00 frames. ], tot_loss[loss=0.3394, simple_loss=0.3567, pruned_loss=0.1168, ctc_loss=0.2209, over 3794352.44 frames. ], batch size: 49, lr: 4.22e-02, grad_scale: 32.0
+2024-08-25 05:37:44,060 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=17546.666666666668, ans=0.007055072463768117
+2024-08-25 05:38:06,531 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.845e+02 2.130e+02 2.517e+02 4.310e+02, threshold=4.259e+02, percent-clipped=1.0
+2024-08-25 05:38:15,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.01 vs. limit=20.740000000000002
+2024-08-25 05:38:35,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=17706.666666666668, ans=0.125
+2024-08-25 05:38:48,679 INFO [train.py:1114] (3/4) Epoch 2, batch 850, loss[loss=0.3642, simple_loss=0.3807, pruned_loss=0.1255, ctc_loss=0.2415, over 19660.00 frames. ], tot_loss[loss=0.3375, simple_loss=0.3553, pruned_loss=0.116, ctc_loss=0.2188, over 3814929.35 frames. ], batch size: 59, lr: 4.22e-02, grad_scale: 16.0
+2024-08-25 05:38:58,969 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=17813.333333333332, ans=0.2765333333333334
+2024-08-25 05:39:12,778 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.99 vs. limit=11.146666666666668
+2024-08-25 05:39:15,558 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=17920.0, ans=0.006973913043478261
+2024-08-25 05:39:34,683 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.75 vs. limit=9.506666666666668
+2024-08-25 05:39:58,492 INFO [train.py:1114] (3/4) Epoch 2, batch 900, loss[loss=0.2933, simple_loss=0.3286, pruned_loss=0.09385, ctc_loss=0.1759, over 19419.00 frames. ], tot_loss[loss=0.338, simple_loss=0.3557, pruned_loss=0.1163, ctc_loss=0.2191, over 3818536.94 frames. ], batch size: 48, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:40:14,984 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=18133.333333333332, ans=0.006927536231884059
+2024-08-25 05:40:19,550 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.852e+02 2.189e+02 2.703e+02 9.878e+02, threshold=4.378e+02, percent-clipped=3.0
+2024-08-25 05:40:48,507 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.88 vs. limit=11.296
+2024-08-25 05:41:01,563 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.68 vs. limit=14.36
+2024-08-25 05:41:08,561 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18293.333333333332, ans=0.11706666666666668
+2024-08-25 05:41:14,142 INFO [train.py:1114] (3/4) Epoch 2, batch 950, loss[loss=0.3032, simple_loss=0.3333, pruned_loss=0.09873, ctc_loss=0.1894, over 19495.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3553, pruned_loss=0.116, ctc_loss=0.2183, over 3819408.56 frames. ], batch size: 49, lr: 4.21e-02, grad_scale: 8.0
+2024-08-25 05:41:18,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18346.666666666668, ans=0.11653333333333332
+2024-08-25 05:41:53,617 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=18506.666666666668, ans=0.07
+2024-08-25 05:42:01,950 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=18560.0, ans=0.1144
+2024-08-25 05:42:06,454 INFO [train.py:1114] (3/4) Epoch 2, batch 1000, loss[loss=0.3371, simple_loss=0.3537, pruned_loss=0.1149, ctc_loss=0.2269, over 19839.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3556, pruned_loss=0.116, ctc_loss=0.218, over 3815288.34 frames. ], batch size: 52, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:42:21,456 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=18613.333333333332, ans=0.025
+2024-08-25 05:42:37,205 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.48 vs. limit=11.466666666666667
+2024-08-25 05:42:41,285 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 1.839e+02 2.030e+02 2.416e+02 3.488e+02, threshold=4.061e+02, percent-clipped=0.0
+2024-08-25 05:43:00,759 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.65 vs. limit=21.58
+2024-08-25 05:43:02,222 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=18773.333333333332, ans=0.0
+2024-08-25 05:43:04,717 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.73 vs. limit=21.58
+2024-08-25 05:43:07,135 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=18826.666666666668, ans=0.125
+2024-08-25 05:43:09,105 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=18826.666666666668, ans=0.07
+2024-08-25 05:43:11,297 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.23 vs. limit=14.559999999999999
+2024-08-25 05:43:16,624 INFO [train.py:1114] (3/4) Epoch 2, batch 1050, loss[loss=0.3712, simple_loss=0.3911, pruned_loss=0.1277, ctc_loss=0.2399, over 19854.00 frames. ], tot_loss[loss=0.3361, simple_loss=0.3547, pruned_loss=0.1154, ctc_loss=0.2169, over 3821712.04 frames. ], batch size: 57, lr: 4.20e-02, grad_scale: 8.0
+2024-08-25 05:43:18,992 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=18880.0, ans=0.23919999999999997
+2024-08-25 05:43:30,861 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.12 vs. limit=11.573333333333334
+2024-08-25 05:43:51,811 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=18986.666666666668, ans=0.0
+2024-08-25 05:43:54,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=18986.666666666668, ans=0.025
+2024-08-25 05:44:07,264 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=19040.0, ans=0.23360000000000003
+2024-08-25 05:44:13,780 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=9.24 vs. limit=14.66
+2024-08-25 05:44:23,176 INFO [train.py:1114] (3/4) Epoch 2, batch 1100, loss[loss=0.3169, simple_loss=0.3415, pruned_loss=0.1049, ctc_loss=0.2059, over 19594.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3541, pruned_loss=0.1148, ctc_loss=0.2158, over 3829183.81 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:44:43,837 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=19200.0, ans=0.125
+2024-08-25 05:44:46,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=19200.0, ans=0.05
+2024-08-25 05:44:48,513 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.777e+02 2.009e+02 2.448e+02 3.967e+02, threshold=4.019e+02, percent-clipped=0.0
+2024-08-25 05:44:53,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=19253.333333333332, ans=0.125
+2024-08-25 05:44:54,787 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.65 vs. limit=14.719999999999999
+2024-08-25 05:45:19,028 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=19360.0, ans=0.10640000000000002
+2024-08-25 05:45:31,299 INFO [train.py:1114] (3/4) Epoch 2, batch 1150, loss[loss=0.3074, simple_loss=0.3424, pruned_loss=0.09781, ctc_loss=0.1923, over 19585.00 frames. ], tot_loss[loss=0.3355, simple_loss=0.3543, pruned_loss=0.1152, ctc_loss=0.2163, over 3829453.92 frames. ], batch size: 52, lr: 4.19e-02, grad_scale: 8.0
+2024-08-25 05:45:32,828 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=19413.333333333332, ans=14.780000000000001
+2024-08-25 05:45:42,504 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.63 vs. limit=14.8
+2024-08-25 05:45:46,318 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.64 vs. limit=14.8
+2024-08-25 05:47:20,453 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=19573.333333333332, ans=0.05
+2024-08-25 05:47:34,849 INFO [train.py:1114] (3/4) Epoch 2, batch 1200, loss[loss=0.3411, simple_loss=0.3685, pruned_loss=0.1133, ctc_loss=0.218, over 19835.00 frames. ], tot_loss[loss=0.3359, simple_loss=0.3549, pruned_loss=0.1152, ctc_loss=0.2162, over 3825699.81 frames. ], batch size: 57, lr: 4.18e-02, grad_scale: 16.0
+2024-08-25 05:47:40,963 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:47:50,324 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 1.798e+02 2.208e+02 2.852e+02 1.698e+03, threshold=4.415e+02, percent-clipped=3.0
+2024-08-25 05:48:08,259 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=19786.666666666668, ans=0.125
+2024-08-25 05:48:38,606 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=19893.333333333332, ans=0.125
+2024-08-25 05:48:38,708 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=19893.333333333332, ans=0.20373333333333343
+2024-08-25 05:48:40,348 INFO [train.py:1114] (3/4) Epoch 2, batch 1250, loss[loss=0.3566, simple_loss=0.3779, pruned_loss=0.1217, ctc_loss=0.2298, over 19560.00 frames. ], tot_loss[loss=0.3339, simple_loss=0.3544, pruned_loss=0.114, ctc_loss=0.2139, over 3843546.43 frames. ], batch size: 61, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:48:59,612 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 05:49:03,391 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=20000.0, ans=0.025
+2024-08-25 05:49:05,238 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=20000.0, ans=0.125
+2024-08-25 05:49:08,076 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=20053.333333333332, ans=0.2
+2024-08-25 05:49:10,044 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.whiten.whitening_limit, batch_count=20053.333333333332, ans=12.0
+2024-08-25 05:49:10,731 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=20053.333333333332, ans=0.125
+2024-08-25 05:49:15,566 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=20053.333333333332, ans=0.2
+2024-08-25 05:49:17,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=20106.666666666668, ans=0.1
+2024-08-25 05:49:23,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=20106.666666666668, ans=0.0
+2024-08-25 05:49:37,280 INFO [train.py:1114] (3/4) Epoch 2, batch 1300, loss[loss=0.3729, simple_loss=0.3821, pruned_loss=0.131, ctc_loss=0.2543, over 18948.00 frames. ], tot_loss[loss=0.332, simple_loss=0.353, pruned_loss=0.113, ctc_loss=0.2122, over 3847154.83 frames. ], batch size: 76, lr: 4.17e-02, grad_scale: 16.0
+2024-08-25 05:49:42,238 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=20213.333333333332, ans=0.125
+2024-08-25 05:49:52,771 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.771e+02 1.898e+02 2.175e+02 3.765e+02, threshold=3.796e+02, percent-clipped=0.0
+2024-08-25 05:50:21,891 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=20426.666666666668, ans=0.125
+2024-08-25 05:50:25,291 INFO [train.py:1114] (3/4) Epoch 2, batch 1350, loss[loss=0.3019, simple_loss=0.3358, pruned_loss=0.0973, ctc_loss=0.1838, over 19775.00 frames. ], tot_loss[loss=0.3297, simple_loss=0.3516, pruned_loss=0.1119, ctc_loss=0.2101, over 3858684.74 frames. ], batch size: 54, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:50:47,021 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=20533.333333333332, ans=0.125
+2024-08-25 05:50:54,633 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=20586.666666666668, ans=0.025
+2024-08-25 05:51:00,450 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=20640.0, ans=0.125
+2024-08-25 05:51:01,342 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=20640.0, ans=0.125
+2024-08-25 05:51:04,685 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=9.91 vs. limit=15.0
+2024-08-25 05:51:17,370 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=20693.333333333332, ans=0.125
+2024-08-25 05:51:19,085 INFO [train.py:1114] (3/4) Epoch 2, batch 1400, loss[loss=0.2716, simple_loss=0.3025, pruned_loss=0.08613, ctc_loss=0.171, over 19670.00 frames. ], tot_loss[loss=0.3281, simple_loss=0.3506, pruned_loss=0.111, ctc_loss=0.2085, over 3865506.63 frames. ], batch size: 46, lr: 4.16e-02, grad_scale: 16.0
+2024-08-25 05:51:34,337 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.933e+02 2.205e+02 2.519e+02 3.569e+02, threshold=4.410e+02, percent-clipped=0.0
+2024-08-25 05:51:38,485 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=20853.333333333332, ans=0.0
+2024-08-25 05:51:39,449 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=20853.333333333332, ans=0.0
+2024-08-25 05:51:47,144 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=20906.666666666668, ans=0.0
+2024-08-25 05:51:47,466 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.60 vs. limit=22.5
+2024-08-25 05:51:54,321 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=20906.666666666668, ans=0.125
+2024-08-25 05:52:07,776 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=20960.0, ans=0.1
+2024-08-25 05:52:09,369 INFO [train.py:1114] (3/4) Epoch 2, batch 1450, loss[loss=0.3598, simple_loss=0.3762, pruned_loss=0.1239, ctc_loss=0.2388, over 19681.00 frames. ], tot_loss[loss=0.3287, simple_loss=0.3513, pruned_loss=0.1113, ctc_loss=0.2089, over 3862745.91 frames. ], batch size: 63, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:52:14,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=21013.333333333332, ans=0.025
+2024-08-25 05:52:15,359 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=21013.333333333332, ans=0.0
+2024-08-25 05:52:16,269 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=21013.333333333332, ans=0.125
+2024-08-25 05:52:16,456 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.33 vs. limit=15.0
+2024-08-25 05:52:18,026 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=21066.666666666668, ans=0.0
+2024-08-25 05:52:20,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=21066.666666666668, ans=0.2
+2024-08-25 05:52:21,956 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=21066.666666666668, ans=0.0
+2024-08-25 05:52:27,726 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=21120.0, ans=0.0
+2024-08-25 05:52:40,980 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=21173.333333333332, ans=0.2
+2024-08-25 05:52:41,024 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=21173.333333333332, ans=0.09899494936611666
+2024-08-25 05:52:45,438 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=21173.333333333332, ans=0.0
+2024-08-25 05:52:47,473 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=21226.666666666668, ans=0.95
+2024-08-25 05:52:50,630 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.02 vs. limit=6.0
+2024-08-25 05:52:56,993 INFO [train.py:1114] (3/4) Epoch 2, batch 1500, loss[loss=0.3341, simple_loss=0.3664, pruned_loss=0.1102, ctc_loss=0.2037, over 19574.00 frames. ], tot_loss[loss=0.3283, simple_loss=0.3512, pruned_loss=0.111, ctc_loss=0.2082, over 3862326.39 frames. ], batch size: 57, lr: 4.15e-02, grad_scale: 16.0
+2024-08-25 05:53:06,928 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=21333.333333333332, ans=0.0
+2024-08-25 05:53:06,966 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=21333.333333333332, ans=0.125
+2024-08-25 05:53:12,760 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=21333.333333333332, ans=0.1
+2024-08-25 05:53:14,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=21333.333333333332, ans=0.125
+2024-08-25 05:53:17,237 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.832e+02 2.087e+02 2.558e+02 5.212e+02, threshold=4.175e+02, percent-clipped=3.0
+2024-08-25 05:53:25,928 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21386.666666666668, ans=0.1
+2024-08-25 05:53:34,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=21386.666666666668, ans=0.1
+2024-08-25 05:53:40,085 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21440.0, ans=0.1
+2024-08-25 05:53:45,767 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=21493.333333333332, ans=0.2
+2024-08-25 05:54:05,974 INFO [train.py:1114] (3/4) Epoch 2, batch 1550, loss[loss=0.3586, simple_loss=0.3755, pruned_loss=0.1247, ctc_loss=0.2306, over 19604.00 frames. ], tot_loss[loss=0.3288, simple_loss=0.3515, pruned_loss=0.1113, ctc_loss=0.2088, over 3847724.41 frames. ], batch size: 60, lr: 4.14e-02, grad_scale: 16.0
+2024-08-25 05:54:11,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=21546.666666666668, ans=0.025
+2024-08-25 05:54:18,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=21546.666666666668, ans=0.2
+2024-08-25 05:54:22,272 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=21600.0, ans=0.1
+2024-08-25 05:54:23,244 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=21600.0, ans=0.025
+2024-08-25 05:54:44,336 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=21653.333333333332, ans=0.125
+2024-08-25 05:54:52,423 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=21706.666666666668, ans=0.2
+2024-08-25 05:55:05,404 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=21760.0, ans=0.125
+2024-08-25 05:55:07,245 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=21760.0, ans=0.2
+2024-08-25 05:55:11,749 INFO [train.py:1114] (3/4) Epoch 2, batch 1600, loss[loss=0.3511, simple_loss=0.3693, pruned_loss=0.1222, ctc_loss=0.2211, over 19836.00 frames. ], tot_loss[loss=0.3284, simple_loss=0.3511, pruned_loss=0.1111, ctc_loss=0.2085, over 3835939.44 frames. ], batch size: 57, lr: 4.13e-02, grad_scale: 32.0
+2024-08-25 05:55:17,057 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=21813.333333333332, ans=0.0
+2024-08-25 05:55:32,432 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.812e+02 2.122e+02 2.604e+02 4.336e+02, threshold=4.244e+02, percent-clipped=2.0
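+
+(Editor's note: in these optimizer warnings the five "grad-norm quartiles" numbers read
+as the min, 25th, 50th, 75th percentiles and max of recent gradient norms, and in every
+warning in this log the threshold equals Clipping_scale=2.0 times the logged median.
+A hedged sketch of that diagnostic-plus-clipping step follows; it is a reconstruction
+inferred from the log format, not optim.py's actual code.)
+
+    from collections import deque
+    import torch
+
+    class MedianGradClipper:
+        """Tracks recent grad norms, reports quartiles, clips at scale * median."""
+
+        def __init__(self, clipping_scale: float = 2.0, window: int = 128):
+            self.clipping_scale = clipping_scale
+            self.norms = deque(maxlen=window)
+
+        def step(self, parameters) -> None:
+            params = [p for p in parameters if p.grad is not None]
+            # global norm over all parameter gradients for this step
+            norm = torch.norm(torch.stack([p.grad.detach().norm() for p in params])).item()
+            self.norms.append(norm)
+            q = torch.quantile(torch.tensor(list(self.norms)),
+                               torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
+            threshold = self.clipping_scale * q[2].item()  # 2x the running median
+            if norm > threshold:  # scale all grads down to the threshold
+                for p in params:
+                    p.grad.mul_(threshold / norm)
+
+(Under this reading, percent-clipped would be the fraction of recent steps on which the
+norm exceeded the threshold.)
+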
+2024-08-25 05:55:42,773 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=21920.0, ans=0.0
+2024-08-25 05:55:54,746 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=21973.333333333332, ans=0.125
+2024-08-25 05:56:06,721 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22026.666666666668, ans=0.1
+2024-08-25 05:56:12,978 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.18 vs. limit=22.5
+2024-08-25 05:56:13,292 INFO [train.py:1114] (3/4) Epoch 2, batch 1650, loss[loss=0.3566, simple_loss=0.3742, pruned_loss=0.1236, ctc_loss=0.2295, over 19647.00 frames. ], tot_loss[loss=0.3279, simple_loss=0.3505, pruned_loss=0.1111, ctc_loss=0.208, over 3832389.91 frames. ], batch size: 59, lr: 4.13e-02, grad_scale: 16.0
+2024-08-25 05:56:18,940 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.65 vs. limit=22.5
+2024-08-25 05:57:07,473 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=22293.333333333332, ans=0.006023188405797101
+2024-08-25 05:57:12,877 INFO [train.py:1114] (3/4) Epoch 2, batch 1700, loss[loss=0.2588, simple_loss=0.2994, pruned_loss=0.07805, ctc_loss=0.1555, over 19685.00 frames. ], tot_loss[loss=0.3251, simple_loss=0.3491, pruned_loss=0.1095, ctc_loss=0.2053, over 3846786.50 frames. ], batch size: 46, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:57:23,111 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=22400.0, ans=0.0
+2024-08-25 05:57:29,329 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 1.791e+02 2.005e+02 2.338e+02 3.555e+02, threshold=4.010e+02, percent-clipped=0.0
+2024-08-25 05:57:47,785 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.21 vs. limit=22.5
+2024-08-25 05:58:03,062 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.59 vs. limit=12.0
+2024-08-25 05:58:34,078 INFO [train.py:1114] (3/4) Epoch 2, batch 1750, loss[loss=0.2856, simple_loss=0.3168, pruned_loss=0.09171, ctc_loss=0.1773, over 19653.00 frames. ], tot_loss[loss=0.324, simple_loss=0.3483, pruned_loss=0.109, ctc_loss=0.2042, over 3852038.36 frames. ], batch size: 45, lr: 4.12e-02, grad_scale: 16.0
+2024-08-25 05:58:37,481 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.70 vs. limit=10.0
+2024-08-25 05:58:41,401 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=22613.333333333332, ans=0.005953623188405798
+2024-08-25 05:58:49,730 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=22666.666666666668, ans=0.125
+2024-08-25 05:58:55,253 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=22720.0, ans=0.0
+2024-08-25 05:59:03,584 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.17 vs. limit=15.0
+2024-08-25 05:59:07,792 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=22773.333333333332, ans=0.025
+2024-08-25 05:59:17,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=22826.666666666668, ans=0.05
+2024-08-25 05:59:24,716 INFO [train.py:1114] (3/4) Epoch 2, batch 1800, loss[loss=0.3253, simple_loss=0.3546, pruned_loss=0.1062, ctc_loss=0.2089, over 19625.00 frames. ], tot_loss[loss=0.3238, simple_loss=0.3482, pruned_loss=0.1089, ctc_loss=0.2045, over 3853550.70 frames. ], batch size: 55, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 05:59:28,584 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=22880.0, ans=0.0
+2024-08-25 05:59:29,574 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=22880.0, ans=0.025
+2024-08-25 05:59:39,816 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 1.812e+02 2.002e+02 2.312e+02 3.839e+02, threshold=4.004e+02, percent-clipped=0.0
+2024-08-25 05:59:47,126 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=22986.666666666668, ans=0.025
+2024-08-25 05:59:58,661 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=23040.0, ans=0.125
+2024-08-25 06:00:12,420 INFO [train.py:1114] (3/4) Epoch 2, batch 1850, loss[loss=0.3372, simple_loss=0.3619, pruned_loss=0.1138, ctc_loss=0.2125, over 19602.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.3477, pruned_loss=0.1082, ctc_loss=0.2029, over 3856750.56 frames. ], batch size: 57, lr: 4.11e-02, grad_scale: 16.0
+2024-08-25 06:00:20,861 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=23200.0, ans=0.125
+2024-08-25 06:00:41,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=23306.666666666668, ans=0.125
+2024-08-25 06:00:51,907 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 06:00:53,491 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=23360.0, ans=0.125
+2024-08-25 06:00:59,794 INFO [train.py:1114] (3/4) Epoch 2, batch 1900, loss[loss=0.3312, simple_loss=0.3629, pruned_loss=0.1093, ctc_loss=0.2023, over 19659.00 frames. ], tot_loss[loss=0.3219, simple_loss=0.3477, pruned_loss=0.1077, ctc_loss=0.2019, over 3861920.01 frames. ], batch size: 59, lr: 4.10e-02, grad_scale: 16.0
+2024-08-25 06:01:18,891 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.426e+02 1.814e+02 2.067e+02 2.451e+02 4.716e+02, threshold=4.135e+02, percent-clipped=1.0
+2024-08-25 06:01:23,522 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=23466.666666666668, ans=0.025
+2024-08-25 06:01:52,060 INFO [train.py:1114] (3/4) Epoch 2, batch 1950, loss[loss=0.2986, simple_loss=0.3346, pruned_loss=0.09613, ctc_loss=0.1759, over 19583.00 frames. ], tot_loss[loss=0.3223, simple_loss=0.3486, pruned_loss=0.1077, ctc_loss=0.2017, over 3870854.51 frames. ], batch size: 52, lr: 4.09e-02, grad_scale: 16.0
+2024-08-25 06:02:10,865 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.27 vs. limit=15.0
+2024-08-25 06:02:13,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=23786.666666666668, ans=0.125
+2024-08-25 06:02:27,536 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.24 vs. limit=15.0
+2024-08-25 06:02:37,279 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=23893.333333333332, ans=0.125
+2024-08-25 06:02:40,712 INFO [train.py:1114] (3/4) Epoch 2, batch 2000, loss[loss=0.2849, simple_loss=0.3087, pruned_loss=0.09574, ctc_loss=0.1743, over 19653.00 frames. ], tot_loss[loss=0.3235, simple_loss=0.3491, pruned_loss=0.1083, ctc_loss=0.2029, over 3855664.61 frames. ], batch size: 45, lr: 4.09e-02, grad_scale: 32.0
+2024-08-25 06:02:47,061 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=23946.666666666668, ans=0.125
+2024-08-25 06:02:53,519 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=24000.0, ans=10.0
+2024-08-25 06:02:57,864 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 1.781e+02 1.996e+02 2.377e+02 5.355e+02, threshold=3.992e+02, percent-clipped=1.0
+2024-08-25 06:02:58,148 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=24000.0, ans=0.125
+2024-08-25 06:03:01,872 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=24053.333333333332, ans=0.0
+2024-08-25 06:03:06,277 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=24053.333333333332, ans=0.125
+2024-08-25 06:03:15,328 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.90 vs. limit=15.0
+2024-08-25 06:03:21,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=24160.0, ans=10.0
+2024-08-25 06:03:29,345 INFO [train.py:1114] (3/4) Epoch 2, batch 2050, loss[loss=0.2822, simple_loss=0.3108, pruned_loss=0.09158, ctc_loss=0.176, over 19710.00 frames. ], tot_loss[loss=0.3218, simple_loss=0.3476, pruned_loss=0.1077, ctc_loss=0.2016, over 3851469.00 frames. ], batch size: 47, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:03:34,243 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=20.10 vs. limit=22.5
+2024-08-25 06:04:17,639 INFO [train.py:1114] (3/4) Epoch 2, batch 2100, loss[loss=0.3173, simple_loss=0.3505, pruned_loss=0.1036, ctc_loss=0.1923, over 19771.00 frames. ], tot_loss[loss=0.3197, simple_loss=0.3461, pruned_loss=0.1067, ctc_loss=0.1997, over 3858121.59 frames. ], batch size: 54, lr: 4.08e-02, grad_scale: 32.0
+2024-08-25 06:04:20,416 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=24480.0, ans=0.125
+2024-08-25 06:04:26,054 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=24533.333333333332, ans=0.005536231884057972
+2024-08-25 06:04:30,516 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=24533.333333333332, ans=0.125
+2024-08-25 06:04:33,042 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.823e+02 2.012e+02 2.259e+02 3.531e+02, threshold=4.024e+02, percent-clipped=0.0
+2024-08-25 06:04:36,814 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=24586.666666666668, ans=0.125
+2024-08-25 06:05:01,520 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=24746.666666666668, ans=0.2
+2024-08-25 06:05:02,139 INFO [train.py:1114] (3/4) Epoch 2, batch 2150, loss[loss=0.2963, simple_loss=0.3297, pruned_loss=0.09566, ctc_loss=0.1791, over 19847.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3449, pruned_loss=0.106, ctc_loss=0.1984, over 3870257.90 frames. ], batch size: 52, lr: 4.07e-02, grad_scale: 32.0
+2024-08-25 06:05:02,252 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=24746.666666666668, ans=0.2
+2024-08-25 06:05:12,250 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=24746.666666666668, ans=0.025
+2024-08-25 06:05:12,443 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.16 vs. limit=15.0
+2024-08-25 06:05:24,887 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=24853.333333333332, ans=0.0
+2024-08-25 06:05:33,833 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=24906.666666666668, ans=0.0
+2024-08-25 06:05:36,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=24906.666666666668, ans=0.1
+2024-08-25 06:05:53,562 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=24960.0, ans=0.125
+2024-08-25 06:05:58,447 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.64 vs. limit=12.0
+2024-08-25 06:06:00,655 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=25013.333333333332, ans=0.035
+2024-08-25 06:06:01,436 INFO [train.py:1114] (3/4) Epoch 2, batch 2200, loss[loss=0.3251, simple_loss=0.3531, pruned_loss=0.1072, ctc_loss=0.207, over 19587.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3446, pruned_loss=0.1055, ctc_loss=0.1976, over 3869038.25 frames. ], batch size: 57, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:06:07,478 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=25013.333333333332, ans=0.0
+2024-08-25 06:06:07,839 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.60 vs. limit=10.0
+2024-08-25 06:06:08,439 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=25013.333333333332, ans=0.025
+2024-08-25 06:06:25,293 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.924e+02 2.286e+02 2.709e+02 6.222e+02, threshold=4.573e+02, percent-clipped=4.0
+2024-08-25 06:06:26,526 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=25066.666666666668, ans=0.125
+2024-08-25 06:06:30,737 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=25120.0, ans=0.125
+2024-08-25 06:06:32,604 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=25120.0, ans=0.0
+2024-08-25 06:06:37,850 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=25173.333333333332, ans=0.125
+2024-08-25 06:06:48,393 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=25226.666666666668, ans=0.125
+2024-08-25 06:06:54,574 INFO [train.py:1114] (3/4) Epoch 2, batch 2250, loss[loss=0.3454, simple_loss=0.3701, pruned_loss=0.1159, ctc_loss=0.2225, over 19606.00 frames. ], tot_loss[loss=0.3168, simple_loss=0.3444, pruned_loss=0.1051, ctc_loss=0.1973, over 3868519.86 frames. ], batch size: 55, lr: 4.06e-02, grad_scale: 32.0
+2024-08-25 06:07:12,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 06:07:19,198 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=25386.666666666668, ans=0.125
+2024-08-25 06:07:36,152 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=25493.333333333332, ans=0.125
+2024-08-25 06:07:41,120 INFO [train.py:1114] (3/4) Epoch 2, batch 2300, loss[loss=0.2759, simple_loss=0.3141, pruned_loss=0.08516, ctc_loss=0.1686, over 19513.00 frames. ], tot_loss[loss=0.3162, simple_loss=0.3434, pruned_loss=0.1051, ctc_loss=0.1969, over 3862000.82 frames. ], batch size: 49, lr: 4.05e-02, grad_scale: 32.0
+2024-08-25 06:07:44,568 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=25546.666666666668, ans=0.0
+2024-08-25 06:07:53,920 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.87 vs. limit=15.0
+2024-08-25 06:07:58,723 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 1.775e+02 2.049e+02 2.504e+02 6.120e+02, threshold=4.097e+02, percent-clipped=1.0
+2024-08-25 06:08:10,058 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=25653.333333333332, ans=0.125
+2024-08-25 06:08:21,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=25760.0, ans=0.0
+2024-08-25 06:08:24,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=25760.0, ans=0.125
+2024-08-25 06:08:25,827 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=25760.0, ans=0.025
+2024-08-25 06:08:29,084 INFO [train.py:1114] (3/4) Epoch 2, batch 2350, loss[loss=0.3097, simple_loss=0.3466, pruned_loss=0.09879, ctc_loss=0.1881, over 19655.00 frames. ], tot_loss[loss=0.316, simple_loss=0.3432, pruned_loss=0.1051, ctc_loss=0.1966, over 3864149.94 frames. ], batch size: 63, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:08:34,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25813.333333333332, ans=0.1
+2024-08-25 06:08:34,570 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=25813.333333333332, ans=0.005257971014492754
+2024-08-25 06:08:36,698 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=25813.333333333332, ans=0.125
+2024-08-25 06:08:40,560 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=25866.666666666668, ans=0.125
+2024-08-25 06:08:58,495 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.44 vs. limit=22.5
+2024-08-25 06:09:11,444 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=25973.333333333332, ans=15.0
+2024-08-25 06:09:26,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=26026.666666666668, ans=0.0
+2024-08-25 06:09:28,483 INFO [train.py:1114] (3/4) Epoch 2, batch 2400, loss[loss=0.3349, simple_loss=0.3552, pruned_loss=0.1142, ctc_loss=0.2152, over 19243.00 frames. ], tot_loss[loss=0.3191, simple_loss=0.3461, pruned_loss=0.1063, ctc_loss=0.1987, over 3858949.39 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 32.0
+2024-08-25 06:09:36,594 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=26133.333333333332, ans=0.125
+2024-08-25 06:09:43,432 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.803e+02 2.129e+02 2.459e+02 5.388e+02, threshold=4.257e+02, percent-clipped=1.0
+2024-08-25 06:09:59,968 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=26240.0, ans=0.035
+2024-08-25 06:10:08,428 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=26293.333333333332, ans=0.2
+2024-08-25 06:10:14,651 INFO [train.py:1114] (3/4) Epoch 2, batch 2450, loss[loss=0.4284, simple_loss=0.4002, pruned_loss=0.1676, ctc_loss=0.3036, over 13311.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.3517, pruned_loss=0.1116, ctc_loss=0.2085, over 3728997.81 frames. ], batch size: 140, lr: 4.03e-02, grad_scale: 32.0
+2024-08-25 06:10:25,238 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.72 vs. limit=22.5
+2024-08-25 06:10:27,714 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=26400.0, ans=0.125
+2024-08-25 06:10:36,034 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.02 vs. limit=22.5
+2024-08-25 06:10:46,541 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=26453.333333333332, ans=0.05
+2024-08-25 06:10:47,599 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.80 vs. limit=22.5
+2024-08-25 06:10:49,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=26506.666666666668, ans=0.125
+2024-08-25 06:10:55,993 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=26506.666666666668, ans=0.125
+2024-08-25 06:11:37,032 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=26554.666666666668, ans=0.0
+2024-08-25 06:11:53,144 INFO [train.py:1114] (3/4) Epoch 3, batch 0, loss[loss=0.3271, simple_loss=0.3387, pruned_loss=0.1157, ctc_loss=0.2103, over 19796.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3387, pruned_loss=0.1157, ctc_loss=0.2103, over 19796.00 frames. ], batch size: 49, lr: 3.83e-02, grad_scale: 32.0
+2024-08-25 06:11:53,508 INFO [train.py:1137] (3/4) Computing validation loss
+2024-08-25 06:12:07,821 INFO [train.py:1146] (3/4) Epoch 3, validation: loss=0.2565, simple_loss=0.3309, pruned_loss=0.06653, ctc_loss=0.1228, over 944034.00 frames.
+2024-08-25 06:12:07,822 INFO [train.py:1147] (3/4) Maximum memory allocated so far is 13361MB
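+
+(Editor's note: every loss figure in this log is a per-frame average, reported "over N
+frames". For tot_loss the frame count stays near 3.8M instead of growing through the
+epoch, which suggests a decayed running sum of (loss * frames, frames) pairs rather than
+a full-epoch average. A sketch under that assumption; the decay constant is illustrative,
+chosen so that ~19.6k frames per batch gives a steady-state window of roughly 3.9M
+frames, the order seen above.)
+
+    class RunningPerFrameLoss:
+        """Decayed running per-frame loss, reported as loss_sum / frames."""
+
+        def __init__(self, decay: float = 0.995):
+            self.decay = decay
+            self.loss_sum = 0.0
+            self.frames = 0.0
+
+        def update(self, per_frame_loss: float, num_frames: float) -> None:
+            self.loss_sum = self.decay * self.loss_sum + per_frame_loss * num_frames
+            self.frames = self.decay * self.frames + num_frames
+
+        @property
+        def value(self) -> float:
+            return self.loss_sum / max(self.frames, 1.0)
+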
+2024-08-25 06:12:20,912 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=26554.666666666668, ans=22.5
+2024-08-25 06:13:57,848 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=26608.0, ans=0.0
+2024-08-25 06:13:58,803 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.15 vs. limit=15.0
+2024-08-25 06:14:54,909 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26661.333333333332, ans=0.1
+2024-08-25 06:15:23,247 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=25.93 vs. limit=22.5
+2024-08-25 06:16:00,085 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 1.983e+02 2.286e+02 2.644e+02 3.774e+02, threshold=4.572e+02, percent-clipped=0.0
+2024-08-25 06:17:19,267 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26714.666666666668, ans=0.1
+2024-08-25 06:17:28,371 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26714.666666666668, ans=0.1
+2024-08-25 06:17:49,466 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=26714.666666666668, ans=0.025
+2024-08-25 06:20:23,744 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.58 vs. limit=10.0
+2024-08-25 06:22:59,724 INFO [train.py:1114] (3/4) Epoch 3, batch 50, loss[loss=0.2713, simple_loss=0.316, pruned_loss=0.08206, ctc_loss=0.1564, over 19705.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3518, pruned_loss=0.1106, ctc_loss=0.2078, over 844545.25 frames. ], batch size: 47, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:26:49,786 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.53 vs. limit=6.0
+2024-08-25 06:29:16,367 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.49 vs. limit=15.0
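+
+(Editor's note: the Whitening lines compare a measured non-whiteness metric against a
+scheduled limit, and the metric is >= 1 in every entry here. That is consistent with a
+definition like d * tr(C^2) / tr(C)^2 for the feature covariance C, which equals 1.0
+exactly when activations are white (C proportional to the identity) and grows as the
+eigenvalue spectrum becomes more uneven. This definition is an assumption for
+illustration of the num_groups=1 case, not necessarily scaling.py's exact formula.)
+
+    import torch
+
+    def whitening_metric(x: torch.Tensor) -> float:
+        """x: (num_frames, num_channels). Returns d * tr(C^2) / tr(C)^2, always >= 1."""
+        x = x - x.mean(dim=0)              # center the features
+        cov = (x.T @ x) / x.shape[0]       # (d, d) sample covariance
+        d = cov.shape[0]
+        return (d * (cov * cov).sum() / cov.trace() ** 2).item()
+
+    print(whitening_metric(torch.randn(10000, 512)))    # ~1: already white
+    x = torch.randn(10000, 512)
+    x[:, :10] *= 10.0                                   # a few dominant channels
+    print(whitening_metric(x))                          # far above 1
+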
+2024-08-25 06:29:45,793 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=26874.666666666668, ans=0.07
+2024-08-25 06:31:29,775 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26874.666666666668, ans=0.1
+2024-08-25 06:34:57,965 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=26874.666666666668, ans=0.005027246376811594
+2024-08-25 06:38:56,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=26981.333333333332, ans=0.125
+2024-08-25 06:44:24,688 INFO [train.py:1114] (3/4) Epoch 3, batch 100, loss[loss=0.277, simple_loss=0.3175, pruned_loss=0.08615, ctc_loss=0.1607, over 19696.00 frames. ], tot_loss[loss=0.3245, simple_loss=0.3509, pruned_loss=0.1084, ctc_loss=0.2035, over 1498666.18 frames. ], batch size: 51, lr: 3.82e-02, grad_scale: 16.0
+2024-08-25 06:45:45,064 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.45 vs. limit=5.0
+2024-08-25 06:47:04,548 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=27141.333333333332, ans=0.125
+2024-08-25 06:47:59,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=27194.666666666668, ans=0.125
+2024-08-25 06:48:15,477 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.744e+02 2.032e+02 2.291e+02 1.205e+03, threshold=4.063e+02, percent-clipped=1.0
+2024-08-25 06:50:12,246 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=27301.333333333332, ans=0.125
+2024-08-25 06:50:43,564 INFO [train.py:1114] (3/4) Epoch 3, batch 150, loss[loss=0.2829, simple_loss=0.3154, pruned_loss=0.09021, ctc_loss=0.1751, over 19710.00 frames. ], tot_loss[loss=0.3175, simple_loss=0.3453, pruned_loss=0.1053, ctc_loss=0.1975, over 2028092.68 frames. ], batch size: 47, lr: 3.81e-02, grad_scale: 16.0
+2024-08-25 06:51:34,362 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 06:51:51,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=27354.666666666668, ans=0.125
+2024-08-25 06:52:42,919 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=27461.333333333332, ans=0.0
+2024-08-25 06:53:55,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=27514.666666666668, ans=0.0
+2024-08-25 06:54:14,725 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=27568.0, ans=0.125
+2024-08-25 06:54:31,967 INFO [train.py:1114] (3/4) Epoch 3, batch 200, loss[loss=0.3488, simple_loss=0.3682, pruned_loss=0.1203, ctc_loss=0.2218, over 18470.00 frames. ], tot_loss[loss=0.3138, simple_loss=0.3425, pruned_loss=0.1036, ctc_loss=0.1943, over 2435922.79 frames. ], batch size: 85, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:55:11,966 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=26.30 vs. limit=22.5
+2024-08-25 06:55:24,539 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=27728.0, ans=0.1
+2024-08-25 06:56:00,391 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.731e+02 1.977e+02 2.205e+02 3.305e+02, threshold=3.953e+02, percent-clipped=0.0
+2024-08-25 06:56:04,299 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=27781.333333333332, ans=0.125
+2024-08-25 06:56:09,164 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=27781.333333333332, ans=0.125
+2024-08-25 06:56:21,774 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.31 vs. limit=22.5
+2024-08-25 06:56:25,073 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=27834.666666666668, ans=0.2
+2024-08-25 06:56:34,804 INFO [train.py:1114] (3/4) Epoch 3, batch 250, loss[loss=0.3595, simple_loss=0.3766, pruned_loss=0.1266, ctc_loss=0.223, over 19425.00 frames. ], tot_loss[loss=0.3122, simple_loss=0.3418, pruned_loss=0.1028, ctc_loss=0.1924, over 2756123.64 frames. ], batch size: 67, lr: 3.80e-02, grad_scale: 16.0
+2024-08-25 06:56:55,398 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=27888.0, ans=0.0
+2024-08-25 06:57:28,248 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=27941.333333333332, ans=0.0
+2024-08-25 06:57:29,595 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 06:57:30,497 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=27941.333333333332, ans=0.125
+2024-08-25 07:00:42,331 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=28048.0, ans=0.125
+2024-08-25 07:02:51,003 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=28101.333333333332, ans=0.125
+2024-08-25 07:02:51,120 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=28101.333333333332, ans=0.125
+2024-08-25 07:03:11,753 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=24.09 vs. limit=22.5
+2024-08-25 07:03:29,194 INFO [train.py:1114] (3/4) Epoch 3, batch 300, loss[loss=0.3515, simple_loss=0.3768, pruned_loss=0.1203, ctc_loss=0.2138, over 19505.00 frames. ], tot_loss[loss=0.3104, simple_loss=0.3405, pruned_loss=0.102, ctc_loss=0.1906, over 3001487.37 frames. ], batch size: 61, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:03:34,770 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=28154.666666666668, ans=0.09899494936611666
+2024-08-25 07:03:35,766 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=28154.666666666668, ans=0.2
+2024-08-25 07:03:53,888 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.48 vs. limit=15.0
+2024-08-25 07:04:06,150 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=28208.0, ans=22.5
+2024-08-25 07:04:28,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=28261.333333333332, ans=0.0
+2024-08-25 07:04:42,887 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=28261.333333333332, ans=0.125
+2024-08-25 07:04:44,393 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.395e+02 1.724e+02 1.968e+02 2.265e+02 3.417e+02, threshold=3.936e+02, percent-clipped=0.0
+2024-08-25 07:05:02,166 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=28314.666666666668, ans=0.004714202898550725
+2024-08-25 07:05:02,206 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=28314.666666666668, ans=0.125
+2024-08-25 07:05:16,423 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.18 vs. limit=15.0
+2024-08-25 07:05:49,893 INFO [train.py:1114] (3/4) Epoch 3, batch 350, loss[loss=0.2523, simple_loss=0.2951, pruned_loss=0.07581, ctc_loss=0.1446, over 19746.00 frames. ], tot_loss[loss=0.3101, simple_loss=0.3405, pruned_loss=0.1018, ctc_loss=0.1904, over 3191503.78 frames. ], batch size: 48, lr: 3.79e-02, grad_scale: 16.0
+2024-08-25 07:05:52,093 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=28421.333333333332, ans=0.004691014492753623
+2024-08-25 07:05:56,108 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=28421.333333333332, ans=22.5
+2024-08-25 07:07:24,942 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=21.86 vs. limit=22.5
+2024-08-25 07:07:45,589 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=28581.333333333332, ans=0.125
+2024-08-25 07:07:58,895 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.13 vs. limit=12.0
+2024-08-25 07:08:01,172 INFO [train.py:1114] (3/4) Epoch 3, batch 400, loss[loss=0.3056, simple_loss=0.3455, pruned_loss=0.09512, ctc_loss=0.1889, over 19500.00 frames. ], tot_loss[loss=0.3093, simple_loss=0.3401, pruned_loss=0.1013, ctc_loss=0.1897, over 3341862.26 frames. ], batch size: 54, lr: 3.78e-02, grad_scale: 32.0
+2024-08-25 07:08:18,778 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=28741.333333333332, ans=0.025
+2024-08-25 07:08:27,296 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=28794.666666666668, ans=0.125
+2024-08-25 07:08:27,319 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=28794.666666666668, ans=0.2
+2024-08-25 07:08:42,253 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.762e+02 1.982e+02 2.336e+02 5.420e+02, threshold=3.963e+02, percent-clipped=2.0
+2024-08-25 07:08:54,981 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=28901.333333333332, ans=0.0
+2024-08-25 07:09:04,136 INFO [train.py:1114] (3/4) Epoch 3, batch 450, loss[loss=0.2703, simple_loss=0.3249, pruned_loss=0.07689, ctc_loss=0.1546, over 19607.00 frames. ], tot_loss[loss=0.3087, simple_loss=0.34, pruned_loss=0.1009, ctc_loss=0.1891, over 3449635.25 frames. ], batch size: 55, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:09:09,420 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.08 vs. limit=15.0
+2024-08-25 07:09:09,604 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.50 vs. limit=15.0
+2024-08-25 07:09:29,436 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=29061.333333333332, ans=0.2
+2024-08-25 07:09:37,016 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=29114.666666666668, ans=0.2
+2024-08-25 07:09:56,837 INFO [train.py:1114] (3/4) Epoch 3, batch 500, loss[loss=0.3119, simple_loss=0.3495, pruned_loss=0.1008, ctc_loss=0.1815, over 19696.00 frames. ], tot_loss[loss=0.307, simple_loss=0.3385, pruned_loss=0.1002, ctc_loss=0.1876, over 3545514.73 frames. ], batch size: 63, lr: 3.77e-02, grad_scale: 32.0
+2024-08-25 07:10:43,382 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.753e+02 1.966e+02 2.327e+02 4.047e+02, threshold=3.932e+02, percent-clipped=2.0
+2024-08-25 07:11:10,802 INFO [train.py:1114] (3/4) Epoch 3, batch 550, loss[loss=0.3526, simple_loss=0.3709, pruned_loss=0.1203, ctc_loss=0.2341, over 19297.00 frames. ], tot_loss[loss=0.3078, simple_loss=0.3388, pruned_loss=0.1007, ctc_loss=0.1882, over 3608395.42 frames. ], batch size: 71, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:11:37,568 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.94 vs. limit=6.0
+2024-08-25 07:11:41,086 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=29488.0, ans=0.125
+2024-08-25 07:12:10,395 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.34 vs. limit=22.5
+2024-08-25 07:12:15,819 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=29648.0, ans=0.125
+2024-08-25 07:12:41,846 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=29701.333333333332, ans=0.2
+2024-08-25 07:12:42,637 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=29701.333333333332, ans=0.125
+2024-08-25 07:12:53,160 INFO [train.py:1114] (3/4) Epoch 3, batch 600, loss[loss=0.3363, simple_loss=0.3654, pruned_loss=0.1111, ctc_loss=0.2123, over 19344.00 frames. ], tot_loss[loss=0.3071, simple_loss=0.3384, pruned_loss=0.1004, ctc_loss=0.1877, over 3665281.45 frames. ], batch size: 67, lr: 3.76e-02, grad_scale: 16.0
+2024-08-25 07:12:54,333 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=29754.666666666668, ans=0.125
+2024-08-25 07:13:31,838 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=2.560e-03
+2024-08-25 07:13:37,729 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.812e+02 2.009e+02 2.360e+02 5.731e+02, threshold=4.017e+02, percent-clipped=3.0
+2024-08-25 07:13:38,021 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=29914.666666666668, ans=0.004366376811594202
+2024-08-25 07:13:44,733 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=29914.666666666668, ans=0.2
+2024-08-25 07:13:54,411 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=29968.0, ans=0.125
+2024-08-25 07:14:02,713 INFO [train.py:1114] (3/4) Epoch 3, batch 650, loss[loss=0.3354, simple_loss=0.3537, pruned_loss=0.1156, ctc_loss=0.2144, over 19747.00 frames. ], tot_loss[loss=0.3057, simple_loss=0.3373, pruned_loss=0.0998, ctc_loss=0.1863, over 3715143.58 frames. ], batch size: 54, lr: 3.75e-02, grad_scale: 16.0
+2024-08-25 07:14:03,922 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=30021.333333333332, ans=0.125
+2024-08-25 07:14:06,282 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.35 vs. limit=6.0
+2024-08-25 07:14:15,371 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=30074.666666666668, ans=0.0
+2024-08-25 07:14:15,417 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=30074.666666666668, ans=0.025
+2024-08-25 07:14:30,102 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=30128.0, ans=0.0
+2024-08-25 07:14:40,547 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=30181.333333333332, ans=0.2
+2024-08-25 07:14:55,123 INFO [train.py:1114] (3/4) Epoch 3, batch 700, loss[loss=0.2922, simple_loss=0.3294, pruned_loss=0.09384, ctc_loss=0.1682, over 19722.00 frames. ], tot_loss[loss=0.3059, simple_loss=0.3377, pruned_loss=0.09979, ctc_loss=0.1862, over 3747185.94 frames. ], batch size: 51, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:14:59,257 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=30288.0, ans=0.05
+2024-08-25 07:15:03,261 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=30288.0, ans=0.125
+2024-08-25 07:15:07,239 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.44 vs. limit=15.0
+2024-08-25 07:15:22,938 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=30394.666666666668, ans=0.004262028985507247
+2024-08-25 07:15:28,451 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.846e+02 1.998e+02 2.505e+02 9.071e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-25 07:15:32,930 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=30448.0, ans=0.125
+2024-08-25 07:15:33,648 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=30448.0, ans=0.0
+2024-08-25 07:15:38,286 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=30448.0, ans=0.2
+2024-08-25 07:15:44,190 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 07:15:48,979 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=30501.333333333332, ans=0.125
+2024-08-25 07:15:58,573 INFO [train.py:1114] (3/4) Epoch 3, batch 750, loss[loss=0.3039, simple_loss=0.3439, pruned_loss=0.09625, ctc_loss=0.1786, over 19504.00 frames. ], tot_loss[loss=0.3042, simple_loss=0.3365, pruned_loss=0.09903, ctc_loss=0.1847, over 3774363.66 frames. ], batch size: 54, lr: 3.74e-02, grad_scale: 16.0
+2024-08-25 07:16:00,645 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=30554.666666666668, ans=0.025
+2024-08-25 07:16:22,857 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=30608.0, ans=0.125
+2024-08-25 07:16:29,688 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=30608.0, ans=0.0
+2024-08-25 07:16:35,389 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=30661.333333333332, ans=0.0
+2024-08-25 07:16:43,239 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.10 vs. limit=15.0
+2024-08-25 07:16:46,685 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=30661.333333333332, ans=0.025
+2024-08-25 07:18:51,216 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=30714.666666666668, ans=0.0
+2024-08-25 07:29:29,264 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=30768.0, ans=0.1
+2024-08-25 07:34:42,530 INFO [train.py:1114] (3/4) Epoch 3, batch 800, loss[loss=0.2629, simple_loss=0.3045, pruned_loss=0.08059, ctc_loss=0.1501, over 19416.00 frames. ], tot_loss[loss=0.3039, simple_loss=0.3364, pruned_loss=0.09883, ctc_loss=0.1845, over 3794555.81 frames. ], batch size: 48, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 07:37:24,064 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30821.333333333332, ans=0.1
+2024-08-25 08:02:40,818 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.761e+02 1.928e+02 2.233e+02 3.899e+02, threshold=3.856e+02, percent-clipped=0.0
+2024-08-25 08:03:02,322 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.20 vs. limit=22.5
+2024-08-25 08:12:59,503 INFO [train.py:1114] (3/4) Epoch 3, batch 850, loss[loss=0.3156, simple_loss=0.3509, pruned_loss=0.1017, ctc_loss=0.1922, over 19669.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3364, pruned_loss=0.09846, ctc_loss=0.1841, over 3814526.82 frames. ], batch size: 59, lr: 3.73e-02, grad_scale: 32.0
+2024-08-25 08:17:57,595 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=31088.0, ans=0.125
+2024-08-25 08:20:22,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=31141.333333333332, ans=0.025
+2024-08-25 08:27:20,930 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.64 vs. limit=22.5
+2024-08-25 08:32:09,983 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=31248.0, ans=0.035
+2024-08-25 08:35:54,821 INFO [scaling.py:1024] (3/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.29 vs. limit=5.0
+2024-08-25 08:44:41,589 INFO [train.py:1114] (3/4) Epoch 3, batch 900, loss[loss=0.2574, simple_loss=0.3001, pruned_loss=0.07837, ctc_loss=0.1451, over 19806.00 frames. ], tot_loss[loss=0.3041, simple_loss=0.3366, pruned_loss=0.09882, ctc_loss=0.1846, over 3817708.33 frames. ], batch size: 49, lr: 3.72e-02, grad_scale: 32.0
+2024-08-25 08:47:05,823 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.98 vs. limit=15.0
+2024-08-25 08:48:06,046 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=31354.666666666668, ans=0.125
+2024-08-25 08:48:53,316 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31408.0, ans=0.1
+2024-08-25 08:55:51,671 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=31461.333333333332, ans=0.004030144927536232
+2024-08-25 08:57:54,734 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 1.748e+02 1.945e+02 2.250e+02 3.446e+02, threshold=3.889e+02, percent-clipped=0.0
+2024-08-25 09:00:55,203 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=31514.666666666668, ans=0.125
+2024-08-25 09:03:41,305 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=31568.0, ans=0.025
+2024-08-25 09:05:03,362 INFO [train.py:1114] (3/4) Epoch 3, batch 950, loss[loss=0.2584, simple_loss=0.298, pruned_loss=0.07992, ctc_loss=0.1474, over 19519.00 frames. ], tot_loss[loss=0.3039, simple_loss=0.3366, pruned_loss=0.09869, ctc_loss=0.1847, over 3820935.01 frames. ], batch size: 49, lr: 3.71e-02, grad_scale: 32.0
+2024-08-25 09:12:03,484 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=31674.666666666668, ans=0.0
+2024-08-25 09:13:22,658 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=31728.0, ans=0.2
+2024-08-25 09:13:22,943 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.55 vs. limit=15.0
+2024-08-25 09:16:00,986 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=31728.0, ans=0.04949747468305833
+2024-08-25 09:17:05,547 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=31781.333333333332, ans=0.125
+2024-08-25 09:22:18,947 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.12 vs. limit=12.0
+2024-08-25 09:23:03,902 INFO [train.py:1114] (3/4) Epoch 3, batch 1000, loss[loss=0.2742, simple_loss=0.3139, pruned_loss=0.08543, ctc_loss=0.1593, over 19866.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.3376, pruned_loss=0.09945, ctc_loss=0.1861, over 3817290.58 frames. ], batch size: 52, lr: 3.71e-02, grad_scale: 16.0
+2024-08-25 09:26:43,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=31994.666666666668, ans=0.125
+2024-08-25 09:28:26,907 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=31994.666666666668, ans=0.125
+2024-08-25 09:29:07,849 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.398e+02 1.873e+02 2.237e+02 2.628e+02 7.664e+02, threshold=4.475e+02, percent-clipped=6.0
+2024-08-25 09:29:15,835 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.41 vs. limit=22.5
+2024-08-25 09:32:05,596 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=32101.333333333332, ans=0.125
+2024-08-25 09:32:27,624 INFO [train.py:1114] (3/4) Epoch 3, batch 1050, loss[loss=0.3064, simple_loss=0.3395, pruned_loss=0.09938, ctc_loss=0.1865, over 19833.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3369, pruned_loss=0.09905, ctc_loss=0.1855, over 3823057.75 frames. ], batch size: 57, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:32:36,946 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=32154.666666666668, ans=0.0
+2024-08-25 09:35:12,781 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=32261.333333333332, ans=0.0
+2024-08-25 09:35:13,017 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.32 vs. limit=22.5
+2024-08-25 09:35:19,844 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=32261.333333333332, ans=0.125
+2024-08-25 09:36:22,243 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=12.24 vs. limit=15.0
+2024-08-25 09:39:16,711 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.45 vs. limit=10.0
+2024-08-25 09:41:10,324 INFO [train.py:1114] (3/4) Epoch 3, batch 1100, loss[loss=0.2903, simple_loss=0.3231, pruned_loss=0.09291, ctc_loss=0.1793, over 19594.00 frames. ], tot_loss[loss=0.3034, simple_loss=0.3362, pruned_loss=0.0984, ctc_loss=0.1844, over 3830355.15 frames. ], batch size: 52, lr: 3.70e-02, grad_scale: 16.0
+2024-08-25 09:41:46,257 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=32474.666666666668, ans=0.025
+2024-08-25 09:43:23,038 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.681e+02 1.943e+02 2.357e+02 4.515e+02, threshold=3.887e+02, percent-clipped=1.0
+2024-08-25 09:43:30,917 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=32581.333333333332, ans=0.0037866666666666665
+2024-08-25 09:45:15,744 INFO [train.py:1114] (3/4) Epoch 3, batch 1150, loss[loss=0.2951, simple_loss=0.3286, pruned_loss=0.09564, ctc_loss=0.176, over 19570.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3359, pruned_loss=0.09833, ctc_loss=0.184, over 3829999.35 frames. ], batch size: 52, lr: 3.69e-02, grad_scale: 16.0
+2024-08-25 09:51:56,049 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=32688.0, ans=0.125
+2024-08-25 09:51:56,880 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=32688.0, ans=0.2
+2024-08-25 09:54:00,329 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=32741.333333333332, ans=0.1
+2024-08-25 09:55:26,292 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=32901.333333333336, ans=0.0
+2024-08-25 09:55:29,636 INFO [train.py:1114] (3/4) Epoch 3, batch 1200, loss[loss=0.3184, simple_loss=0.3577, pruned_loss=0.1018, ctc_loss=0.1889, over 19848.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3364, pruned_loss=0.09844, ctc_loss=0.1847, over 3825593.25 frames. ], batch size: 57, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:56:08,227 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=33061.333333333336, ans=0.0
+2024-08-25 09:56:31,119 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.705e+02 1.941e+02 2.201e+02 4.168e+02, threshold=3.882e+02, percent-clipped=1.0
+2024-08-25 09:56:48,396 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=33114.666666666664, ans=0.05
+2024-08-25 09:57:41,496 INFO [train.py:1114] (3/4) Epoch 3, batch 1250, loss[loss=0.3168, simple_loss=0.3518, pruned_loss=0.1032, ctc_loss=0.1884, over 19540.00 frames. ], tot_loss[loss=0.3037, simple_loss=0.3371, pruned_loss=0.0983, ctc_loss=0.1843, over 3843355.26 frames. ], batch size: 61, lr: 3.68e-02, grad_scale: 32.0
+2024-08-25 09:57:48,037 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=33221.333333333336, ans=0.1
+2024-08-25 09:58:16,524 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=33274.666666666664, ans=0.0036359420289855072
+2024-08-25 09:58:17,377 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=33274.666666666664, ans=0.1
+2024-08-25 09:58:39,235 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=33381.333333333336, ans=0.2
+2024-08-25 09:58:48,384 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=33434.666666666664, ans=0.025
+2024-08-25 09:59:04,151 INFO [train.py:1114] (3/4) Epoch 3, batch 1300, loss[loss=0.3455, simple_loss=0.3691, pruned_loss=0.1182, ctc_loss=0.2138, over 18785.00 frames. ], tot_loss[loss=0.3015, simple_loss=0.3355, pruned_loss=0.09729, ctc_loss=0.1824, over 3846466.03 frames. ], batch size: 76, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 09:59:18,694 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=33488.0, ans=0.0
+2024-08-25 09:59:43,022 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=33594.666666666664, ans=0.0
+2024-08-25 09:59:48,217 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.674e+02 1.887e+02 2.172e+02 3.368e+02, threshold=3.774e+02, percent-clipped=0.0
+2024-08-25 10:00:00,596 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=33648.0, ans=0.125
+2024-08-25 10:00:02,437 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=33701.333333333336, ans=0.125
+2024-08-25 10:00:22,514 INFO [train.py:1114] (3/4) Epoch 3, batch 1350, loss[loss=0.3111, simple_loss=0.3387, pruned_loss=0.1021, ctc_loss=0.1982, over 19785.00 frames. ], tot_loss[loss=0.2997, simple_loss=0.3342, pruned_loss=0.09649, ctc_loss=0.1808, over 3858148.22 frames. ], batch size: 54, lr: 3.67e-02, grad_scale: 32.0
+2024-08-25 10:00:33,354 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=33754.666666666664, ans=0.0
+2024-08-25 10:00:33,417 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=33754.666666666664, ans=0.0
+2024-08-25 10:01:14,313 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.09 vs. limit=15.0
+2024-08-25 10:01:16,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=33861.333333333336, ans=0.003508405797101449
+2024-08-25 10:01:50,939 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=33968.0, ans=0.003485217391304348
+2024-08-25 10:02:01,546 INFO [train.py:1114] (3/4) Epoch 3, batch 1400, loss[loss=0.2752, simple_loss=0.3091, pruned_loss=0.08777, ctc_loss=0.1646, over 19685.00 frames. ], tot_loss[loss=0.2991, simple_loss=0.3339, pruned_loss=0.09615, ctc_loss=0.18, over 3865726.13 frames. ], batch size: 46, lr: 3.66e-02, grad_scale: 32.0
+2024-08-25 10:02:10,308 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=34021.333333333336, ans=0.125
+2024-08-25 10:02:12,271 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=34021.333333333336, ans=0.125
+2024-08-25 10:02:13,185 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=34021.333333333336, ans=0.125
+2024-08-25 10:02:21,114 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=34074.666666666664, ans=0.04949747468305833
+2024-08-25 10:02:22,862 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=34074.666666666664, ans=0.125
+2024-08-25 10:02:33,442 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=34128.0, ans=0.125
+2024-08-25 10:02:42,955 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=34128.0, ans=15.0
+2024-08-25 10:02:45,298 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 1.896e+02 2.159e+02 2.528e+02 3.857e+02, threshold=4.318e+02, percent-clipped=1.0
+2024-08-25 10:02:55,016 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.63 vs. limit=6.0
+2024-08-25 10:02:55,644 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=34181.333333333336, ans=0.125
+2024-08-25 10:02:56,598 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=34181.333333333336, ans=0.125
+2024-08-25 10:03:12,602 INFO [train.py:1114] (3/4) Epoch 3, batch 1450, loss[loss=0.3312, simple_loss=0.357, pruned_loss=0.1116, ctc_loss=0.2055, over 19664.00 frames. ], tot_loss[loss=0.3011, simple_loss=0.3353, pruned_loss=0.09711, ctc_loss=0.1817, over 3863914.65 frames. ], batch size: 63, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:03:15,904 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34288.0, ans=0.1
+2024-08-25 10:03:22,670 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.31 vs. limit=15.0
+2024-08-25 10:03:39,164 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.71 vs. limit=22.5
+2024-08-25 10:03:47,202 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=34394.666666666664, ans=0.025
+2024-08-25 10:03:57,420 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.08 vs. limit=12.0
+2024-08-25 10:04:16,493 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.54 vs. limit=22.5
+2024-08-25 10:04:19,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=34501.333333333336, ans=0.95
+2024-08-25 10:04:20,004 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=34501.333333333336, ans=0.025
+2024-08-25 10:04:20,828 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=34554.666666666664, ans=0.125
+2024-08-25 10:04:21,642 INFO [train.py:1114] (3/4) Epoch 3, batch 1500, loss[loss=0.3069, simple_loss=0.3416, pruned_loss=0.09847, ctc_loss=0.1883, over 19574.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3356, pruned_loss=0.09689, ctc_loss=0.1816, over 3864371.48 frames. ], batch size: 57, lr: 3.65e-02, grad_scale: 32.0
+2024-08-25 10:04:36,274 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=34608.0, ans=0.0033460869565217393
+2024-08-25 10:04:44,752 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34661.333333333336, ans=0.1
+2024-08-25 10:04:45,534 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=34661.333333333336, ans=0.2
+2024-08-25 10:05:09,918 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.778e+02 1.971e+02 2.353e+02 5.678e+02, threshold=3.941e+02, percent-clipped=1.0
+2024-08-25 10:05:29,629 INFO [train.py:1114] (3/4) Epoch 3, batch 1550, loss[loss=0.3223, simple_loss=0.3499, pruned_loss=0.1092, ctc_loss=0.1904, over 19610.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3357, pruned_loss=0.09751, ctc_loss=0.1823, over 3849402.66 frames. ], batch size: 60, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:05:53,839 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=34928.0, ans=0.125
+2024-08-25 10:05:56,034 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=34928.0, ans=0.2
+2024-08-25 10:06:19,993 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=35034.666666666664, ans=0.125
+2024-08-25 10:06:40,867 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=35034.666666666664, ans=0.0032533333333333346
+2024-08-25 10:06:42,364 INFO [train.py:1114] (3/4) Epoch 3, batch 1600, loss[loss=0.3054, simple_loss=0.3421, pruned_loss=0.09786, ctc_loss=0.1823, over 19838.00 frames. ], tot_loss[loss=0.3023, simple_loss=0.3356, pruned_loss=0.09793, ctc_loss=0.1829, over 3838381.03 frames. ], batch size: 57, lr: 3.64e-02, grad_scale: 32.0
+2024-08-25 10:07:10,341 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.48 vs. limit=15.0
+2024-08-25 10:07:28,025 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=35194.666666666664, ans=0.125
+2024-08-25 10:07:28,936 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=35248.0, ans=0.0032069565217391306
+2024-08-25 10:07:47,935 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 1.752e+02 2.032e+02 2.338e+02 4.104e+02, threshold=4.064e+02, percent-clipped=1.0
+2024-08-25 10:07:50,904 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:08:00,975 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=35301.333333333336, ans=0.2
+2024-08-25 10:08:06,824 INFO [train.py:1114] (3/4) Epoch 3, batch 1650, loss[loss=0.2938, simple_loss=0.3321, pruned_loss=0.09223, ctc_loss=0.1779, over 19671.00 frames. ], tot_loss[loss=0.3021, simple_loss=0.3355, pruned_loss=0.09786, ctc_loss=0.1827, over 3833842.25 frames. ], batch size: 59, lr: 3.63e-02, grad_scale: 32.0
+2024-08-25 10:08:12,908 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=35354.666666666664, ans=0.125
+2024-08-25 10:08:15,632 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:08:20,300 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=35408.0, ans=0.025
+2024-08-25 10:08:45,930 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=35514.666666666664, ans=0.0
+2024-08-25 10:08:50,722 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=35514.666666666664, ans=0.0
+2024-08-25 10:08:57,925 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.45 vs. limit=10.0
+2024-08-25 10:09:04,871 INFO [train.py:1114] (3/4) Epoch 3, batch 1700, loss[loss=0.2679, simple_loss=0.3001, pruned_loss=0.08629, ctc_loss=0.1581, over 19677.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3349, pruned_loss=0.09723, ctc_loss=0.1815, over 3847148.22 frames. ], batch size: 46, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:09:11,825 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.82 vs. limit=6.0
+2024-08-25 10:09:17,188 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.42 vs. limit=15.0
+2024-08-25 10:09:26,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=35728.0, ans=0.125
+2024-08-25 10:09:52,821 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.835e+02 2.022e+02 2.484e+02 3.793e+02, threshold=4.043e+02, percent-clipped=0.0
+2024-08-25 10:10:02,205 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=3.70 vs. limit=15.0
+2024-08-25 10:10:09,475 INFO [train.py:1114] (3/4) Epoch 3, batch 1750, loss[loss=0.2681, simple_loss=0.3015, pruned_loss=0.08571, ctc_loss=0.1581, over 19625.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3346, pruned_loss=0.09681, ctc_loss=0.1807, over 3852016.43 frames. ], batch size: 45, lr: 3.62e-02, grad_scale: 32.0
+2024-08-25 10:10:19,334 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=35941.333333333336, ans=0.125
+2024-08-25 10:11:09,478 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=36101.333333333336, ans=0.2
+2024-08-25 10:11:14,926 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=36101.333333333336, ans=0.125
+2024-08-25 10:11:20,665 INFO [train.py:1114] (3/4) Epoch 3, batch 1800, loss[loss=0.3033, simple_loss=0.3455, pruned_loss=0.09477, ctc_loss=0.1789, over 19605.00 frames. ], tot_loss[loss=0.301, simple_loss=0.335, pruned_loss=0.09718, ctc_loss=0.1815, over 3853205.65 frames. ], batch size: 55, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:11:52,946 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.758e+02 2.042e+02 2.396e+02 4.902e+02, threshold=4.083e+02, percent-clipped=1.0
+2024-08-25 10:11:53,228 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=36314.666666666664, ans=10.0
+2024-08-25 10:11:58,380 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=36314.666666666664, ans=0.125
+2024-08-25 10:11:59,453 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=36314.666666666664, ans=0.125
+2024-08-25 10:12:33,948 INFO [train.py:1114] (3/4) Epoch 3, batch 1850, loss[loss=0.2931, simple_loss=0.3344, pruned_loss=0.08979, ctc_loss=0.1806, over 19592.00 frames. ], tot_loss[loss=0.2995, simple_loss=0.3338, pruned_loss=0.09655, ctc_loss=0.1803, over 3857174.83 frames. ], batch size: 57, lr: 3.61e-02, grad_scale: 32.0
+2024-08-25 10:12:43,756 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=36474.666666666664, ans=0.2
+2024-08-25 10:12:52,472 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36474.666666666664, ans=0.1
+2024-08-25 10:12:52,567 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=36474.666666666664, ans=0.0
+2024-08-25 10:12:57,625 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=36528.0, ans=0.015
+2024-08-25 10:13:09,581 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=36528.0, ans=0.125
+2024-08-25 10:13:31,586 INFO [train.py:1114] (3/4) Epoch 3, batch 1900, loss[loss=0.3141, simple_loss=0.3507, pruned_loss=0.1018, ctc_loss=0.1845, over 19621.00 frames. ], tot_loss[loss=0.2994, simple_loss=0.3341, pruned_loss=0.09634, ctc_loss=0.1801, over 3861414.14 frames. ], batch size: 59, lr: 3.60e-02, grad_scale: 32.0
+2024-08-25 10:13:49,582 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=36688.0, ans=0.1
+2024-08-25 10:13:52,951 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=36688.0, ans=0.125
+2024-08-25 10:14:13,404 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff2.min_abs, batch_count=36741.333333333336, ans=0.1
+2024-08-25 10:14:16,878 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.69 vs. limit=12.0
+2024-08-25 10:14:24,178 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 10:14:24,214 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=36794.666666666664, ans=0.125
+2024-08-25 10:14:29,198 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 1.725e+02 1.920e+02 2.285e+02 4.448e+02, threshold=3.841e+02, percent-clipped=1.0
+2024-08-25 10:14:35,076 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=36848.0, ans=0.125
+2024-08-25 10:14:54,777 INFO [train.py:1114] (3/4) Epoch 3, batch 1950, loss[loss=0.27, simple_loss=0.3135, pruned_loss=0.08046, ctc_loss=0.1641, over 19586.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3355, pruned_loss=0.09658, ctc_loss=0.1804, over 3869991.40 frames. ], batch size: 52, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:14:54,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=36954.666666666664, ans=0.0
+2024-08-25 10:15:12,459 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=37008.0, ans=0.125
+2024-08-25 10:15:16,082 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=37008.0, ans=0.125
+2024-08-25 10:15:21,423 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.55 vs. limit=15.0
+2024-08-25 10:15:23,112 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.51 vs. limit=22.5
+2024-08-25 10:15:51,822 INFO [train.py:1114] (3/4) Epoch 3, batch 2000, loss[loss=0.2461, simple_loss=0.2835, pruned_loss=0.07648, ctc_loss=0.1395, over 19631.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3362, pruned_loss=0.09731, ctc_loss=0.1816, over 3854736.13 frames. ], batch size: 45, lr: 3.59e-02, grad_scale: 32.0
+2024-08-25 10:15:59,014 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=37221.333333333336, ans=0.015
+2024-08-25 10:16:00,922 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=37274.666666666664, ans=0.1
+2024-08-25 10:16:04,302 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37274.666666666664, ans=0.1
+2024-08-25 10:16:19,095 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 1.904e+02 2.146e+02 2.566e+02 5.347e+02, threshold=4.293e+02, percent-clipped=2.0
+2024-08-25 10:16:19,601 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=11.04 vs. limit=15.0
+2024-08-25 10:16:20,976 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=37381.333333333336, ans=0.125
+2024-08-25 10:16:34,639 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=37434.666666666664, ans=0.2
+2024-08-25 10:16:44,026 INFO [train.py:1114] (3/4) Epoch 3, batch 2050, loss[loss=0.2665, simple_loss=0.2997, pruned_loss=0.08476, ctc_loss=0.1597, over 19692.00 frames. ], tot_loss[loss=0.3001, simple_loss=0.3345, pruned_loss=0.09675, ctc_loss=0.1806, over 3850078.92 frames. ], batch size: 47, lr: 3.58e-02, grad_scale: 32.0
+2024-08-25 10:16:45,258 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.67 vs. limit=15.0
+2024-08-25 10:17:00,573 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.05 vs. limit=15.0
+2024-08-25 10:17:11,751 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.61 vs. limit=15.0
+2024-08-25 10:17:12,371 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=37541.333333333336, ans=0.025
+2024-08-25 10:17:15,687 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=37541.333333333336, ans=0.0
+2024-08-25 10:17:24,805 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=5.02 vs. limit=12.0
+2024-08-25 10:17:27,137 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37648.0, ans=0.1
+2024-08-25 10:17:44,191 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:17:48,579 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=37701.333333333336, ans=0.5
+2024-08-25 10:17:50,402 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37701.333333333336, ans=0.1
+2024-08-25 10:17:56,064 INFO [train.py:1114] (3/4) Epoch 3, batch 2100, loss[loss=0.2973, simple_loss=0.3352, pruned_loss=0.09447, ctc_loss=0.1762, over 19758.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3337, pruned_loss=0.096, ctc_loss=0.1792, over 3857945.32 frames. ], batch size: 54, lr: 3.58e-02, grad_scale: 16.0
+2024-08-25 10:18:18,006 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=37754.666666666664, ans=0.2
+2024-08-25 10:18:40,748 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=37808.0, ans=0.0
+2024-08-25 10:18:40,881 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.92 vs. limit=15.0
+2024-08-25 10:18:51,423 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.40 vs. limit=22.5
+2024-08-25 10:19:06,531 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=37861.333333333336, ans=0.125
+2024-08-25 10:19:13,887 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=37914.666666666664, ans=0.125
+2024-08-25 10:19:20,762 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.776e+02 1.971e+02 2.246e+02 3.814e+02, threshold=3.941e+02, percent-clipped=0.0
+2024-08-25 10:19:39,569 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=37914.666666666664, ans=0.2
+2024-08-25 10:19:47,415 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=37968.0, ans=0.1
+2024-08-25 10:20:09,517 INFO [train.py:1114] (3/4) Epoch 3, batch 2150, loss[loss=0.2774, simple_loss=0.3173, pruned_loss=0.08601, ctc_loss=0.1638, over 19869.00 frames. ], tot_loss[loss=0.2978, simple_loss=0.333, pruned_loss=0.09564, ctc_loss=0.1783, over 3869275.29 frames. ], batch size: 52, lr: 3.57e-02, grad_scale: 16.0
+2024-08-25 10:20:30,997 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=38074.666666666664, ans=0.125
+2024-08-25 10:20:31,344 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.19 vs. limit=12.0
+2024-08-25 10:20:34,639 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.59 vs. limit=15.0
+2024-08-25 10:20:35,546 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=38074.666666666664, ans=0.025
+2024-08-25 10:20:38,505 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=38128.0, ans=0.125
+2024-08-25 10:20:42,895 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=38128.0, ans=0.0
+2024-08-25 10:20:52,078 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.66 vs. limit=12.0
+2024-08-25 10:21:02,529 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.41 vs. limit=15.0
+2024-08-25 10:21:03,245 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.19 vs. limit=15.0
+2024-08-25 10:21:11,136 INFO [train.py:1114] (3/4) Epoch 3, batch 2200, loss[loss=0.3129, simple_loss=0.3499, pruned_loss=0.09978, ctc_loss=0.1909, over 19596.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3329, pruned_loss=0.09544, ctc_loss=0.1779, over 3867911.36 frames. ], batch size: 57, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:21:45,952 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=38394.666666666664, ans=10.0
+2024-08-25 10:21:48,782 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=38394.666666666664, ans=0.2
+2024-08-25 10:21:56,467 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.750e+02 1.922e+02 2.212e+02 3.187e+02, threshold=3.844e+02, percent-clipped=0.0
+2024-08-25 10:22:12,982 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=38448.0, ans=0.002511304347826087
+2024-08-25 10:22:16,787 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=38501.333333333336, ans=6.0
+2024-08-25 10:22:28,991 INFO [train.py:1114] (3/4) Epoch 3, batch 2250, loss[loss=0.2763, simple_loss=0.3224, pruned_loss=0.08354, ctc_loss=0.1577, over 19614.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3323, pruned_loss=0.09503, ctc_loss=0.1774, over 3868294.23 frames. ], batch size: 55, lr: 3.56e-02, grad_scale: 16.0
+2024-08-25 10:22:31,647 INFO [scaling.py:1120] (3/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-25 10:22:34,593 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.08 vs. limit=15.0
+2024-08-25 10:22:45,297 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=38608.0, ans=0.125
+2024-08-25 10:22:53,365 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38661.333333333336, ans=0.1
+2024-08-25 10:23:17,093 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=38714.666666666664, ans=0.125
+2024-08-25 10:23:40,909 INFO [train.py:1114] (3/4) Epoch 3, batch 2300, loss[loss=0.2485, simple_loss=0.2956, pruned_loss=0.07312, ctc_loss=0.138, over 19512.00 frames. ], tot_loss[loss=0.2959, simple_loss=0.3313, pruned_loss=0.09482, ctc_loss=0.1773, over 3860984.35 frames. ], batch size: 49, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:23:41,349 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.35 vs. limit=15.0
+2024-08-25 10:23:51,371 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.46 vs. limit=22.5
+2024-08-25 10:24:13,769 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 1.820e+02 2.030e+02 2.354e+02 3.970e+02, threshold=4.059e+02, percent-clipped=1.0
+2024-08-25 10:24:14,845 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=38981.333333333336, ans=0.125
+2024-08-25 10:24:45,563 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=39034.666666666664, ans=0.025
+2024-08-25 10:24:48,807 INFO [train.py:1114] (3/4) Epoch 3, batch 2350, loss[loss=0.3087, simple_loss=0.3467, pruned_loss=0.09959, ctc_loss=0.1789, over 19691.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3314, pruned_loss=0.0951, ctc_loss=0.1776, over 3863350.47 frames. ], batch size: 63, lr: 3.55e-02, grad_scale: 16.0
+2024-08-25 10:24:57,745 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=39088.0, ans=0.0
+2024-08-25 10:25:02,503 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=39088.0, ans=0.125
+2024-08-25 10:25:09,136 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=39141.333333333336, ans=0.125
+2024-08-25 10:25:10,031 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=39141.333333333336, ans=0.125
+2024-08-25 10:25:43,146 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=39301.333333333336, ans=0.0
+2024-08-25 10:25:45,126 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.69 vs. limit=22.5
+2024-08-25 10:25:52,380 INFO [train.py:1114] (3/4) Epoch 3, batch 2400, loss[loss=0.312, simple_loss=0.3503, pruned_loss=0.09974, ctc_loss=0.1856, over 19154.00 frames. ], tot_loss[loss=0.2988, simple_loss=0.3336, pruned_loss=0.09615, ctc_loss=0.1793, over 3857217.27 frames. ], batch size: 71, lr: 3.54e-02, grad_scale: 32.0
+2024-08-25 10:25:57,628 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=39354.666666666664, ans=0.125
+2024-08-25 10:26:04,024 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.07 vs. limit=15.0
+2024-08-25 10:26:11,019 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.01 vs. limit=22.5
+2024-08-25 10:26:41,529 WARNING [optim.py:487] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.777e+02 2.047e+02 2.383e+02 4.291e+02, threshold=4.094e+02, percent-clipped=1.0
+2024-08-25 10:27:07,770 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.46 vs. limit=15.0
+2024-08-25 10:27:14,153 INFO [train.py:1114] (3/4) Epoch 3, batch 2450, loss[loss=0.3985, simple_loss=0.3828, pruned_loss=0.1516, ctc_loss=0.2773, over 13418.00 frames. ], tot_loss[loss=0.3086, simple_loss=0.3393, pruned_loss=0.1012, ctc_loss=0.1887, over 3731394.84 frames. ], batch size: 141, lr: 3.53e-02, grad_scale: 16.0
+2024-08-25 10:27:41,335 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=39728.0, ans=0.125
+2024-08-25 10:27:51,463 INFO [scaling.py:214] (3/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=39781.333333333336, ans=0.2
+2024-08-25 10:27:54,289 INFO [scaling.py:1024] (3/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.49 vs. limit=15.0
+2024-08-25 10:39:24,826 INFO [train.py:1050] (3/4) Caught exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=89707, OpType=ALLREDUCE, NumelIn=745, NumelOut=745, Timeout(ms)=600000) ran for 600002 milliseconds before timing out..
+2024-08-25 10:39:24,827 INFO [checkpoint.py:75] (3/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/bad-model-3.pt
+2024-08-25 10:40:06,840 INFO [train.py:1413] (3/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/batch-41f60be0-7cef-6aa3-6aed-cf4a4599a084.pt
+2024-08-25 10:40:07,355 INFO [train.py:1419] (3/4) features shape: torch.Size([48, 1633, 80])
+2024-08-25 10:40:07,357 INFO [train.py:1423] (3/4) num tokens: 3855
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-02-0 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-02-0
new file mode 100644
index 0000000000000000000000000000000000000000..47586076d51c86f9373837853e27d8910b9269b2
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-02-0
@@ -0,0 +1,5503 @@
+2024-08-26 14:14:06,477 INFO [train.py:1182] (0/4) Training started
+2024-08-26 14:14:12,389 INFO [train.py:1192] (0/4) Device: cuda:0
+2024-08-26 14:14:12,392 INFO [train.py:1210] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2652.int.cedar.computecanada.ca', 'IP address': '172.16.146.89'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 4, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-26 14:14:12,392 INFO [train.py:1212] (0/4) About to create model
+2024-08-26 14:14:13,058 INFO [train.py:1216] (0/4) Number of model parameters: 65805511
+2024-08-26 14:14:13,604 INFO [checkpoint.py:112] (0/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-3.pt
+2024-08-26 14:14:19,856 INFO [checkpoint.py:131] (0/4) Loading averaged model
+2024-08-26 14:14:20,225 INFO [train.py:1231] (0/4) Using DDP
+2024-08-26 14:14:24,078 INFO [train.py:1243] (0/4) Loading optimizer state dict
+2024-08-26 14:14:25,323 INFO [train.py:1251] (0/4) Loading scheduler state dict
+2024-08-26 14:14:25,324 INFO [asr_datamodule.py:894] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-26 14:14:28,346 INFO [asr_datamodule.py:696] (0/4) Disable MUSAN
+2024-08-26 14:14:28,346 INFO [asr_datamodule.py:714] (0/4) Enable SpecAugment
+2024-08-26 14:14:28,346 INFO [asr_datamodule.py:715] (0/4) Time warp factor: 80
+2024-08-26 14:14:28,346 INFO [asr_datamodule.py:725] (0/4) Num frame mask: 10
+2024-08-26 14:14:28,347 INFO [asr_datamodule.py:738] (0/4) About to create train dataset
+2024-08-26 14:14:28,347 INFO [asr_datamodule.py:765] (0/4) Using DynamicBucketingSampler.
+2024-08-26 14:14:29,882 INFO [asr_datamodule.py:782] (0/4) About to create train dataloader
+2024-08-26 14:14:29,883 INFO [asr_datamodule.py:911] (0/4) About to get dev-clean cuts
+2024-08-26 14:14:32,051 INFO [asr_datamodule.py:918] (0/4) About to get dev-other cuts
+2024-08-26 14:14:32,052 INFO [asr_datamodule.py:814] (0/4) About to create dev dataset
+2024-08-26 14:14:32,362 INFO [asr_datamodule.py:831] (0/4) About to create dev dataloader
+2024-08-26 14:14:32,362 INFO [train.py:1435] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-26 14:18:38,883 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=512, metric=3.13 vs. limit=7.5
+2024-08-26 14:18:40,628 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12155MB
+2024-08-26 14:18:41,877 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12155MB
+2024-08-26 14:18:49,649 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12209MB
+2024-08-26 14:18:50,842 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12220MB
+2024-08-26 14:19:04,869 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12220MB
+2024-08-26 14:19:05,859 INFO [scaling.py:1024] (0/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.61 vs. limit=7.5
+2024-08-26 14:19:06,158 INFO [train.py:1463] (0/4) Maximum memory allocated so far is 12220MB
+2024-08-26 14:19:06,175 INFO [train.py:1344] (0/4) Loading grad scaler state dict
+2024-08-26 14:19:49,468 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.97 vs. limit=15.0
+2024-08-26 14:19:52,355 INFO [train.py:1114] (0/4) Epoch 4, batch 0, loss[loss=0.2792, simple_loss=0.3179, pruned_loss=0.08588, ctc_loss=0.1717, over 19412.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3179, pruned_loss=0.08588, ctc_loss=0.1717, over 19412.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-26 14:19:52,356 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 14:20:25,790 INFO [train.py:1146] (0/4) Epoch 4, validation: loss=0.2421, simple_loss=0.3218, pruned_loss=0.05945, ctc_loss=0.1086, over 944034.00 frames.
+2024-08-26 14:20:25,791 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 12220MB
+2024-08-26 14:20:28,128 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.49 vs. limit=15.0
+2024-08-26 14:21:40,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=39936.0, ans=0.125
+2024-08-26 14:22:00,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=39936.0, ans=0.0021878260869565213
+2024-08-26 14:22:42,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=39989.333333333336, ans=0.00217623188405797
+2024-08-26 14:23:04,536 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.845e+02 2.126e+02 2.642e+02 4.004e+02, threshold=4.252e+02, percent-clipped=0.0
+2024-08-26 14:23:26,402 INFO [train.py:1114] (0/4) Epoch 4, batch 50, loss[loss=0.2685, simple_loss=0.3056, pruned_loss=0.08346, ctc_loss=0.1611, over 19690.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3367, pruned_loss=0.09798, ctc_loss=0.1847, over 844645.13 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-26 14:23:55,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=40149.333333333336, ans=0.0
+2024-08-26 14:24:01,477 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=40149.333333333336, ans=0.125
+2024-08-26 14:24:35,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=40202.666666666664, ans=0.125
+2024-08-26 14:24:41,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=40202.666666666664, ans=0.125
+2024-08-26 14:24:54,607 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=40256.0, ans=0.125
+2024-08-26 14:25:08,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=40256.0, ans=0.125
+2024-08-26 14:25:33,125 INFO [train.py:1114] (0/4) Epoch 4, batch 100, loss[loss=0.2924, simple_loss=0.3329, pruned_loss=0.09114, ctc_loss=0.1744, over 19708.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3365, pruned_loss=0.09699, ctc_loss=0.1818, over 1498983.17 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-26 14:25:36,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=40362.666666666664, ans=0.125
+2024-08-26 14:26:02,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=40416.0, ans=0.0
+2024-08-26 14:26:13,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff2.min_abs, batch_count=40469.333333333336, ans=0.1
+2024-08-26 14:26:34,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=40522.666666666664, ans=0.0
+2024-08-26 14:26:40,725 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.662e+02 1.906e+02 2.226e+02 3.245e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-26 14:26:48,534 INFO [train.py:1114] (0/4) Epoch 4, batch 150, loss[loss=0.2788, simple_loss=0.3065, pruned_loss=0.09234, ctc_loss=0.1661, over 19733.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.3319, pruned_loss=0.0942, ctc_loss=0.1766, over 2028351.17 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-26 14:26:49,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=40629.333333333336, ans=10.0
+2024-08-26 14:27:32,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=40736.0, ans=0.07
+2024-08-26 14:27:38,678 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=40736.0, ans=0.00201391304347826
+2024-08-26 14:27:54,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=40842.666666666664, ans=0.035
+2024-08-26 14:28:04,923 INFO [train.py:1114] (0/4) Epoch 4, batch 200, loss[loss=0.3166, simple_loss=0.3503, pruned_loss=0.1027, ctc_loss=0.1937, over 18390.00 frames. ], tot_loss[loss=0.2934, simple_loss=0.3305, pruned_loss=0.09321, ctc_loss=0.1749, over 2436007.39 frames. ], batch size: 85, lr: 3.28e-02, grad_scale: 32.0
+2024-08-26 14:28:06,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=40896.0, ans=0.0
+2024-08-26 14:28:07,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=40896.0, ans=0.025
+2024-08-26 14:28:21,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff2.min_abs, batch_count=40949.333333333336, ans=0.1
+2024-08-26 14:28:28,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=41002.666666666664, ans=0.0
+2024-08-26 14:28:30,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=41002.666666666664, ans=0.2
+2024-08-26 14:28:37,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=41056.0, ans=0.1
+2024-08-26 14:28:49,777 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.824e+02 2.102e+02 2.533e+02 3.992e+02, threshold=4.203e+02, percent-clipped=3.0
+2024-08-26 14:28:55,760 INFO [train.py:1114] (0/4) Epoch 4, batch 250, loss[loss=0.3065, simple_loss=0.3533, pruned_loss=0.09436, ctc_loss=0.1775, over 19399.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.3295, pruned_loss=0.09203, ctc_loss=0.1724, over 2755963.59 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-26 14:29:19,666 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.79 vs. limit=8.0
+2024-08-26 14:29:36,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=41376.0, ans=0.125
+2024-08-26 14:29:42,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=41376.0, ans=0.125
+2024-08-26 14:29:44,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=41376.0, ans=0.1
+2024-08-26 14:29:46,774 INFO [train.py:1114] (0/4) Epoch 4, batch 300, loss[loss=0.2956, simple_loss=0.3424, pruned_loss=0.09124, ctc_loss=0.1656, over 19505.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3288, pruned_loss=0.0914, ctc_loss=0.1713, over 3001044.60 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-26 14:30:07,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=41536.0, ans=0.1
+2024-08-26 14:30:21,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=41589.333333333336, ans=0.001828405797101449
+2024-08-26 14:30:31,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=41642.666666666664, ans=0.125
+2024-08-26 14:30:32,084 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.674e+02 1.880e+02 2.161e+02 3.950e+02, threshold=3.761e+02, percent-clipped=0.0
+2024-08-26 14:30:32,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=41642.666666666664, ans=0.2
+2024-08-26 14:30:37,847 INFO [train.py:1114] (0/4) Epoch 4, batch 350, loss[loss=0.2861, simple_loss=0.3132, pruned_loss=0.09452, ctc_loss=0.1748, over 19766.00 frames. ], tot_loss[loss=0.2908, simple_loss=0.3293, pruned_loss=0.09185, ctc_loss=0.1717, over 3191455.99 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-26 14:30:49,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=41749.333333333336, ans=0.0
+2024-08-26 14:31:12,052 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.44 vs. limit=15.0
+2024-08-26 14:31:35,382 INFO [train.py:1114] (0/4) Epoch 4, batch 400, loss[loss=0.2758, simple_loss=0.3259, pruned_loss=0.0821, ctc_loss=0.154, over 19502.00 frames. ], tot_loss[loss=0.2889, simple_loss=0.328, pruned_loss=0.09098, ctc_loss=0.1698, over 3343576.12 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-26 14:32:04,286 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.42 vs. limit=10.0
+2024-08-26 14:32:09,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=42122.666666666664, ans=0.0017124637681159433
+2024-08-26 14:32:19,256 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 1.828e+02 2.157e+02 2.598e+02 8.551e+02, threshold=4.314e+02, percent-clipped=2.0
+2024-08-26 14:32:23,166 INFO [train.py:1114] (0/4) Epoch 4, batch 450, loss[loss=0.2808, simple_loss=0.3321, pruned_loss=0.08319, ctc_loss=0.1575, over 19616.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.328, pruned_loss=0.09122, ctc_loss=0.17, over 3452205.27 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 8.0
+2024-08-26 14:32:33,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=42229.333333333336, ans=0.1
+2024-08-26 14:32:37,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=42282.666666666664, ans=0.2
+2024-08-26 14:32:39,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=42282.666666666664, ans=0.125
+2024-08-26 14:32:50,410 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:32:50,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=42336.0, ans=0.125
+2024-08-26 14:32:56,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=42389.333333333336, ans=0.1
+2024-08-26 14:32:57,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=42389.333333333336, ans=0.125
+2024-08-26 14:33:01,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=42389.333333333336, ans=0.2
+2024-08-26 14:33:03,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=42442.666666666664, ans=0.5
+2024-08-26 14:33:14,189 INFO [train.py:1114] (0/4) Epoch 4, batch 500, loss[loss=0.2618, simple_loss=0.3148, pruned_loss=0.07652, ctc_loss=0.1393, over 19707.00 frames. ], tot_loss[loss=0.2865, simple_loss=0.3261, pruned_loss=0.08987, ctc_loss=0.1679, over 3547717.56 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 8.0
+2024-08-26 14:33:14,747 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.98 vs. limit=6.0
+2024-08-26 14:33:50,048 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-8000.pt
+2024-08-26 14:33:56,932 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.25 vs. limit=22.5
+2024-08-26 14:34:05,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=42709.333333333336, ans=0.125
+2024-08-26 14:34:07,927 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.676e+02 1.857e+02 2.171e+02 5.331e+02, threshold=3.714e+02, percent-clipped=2.0
+2024-08-26 14:34:11,747 INFO [train.py:1114] (0/4) Epoch 4, batch 550, loss[loss=0.3004, simple_loss=0.338, pruned_loss=0.09611, ctc_loss=0.1764, over 19276.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3271, pruned_loss=0.09048, ctc_loss=0.1688, over 3609344.55 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 8.0
+2024-08-26 14:34:17,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=42762.666666666664, ans=0.125
+2024-08-26 14:34:18,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=42762.666666666664, ans=0.05
+2024-08-26 14:34:20,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=42816.0, ans=0.0
+2024-08-26 14:34:35,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=42869.333333333336, ans=0.0
+2024-08-26 14:34:45,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=42922.666666666664, ans=0.125
+2024-08-26 14:34:47,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=42922.666666666664, ans=0.125
+2024-08-26 14:35:00,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=42976.0, ans=0.125
+2024-08-26 14:35:03,326 INFO [train.py:1114] (0/4) Epoch 4, batch 600, loss[loss=0.2845, simple_loss=0.3268, pruned_loss=0.08814, ctc_loss=0.1647, over 19390.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.3264, pruned_loss=0.08992, ctc_loss=0.1679, over 3666017.55 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 8.0
+2024-08-26 14:35:03,924 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.35 vs. limit=22.5
+2024-08-26 14:35:22,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=43136.0, ans=0.0
+2024-08-26 14:35:40,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=43189.333333333336, ans=0.125
+2024-08-26 14:35:50,402 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 1.699e+02 1.953e+02 2.270e+02 5.390e+02, threshold=3.906e+02, percent-clipped=1.0
+2024-08-26 14:35:52,095 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.75 vs. limit=15.0
+2024-08-26 14:35:54,189 INFO [train.py:1114] (0/4) Epoch 4, batch 650, loss[loss=0.286, simple_loss=0.3301, pruned_loss=0.08782, ctc_loss=0.1657, over 19764.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3242, pruned_loss=0.08863, ctc_loss=0.1654, over 3716027.46 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 8.0
+2024-08-26 14:36:24,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=43402.666666666664, ans=0.125
+2024-08-26 14:36:48,327 INFO [train.py:1114] (0/4) Epoch 4, batch 700, loss[loss=0.256, simple_loss=0.3017, pruned_loss=0.07619, ctc_loss=0.1449, over 19712.00 frames. ], tot_loss[loss=0.285, simple_loss=0.3253, pruned_loss=0.08911, ctc_loss=0.1664, over 3748433.07 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 8.0
+2024-08-26 14:36:56,096 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=43562.666666666664, ans=0.0
+2024-08-26 14:37:14,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=43669.333333333336, ans=0.125
+2024-08-26 14:37:15,014 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.26 vs. limit=22.5
+2024-08-26 14:37:29,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=43776.0, ans=0.0
+2024-08-26 14:37:33,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=43776.0, ans=0.00135304347826087
+2024-08-26 14:37:33,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=43776.0, ans=0.0
+2024-08-26 14:37:36,044 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.717e+02 1.974e+02 2.287e+02 3.794e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 14:37:39,952 INFO [train.py:1114] (0/4) Epoch 4, batch 750, loss[loss=0.3118, simple_loss=0.3556, pruned_loss=0.09827, ctc_loss=0.1785, over 19497.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3254, pruned_loss=0.08912, ctc_loss=0.1663, over 3775360.00 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 8.0
+2024-08-26 14:37:47,426 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.59 vs. limit=22.5
+2024-08-26 14:37:49,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=43882.666666666664, ans=0.1
+2024-08-26 14:37:55,121 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.13 vs. limit=15.0
+2024-08-26 14:37:58,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=43936.0, ans=0.0
+2024-08-26 14:38:09,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=43989.333333333336, ans=0.5
+2024-08-26 14:38:10,061 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.11 vs. limit=15.0
+2024-08-26 14:38:22,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=44042.666666666664, ans=0.025
+2024-08-26 14:38:31,812 INFO [train.py:1114] (0/4) Epoch 4, batch 800, loss[loss=0.2788, simple_loss=0.313, pruned_loss=0.08899, ctc_loss=0.1665, over 19439.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3252, pruned_loss=0.08907, ctc_loss=0.1662, over 3795552.55 frames. ], batch size: 48, lr: 3.21e-02, grad_scale: 16.0
+2024-08-26 14:38:46,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=44149.333333333336, ans=0.1
+2024-08-26 14:39:05,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=44256.0, ans=0.015
+2024-08-26 14:39:13,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=44309.333333333336, ans=0.0
+2024-08-26 14:39:14,483 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:39:16,253 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.706e+02 1.876e+02 2.197e+02 5.470e+02, threshold=3.751e+02, percent-clipped=2.0
+2024-08-26 14:39:22,934 INFO [train.py:1114] (0/4) Epoch 4, batch 850, loss[loss=0.3378, simple_loss=0.368, pruned_loss=0.112, ctc_loss=0.209, over 19643.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3245, pruned_loss=0.08858, ctc_loss=0.1652, over 3815313.62 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 16.0
+2024-08-26 14:39:34,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=44416.0, ans=0.125
+2024-08-26 14:39:45,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=44469.333333333336, ans=0.125
+2024-08-26 14:39:50,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=44469.333333333336, ans=0.1
+2024-08-26 14:39:52,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=44522.666666666664, ans=0.125
+2024-08-26 14:39:52,952 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=44522.666666666664, ans=0.125
+2024-08-26 14:40:00,979 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=44576.0, ans=0.2
+2024-08-26 14:40:01,541 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.95 vs. limit=5.0
+2024-08-26 14:40:02,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=44576.0, ans=0.125
+2024-08-26 14:40:11,461 INFO [train.py:1114] (0/4) Epoch 4, batch 900, loss[loss=0.237, simple_loss=0.2886, pruned_loss=0.06622, ctc_loss=0.1323, over 19384.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3253, pruned_loss=0.08933, ctc_loss=0.1665, over 3818849.62 frames. ], batch size: 48, lr: 3.20e-02, grad_scale: 16.0
+2024-08-26 14:40:14,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=44629.333333333336, ans=0.125
+2024-08-26 14:40:27,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=44682.666666666664, ans=0.2
+2024-08-26 14:40:28,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=44682.666666666664, ans=0.025
+2024-08-26 14:40:45,393 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.25 vs. limit=15.0
+2024-08-26 14:40:59,413 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.686e+02 1.871e+02 2.157e+02 4.639e+02, threshold=3.742e+02, percent-clipped=1.0
+2024-08-26 14:41:03,411 INFO [train.py:1114] (0/4) Epoch 4, batch 950, loss[loss=0.2412, simple_loss=0.2912, pruned_loss=0.06964, ctc_loss=0.1299, over 19495.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3255, pruned_loss=0.0897, ctc_loss=0.1673, over 3820486.04 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 16.0
+2024-08-26 14:41:04,160 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.01 vs. limit=6.0
+2024-08-26 14:41:34,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=45056.0, ans=0.125
+2024-08-26 14:41:34,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=45056.0, ans=0.04949747468305833
+2024-08-26 14:41:52,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.99 vs. limit=22.5
+2024-08-26 14:41:53,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=45109.333333333336, ans=0.125
+2024-08-26 14:41:53,245 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.46 vs. limit=15.0
+2024-08-26 14:41:54,738 INFO [train.py:1114] (0/4) Epoch 4, batch 1000, loss[loss=0.2648, simple_loss=0.3047, pruned_loss=0.0824, ctc_loss=0.1502, over 19848.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3264, pruned_loss=0.09037, ctc_loss=0.1684, over 3816355.45 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 16.0
+2024-08-26 14:42:36,342 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.81 vs. limit=22.5
+2024-08-26 14:42:42,501 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.702e+02 1.844e+02 2.187e+02 3.225e+02, threshold=3.689e+02, percent-clipped=0.0
+2024-08-26 14:42:44,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=45376.0, ans=0.1
+2024-08-26 14:42:46,508 INFO [train.py:1114] (0/4) Epoch 4, batch 1050, loss[loss=0.3346, simple_loss=0.3647, pruned_loss=0.1113, ctc_loss=0.2048, over 19838.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3253, pruned_loss=0.08963, ctc_loss=0.1673, over 3823183.60 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-26 14:43:20,866 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.23 vs. limit=15.0
+2024-08-26 14:43:26,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=45589.333333333336, ans=0.125
+2024-08-26 14:43:27,600 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.28 vs. limit=15.0
+2024-08-26 14:43:35,529 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=45642.666666666664, ans=0.125
+2024-08-26 14:43:38,151 INFO [train.py:1114] (0/4) Epoch 4, batch 1100, loss[loss=0.2623, simple_loss=0.3118, pruned_loss=0.07708, ctc_loss=0.1464, over 19598.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3248, pruned_loss=0.08912, ctc_loss=0.1662, over 3830731.93 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-26 14:43:39,373 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=45696.0, ans=0.125
+2024-08-26 14:43:45,564 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.95 vs. limit=15.0
+2024-08-26 14:43:48,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=45749.333333333336, ans=0.09899494936611666
+2024-08-26 14:43:51,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=45749.333333333336, ans=0.1
+2024-08-26 14:43:57,121 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.06 vs. limit=15.0
+2024-08-26 14:44:21,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=45909.333333333336, ans=0.125
+2024-08-26 14:44:25,688 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.748e+02 1.997e+02 2.350e+02 6.199e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-26 14:44:29,534 INFO [train.py:1114] (0/4) Epoch 4, batch 1150, loss[loss=0.266, simple_loss=0.3089, pruned_loss=0.08215, ctc_loss=0.1472, over 19591.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3243, pruned_loss=0.08894, ctc_loss=0.1661, over 3829434.88 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-26 14:44:30,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=45962.666666666664, ans=0.125
+2024-08-26 14:44:35,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=45962.666666666664, ans=0.125
+2024-08-26 14:44:35,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=45962.666666666664, ans=0.1
+2024-08-26 14:46:03,380 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.84 vs. limit=15.0
+2024-08-26 14:46:04,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.whiten.whitening_limit, batch_count=46069.333333333336, ans=12.0
+2024-08-26 14:46:31,279 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46176.0, ans=0.1
+2024-08-26 14:46:38,928 INFO [train.py:1114] (0/4) Epoch 4, batch 1200, loss[loss=0.2901, simple_loss=0.3282, pruned_loss=0.09065, ctc_loss=0.1766, over 19839.00 frames. ], tot_loss[loss=0.2854, simple_loss=0.3252, pruned_loss=0.08942, ctc_loss=0.1671, over 3825559.34 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-26 14:46:44,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=46229.333333333336, ans=0.125
+2024-08-26 14:46:50,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=46282.666666666664, ans=0.0
+2024-08-26 14:47:00,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46336.0, ans=0.1
+2024-08-26 14:47:04,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=46336.0, ans=0.0
+2024-08-26 14:47:06,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=46389.333333333336, ans=0.125
+2024-08-26 14:47:14,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=46389.333333333336, ans=0.0
+2024-08-26 14:47:14,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=46389.333333333336, ans=0.0
+2024-08-26 14:47:17,995 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.73 vs. limit=22.5
+2024-08-26 14:47:23,214 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.767e+02 1.944e+02 2.283e+02 5.479e+02, threshold=3.889e+02, percent-clipped=1.0
+2024-08-26 14:47:29,952 INFO [train.py:1114] (0/4) Epoch 4, batch 1250, loss[loss=0.3091, simple_loss=0.3433, pruned_loss=0.1008, ctc_loss=0.1832, over 19526.00 frames. ], tot_loss[loss=0.285, simple_loss=0.3253, pruned_loss=0.08908, ctc_loss=0.1663, over 3843759.16 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-26 14:47:30,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=46496.0, ans=0.125
+2024-08-26 14:47:43,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=46549.333333333336, ans=0.125
+2024-08-26 14:47:46,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=46549.333333333336, ans=0.125
+2024-08-26 14:47:51,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=46602.666666666664, ans=0.025
+2024-08-26 14:47:51,777 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:47:59,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=46656.0, ans=0.1
+2024-08-26 14:48:21,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=46762.666666666664, ans=0.025
+2024-08-26 14:48:22,031 INFO [train.py:1114] (0/4) Epoch 4, batch 1300, loss[loss=0.3296, simple_loss=0.3619, pruned_loss=0.1088, ctc_loss=0.1988, over 19010.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3243, pruned_loss=0.08863, ctc_loss=0.1656, over 3847381.00 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-26 14:48:36,836 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.01 vs. limit=6.0
+2024-08-26 14:48:41,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=46869.333333333336, ans=0.0006805797101449261
+2024-08-26 14:49:06,437 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.633e+02 1.793e+02 2.136e+02 4.035e+02, threshold=3.586e+02, percent-clipped=1.0
+2024-08-26 14:49:07,626 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=46976.0, ans=0.125
+2024-08-26 14:49:09,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=47029.333333333336, ans=0.1
+2024-08-26 14:49:10,195 INFO [train.py:1114] (0/4) Epoch 4, batch 1350, loss[loss=0.2731, simple_loss=0.3202, pruned_loss=0.08132, ctc_loss=0.1585, over 19764.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3238, pruned_loss=0.08808, ctc_loss=0.1646, over 3856211.67 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-26 14:49:22,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=47082.666666666664, ans=0.125
+2024-08-26 14:49:27,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=47082.666666666664, ans=0.125
+2024-08-26 14:49:28,315 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.93 vs. limit=22.5
+2024-08-26 14:49:33,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=47136.0, ans=0.1
+2024-08-26 14:49:59,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=47242.666666666664, ans=0.0
+2024-08-26 14:50:01,623 INFO [train.py:1114] (0/4) Epoch 4, batch 1400, loss[loss=0.2442, simple_loss=0.29, pruned_loss=0.07191, ctc_loss=0.1367, over 19662.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3238, pruned_loss=0.08804, ctc_loss=0.1648, over 3862713.70 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-26 14:50:23,744 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.91 vs. limit=15.0
+2024-08-26 14:50:31,967 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.15 vs. limit=22.5
+2024-08-26 14:50:42,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=47509.333333333336, ans=0.125
+2024-08-26 14:50:49,028 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.701e+02 1.930e+02 2.137e+02 5.469e+02, threshold=3.859e+02, percent-clipped=2.0
+2024-08-26 14:50:53,069 INFO [train.py:1114] (0/4) Epoch 4, batch 1450, loss[loss=0.291, simple_loss=0.3313, pruned_loss=0.09212, ctc_loss=0.1662, over 19662.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.325, pruned_loss=0.0886, ctc_loss=0.166, over 3860652.50 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-26 14:51:14,584 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.78 vs. limit=15.0
+2024-08-26 14:51:43,456 INFO [train.py:1114] (0/4) Epoch 4, batch 1500, loss[loss=0.3009, simple_loss=0.3447, pruned_loss=0.09391, ctc_loss=0.1733, over 19581.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3248, pruned_loss=0.08806, ctc_loss=0.165, over 3859580.36 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 32.0
+2024-08-26 14:51:46,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=47829.333333333336, ans=0.125
+2024-08-26 14:51:53,402 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:52:11,527 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=47936.0, ans=0.0004486956521739128
+2024-08-26 14:52:20,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=47989.333333333336, ans=0.025
+2024-08-26 14:52:34,707 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.743e+02 1.956e+02 2.243e+02 3.928e+02, threshold=3.912e+02, percent-clipped=1.0
+2024-08-26 14:52:38,434 INFO [train.py:1114] (0/4) Epoch 4, batch 1550, loss[loss=0.3004, simple_loss=0.3406, pruned_loss=0.09646, ctc_loss=0.1679, over 19593.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3251, pruned_loss=0.08865, ctc_loss=0.1657, over 3846078.25 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 32.0
+2024-08-26 14:53:20,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:23,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=48309.333333333336, ans=0.07
+2024-08-26 14:53:23,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:24,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:29,859 INFO [train.py:1114] (0/4) Epoch 4, batch 1600, loss[loss=0.2955, simple_loss=0.3369, pruned_loss=0.09166, ctc_loss=0.177, over 19838.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3248, pruned_loss=0.08879, ctc_loss=0.166, over 3836386.44 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-26 14:53:39,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=48416.0, ans=0.2
+2024-08-26 14:53:41,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=48416.0, ans=0.125
+2024-08-26 14:53:46,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=48416.0, ans=0.125
+2024-08-26 14:54:18,017 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.701e+02 1.882e+02 2.341e+02 4.982e+02, threshold=3.764e+02, percent-clipped=3.0
+2024-08-26 14:54:21,809 INFO [train.py:1114] (0/4) Epoch 4, batch 1650, loss[loss=0.322, simple_loss=0.3514, pruned_loss=0.1053, ctc_loss=0.2048, over 19659.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3245, pruned_loss=0.08876, ctc_loss=0.1657, over 3833121.08 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-26 14:54:24,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=48629.333333333336, ans=0.125
+2024-08-26 14:54:30,087 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.34 vs. limit=15.0
+2024-08-26 14:54:37,795 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.95 vs. limit=15.0
+2024-08-26 14:54:38,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=48682.666666666664, ans=0.1
+2024-08-26 14:54:42,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=48736.0, ans=0.2
+2024-08-26 14:55:17,031 INFO [train.py:1114] (0/4) Epoch 4, batch 1700, loss[loss=0.2436, simple_loss=0.2857, pruned_loss=0.07395, ctc_loss=0.1339, over 19658.00 frames. ], tot_loss[loss=0.2827, simple_loss=0.3237, pruned_loss=0.08806, ctc_loss=0.1639, over 3847142.73 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-26 14:55:24,785 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=48896.0, ans=0.0002399999999999989
+2024-08-26 14:55:26,100 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.51 vs. limit=15.0
+2024-08-26 14:55:29,733 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.33 vs. limit=15.0
+2024-08-26 14:55:36,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=49002.666666666664, ans=0.2
+2024-08-26 14:55:40,270 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=49002.666666666664, ans=0.125
+2024-08-26 14:55:43,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff2.min_abs, batch_count=49002.666666666664, ans=0.1
+2024-08-26 14:55:45,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=49056.0, ans=0.2
+2024-08-26 14:55:46,181 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.63 vs. limit=15.0
+2024-08-26 14:55:53,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=49109.333333333336, ans=10.0
+2024-08-26 14:55:55,306 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.16 vs. limit=15.0
+2024-08-26 14:55:59,535 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.770e+02 1.975e+02 2.193e+02 4.882e+02, threshold=3.950e+02, percent-clipped=1.0
+2024-08-26 14:55:59,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=49109.333333333336, ans=0.125
+2024-08-26 14:56:03,237 INFO [train.py:1114] (0/4) Epoch 4, batch 1750, loss[loss=0.2441, simple_loss=0.2883, pruned_loss=0.07304, ctc_loss=0.1347, over 19657.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.323, pruned_loss=0.08762, ctc_loss=0.1632, over 3852663.18 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-26 14:56:15,068 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.51 vs. limit=15.0
+2024-08-26 14:56:16,010 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=12.31 vs. limit=15.0
+2024-08-26 14:56:30,523 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.55 vs. limit=15.0
+2024-08-26 14:56:48,556 INFO [train.py:1114] (0/4) Epoch 4, batch 1800, loss[loss=0.2778, simple_loss=0.3307, pruned_loss=0.08192, ctc_loss=0.1526, over 19615.00 frames. ], tot_loss[loss=0.282, simple_loss=0.3231, pruned_loss=0.08775, ctc_loss=0.1633, over 3852526.50 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-26 14:56:56,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=49429.333333333336, ans=0.125
+2024-08-26 14:56:57,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=49482.666666666664, ans=0.125
+2024-08-26 14:57:02,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=49482.666666666664, ans=0.00011246376811594253
+2024-08-26 14:57:26,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=49642.666666666664, ans=0.0
+2024-08-26 14:57:30,226 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.664e+02 1.898e+02 2.172e+02 3.982e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 14:57:33,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=49696.0, ans=0.125
+2024-08-26 14:57:33,984 INFO [train.py:1114] (0/4) Epoch 4, batch 1850, loss[loss=0.2761, simple_loss=0.3279, pruned_loss=0.08135, ctc_loss=0.1541, over 19599.00 frames. ], tot_loss[loss=0.2809, simple_loss=0.3224, pruned_loss=0.08725, ctc_loss=0.1624, over 3856120.50 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-26 14:57:53,662 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.94 vs. limit=15.0
+2024-08-26 14:58:18,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=49909.333333333336, ans=0.125
+2024-08-26 14:58:20,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=49962.666666666664, ans=0.025
+2024-08-26 14:58:21,318 INFO [train.py:1114] (0/4) Epoch 4, batch 1900, loss[loss=0.2984, simple_loss=0.3424, pruned_loss=0.09349, ctc_loss=0.1687, over 19647.00 frames. ], tot_loss[loss=0.281, simple_loss=0.3228, pruned_loss=0.08714, ctc_loss=0.1622, over 3860972.37 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 16.0
+2024-08-26 14:58:41,341 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.28 vs. limit=15.0
+2024-08-26 14:58:44,607 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:59:03,247 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.687e+02 1.820e+02 2.228e+02 3.741e+02, threshold=3.639e+02, percent-clipped=0.0
+2024-08-26 14:59:06,123 INFO [train.py:1114] (0/4) Epoch 4, batch 1950, loss[loss=0.282, simple_loss=0.3218, pruned_loss=0.08784, ctc_loss=0.1663, over 19588.00 frames. ], tot_loss[loss=0.2813, simple_loss=0.3236, pruned_loss=0.08709, ctc_loss=0.1622, over 3869830.13 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 16.0
+2024-08-26 14:59:15,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=50282.666666666664, ans=0.125
+2024-08-26 14:59:31,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=50336.0, ans=0.0
+2024-08-26 14:59:42,917 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.44 vs. limit=15.0
+2024-08-26 14:59:49,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=50442.666666666664, ans=0.125
+2024-08-26 14:59:53,436 INFO [train.py:1114] (0/4) Epoch 4, batch 2000, loss[loss=0.2675, simple_loss=0.2975, pruned_loss=0.08646, ctc_loss=0.1613, over 19654.00 frames. ], tot_loss[loss=0.2826, simple_loss=0.3243, pruned_loss=0.08776, ctc_loss=0.1636, over 3855683.96 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-26 15:00:06,984 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=50549.333333333336, ans=0.025
+2024-08-26 15:00:30,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=50709.333333333336, ans=0.2
+2024-08-26 15:00:35,434 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.722e+02 2.023e+02 2.377e+02 8.657e+02, threshold=4.047e+02, percent-clipped=4.0
+2024-08-26 15:00:37,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=50762.666666666664, ans=0.125
+2024-08-26 15:00:38,082 INFO [train.py:1114] (0/4) Epoch 4, batch 2050, loss[loss=0.2354, simple_loss=0.2763, pruned_loss=0.06957, ctc_loss=0.1383, over 19719.00 frames. ], tot_loss[loss=0.2812, simple_loss=0.3228, pruned_loss=0.08726, ctc_loss=0.1628, over 3851342.85 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-26 15:00:38,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=50762.666666666664, ans=0.025
+2024-08-26 15:00:41,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=50762.666666666664, ans=0.125
+2024-08-26 15:00:55,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=50869.333333333336, ans=0.05
+2024-08-26 15:00:59,892 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.38 vs. limit=15.0
+2024-08-26 15:01:03,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=50922.666666666664, ans=0.125
+2024-08-26 15:01:11,136 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=50922.666666666664, ans=10.0
+2024-08-26 15:01:18,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=50976.0, ans=0.1
+2024-08-26 15:01:21,118 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.68 vs. limit=15.0
+2024-08-26 15:01:22,461 INFO [train.py:1114] (0/4) Epoch 4, batch 2100, loss[loss=0.277, simple_loss=0.3218, pruned_loss=0.08491, ctc_loss=0.1561, over 19770.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3217, pruned_loss=0.08623, ctc_loss=0.1608, over 3857773.24 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-26 15:01:32,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51082.666666666664, ans=0.1
+2024-08-26 15:01:35,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=51082.666666666664, ans=0.0
+2024-08-26 15:01:36,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=51082.666666666664, ans=0.125
+2024-08-26 15:01:41,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=51136.0, ans=0.125
+2024-08-26 15:01:53,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=51189.333333333336, ans=0.125
+2024-08-26 15:01:59,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=51242.666666666664, ans=0.125
+2024-08-26 15:02:04,155 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.626e+02 1.780e+02 1.895e+02 2.709e+02, threshold=3.561e+02, percent-clipped=0.0
+2024-08-26 15:02:07,164 INFO [train.py:1114] (0/4) Epoch 4, batch 2150, loss[loss=0.2481, simple_loss=0.3013, pruned_loss=0.0699, ctc_loss=0.1375, over 19590.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3206, pruned_loss=0.08567, ctc_loss=0.1599, over 3868264.41 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-26 15:02:08,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=51296.0, ans=0.0
+2024-08-26 15:02:10,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=51296.0, ans=0.2
+2024-08-26 15:02:19,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51349.333333333336, ans=0.1
+2024-08-26 15:02:26,011 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.92 vs. limit=15.0
+2024-08-26 15:02:26,986 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.54 vs. limit=15.0
+2024-08-26 15:02:29,680 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.79 vs. limit=15.0
+2024-08-26 15:02:38,213 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.75 vs. limit=6.0
+2024-08-26 15:02:41,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=51456.0, ans=0.0
+2024-08-26 15:02:48,752 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.43 vs. limit=22.5
+2024-08-26 15:02:50,331 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.49 vs. limit=22.5
+2024-08-26 15:02:54,327 INFO [train.py:1114] (0/4) Epoch 4, batch 2200, loss[loss=0.2601, simple_loss=0.3169, pruned_loss=0.07376, ctc_loss=0.1397, over 19592.00 frames. ], tot_loss[loss=0.2777, simple_loss=0.3203, pruned_loss=0.0856, ctc_loss=0.1596, over 3867008.68 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-26 15:02:57,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=51562.666666666664, ans=0.125
+2024-08-26 15:03:01,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=51562.666666666664, ans=0.015
+2024-08-26 15:03:05,220 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.95 vs. limit=15.0
+2024-08-26 15:03:10,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=51616.0, ans=0.1
+2024-08-26 15:03:22,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=51722.666666666664, ans=0.07
+2024-08-26 15:03:23,840 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.41 vs. limit=10.0
+2024-08-26 15:03:24,758 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.66 vs. limit=22.5
+2024-08-26 15:03:33,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=51776.0, ans=0.0
+2024-08-26 15:03:34,332 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.28 vs. limit=22.5
+2024-08-26 15:03:36,548 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 1.687e+02 1.993e+02 2.251e+02 9.209e+02, threshold=3.987e+02, percent-clipped=2.0
+2024-08-26 15:03:39,207 INFO [train.py:1114] (0/4) Epoch 4, batch 2250, loss[loss=0.2819, simple_loss=0.3297, pruned_loss=0.08407, ctc_loss=0.1649, over 19623.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3208, pruned_loss=0.08572, ctc_loss=0.1601, over 3867747.12 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-26 15:03:40,642 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.94 vs. limit=15.0
+2024-08-26 15:03:50,785 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=51882.666666666664, ans=0.1
+2024-08-26 15:03:52,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=51882.666666666664, ans=10.0
+2024-08-26 15:03:52,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=51882.666666666664, ans=0.0
+2024-08-26 15:03:59,607 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.44 vs. limit=15.0
+2024-08-26 15:04:11,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=51989.333333333336, ans=0.0
+2024-08-26 15:04:15,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=52042.666666666664, ans=0.025
+2024-08-26 15:04:21,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52042.666666666664, ans=0.1
+2024-08-26 15:04:21,744 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52042.666666666664, ans=0.1
+2024-08-26 15:04:23,339 INFO [train.py:1114] (0/4) Epoch 4, batch 2300, loss[loss=0.253, simple_loss=0.3044, pruned_loss=0.07346, ctc_loss=0.1366, over 19504.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3196, pruned_loss=0.08535, ctc_loss=0.1595, over 3860995.81 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-26 15:04:35,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=52149.333333333336, ans=0.125
+2024-08-26 15:04:35,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=52149.333333333336, ans=0.125
+2024-08-26 15:04:37,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=52149.333333333336, ans=0.1
+2024-08-26 15:04:47,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=52202.666666666664, ans=0.0
+2024-08-26 15:04:49,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52256.0, ans=0.1
+2024-08-26 15:05:06,729 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.800e+02 1.978e+02 2.376e+02 5.904e+02, threshold=3.955e+02, percent-clipped=2.0
+2024-08-26 15:05:09,374 INFO [train.py:1114] (0/4) Epoch 4, batch 2350, loss[loss=0.3077, simple_loss=0.3466, pruned_loss=0.09966, ctc_loss=0.1737, over 19660.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3199, pruned_loss=0.08554, ctc_loss=0.1596, over 3864022.99 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-26 15:05:13,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=52362.666666666664, ans=0.0
+2024-08-26 15:05:23,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=52416.0, ans=0.1
+2024-08-26 15:05:25,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=52469.333333333336, ans=0.125
+2024-08-26 15:05:29,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=52469.333333333336, ans=0.0
+2024-08-26 15:05:36,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=52522.666666666664, ans=6.0
+2024-08-26 15:05:38,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=52522.666666666664, ans=0.0
+2024-08-26 15:05:43,478 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:05:47,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=52576.0, ans=0.1
+2024-08-26 15:05:51,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=52576.0, ans=0.2
+2024-08-26 15:06:03,789 INFO [train.py:1114] (0/4) Epoch 4, batch 2400, loss[loss=0.3055, simple_loss=0.345, pruned_loss=0.09725, ctc_loss=0.1788, over 19401.00 frames. ], tot_loss[loss=0.2795, simple_loss=0.3221, pruned_loss=0.0863, ctc_loss=0.1608, over 3858940.05 frames. ], batch size: 67, lr: 3.05e-02, grad_scale: 32.0
+2024-08-26 15:06:06,033 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.09 vs. limit=6.0
+2024-08-26 15:06:08,286 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52629.333333333336, ans=0.1
+2024-08-26 15:06:13,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=52682.666666666664, ans=0.05
+2024-08-26 15:06:21,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=52682.666666666664, ans=0.0
+2024-08-26 15:06:21,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=52682.666666666664, ans=0.125
+2024-08-26 15:06:24,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=52682.666666666664, ans=22.5
+2024-08-26 15:06:45,634 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=18.83 vs. limit=15.0
+2024-08-26 15:06:53,233 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.824e+02 2.127e+02 2.398e+02 5.215e+02, threshold=4.254e+02, percent-clipped=1.0
+2024-08-26 15:06:54,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=52896.0, ans=0.0
+2024-08-26 15:06:55,099 INFO [train.py:1114] (0/4) Epoch 4, batch 2450, loss[loss=0.3918, simple_loss=0.374, pruned_loss=0.1473, ctc_loss=0.2878, over 13118.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3278, pruned_loss=0.09113, ctc_loss=0.17, over 3734912.68 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-26 15:06:59,829 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=52896.0, ans=0.05
+2024-08-26 15:07:04,349 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=52949.333333333336, ans=0.0
+2024-08-26 15:07:04,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=52949.333333333336, ans=0.0
+2024-08-26 15:07:04,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=52949.333333333336, ans=0.125
+2024-08-26 15:07:08,830 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=52949.333333333336, ans=0.2
+2024-08-26 15:07:12,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=53002.666666666664, ans=0.2
+2024-08-26 15:07:13,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53002.666666666664, ans=0.0
+2024-08-26 15:07:18,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=53002.666666666664, ans=0.0
+2024-08-26 15:07:29,590 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-4.pt
+2024-08-26 15:09:12,282 INFO [train.py:1114] (0/4) Epoch 5, batch 0, loss[loss=0.2637, simple_loss=0.302, pruned_loss=0.08211, ctc_loss=0.1531, over 19423.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.302, pruned_loss=0.08211, ctc_loss=0.1531, over 19423.00 frames. ], batch size: 48, lr: 2.83e-02, grad_scale: 32.0
+2024-08-26 15:09:12,284 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 15:09:22,083 INFO [train.py:1146] (0/4) Epoch 5, validation: loss=0.2289, simple_loss=0.3118, pruned_loss=0.05352, ctc_loss=0.09739, over 944034.00 frames.
+2024-08-26 15:09:22,763 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 12775MB
+2024-08-26 15:09:30,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=53104.0, ans=0.2
+2024-08-26 15:09:37,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=53157.333333333336, ans=0.0
+2024-08-26 15:09:41,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=53210.666666666664, ans=0.1
+2024-08-26 15:09:50,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=53264.0, ans=0.125
+2024-08-26 15:09:54,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=53264.0, ans=0.0
+2024-08-26 15:10:10,885 INFO [train.py:1114] (0/4) Epoch 5, batch 50, loss[loss=0.2436, simple_loss=0.2952, pruned_loss=0.06917, ctc_loss=0.1338, over 19753.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3261, pruned_loss=0.08796, ctc_loss=0.1656, over 844155.08 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-26 15:10:11,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=53370.666666666664, ans=0.0
+2024-08-26 15:10:22,327 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.804e+02 2.028e+02 2.297e+02 4.038e+02, threshold=4.056e+02, percent-clipped=0.0
+2024-08-26 15:10:45,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53530.666666666664, ans=0.0
+2024-08-26 15:10:45,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=53530.666666666664, ans=0.125
+2024-08-26 15:10:49,545 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.14 vs. limit=15.0
+2024-08-26 15:11:01,253 INFO [train.py:1114] (0/4) Epoch 5, batch 100, loss[loss=0.2461, simple_loss=0.2941, pruned_loss=0.07249, ctc_loss=0.1326, over 19724.00 frames. ], tot_loss[loss=0.2826, simple_loss=0.3252, pruned_loss=0.0873, ctc_loss=0.1636, over 1498403.14 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-26 15:11:13,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=53690.666666666664, ans=0.125
+2024-08-26 15:11:16,955 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.56 vs. limit=6.0
+2024-08-26 15:11:19,832 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.53 vs. limit=15.0
+2024-08-26 15:11:29,962 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.89 vs. limit=22.5
+2024-08-26 15:11:30,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=53744.0, ans=0.2
+2024-08-26 15:11:34,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=53797.333333333336, ans=0.0
+2024-08-26 15:11:59,572 INFO [train.py:1114] (0/4) Epoch 5, batch 150, loss[loss=0.267, simple_loss=0.2947, pruned_loss=0.08802, ctc_loss=0.158, over 19741.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3205, pruned_loss=0.08447, ctc_loss=0.1584, over 2028143.16 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-26 15:12:07,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=53904.0, ans=0.0
+2024-08-26 15:12:10,027 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.696e+02 1.862e+02 2.172e+02 3.492e+02, threshold=3.724e+02, percent-clipped=0.0
+2024-08-26 15:12:14,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=53957.333333333336, ans=0.125
+2024-08-26 15:12:19,946 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.46 vs. limit=22.5
+2024-08-26 15:12:29,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=54064.0, ans=0.2
+2024-08-26 15:12:30,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=54064.0, ans=0.125
+2024-08-26 15:12:44,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=54117.333333333336, ans=0.0
+2024-08-26 15:12:48,444 INFO [train.py:1114] (0/4) Epoch 5, batch 200, loss[loss=0.3062, simple_loss=0.339, pruned_loss=0.1003, ctc_loss=0.1818, over 18194.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.3183, pruned_loss=0.08334, ctc_loss=0.1559, over 2436360.97 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:12:51,926 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.66 vs. limit=15.0
+2024-08-26 15:12:54,668 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.07 vs. limit=22.5
+2024-08-26 15:12:58,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=54224.0, ans=0.125
+2024-08-26 15:13:01,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=54224.0, ans=0.1
+2024-08-26 15:13:16,414 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.32 vs. limit=10.0
+2024-08-26 15:13:20,233 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.21 vs. limit=15.0
+2024-08-26 15:13:29,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=54330.666666666664, ans=0.2
+2024-08-26 15:13:37,759 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.15 vs. limit=12.0
+2024-08-26 15:13:41,977 INFO [train.py:1114] (0/4) Epoch 5, batch 250, loss[loss=0.279, simple_loss=0.3269, pruned_loss=0.08501, ctc_loss=0.1528, over 19428.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3169, pruned_loss=0.08228, ctc_loss=0.1543, over 2757087.79 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:13:50,513 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.685e+02 1.803e+02 2.078e+02 3.456e+02, threshold=3.607e+02, percent-clipped=0.0
+2024-08-26 15:13:59,305 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.18 vs. limit=22.5
+2024-08-26 15:14:03,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=54544.0, ans=0.0
+2024-08-26 15:14:16,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=54597.333333333336, ans=0.125
+2024-08-26 15:14:18,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=54597.333333333336, ans=0.125
+2024-08-26 15:14:32,302 INFO [train.py:1114] (0/4) Epoch 5, batch 300, loss[loss=0.3051, simple_loss=0.3436, pruned_loss=0.09675, ctc_loss=0.1828, over 19541.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3167, pruned_loss=0.08216, ctc_loss=0.1541, over 3002184.15 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:15:01,360 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.18 vs. limit=6.0
+2024-08-26 15:15:16,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=54917.333333333336, ans=0.0
+2024-08-26 15:15:22,164 INFO [train.py:1114] (0/4) Epoch 5, batch 350, loss[loss=0.2666, simple_loss=0.3101, pruned_loss=0.08178, ctc_loss=0.1492, over 19769.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3168, pruned_loss=0.08204, ctc_loss=0.1538, over 3191174.97 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 32.0
+2024-08-26 15:15:23,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=54970.666666666664, ans=0.1
+2024-08-26 15:15:27,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=54970.666666666664, ans=0.1
+2024-08-26 15:15:31,764 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.717e+02 1.933e+02 2.233e+02 3.797e+02, threshold=3.865e+02, percent-clipped=1.0
+2024-08-26 15:15:47,387 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.53 vs. limit=12.0
+2024-08-26 15:15:48,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=55077.333333333336, ans=0.1
+2024-08-26 15:15:48,241 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=55077.333333333336, ans=0.125
+2024-08-26 15:16:15,655 INFO [train.py:1114] (0/4) Epoch 5, batch 400, loss[loss=0.2796, simple_loss=0.329, pruned_loss=0.08303, ctc_loss=0.1605, over 19499.00 frames. ], tot_loss[loss=0.2706, simple_loss=0.3164, pruned_loss=0.08172, ctc_loss=0.1532, over 3343119.31 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-26 15:16:17,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=55237.333333333336, ans=0.1
+2024-08-26 15:16:24,052 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.47 vs. limit=15.0
+2024-08-26 15:16:25,034 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.72 vs. limit=10.0
+2024-08-26 15:16:44,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=55397.333333333336, ans=0.125
+2024-08-26 15:16:51,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55397.333333333336, ans=0.1
+2024-08-26 15:17:07,113 INFO [train.py:1114] (0/4) Epoch 5, batch 450, loss[loss=0.2415, simple_loss=0.3013, pruned_loss=0.06573, ctc_loss=0.1258, over 19599.00 frames. ], tot_loss[loss=0.2704, simple_loss=0.3161, pruned_loss=0.0817, ctc_loss=0.1532, over 3450619.45 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 16.0
+2024-08-26 15:17:11,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=55504.0, ans=0.125
+2024-08-26 15:17:17,447 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.642e+02 1.899e+02 2.179e+02 3.523e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-26 15:17:17,668 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=55557.333333333336, ans=0.0
+2024-08-26 15:17:20,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=55557.333333333336, ans=0.125
+2024-08-26 15:17:25,184 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55610.666666666664, ans=0.1
+2024-08-26 15:17:26,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=55610.666666666664, ans=0.0
+2024-08-26 15:17:35,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=55664.0, ans=0.125
+2024-08-26 15:17:37,916 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.90 vs. limit=15.0
+2024-08-26 15:17:48,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=55717.333333333336, ans=0.0
+2024-08-26 15:18:04,472 INFO [train.py:1114] (0/4) Epoch 5, batch 500, loss[loss=0.288, simple_loss=0.3297, pruned_loss=0.09088, ctc_loss=0.1614, over 19665.00 frames. ], tot_loss[loss=0.2694, simple_loss=0.3155, pruned_loss=0.08122, ctc_loss=0.1522, over 3546264.51 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 16.0
+2024-08-26 15:18:13,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55770.666666666664, ans=0.1
+2024-08-26 15:18:13,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=55770.666666666664, ans=0.0
+2024-08-26 15:18:20,197 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.47 vs. limit=15.0
+2024-08-26 15:18:41,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=55877.333333333336, ans=0.125
+2024-08-26 15:18:43,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=55877.333333333336, ans=0.125
+2024-08-26 15:18:51,377 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=55877.333333333336, ans=0.0
+2024-08-26 15:18:54,107 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=55930.666666666664, ans=0.2
+2024-08-26 15:19:06,097 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=55930.666666666664, ans=0.125
+2024-08-26 15:19:14,259 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=55930.666666666664, ans=0.5
+2024-08-26 15:19:46,989 INFO [train.py:1114] (0/4) Epoch 5, batch 550, loss[loss=0.3083, simple_loss=0.3462, pruned_loss=0.09894, ctc_loss=0.1815, over 19337.00 frames. ], tot_loss[loss=0.2695, simple_loss=0.3154, pruned_loss=0.0814, ctc_loss=0.1522, over 3609162.31 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 16.0
+2024-08-26 15:20:03,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=56090.666666666664, ans=0.125
+2024-08-26 15:20:04,970 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 1.676e+02 1.860e+02 2.053e+02 4.118e+02, threshold=3.720e+02, percent-clipped=1.0
+2024-08-26 15:20:43,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=56197.333333333336, ans=0.09899494936611666
+2024-08-26 15:20:56,217 INFO [train.py:1114] (0/4) Epoch 5, batch 600, loss[loss=0.3025, simple_loss=0.3443, pruned_loss=0.09394, ctc_loss=0.182, over 19446.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3159, pruned_loss=0.08155, ctc_loss=0.1526, over 3667103.29 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 16.0
+2024-08-26 15:20:58,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=56304.0, ans=0.125
+2024-08-26 15:21:00,285 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=56304.0, ans=0.1
+2024-08-26 15:21:05,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=56304.0, ans=0.07
+2024-08-26 15:21:09,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=56357.333333333336, ans=0.1
+2024-08-26 15:21:15,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=56357.333333333336, ans=0.2
+2024-08-26 15:21:16,270 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.70 vs. limit=15.0
+2024-08-26 15:21:29,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=56464.0, ans=0.125
+2024-08-26 15:21:40,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=56517.333333333336, ans=22.5
+2024-08-26 15:21:49,363 INFO [train.py:1114] (0/4) Epoch 5, batch 650, loss[loss=0.2688, simple_loss=0.322, pruned_loss=0.07961, ctc_loss=0.1409, over 19760.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3146, pruned_loss=0.08066, ctc_loss=0.1509, over 3717918.01 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:21:59,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=56624.0, ans=0.1
+2024-08-26 15:21:59,896 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.659e+02 1.803e+02 2.095e+02 3.596e+02, threshold=3.607e+02, percent-clipped=0.0
+2024-08-26 15:22:22,336 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=11.06 vs. limit=15.0
+2024-08-26 15:22:33,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=56784.0, ans=0.025
+2024-08-26 15:22:35,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=56784.0, ans=0.0
+2024-08-26 15:22:39,296 INFO [train.py:1114] (0/4) Epoch 5, batch 700, loss[loss=0.233, simple_loss=0.2882, pruned_loss=0.06522, ctc_loss=0.1183, over 19720.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3151, pruned_loss=0.08103, ctc_loss=0.1514, over 3749766.63 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:22:58,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=56944.0, ans=0.125
+2024-08-26 15:23:12,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=56997.333333333336, ans=0.125
+2024-08-26 15:23:26,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=57050.666666666664, ans=0.125
+2024-08-26 15:23:27,622 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:23:28,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=57104.0, ans=0.125
+2024-08-26 15:23:29,295 INFO [train.py:1114] (0/4) Epoch 5, batch 750, loss[loss=0.2643, simple_loss=0.3238, pruned_loss=0.07339, ctc_loss=0.1453, over 19841.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3149, pruned_loss=0.08111, ctc_loss=0.1519, over 3776345.09 frames. ], batch size: 55, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:23:39,772 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.732e+02 1.957e+02 2.375e+02 6.184e+02, threshold=3.914e+02, percent-clipped=3.0
+2024-08-26 15:23:45,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=57157.333333333336, ans=0.125
+2024-08-26 15:24:19,566 INFO [train.py:1114] (0/4) Epoch 5, batch 800, loss[loss=0.2195, simple_loss=0.2853, pruned_loss=0.05465, ctc_loss=0.1107, over 19430.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3149, pruned_loss=0.08098, ctc_loss=0.1515, over 3797007.90 frames. ], batch size: 48, lr: 2.76e-02, grad_scale: 32.0
+2024-08-26 15:24:21,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=57370.666666666664, ans=0.125
+2024-08-26 15:24:44,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=57477.333333333336, ans=0.0
+2024-08-26 15:25:02,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.21 vs. limit=22.5
+2024-08-26 15:25:10,633 INFO [train.py:1114] (0/4) Epoch 5, batch 850, loss[loss=0.3022, simple_loss=0.3462, pruned_loss=0.09443, ctc_loss=0.1733, over 19660.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3142, pruned_loss=0.08081, ctc_loss=0.151, over 3815725.18 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-26 15:25:13,753 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=57637.333333333336, ans=0.0
+2024-08-26 15:25:24,574 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.744e+02 1.971e+02 2.331e+02 4.591e+02, threshold=3.942e+02, percent-clipped=1.0
+2024-08-26 15:25:30,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=57690.666666666664, ans=0.1
+2024-08-26 15:26:03,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=57850.666666666664, ans=0.05
+2024-08-26 15:26:06,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=57904.0, ans=0.07
+2024-08-26 15:26:07,575 INFO [train.py:1114] (0/4) Epoch 5, batch 900, loss[loss=0.2372, simple_loss=0.2901, pruned_loss=0.06729, ctc_loss=0.1241, over 19403.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3145, pruned_loss=0.08113, ctc_loss=0.1514, over 3818513.59 frames. ], batch size: 48, lr: 2.75e-02, grad_scale: 32.0
+2024-08-26 15:26:12,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=57904.0, ans=0.0
+2024-08-26 15:26:25,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=57957.333333333336, ans=0.2
+2024-08-26 15:26:49,206 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.10 vs. limit=15.0
+2024-08-26 15:26:53,888 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.14 vs. limit=12.0
+2024-08-26 15:26:58,228 INFO [train.py:1114] (0/4) Epoch 5, batch 950, loss[loss=0.2325, simple_loss=0.2851, pruned_loss=0.06515, ctc_loss=0.1238, over 19477.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3151, pruned_loss=0.08173, ctc_loss=0.1523, over 3820323.62 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-26 15:27:01,309 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:27:03,166 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=58170.666666666664, ans=0.5
+2024-08-26 15:27:11,439 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.648e+02 1.859e+02 2.135e+02 3.098e+02, threshold=3.718e+02, percent-clipped=0.0
+2024-08-26 15:27:14,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=58224.0, ans=0.05
+2024-08-26 15:27:15,204 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.06 vs. limit=10.0
+2024-08-26 15:27:16,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=58224.0, ans=0.125
+2024-08-26 15:27:19,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=58277.333333333336, ans=0.025
+2024-08-26 15:27:43,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=58384.0, ans=0.0
+2024-08-26 15:27:43,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=58384.0, ans=0.0
+2024-08-26 15:27:49,896 INFO [train.py:1114] (0/4) Epoch 5, batch 1000, loss[loss=0.282, simple_loss=0.3214, pruned_loss=0.08812, ctc_loss=0.1656, over 19842.00 frames. ], tot_loss[loss=0.2704, simple_loss=0.3157, pruned_loss=0.08198, ctc_loss=0.1529, over 3816207.50 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-26 15:27:53,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=58437.333333333336, ans=0.125
+2024-08-26 15:27:54,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=58437.333333333336, ans=0.0
+2024-08-26 15:27:54,421 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.32 vs. limit=10.0
+2024-08-26 15:27:59,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=58490.666666666664, ans=0.1
+2024-08-26 15:28:40,058 INFO [train.py:1114] (0/4) Epoch 5, batch 1050, loss[loss=0.2749, simple_loss=0.3239, pruned_loss=0.08152, ctc_loss=0.1573, over 19842.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.315, pruned_loss=0.08157, ctc_loss=0.1525, over 3823196.06 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-26 15:28:50,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=58757.333333333336, ans=0.125
+2024-08-26 15:28:50,853 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.680e+02 1.893e+02 2.161e+02 3.731e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-26 15:28:54,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=58757.333333333336, ans=0.125
+2024-08-26 15:29:01,149 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.23 vs. limit=15.0
+2024-08-26 15:29:02,166 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.67 vs. limit=6.0
+2024-08-26 15:29:03,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=58810.666666666664, ans=0.0
+2024-08-26 15:29:24,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=58917.333333333336, ans=0.0
+2024-08-26 15:29:33,663 INFO [train.py:1114] (0/4) Epoch 5, batch 1100, loss[loss=0.2413, simple_loss=0.291, pruned_loss=0.06948, ctc_loss=0.1315, over 19566.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3141, pruned_loss=0.08088, ctc_loss=0.1514, over 3831654.08 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 16.0
+2024-08-26 15:29:35,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.42 vs. limit=15.0
+2024-08-26 15:29:51,941 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=59077.333333333336, ans=0.2
+2024-08-26 15:29:55,664 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:30:08,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=59130.666666666664, ans=0.0
+2024-08-26 15:30:18,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=59184.0, ans=0.1
+2024-08-26 15:30:21,908 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.70 vs. limit=22.5
+2024-08-26 15:30:24,311 INFO [train.py:1114] (0/4) Epoch 5, batch 1150, loss[loss=0.2516, simple_loss=0.2958, pruned_loss=0.07563, ctc_loss=0.1406, over 19606.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3143, pruned_loss=0.08098, ctc_loss=0.1513, over 3829563.53 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 16.0
+2024-08-26 15:30:24,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=59237.333333333336, ans=0.025
+2024-08-26 15:30:34,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=59290.666666666664, ans=0.1
+2024-08-26 15:30:35,926 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.591e+02 1.744e+02 2.042e+02 4.394e+02, threshold=3.489e+02, percent-clipped=2.0
+2024-08-26 15:30:37,158 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:30:38,979 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=59290.666666666664, ans=0.1
+2024-08-26 15:30:45,518 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.35 vs. limit=12.0
+2024-08-26 15:30:53,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=59397.333333333336, ans=0.0
+2024-08-26 15:31:06,148 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.06 vs. limit=15.0
+2024-08-26 15:31:06,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=59450.666666666664, ans=0.0
+2024-08-26 15:31:15,286 INFO [train.py:1114] (0/4) Epoch 5, batch 1200, loss[loss=0.2621, simple_loss=0.3153, pruned_loss=0.07565, ctc_loss=0.1438, over 19839.00 frames. ], tot_loss[loss=0.2695, simple_loss=0.3153, pruned_loss=0.08142, ctc_loss=0.1522, over 3824947.14 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-26 15:31:24,593 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.89 vs. limit=15.0
+2024-08-26 15:31:36,229 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.59 vs. limit=15.0
+2024-08-26 15:31:43,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=59664.0, ans=0.125
+2024-08-26 15:31:53,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=59717.333333333336, ans=0.125
+2024-08-26 15:31:53,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=59717.333333333336, ans=0.1
+2024-08-26 15:32:06,262 INFO [train.py:1114] (0/4) Epoch 5, batch 1250, loss[loss=0.2984, simple_loss=0.3385, pruned_loss=0.09625, ctc_loss=0.1643, over 19557.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3152, pruned_loss=0.08105, ctc_loss=0.1514, over 3842931.87 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-26 15:32:18,030 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.635e+02 1.798e+02 2.001e+02 4.301e+02, threshold=3.596e+02, percent-clipped=1.0
+2024-08-26 15:32:48,488 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.39 vs. limit=15.0
+2024-08-26 15:32:55,068 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.47 vs. limit=15.0
+2024-08-26 15:32:56,445 INFO [train.py:1114] (0/4) Epoch 5, batch 1300, loss[loss=0.3065, simple_loss=0.3396, pruned_loss=0.09962, ctc_loss=0.1855, over 18839.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.314, pruned_loss=0.08029, ctc_loss=0.1499, over 3846267.59 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-26 15:32:56,733 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=60037.333333333336, ans=0.0
+2024-08-26 15:33:05,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten.whitening_limit, batch_count=60090.666666666664, ans=15.0
+2024-08-26 15:33:06,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=60090.666666666664, ans=0.025
+2024-08-26 15:33:11,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=60090.666666666664, ans=0.125
+2024-08-26 15:33:16,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=60144.0, ans=0.04949747468305833
+2024-08-26 15:33:28,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=60197.333333333336, ans=0.0
+2024-08-26 15:33:43,911 INFO [train.py:1114] (0/4) Epoch 5, batch 1350, loss[loss=0.245, simple_loss=0.3031, pruned_loss=0.06812, ctc_loss=0.1269, over 19751.00 frames. ], tot_loss[loss=0.2661, simple_loss=0.3135, pruned_loss=0.07963, ctc_loss=0.1486, over 3857861.13 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:33:47,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=60304.0, ans=0.125
+2024-08-26 15:33:54,118 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.70 vs. limit=22.5
+2024-08-26 15:33:55,391 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.610e+02 1.752e+02 1.989e+02 4.527e+02, threshold=3.503e+02, percent-clipped=1.0
+2024-08-26 15:34:04,464 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.94 vs. limit=10.0
+2024-08-26 15:34:09,293 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.93 vs. limit=22.5
+2024-08-26 15:34:13,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=60410.666666666664, ans=0.09899494936611666
+2024-08-26 15:34:15,063 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60464.0, ans=0.1
+2024-08-26 15:34:15,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=60464.0, ans=0.2
+2024-08-26 15:34:18,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.39 vs. limit=10.0
+2024-08-26 15:34:20,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=60464.0, ans=0.125
+2024-08-26 15:34:29,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=60517.333333333336, ans=0.125
+2024-08-26 15:34:30,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=60517.333333333336, ans=0.025
+2024-08-26 15:34:30,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60517.333333333336, ans=0.1
+2024-08-26 15:34:32,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=60517.333333333336, ans=0.125
+2024-08-26 15:34:34,576 INFO [train.py:1114] (0/4) Epoch 5, batch 1400, loss[loss=0.2404, simple_loss=0.2826, pruned_loss=0.07286, ctc_loss=0.131, over 19661.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3127, pruned_loss=0.0794, ctc_loss=0.148, over 3864258.34 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:34:37,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=60570.666666666664, ans=0.2
+2024-08-26 15:34:47,344 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.22 vs. limit=10.0
+2024-08-26 15:34:55,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.29 vs. limit=15.0
+2024-08-26 15:35:17,244 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.55 vs. limit=15.0
+2024-08-26 15:35:21,227 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.54 vs. limit=10.0
+2024-08-26 15:35:27,728 INFO [train.py:1114] (0/4) Epoch 5, batch 1450, loss[loss=0.2757, simple_loss=0.3256, pruned_loss=0.08131, ctc_loss=0.1579, over 19665.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.314, pruned_loss=0.07995, ctc_loss=0.1491, over 3862292.07 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:35:42,506 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.680e+02 1.820e+02 2.123e+02 3.172e+02, threshold=3.639e+02, percent-clipped=0.0
+2024-08-26 15:35:51,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=60944.0, ans=0.125
+2024-08-26 15:35:56,829 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=60944.0, ans=0.125
+2024-08-26 15:35:56,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=60944.0, ans=0.125
+2024-08-26 15:35:56,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=60944.0, ans=0.0
+2024-08-26 15:36:00,714 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=60997.333333333336, ans=0.125
+2024-08-26 15:36:19,831 INFO [train.py:1114] (0/4) Epoch 5, batch 1500, loss[loss=0.2468, simple_loss=0.3122, pruned_loss=0.06618, ctc_loss=0.1229, over 19582.00 frames. ], tot_loss[loss=0.2661, simple_loss=0.3137, pruned_loss=0.07957, ctc_loss=0.1486, over 3862535.21 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-26 15:36:24,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=61104.0, ans=0.2
+2024-08-26 15:36:25,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=61104.0, ans=0.0
+2024-08-26 15:36:43,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=61210.666666666664, ans=0.2
+2024-08-26 15:36:45,378 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=61210.666666666664, ans=0.125
+2024-08-26 15:36:59,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=61317.333333333336, ans=0.125
+2024-08-26 15:37:09,970 INFO [train.py:1114] (0/4) Epoch 5, batch 1550, loss[loss=0.2885, simple_loss=0.3278, pruned_loss=0.0911, ctc_loss=0.1677, over 19603.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3141, pruned_loss=0.08003, ctc_loss=0.1495, over 3847063.11 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
+2024-08-26 15:37:16,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=61370.666666666664, ans=0.2
+2024-08-26 15:37:22,481 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.752e+02 1.975e+02 2.269e+02 3.644e+02, threshold=3.951e+02, percent-clipped=1.0
+2024-08-26 15:37:25,607 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=61424.0, ans=0.125
+2024-08-26 15:37:38,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=61530.666666666664, ans=0.125
+2024-08-26 15:38:03,723 INFO [train.py:1114] (0/4) Epoch 5, batch 1600, loss[loss=0.2667, simple_loss=0.3193, pruned_loss=0.07789, ctc_loss=0.1457, over 19839.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3144, pruned_loss=0.08064, ctc_loss=0.1508, over 3836525.65 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:38:07,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=61637.333333333336, ans=0.125
+2024-08-26 15:38:26,262 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=19.53 vs. limit=15.0
+2024-08-26 15:38:44,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=61744.0, ans=0.125
+2024-08-26 15:38:47,875 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.27 vs. limit=12.0
+2024-08-26 15:38:54,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=61744.0, ans=0.1
+2024-08-26 15:39:09,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=61850.666666666664, ans=0.125
+2024-08-26 15:39:19,221 INFO [train.py:1114] (0/4) Epoch 5, batch 1650, loss[loss=0.2682, simple_loss=0.3233, pruned_loss=0.07635, ctc_loss=0.1509, over 19640.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3146, pruned_loss=0.08076, ctc_loss=0.151, over 3833447.05 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:39:20,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=61904.0, ans=15.0
+2024-08-26 15:39:24,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=61904.0, ans=0.125
+2024-08-26 15:39:27,209 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=61904.0, ans=0.125
+2024-08-26 15:39:29,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=61957.333333333336, ans=0.125
+2024-08-26 15:39:30,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff2.min_abs, batch_count=61957.333333333336, ans=0.1
+2024-08-26 15:39:31,760 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.578e+02 1.738e+02 2.103e+02 3.628e+02, threshold=3.475e+02, percent-clipped=0.0
+2024-08-26 15:39:33,876 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:39:33,994 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.99 vs. limit=15.0
+2024-08-26 15:39:34,072 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.29 vs. limit=22.5
+2024-08-26 15:39:50,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.47 vs. limit=22.5
+2024-08-26 15:39:56,924 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.07 vs. limit=15.0
+2024-08-26 15:39:58,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62117.333333333336, ans=0.1
+2024-08-26 15:40:03,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=62117.333333333336, ans=0.2
+2024-08-26 15:40:08,740 INFO [train.py:1114] (0/4) Epoch 5, batch 1700, loss[loss=0.2179, simple_loss=0.2682, pruned_loss=0.06001, ctc_loss=0.1187, over 19665.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3135, pruned_loss=0.07979, ctc_loss=0.1493, over 3847846.32 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:40:13,221 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.11 vs. limit=6.0
+2024-08-26 15:40:13,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=62170.666666666664, ans=0.125
+2024-08-26 15:40:41,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=62330.666666666664, ans=0.125
+2024-08-26 15:40:41,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=62330.666666666664, ans=0.0
+2024-08-26 15:40:44,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=62384.0, ans=0.025
+2024-08-26 15:40:54,052 INFO [train.py:1114] (0/4) Epoch 5, batch 1750, loss[loss=0.2234, simple_loss=0.2713, pruned_loss=0.06411, ctc_loss=0.1184, over 19655.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3125, pruned_loss=0.07913, ctc_loss=0.148, over 3852781.90 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-26 15:41:00,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=62437.333333333336, ans=0.125
+2024-08-26 15:41:05,733 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.599e+02 1.842e+02 2.097e+02 3.191e+02, threshold=3.683e+02, percent-clipped=0.0
+2024-08-26 15:41:15,448 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.44 vs. limit=15.0
+2024-08-26 15:41:19,186 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.34 vs. limit=10.0
+2024-08-26 15:41:36,357 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.98 vs. limit=15.0
+2024-08-26 15:41:39,328 INFO [train.py:1114] (0/4) Epoch 5, batch 1800, loss[loss=0.2563, simple_loss=0.3069, pruned_loss=0.07446, ctc_loss=0.1422, over 19613.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.313, pruned_loss=0.07938, ctc_loss=0.1485, over 3853809.76 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-26 15:41:45,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62704.0, ans=0.1
+2024-08-26 15:41:48,786 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.48 vs. limit=22.5
+2024-08-26 15:41:51,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=62757.333333333336, ans=0.0
+2024-08-26 15:42:07,588 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.86 vs. limit=22.5
+2024-08-26 15:42:09,309 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.07 vs. limit=12.0
+2024-08-26 15:42:19,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=62917.333333333336, ans=10.0
+2024-08-26 15:42:20,022 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.32 vs. limit=15.0
+2024-08-26 15:42:24,245 INFO [train.py:1114] (0/4) Epoch 5, batch 1850, loss[loss=0.2635, simple_loss=0.315, pruned_loss=0.07704, ctc_loss=0.1449, over 19569.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.3126, pruned_loss=0.07893, ctc_loss=0.1476, over 3856524.31 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-26 15:42:27,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=62970.666666666664, ans=0.125
+2024-08-26 15:42:30,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=62970.666666666664, ans=0.1
+2024-08-26 15:42:31,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=62970.666666666664, ans=0.0
+2024-08-26 15:42:35,859 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.605e+02 1.818e+02 2.016e+02 3.945e+02, threshold=3.637e+02, percent-clipped=1.0
+2024-08-26 15:42:44,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63077.333333333336, ans=0.1
+2024-08-26 15:42:53,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=63077.333333333336, ans=0.125
+2024-08-26 15:42:53,783 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.54 vs. limit=15.0
+2024-08-26 15:42:59,880 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=63130.666666666664, ans=0.125
+2024-08-26 15:43:16,393 INFO [train.py:1114] (0/4) Epoch 5, batch 1900, loss[loss=0.269, simple_loss=0.3232, pruned_loss=0.07824, ctc_loss=0.1459, over 19652.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.313, pruned_loss=0.07903, ctc_loss=0.1479, over 3860781.12 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 16.0
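+
+The grad_scale field halving from 32.0 (batch 1850) to 16.0 here, then recovering to 32.0 by batch 2000, is the signature of dynamic loss scaling under mixed precision: the scaler halves its scale after an inf/nan gradient and grows it back after a run of clean steps. The standard torch.cuda.amp pattern (the constants shown are PyTorch defaults or illustrative, not values read from this run):
+
+```python
+import torch
+
+scaler = torch.cuda.amp.GradScaler(init_scale=32.0, backoff_factor=0.5,
+                                   growth_factor=2.0, growth_interval=2000)
+# inside the training loop:
+#   with torch.cuda.amp.autocast():
+#       loss = compute_loss(batch)
+#   optimizer.zero_grad()
+#   scaler.scale(loss).backward()
+#   scaler.step(optimizer)  # step is skipped if grads contain inf/nan
+#   scaler.update()         # halves the scale after a skipped step,
+#                           # doubles it after growth_interval clean steps
+```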
+2024-08-26 15:43:17,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=63237.333333333336, ans=0.125
+2024-08-26 15:43:24,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=63237.333333333336, ans=0.0
+2024-08-26 15:43:37,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=63290.666666666664, ans=0.1
+2024-08-26 15:43:39,827 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=63344.0, ans=0.125
+2024-08-26 15:43:48,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=63397.333333333336, ans=0.125
+2024-08-26 15:44:05,667 INFO [train.py:1114] (0/4) Epoch 5, batch 1950, loss[loss=0.252, simple_loss=0.3051, pruned_loss=0.07317, ctc_loss=0.1314, over 19569.00 frames. ], tot_loss[loss=0.2658, simple_loss=0.314, pruned_loss=0.07918, ctc_loss=0.148, over 3869409.36 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 16.0
+2024-08-26 15:44:07,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63504.0, ans=0.1
+2024-08-26 15:44:12,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=63504.0, ans=0.1
+2024-08-26 15:44:20,104 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.677e+02 1.824e+02 1.963e+02 3.212e+02, threshold=3.647e+02, percent-clipped=0.0
+2024-08-26 15:44:21,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.84 vs. limit=15.0
+2024-08-26 15:44:23,886 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
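+
+The WithLoss lines report the summed value of an auxiliary penalty attached to a module's attention weights; loss-sum=0.000e+00 means the penalty is currently inactive. One way to attach such a term without touching the main forward value is an autograd function that feeds the auxiliary loss a gradient of one in backward, as if it had been added to the training loss; a hedged sketch of that trick:
+
+```python
+import torch
+
+class AttachLoss(torch.autograd.Function):
+    """Return x unchanged; in backward, aux receives gradient 1.0,
+    exactly as if aux.sum() had been added to the final loss."""
+    @staticmethod
+    def forward(ctx, x, aux):
+        ctx.aux_shape = aux.shape
+        return x
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        return grad_out, torch.ones(ctx.aux_shape, device=grad_out.device,
+                                    dtype=grad_out.dtype)
+
+# usage: x = AttachLoss.apply(x, penalty)  # penalty now trains alongside x
+```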
+2024-08-26 15:44:24,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=63610.666666666664, ans=0.0
+2024-08-26 15:44:26,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63610.666666666664, ans=0.1
+2024-08-26 15:44:27,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.98 vs. limit=22.5
+2024-08-26 15:44:33,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63664.0, ans=0.1
+2024-08-26 15:44:35,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=63664.0, ans=0.2
+2024-08-26 15:44:44,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=63717.333333333336, ans=0.125
+2024-08-26 15:44:46,466 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.68 vs. limit=22.5
+2024-08-26 15:44:52,301 INFO [train.py:1114] (0/4) Epoch 5, batch 2000, loss[loss=0.2104, simple_loss=0.2605, pruned_loss=0.05903, ctc_loss=0.1057, over 19636.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3149, pruned_loss=0.08006, ctc_loss=0.1495, over 3853618.52 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-26 15:44:57,989 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.81 vs. limit=15.0
+2024-08-26 15:45:04,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63824.0, ans=0.1
+2024-08-26 15:45:17,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=63877.333333333336, ans=0.0
+2024-08-26 15:45:20,351 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.83 vs. limit=15.0
+2024-08-26 15:45:29,791 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-12000.pt
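+
+checkpoint-12000.pt here is a periodic every-N-batches save, distinct from the per-epoch epoch-5.pt written later in this log. A sketch of what such a checkpoint typically bundles so training can resume exactly; the key names are illustrative, not necessarily icefall's:
+
+```python
+import torch
+
+def save_checkpoint(path, model, optimizer, scheduler, scaler, batch_idx):
+    # persist everything needed to resume: weights, optimizer moments,
+    # LR-scheduler position, AMP scale, and the global batch counter
+    torch.save({
+        "model": model.state_dict(),
+        "optimizer": optimizer.state_dict(),
+        "scheduler": scheduler.state_dict(),
+        "grad_scaler": scaler.state_dict(),
+        "batch_idx_train": batch_idx,
+    }, path)
+```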
+2024-08-26 15:45:36,649 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.01 vs. limit=15.0
+2024-08-26 15:45:38,883 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=63984.0, ans=0.0
+2024-08-26 15:45:42,246 INFO [train.py:1114] (0/4) Epoch 5, batch 2050, loss[loss=0.2261, simple_loss=0.275, pruned_loss=0.06343, ctc_loss=0.1256, over 19706.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.3134, pruned_loss=0.07966, ctc_loss=0.149, over 3850977.62 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-26 15:45:54,616 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.624e+02 1.773e+02 2.077e+02 3.322e+02, threshold=3.546e+02, percent-clipped=0.0
+2024-08-26 15:45:56,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=64090.666666666664, ans=0.125
+2024-08-26 15:46:10,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=64197.333333333336, ans=0.5
+2024-08-26 15:46:11,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=64197.333333333336, ans=0.1
+2024-08-26 15:46:26,371 INFO [train.py:1114] (0/4) Epoch 5, batch 2100, loss[loss=0.2664, simple_loss=0.3154, pruned_loss=0.07882, ctc_loss=0.1498, over 19789.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.3123, pruned_loss=0.07878, ctc_loss=0.1471, over 3858618.66 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:46:37,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=64357.333333333336, ans=0.125
+2024-08-26 15:46:45,880 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.67 vs. limit=15.0
+2024-08-26 15:46:49,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64410.666666666664, ans=0.1
+2024-08-26 15:46:55,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=64464.0, ans=0.125
+2024-08-26 15:46:56,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=64464.0, ans=0.0
+2024-08-26 15:47:18,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64517.333333333336, ans=0.1
+2024-08-26 15:47:23,494 INFO [train.py:1114] (0/4) Epoch 5, batch 2150, loss[loss=0.2435, simple_loss=0.301, pruned_loss=0.06674, ctc_loss=0.1314, over 19597.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.3114, pruned_loss=0.07824, ctc_loss=0.1461, over 3869218.50 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:47:32,173 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.81 vs. limit=15.0
+2024-08-26 15:47:35,827 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.599e+02 1.757e+02 2.074e+02 2.995e+02, threshold=3.513e+02, percent-clipped=0.0
+2024-08-26 15:47:46,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=64677.333333333336, ans=0.125
+2024-08-26 15:47:49,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64730.666666666664, ans=0.1
+2024-08-26 15:47:59,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=64784.0, ans=0.0
+2024-08-26 15:48:07,237 INFO [train.py:1114] (0/4) Epoch 5, batch 2200, loss[loss=0.2865, simple_loss=0.3308, pruned_loss=0.08808, ctc_loss=0.165, over 19592.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3117, pruned_loss=0.07845, ctc_loss=0.1465, over 3867821.49 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:48:12,050 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.93 vs. limit=15.0
+2024-08-26 15:48:21,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=64890.666666666664, ans=0.125
+2024-08-26 15:48:26,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=64944.0, ans=0.1
+2024-08-26 15:48:37,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=64997.333333333336, ans=0.1
+2024-08-26 15:48:37,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=64997.333333333336, ans=0.07
+2024-08-26 15:48:52,521 INFO [train.py:1114] (0/4) Epoch 5, batch 2250, loss[loss=0.2658, simple_loss=0.3158, pruned_loss=0.07672, ctc_loss=0.1558, over 19606.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3118, pruned_loss=0.07842, ctc_loss=0.1466, over 3867541.43 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 16.0
+2024-08-26 15:48:58,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=65104.0, ans=0.0
+2024-08-26 15:49:01,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=65157.333333333336, ans=0.035
+2024-08-26 15:49:02,637 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.01 vs. limit=10.0
+2024-08-26 15:49:03,661 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.50 vs. limit=22.5
+2024-08-26 15:49:05,750 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.721e+02 2.056e+02 2.448e+02 6.138e+02, threshold=4.112e+02, percent-clipped=3.0
+2024-08-26 15:49:08,694 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.32 vs. limit=22.5
+2024-08-26 15:49:09,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=65210.666666666664, ans=0.0
+2024-08-26 15:49:15,696 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.85 vs. limit=15.0
+2024-08-26 15:49:25,732 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.66 vs. limit=8.0
+2024-08-26 15:49:28,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=65317.333333333336, ans=0.0
+2024-08-26 15:49:34,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=65317.333333333336, ans=0.2
+2024-08-26 15:49:36,468 INFO [train.py:1114] (0/4) Epoch 5, batch 2300, loss[loss=0.2419, simple_loss=0.2974, pruned_loss=0.06802, ctc_loss=0.1258, over 19504.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3106, pruned_loss=0.0784, ctc_loss=0.1463, over 3861826.33 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 16.0
+2024-08-26 15:49:57,905 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=65477.333333333336, ans=0.125
+2024-08-26 15:50:01,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=65477.333333333336, ans=0.2
+2024-08-26 15:50:07,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=65530.666666666664, ans=0.1
+2024-08-26 15:50:10,256 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.71 vs. limit=15.0
+2024-08-26 15:50:23,059 INFO [train.py:1114] (0/4) Epoch 5, batch 2350, loss[loss=0.3045, simple_loss=0.3399, pruned_loss=0.09893, ctc_loss=0.1782, over 19684.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.311, pruned_loss=0.07859, ctc_loss=0.1465, over 3864672.00 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 16.0
+2024-08-26 15:50:23,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65637.33333333333, ans=0.1
+2024-08-26 15:50:32,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=65690.66666666667, ans=0.125
+2024-08-26 15:50:36,069 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.568e+02 1.781e+02 2.033e+02 3.218e+02, threshold=3.561e+02, percent-clipped=0.0
+2024-08-26 15:50:42,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=65744.0, ans=0.1
+2024-08-26 15:50:44,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=65744.0, ans=0.09899494936611666
+2024-08-26 15:50:49,827 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.39 vs. limit=22.5
+2024-08-26 15:50:57,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=65850.66666666667, ans=0.0
+2024-08-26 15:51:02,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=65850.66666666667, ans=0.125
+2024-08-26 15:51:07,079 INFO [train.py:1114] (0/4) Epoch 5, batch 2400, loss[loss=0.274, simple_loss=0.3329, pruned_loss=0.07686, ctc_loss=0.1536, over 19429.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.3135, pruned_loss=0.07928, ctc_loss=0.1481, over 3858442.69 frames. ], batch size: 67, lr: 2.63e-02, grad_scale: 32.0
+2024-08-26 15:51:36,885 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.73 vs. limit=15.0
+2024-08-26 15:51:48,164 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.85 vs. limit=12.0
+2024-08-26 15:51:52,367 INFO [train.py:1114] (0/4) Epoch 5, batch 2450, loss[loss=0.3699, simple_loss=0.3717, pruned_loss=0.135, ctc_loss=0.2452, over 13486.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3186, pruned_loss=0.0837, ctc_loss=0.1561, over 3734260.93 frames. ], batch size: 141, lr: 2.63e-02, grad_scale: 16.0
+2024-08-26 15:51:54,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=66170.66666666667, ans=0.125
+2024-08-26 15:52:03,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=66224.0, ans=0.2
+2024-08-26 15:52:07,306 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.716e+02 1.912e+02 2.213e+02 5.978e+02, threshold=3.825e+02, percent-clipped=3.0
+2024-08-26 15:52:07,528 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=66224.0, ans=0.125
+2024-08-26 15:52:18,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=66277.33333333333, ans=0.125
+2024-08-26 15:52:27,976 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-5.pt
+2024-08-26 15:53:42,790 INFO [train.py:1114] (0/4) Epoch 6, batch 0, loss[loss=0.2388, simple_loss=0.2865, pruned_loss=0.07046, ctc_loss=0.1255, over 19827.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.2865, pruned_loss=0.07046, ctc_loss=0.1255, over 19827.00 frames. ], batch size: 49, lr: 2.45e-02, grad_scale: 32.0
+2024-08-26 15:53:42,791 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 15:54:26,135 INFO [train.py:1146] (0/4) Epoch 6, validation: loss=0.2162, simple_loss=0.3022, pruned_loss=0.04785, ctc_loss=0.08613, over 944034.00 frames.
+2024-08-26 15:54:26,136 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 12795MB
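+
+At each epoch start, train.py evaluates a held-out validation set (here loss=0.2162 over 944034 frames) before resuming training, and the learning rate steps from 2.63e-02 at the end of epoch 5 to 2.45e-02 at epoch 6, batch 0. A discontinuity exactly at the epoch boundary fits a schedule with an explicit epoch-decay factor on top of a batch-decay one, such as icefall's Eden scheduler; a sketch of that functional form (base_lr and the two time constants below are placeholders, not read from this run):
+
+```python
+def eden_lr(base_lr, batch, epoch, lr_batches=7500.0, lr_epochs=3.5):
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+```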
+2024-08-26 15:54:44,004 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.08 vs. limit=10.0
+2024-08-26 15:54:45,902 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.05 vs. limit=22.5
+2024-08-26 15:54:50,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=66485.33333333333, ans=0.125
+2024-08-26 15:54:55,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=66538.66666666667, ans=0.1
+2024-08-26 15:55:02,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=66538.66666666667, ans=0.125
+2024-08-26 15:55:03,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-26 15:55:04,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-26 15:55:05,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=66592.0, ans=0.125
+2024-08-26 15:55:07,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=66592.0, ans=0.0
+2024-08-26 15:55:13,592 INFO [train.py:1114] (0/4) Epoch 6, batch 50, loss[loss=0.2227, simple_loss=0.2729, pruned_loss=0.06215, ctc_loss=0.1204, over 19728.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3141, pruned_loss=0.0804, ctc_loss=0.152, over 844928.81 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:55:13,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=66645.33333333333, ans=0.125
+2024-08-26 15:55:19,338 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:55:19,720 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=25.43 vs. limit=22.5
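+
+This is one of the few places where the whitening metric actually lands above its limit (25.43 vs. 22.5). These constraint modules behave hinge-like: inert while the statistic stays under the limit, applying a corrective term only past it, for example:
+
+```python
+import torch
+
+def hinge_penalty(metric: torch.Tensor, limit: float) -> torch.Tensor:
+    # zero while metric <= limit; grows linearly past it, so its gradient
+    # only nudges the module once the limit is breached (illustrative only)
+    return torch.clamp(metric - limit, min=0.0)
+```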
+2024-08-26 15:55:29,317 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.68 vs. limit=6.0
+2024-08-26 15:55:29,841 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=66698.66666666667, ans=0.0
+2024-08-26 15:55:29,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=66698.66666666667, ans=0.0
+2024-08-26 15:55:37,718 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.54 vs. limit=15.0
+2024-08-26 15:55:39,180 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.607e+02 1.759e+02 1.997e+02 3.496e+02, threshold=3.518e+02, percent-clipped=0.0
+2024-08-26 15:55:41,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=66805.33333333333, ans=0.2
+2024-08-26 15:55:47,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=66805.33333333333, ans=0.125
+2024-08-26 15:55:50,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=66858.66666666667, ans=0.125
+2024-08-26 15:56:02,669 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.99 vs. limit=22.5
+2024-08-26 15:56:03,171 INFO [train.py:1114] (0/4) Epoch 6, batch 100, loss[loss=0.2226, simple_loss=0.2851, pruned_loss=0.0588, ctc_loss=0.1062, over 19711.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3152, pruned_loss=0.07909, ctc_loss=0.1487, over 1499585.12 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:56:14,050 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.99 vs. limit=15.0
+2024-08-26 15:56:41,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=67072.0, ans=0.125
+2024-08-26 15:56:43,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=67072.0, ans=0.2
+2024-08-26 15:56:53,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67125.33333333333, ans=0.1
+2024-08-26 15:56:57,279 INFO [train.py:1114] (0/4) Epoch 6, batch 150, loss[loss=0.2303, simple_loss=0.2806, pruned_loss=0.06622, ctc_loss=0.1189, over 19714.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3104, pruned_loss=0.0769, ctc_loss=0.1441, over 2027782.93 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:57:19,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=67285.33333333333, ans=0.125
+2024-08-26 15:57:22,721 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.333e+02 1.584e+02 1.709e+02 1.986e+02 2.973e+02, threshold=3.418e+02, percent-clipped=0.0
+2024-08-26 15:57:28,645 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:57:36,177 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=67392.0, ans=0.125
+2024-08-26 15:57:40,956 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=67392.0, ans=0.125
+2024-08-26 15:57:44,428 INFO [train.py:1114] (0/4) Epoch 6, batch 200, loss[loss=0.2702, simple_loss=0.3151, pruned_loss=0.0828, ctc_loss=0.1494, over 18434.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3082, pruned_loss=0.07579, ctc_loss=0.1417, over 2435777.26 frames. ], batch size: 85, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:57:47,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=67445.33333333333, ans=0.05
+2024-08-26 15:57:50,873 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.97 vs. limit=10.0
+2024-08-26 15:58:03,782 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=67552.0, ans=0.0
+2024-08-26 15:58:04,848 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=67552.0, ans=0.2
+2024-08-26 15:58:10,338 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=67552.0, ans=0.05
+2024-08-26 15:58:11,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=67552.0, ans=0.0
+2024-08-26 15:58:16,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=67605.33333333333, ans=0.2
+2024-08-26 15:58:19,226 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.98 vs. limit=10.0
+2024-08-26 15:58:26,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=67658.66666666667, ans=0.125
+2024-08-26 15:58:27,697 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=67658.66666666667, ans=0.125
+2024-08-26 15:58:36,038 INFO [train.py:1114] (0/4) Epoch 6, batch 250, loss[loss=0.2651, simple_loss=0.3197, pruned_loss=0.07616, ctc_loss=0.1452, over 19444.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3083, pruned_loss=0.07518, ctc_loss=0.1408, over 2756020.36 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:59:01,203 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=67765.33333333333, ans=0.0
+2024-08-26 15:59:06,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=67818.66666666667, ans=0.0
+2024-08-26 15:59:10,413 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.559e+02 1.703e+02 1.915e+02 3.590e+02, threshold=3.407e+02, percent-clipped=1.0
+2024-08-26 15:59:11,620 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=67818.66666666667, ans=0.5
+2024-08-26 15:59:16,410 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=67872.0, ans=0.125
+2024-08-26 15:59:29,939 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=67925.33333333333, ans=0.125
+2024-08-26 15:59:32,669 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:59:35,255 INFO [train.py:1114] (0/4) Epoch 6, batch 300, loss[loss=0.2952, simple_loss=0.3378, pruned_loss=0.09449, ctc_loss=0.1593, over 19503.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.3077, pruned_loss=0.07514, ctc_loss=0.1405, over 3000636.50 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:59:35,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=67978.66666666667, ans=0.125
+2024-08-26 15:59:44,966 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=67978.66666666667, ans=0.125
+2024-08-26 16:00:10,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=68138.66666666667, ans=0.125
+2024-08-26 16:00:24,076 INFO [train.py:1114] (0/4) Epoch 6, batch 350, loss[loss=0.2413, simple_loss=0.2877, pruned_loss=0.07199, ctc_loss=0.1272, over 19759.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3082, pruned_loss=0.07515, ctc_loss=0.1404, over 3190927.67 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:00:24,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=68245.33333333333, ans=0.125
+2024-08-26 16:00:31,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=68245.33333333333, ans=0.1
+2024-08-26 16:00:49,640 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.625e+02 1.872e+02 2.224e+02 3.924e+02, threshold=3.744e+02, percent-clipped=2.0
+2024-08-26 16:00:58,184 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=68405.33333333333, ans=0.035
+2024-08-26 16:01:02,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=68458.66666666667, ans=0.025
+2024-08-26 16:01:04,828 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:01:10,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=68512.0, ans=0.2
+2024-08-26 16:01:11,319 INFO [train.py:1114] (0/4) Epoch 6, batch 400, loss[loss=0.2264, simple_loss=0.2868, pruned_loss=0.0603, ctc_loss=0.1134, over 19521.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.307, pruned_loss=0.0744, ctc_loss=0.1391, over 3343391.92 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:01:20,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=68565.33333333333, ans=0.125
+2024-08-26 16:01:33,560 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.24 vs. limit=6.0
+2024-08-26 16:01:34,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=68618.66666666667, ans=0.5
+2024-08-26 16:01:36,571 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.41 vs. limit=22.5
+2024-08-26 16:01:56,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=68725.33333333333, ans=0.0
+2024-08-26 16:02:07,092 INFO [train.py:1114] (0/4) Epoch 6, batch 450, loss[loss=0.271, simple_loss=0.3205, pruned_loss=0.08128, ctc_loss=0.1475, over 19629.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3072, pruned_loss=0.07461, ctc_loss=0.1395, over 3451979.55 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:02:14,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.64 vs. limit=15.0
+2024-08-26 16:02:26,912 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.90 vs. limit=6.0
+2024-08-26 16:02:26,950 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.08 vs. limit=15.0
+2024-08-26 16:02:34,034 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.611e+02 1.799e+02 2.140e+02 4.925e+02, threshold=3.597e+02, percent-clipped=1.0
+2024-08-26 16:02:37,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=68938.66666666667, ans=0.125
+2024-08-26 16:02:45,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=68992.0, ans=0.125
+2024-08-26 16:02:55,674 INFO [train.py:1114] (0/4) Epoch 6, batch 500, loss[loss=0.2859, simple_loss=0.3346, pruned_loss=0.08699, ctc_loss=0.1581, over 19635.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.306, pruned_loss=0.07414, ctc_loss=0.1388, over 3547779.06 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:02:55,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=69045.33333333333, ans=0.0
+2024-08-26 16:03:27,195 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=69205.33333333333, ans=0.1
+2024-08-26 16:03:32,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=69205.33333333333, ans=0.125
+2024-08-26 16:03:33,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=69258.66666666667, ans=0.2
+2024-08-26 16:03:35,015 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.10 vs. limit=6.0
+2024-08-26 16:03:43,059 INFO [train.py:1114] (0/4) Epoch 6, batch 550, loss[loss=0.2901, simple_loss=0.3301, pruned_loss=0.09161, ctc_loss=0.1673, over 19282.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3065, pruned_loss=0.07446, ctc_loss=0.1394, over 3609600.28 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:03:54,834 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=69365.33333333333, ans=0.125
+2024-08-26 16:03:57,083 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.49 vs. limit=15.0
+2024-08-26 16:04:08,890 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.633e+02 1.875e+02 2.080e+02 6.681e+02, threshold=3.749e+02, percent-clipped=3.0
+2024-08-26 16:04:09,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69418.66666666667, ans=0.1
+2024-08-26 16:04:13,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=69472.0, ans=0.125
+2024-08-26 16:04:19,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=69472.0, ans=0.125
+2024-08-26 16:04:30,180 INFO [train.py:1114] (0/4) Epoch 6, batch 600, loss[loss=0.275, simple_loss=0.3291, pruned_loss=0.0805, ctc_loss=0.1498, over 19348.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3063, pruned_loss=0.07406, ctc_loss=0.1387, over 3666363.11 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:04:37,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=69578.66666666667, ans=0.07
+2024-08-26 16:04:38,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=69578.66666666667, ans=0.0
+2024-08-26 16:04:39,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=69578.66666666667, ans=0.125
+2024-08-26 16:04:49,283 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.46 vs. limit=22.5
+2024-08-26 16:04:54,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69632.0, ans=0.1
+2024-08-26 16:05:05,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69738.66666666667, ans=0.1
+2024-08-26 16:05:07,604 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=69738.66666666667, ans=0.0
+2024-08-26 16:05:07,723 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=69738.66666666667, ans=0.0
+2024-08-26 16:05:12,626 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:05:25,783 INFO [train.py:1114] (0/4) Epoch 6, batch 650, loss[loss=0.2289, simple_loss=0.2947, pruned_loss=0.05904, ctc_loss=0.1127, over 19766.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3058, pruned_loss=0.07363, ctc_loss=0.1379, over 3716488.73 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-26 16:05:53,416 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.596e+02 1.734e+02 1.974e+02 3.978e+02, threshold=3.467e+02, percent-clipped=1.0
+2024-08-26 16:05:57,830 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.89 vs. limit=22.5
+2024-08-26 16:06:00,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=70005.33333333333, ans=0.0
+2024-08-26 16:06:07,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=70058.66666666667, ans=0.025
+2024-08-26 16:06:15,361 INFO [train.py:1114] (0/4) Epoch 6, batch 700, loss[loss=0.2117, simple_loss=0.276, pruned_loss=0.05412, ctc_loss=0.09767, over 19724.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3065, pruned_loss=0.07407, ctc_loss=0.1383, over 3748518.01 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 16.0
+2024-08-26 16:06:16,691 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.42 vs. limit=15.0
+2024-08-26 16:06:27,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=70165.33333333333, ans=0.2
+2024-08-26 16:06:40,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=70218.66666666667, ans=0.0
+2024-08-26 16:07:02,317 INFO [train.py:1114] (0/4) Epoch 6, batch 750, loss[loss=0.2504, simple_loss=0.3116, pruned_loss=0.06734, ctc_loss=0.1362, over 19502.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3059, pruned_loss=0.07372, ctc_loss=0.1377, over 3774484.37 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 16.0
+2024-08-26 16:07:13,311 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=16.02 vs. limit=15.0
+2024-08-26 16:07:27,620 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=70485.33333333333, ans=0.1
+2024-08-26 16:07:33,082 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.662e+02 1.845e+02 2.236e+02 2.956e+02, threshold=3.689e+02, percent-clipped=0.0
+2024-08-26 16:07:43,803 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.71 vs. limit=15.0
+2024-08-26 16:08:24,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=70592.0, ans=0.125
+2024-08-26 16:08:25,917 INFO [train.py:1114] (0/4) Epoch 6, batch 800, loss[loss=0.205, simple_loss=0.2682, pruned_loss=0.0506, ctc_loss=0.1016, over 19810.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3055, pruned_loss=0.07371, ctc_loss=0.1377, over 3794875.81 frames. ], batch size: 49, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:08:26,997 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=70645.33333333333, ans=0.2
+2024-08-26 16:08:41,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=70698.66666666667, ans=0.125
+2024-08-26 16:09:29,162 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.61 vs. limit=6.0
+2024-08-26 16:09:32,223 INFO [train.py:1114] (0/4) Epoch 6, batch 850, loss[loss=0.2563, simple_loss=0.3154, pruned_loss=0.07148, ctc_loss=0.1355, over 19681.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3053, pruned_loss=0.07381, ctc_loss=0.1378, over 3813603.32 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:09:34,432 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=70912.0, ans=0.125
+2024-08-26 16:09:48,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=70965.33333333333, ans=0.125
+2024-08-26 16:09:58,792 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 1.558e+02 1.696e+02 1.888e+02 5.151e+02, threshold=3.391e+02, percent-clipped=1.0
+2024-08-26 16:10:04,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=71072.0, ans=0.09899494936611666
+2024-08-26 16:10:22,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=71072.0, ans=0.125
+2024-08-26 16:10:24,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=71072.0, ans=0.025
+2024-08-26 16:10:29,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=71125.33333333333, ans=0.0
+2024-08-26 16:10:35,829 INFO [train.py:1114] (0/4) Epoch 6, batch 900, loss[loss=0.2265, simple_loss=0.279, pruned_loss=0.06327, ctc_loss=0.1188, over 19439.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3056, pruned_loss=0.07421, ctc_loss=0.1384, over 3817682.59 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:10:47,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=71232.0, ans=0.125
+2024-08-26 16:10:51,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=71232.0, ans=0.125
+2024-08-26 16:10:55,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=71285.33333333333, ans=10.0
+2024-08-26 16:11:05,785 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.11 vs. limit=22.5
+2024-08-26 16:11:11,477 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=71338.66666666667, ans=0.125
+2024-08-26 16:11:14,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=71392.0, ans=0.0
+2024-08-26 16:11:16,366 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=71392.0, ans=0.125
+2024-08-26 16:11:23,813 INFO [train.py:1114] (0/4) Epoch 6, batch 950, loss[loss=0.2394, simple_loss=0.2944, pruned_loss=0.06637, ctc_loss=0.1295, over 19522.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.306, pruned_loss=0.07455, ctc_loss=0.1391, over 3819604.40 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-26 16:11:45,955 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=71498.66666666667, ans=0.125
+2024-08-26 16:11:47,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71498.66666666667, ans=0.1
+2024-08-26 16:11:50,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=71552.0, ans=0.125
+2024-08-26 16:11:59,454 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.602e+02 1.780e+02 2.099e+02 5.215e+02, threshold=3.559e+02, percent-clipped=4.0
+2024-08-26 16:12:03,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.whiten.whitening_limit, batch_count=71605.33333333333, ans=15.0
+2024-08-26 16:12:04,586 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:12:21,521 INFO [train.py:1114] (0/4) Epoch 6, batch 1000, loss[loss=0.2451, simple_loss=0.2975, pruned_loss=0.07056, ctc_loss=0.129, over 19845.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3067, pruned_loss=0.07466, ctc_loss=0.1393, over 3815694.02 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-26 16:12:30,577 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=71765.33333333333, ans=0.125
+2024-08-26 16:12:31,906 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.95 vs. limit=15.0
+2024-08-26 16:12:59,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=71818.66666666667, ans=0.0
+2024-08-26 16:13:04,743 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=71872.0, ans=0.025
+2024-08-26 16:13:11,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=71872.0, ans=0.09899494936611666
+2024-08-26 16:13:12,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71925.33333333333, ans=0.1
+2024-08-26 16:13:16,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-26 16:13:19,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-26 16:13:20,052 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-26 16:13:22,633 INFO [train.py:1114] (0/4) Epoch 6, batch 1050, loss[loss=0.2442, simple_loss=0.3038, pruned_loss=0.06744, ctc_loss=0.1244, over 19845.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3059, pruned_loss=0.0743, ctc_loss=0.1387, over 3821802.79 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:13:29,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71978.66666666667, ans=0.1
+2024-08-26 16:13:50,116 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.587e+02 1.763e+02 2.081e+02 5.001e+02, threshold=3.526e+02, percent-clipped=1.0
+2024-08-26 16:13:55,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=72138.66666666667, ans=0.0
+2024-08-26 16:14:09,055 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=72192.0, ans=0.125
+2024-08-26 16:14:10,566 INFO [train.py:1114] (0/4) Epoch 6, batch 1100, loss[loss=0.2333, simple_loss=0.2998, pruned_loss=0.06101, ctc_loss=0.1122, over 19586.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3055, pruned_loss=0.07376, ctc_loss=0.1377, over 3829564.03 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:14:11,714 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=72245.33333333333, ans=0.2
+2024-08-26 16:14:27,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=72298.66666666667, ans=0.125
+2024-08-26 16:14:36,590 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=72352.0, ans=0.125
+2024-08-26 16:14:42,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72352.0, ans=0.1
+2024-08-26 16:14:50,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=72405.33333333333, ans=0.0
+2024-08-26 16:15:25,945 INFO [train.py:1114] (0/4) Epoch 6, batch 1150, loss[loss=0.2321, simple_loss=0.2884, pruned_loss=0.06255, ctc_loss=0.1269, over 19581.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3049, pruned_loss=0.07353, ctc_loss=0.1373, over 3828050.86 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:15:52,302 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.06 vs. limit=15.0
+2024-08-26 16:15:55,805 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=72565.33333333333, ans=0.07
+2024-08-26 16:15:59,429 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=72565.33333333333, ans=0.025
+2024-08-26 16:16:51,594 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.628e+02 1.822e+02 2.077e+02 5.117e+02, threshold=3.645e+02, percent-clipped=2.0
+2024-08-26 16:16:53,089 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.58 vs. limit=6.0
+2024-08-26 16:17:05,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=72725.33333333333, ans=0.07
+2024-08-26 16:17:15,527 INFO [train.py:1114] (0/4) Epoch 6, batch 1200, loss[loss=0.2879, simple_loss=0.3349, pruned_loss=0.08792, ctc_loss=0.1627, over 19845.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3062, pruned_loss=0.07413, ctc_loss=0.1384, over 3823637.56 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:17:17,891 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.37 vs. limit=15.0
+2024-08-26 16:17:26,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=72832.0, ans=0.125
+2024-08-26 16:17:30,519 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.92 vs. limit=15.0
+2024-08-26 16:17:31,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=72832.0, ans=0.2
+2024-08-26 16:17:32,984 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=72832.0, ans=0.2
+2024-08-26 16:17:37,152 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.58 vs. limit=22.5
+2024-08-26 16:17:38,037 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.66 vs. limit=10.0
+2024-08-26 16:17:40,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=72885.33333333333, ans=0.125
+2024-08-26 16:17:45,477 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.64 vs. limit=15.0
+2024-08-26 16:17:48,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72938.66666666667, ans=0.1
+2024-08-26 16:18:04,588 INFO [train.py:1114] (0/4) Epoch 6, batch 1250, loss[loss=0.2821, simple_loss=0.3216, pruned_loss=0.09039, ctc_loss=0.1545, over 19526.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3062, pruned_loss=0.07377, ctc_loss=0.1377, over 3842300.87 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:18:15,716 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.01 vs. limit=15.0
+2024-08-26 16:18:16,388 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=73098.66666666667, ans=0.125
+2024-08-26 16:18:17,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=73098.66666666667, ans=0.125
+2024-08-26 16:18:24,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=73152.0, ans=0.125
+2024-08-26 16:18:31,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=73152.0, ans=0.0
+2024-08-26 16:18:31,866 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.534e+02 1.709e+02 2.004e+02 3.682e+02, threshold=3.418e+02, percent-clipped=1.0
+2024-08-26 16:18:56,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=73258.66666666667, ans=0.125
+2024-08-26 16:18:59,507 INFO [train.py:1114] (0/4) Epoch 6, batch 1300, loss[loss=0.2961, simple_loss=0.3443, pruned_loss=0.08942, ctc_loss=0.1727, over 18835.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3055, pruned_loss=0.07349, ctc_loss=0.1371, over 3846607.15 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:19:18,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=73312.0, ans=0.2
+2024-08-26 16:19:22,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=73365.33333333333, ans=0.125
+2024-08-26 16:19:53,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=73365.33333333333, ans=0.2
+2024-08-26 16:19:56,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten.whitening_limit, batch_count=73365.33333333333, ans=15.0
+2024-08-26 16:20:00,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=73418.66666666667, ans=0.125
+2024-08-26 16:20:10,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=73472.0, ans=0.0
+2024-08-26 16:20:16,244 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:20:27,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=73525.33333333333, ans=0.125
+2024-08-26 16:20:32,211 INFO [train.py:1114] (0/4) Epoch 6, batch 1350, loss[loss=0.2353, simple_loss=0.3021, pruned_loss=0.06189, ctc_loss=0.112, over 19765.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3045, pruned_loss=0.07273, ctc_loss=0.1357, over 3857129.69 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:20:33,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=73578.66666666667, ans=0.0
+2024-08-26 16:20:38,259 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=73578.66666666667, ans=0.0
+2024-08-26 16:20:42,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=73632.0, ans=0.2
+2024-08-26 16:20:55,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=73685.33333333333, ans=0.0
+2024-08-26 16:20:57,760 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=73685.33333333333, ans=0.0
+2024-08-26 16:21:00,540 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.536e+02 1.657e+02 1.960e+02 3.055e+02, threshold=3.315e+02, percent-clipped=0.0
+2024-08-26 16:21:20,641 INFO [train.py:1114] (0/4) Epoch 6, batch 1400, loss[loss=0.2398, simple_loss=0.2859, pruned_loss=0.07056, ctc_loss=0.1314, over 19670.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3046, pruned_loss=0.07291, ctc_loss=0.1363, over 3864354.35 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-26 16:21:42,313 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.50 vs. limit=15.0
+2024-08-26 16:21:52,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=74005.33333333333, ans=0.125
+2024-08-26 16:21:57,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=74005.33333333333, ans=0.125
+2024-08-26 16:21:57,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=74005.33333333333, ans=0.2
+2024-08-26 16:22:07,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=74112.0, ans=0.0
+2024-08-26 16:22:08,768 INFO [train.py:1114] (0/4) Epoch 6, batch 1450, loss[loss=0.2909, simple_loss=0.3311, pruned_loss=0.09206, ctc_loss=0.1662, over 19641.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.305, pruned_loss=0.07306, ctc_loss=0.1366, over 3863037.37 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 16.0
+2024-08-26 16:23:32,945 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.612e+02 1.863e+02 2.093e+02 4.374e+02, threshold=3.727e+02, percent-clipped=2.0
+2024-08-26 16:23:40,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=74272.0, ans=0.0
+2024-08-26 16:23:47,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=74325.33333333333, ans=0.125
+2024-08-26 16:23:49,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=74325.33333333333, ans=0.04949747468305833
+2024-08-26 16:23:57,147 INFO [train.py:1114] (0/4) Epoch 6, batch 1500, loss[loss=0.2557, simple_loss=0.3073, pruned_loss=0.07504, ctc_loss=0.135, over 19578.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3055, pruned_loss=0.0732, ctc_loss=0.1367, over 3862765.27 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 16.0
+2024-08-26 16:23:59,717 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.78 vs. limit=6.0
+2024-08-26 16:24:31,956 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.32 vs. limit=12.0
+2024-08-26 16:24:38,929 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.06 vs. limit=15.0
+2024-08-26 16:24:40,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=74485.33333333333, ans=0.0
+2024-08-26 16:24:41,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74485.33333333333, ans=0.1
+2024-08-26 16:24:51,910 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.77 vs. limit=6.0
+2024-08-26 16:25:10,583 INFO [train.py:1114] (0/4) Epoch 6, batch 1550, loss[loss=0.2527, simple_loss=0.3079, pruned_loss=0.07176, ctc_loss=0.1351, over 19610.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3054, pruned_loss=0.07341, ctc_loss=0.1371, over 3846999.82 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 16.0
+2024-08-26 16:25:12,037 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.70 vs. limit=15.0
+2024-08-26 16:26:20,821 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.577e+02 1.696e+02 1.957e+02 2.811e+02, threshold=3.391e+02, percent-clipped=0.0
+2024-08-26 16:26:22,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=74805.33333333333, ans=0.0
+2024-08-26 16:26:30,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=74858.66666666667, ans=0.125
+2024-08-26 16:26:33,967 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:26:37,661 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=74858.66666666667, ans=0.125
+2024-08-26 16:26:38,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=74858.66666666667, ans=0.04949747468305833
+2024-08-26 16:26:40,312 INFO [train.py:1114] (0/4) Epoch 6, batch 1600, loss[loss=0.2651, simple_loss=0.3194, pruned_loss=0.07668, ctc_loss=0.1437, over 19841.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3057, pruned_loss=0.07376, ctc_loss=0.1376, over 3836462.91 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-26 16:27:12,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=75018.66666666667, ans=0.125
+2024-08-26 16:27:22,897 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.11 vs. limit=6.0
+2024-08-26 16:27:35,213 INFO [train.py:1114] (0/4) Epoch 6, batch 1650, loss[loss=0.2634, simple_loss=0.3218, pruned_loss=0.07341, ctc_loss=0.1454, over 19643.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3055, pruned_loss=0.07354, ctc_loss=0.1374, over 3832786.98 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-26 16:27:35,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=75178.66666666667, ans=0.125
+2024-08-26 16:27:58,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=75285.33333333333, ans=10.0
+2024-08-26 16:28:43,074 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.584e+02 1.799e+02 2.082e+02 3.549e+02, threshold=3.597e+02, percent-clipped=1.0
+2024-08-26 16:29:32,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=75392.0, ans=0.1
+2024-08-26 16:29:36,192 INFO [train.py:1114] (0/4) Epoch 6, batch 1700, loss[loss=0.2135, simple_loss=0.2664, pruned_loss=0.05972, ctc_loss=0.1029, over 19668.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3046, pruned_loss=0.07274, ctc_loss=0.136, over 3846878.92 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:29:38,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=75445.33333333333, ans=0.125
+2024-08-26 16:29:55,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75552.0, ans=0.1
+2024-08-26 16:29:58,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=75552.0, ans=0.0
+2024-08-26 16:30:00,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=75552.0, ans=0.0
+2024-08-26 16:30:01,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=75552.0, ans=0.125
+2024-08-26 16:30:07,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=75605.33333333333, ans=0.5
+2024-08-26 16:30:20,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=75658.66666666667, ans=0.025
+2024-08-26 16:30:22,748 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.41 vs. limit=10.0
+2024-08-26 16:30:24,076 INFO [train.py:1114] (0/4) Epoch 6, batch 1750, loss[loss=0.219, simple_loss=0.2704, pruned_loss=0.06066, ctc_loss=0.1154, over 19645.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.304, pruned_loss=0.07244, ctc_loss=0.1354, over 3852018.78 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:31:04,531 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.524e+02 1.697e+02 1.959e+02 3.052e+02, threshold=3.394e+02, percent-clipped=0.0
+2024-08-26 16:31:11,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=75872.0, ans=0.2
+2024-08-26 16:31:16,285 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=75925.33333333333, ans=0.125
+2024-08-26 16:31:25,741 INFO [train.py:1114] (0/4) Epoch 6, batch 1800, loss[loss=0.2595, simple_loss=0.3175, pruned_loss=0.07314, ctc_loss=0.1382, over 19623.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3043, pruned_loss=0.07264, ctc_loss=0.136, over 3853824.76 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:32:52,208 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=76192.0, ans=0.025
+2024-08-26 16:33:01,929 INFO [train.py:1114] (0/4) Epoch 6, batch 1850, loss[loss=0.2769, simple_loss=0.3287, pruned_loss=0.08189, ctc_loss=0.1531, over 19579.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3039, pruned_loss=0.07233, ctc_loss=0.1354, over 3858141.23 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:33:26,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=76245.33333333333, ans=0.2
+2024-08-26 16:33:57,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=76352.0, ans=0.125
+2024-08-26 16:34:05,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=76405.33333333333, ans=0.0
+2024-08-26 16:34:05,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=76405.33333333333, ans=0.0
+2024-08-26 16:34:05,740 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.545e+02 1.701e+02 1.893e+02 2.907e+02, threshold=3.402e+02, percent-clipped=0.0
+2024-08-26 16:34:23,395 INFO [train.py:1114] (0/4) Epoch 6, batch 1900, loss[loss=0.2541, simple_loss=0.3187, pruned_loss=0.06997, ctc_loss=0.124, over 19656.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3044, pruned_loss=0.07223, ctc_loss=0.135, over 3863486.84 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:34:29,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=76512.0, ans=0.0
+2024-08-26 16:34:49,280 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.84 vs. limit=15.0
+2024-08-26 16:35:11,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76672.0, ans=0.1
+2024-08-26 16:35:13,197 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.29 vs. limit=15.0
+2024-08-26 16:35:18,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=76725.33333333333, ans=0.2
+2024-08-26 16:35:25,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=76725.33333333333, ans=0.2
+2024-08-26 16:35:26,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=76725.33333333333, ans=0.025
+2024-08-26 16:35:26,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=76725.33333333333, ans=0.2
+2024-08-26 16:35:27,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=76778.66666666667, ans=0.125
+2024-08-26 16:35:27,762 INFO [train.py:1114] (0/4) Epoch 6, batch 1950, loss[loss=0.2267, simple_loss=0.2906, pruned_loss=0.05816, ctc_loss=0.1162, over 19571.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3058, pruned_loss=0.07262, ctc_loss=0.1356, over 3871849.41 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:36:23,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=76885.33333333333, ans=0.125
+2024-08-26 16:36:32,182 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.646e+02 1.808e+02 2.059e+02 4.885e+02, threshold=3.617e+02, percent-clipped=2.0
+2024-08-26 16:36:40,334 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=76938.66666666667, ans=0.125
+2024-08-26 16:36:53,611 INFO [train.py:1114] (0/4) Epoch 6, batch 2000, loss[loss=0.2277, simple_loss=0.2787, pruned_loss=0.06344, ctc_loss=0.1244, over 19691.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.307, pruned_loss=0.07351, ctc_loss=0.1373, over 3855417.73 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:37:07,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=77098.66666666667, ans=0.0
+2024-08-26 16:37:16,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=77152.0, ans=0.0
+2024-08-26 16:37:22,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=77205.33333333333, ans=0.125
+2024-08-26 16:37:24,481 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.65 vs. limit=12.0
+2024-08-26 16:37:32,363 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.43 vs. limit=10.0
+2024-08-26 16:37:38,199 INFO [train.py:1114] (0/4) Epoch 6, batch 2050, loss[loss=0.1985, simple_loss=0.2597, pruned_loss=0.04934, ctc_loss=0.09688, over 19725.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3051, pruned_loss=0.07288, ctc_loss=0.1361, over 3852827.87 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:37:38,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=77312.0, ans=0.0
+2024-08-26 16:37:38,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=77312.0, ans=0.125
+2024-08-26 16:37:40,147 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:37:44,123 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.22 vs. limit=22.5
+2024-08-26 16:38:04,750 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.566e+02 1.748e+02 2.075e+02 4.290e+02, threshold=3.497e+02, percent-clipped=1.0
+2024-08-26 16:38:10,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=77472.0, ans=0.025
+2024-08-26 16:38:12,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.89 vs. limit=12.0
+2024-08-26 16:38:34,161 INFO [train.py:1114] (0/4) Epoch 6, batch 2100, loss[loss=0.2441, simple_loss=0.3067, pruned_loss=0.06495, ctc_loss=0.129, over 19770.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3041, pruned_loss=0.07204, ctc_loss=0.1348, over 3859819.95 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:38:35,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=77578.66666666667, ans=0.2
+2024-08-26 16:39:07,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=77578.66666666667, ans=0.125
+2024-08-26 16:39:10,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=77578.66666666667, ans=0.125
+2024-08-26 16:39:30,072 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.32 vs. limit=15.0
+2024-08-26 16:39:42,870 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.09 vs. limit=15.0
+2024-08-26 16:39:42,999 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=77792.0, ans=15.0
+2024-08-26 16:39:46,031 INFO [train.py:1114] (0/4) Epoch 6, batch 2150, loss[loss=0.241, simple_loss=0.2986, pruned_loss=0.06677, ctc_loss=0.1249, over 19586.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3036, pruned_loss=0.0718, ctc_loss=0.1342, over 3870116.27 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:39:52,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.89 vs. limit=15.0
+2024-08-26 16:39:55,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=77898.66666666667, ans=0.0
+2024-08-26 16:40:04,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=77952.0, ans=0.125
+2024-08-26 16:40:13,759 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 1.590e+02 1.744e+02 2.019e+02 3.989e+02, threshold=3.489e+02, percent-clipped=1.0
+2024-08-26 16:40:14,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=78005.33333333333, ans=0.125
+2024-08-26 16:40:24,500 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:40:31,353 INFO [train.py:1114] (0/4) Epoch 6, batch 2200, loss[loss=0.2384, simple_loss=0.3033, pruned_loss=0.06232, ctc_loss=0.1223, over 19602.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3038, pruned_loss=0.07196, ctc_loss=0.1344, over 3868804.12 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:40:33,208 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=78112.0, ans=0.1
+2024-08-26 16:40:51,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=78218.66666666667, ans=0.125
+2024-08-26 16:40:56,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=78218.66666666667, ans=0.025
+2024-08-26 16:40:56,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=78218.66666666667, ans=0.125
+2024-08-26 16:41:01,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=78272.0, ans=0.2
+2024-08-26 16:41:03,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=78272.0, ans=0.05
+2024-08-26 16:41:57,897 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=14.10 vs. limit=15.0
+2024-08-26 16:42:02,279 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.49 vs. limit=15.0
+2024-08-26 16:42:04,357 INFO [train.py:1114] (0/4) Epoch 6, batch 2250, loss[loss=0.2337, simple_loss=0.2954, pruned_loss=0.06267, ctc_loss=0.1166, over 19609.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3037, pruned_loss=0.07206, ctc_loss=0.1343, over 3868380.80 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:42:30,472 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.366e+02 1.631e+02 1.850e+02 2.118e+02 4.912e+02, threshold=3.701e+02, percent-clipped=4.0
+2024-08-26 16:42:57,024 INFO [train.py:1114] (0/4) Epoch 6, batch 2300, loss[loss=0.2239, simple_loss=0.2808, pruned_loss=0.06044, ctc_loss=0.1152, over 19514.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3028, pruned_loss=0.07183, ctc_loss=0.1339, over 3861729.87 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:43:17,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=78752.0, ans=0.125
+2024-08-26 16:43:25,629 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.52 vs. limit=22.5
+2024-08-26 16:43:28,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=78805.33333333333, ans=0.04949747468305833
+2024-08-26 16:43:36,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=78858.66666666667, ans=0.125
+2024-08-26 16:43:41,597 INFO [train.py:1114] (0/4) Epoch 6, batch 2350, loss[loss=0.2648, simple_loss=0.3144, pruned_loss=0.07872, ctc_loss=0.1445, over 19666.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3031, pruned_loss=0.0721, ctc_loss=0.1345, over 3864765.58 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:43:56,583 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=78965.33333333333, ans=0.1
+2024-08-26 16:43:56,899 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.03 vs. limit=15.0
+2024-08-26 16:44:09,678 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.319e+02 1.571e+02 1.792e+02 2.053e+02 3.529e+02, threshold=3.585e+02, percent-clipped=0.0
+2024-08-26 16:44:10,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=79072.0, ans=0.0
+2024-08-26 16:44:27,088 INFO [train.py:1114] (0/4) Epoch 6, batch 2400, loss[loss=0.2553, simple_loss=0.31, pruned_loss=0.07228, ctc_loss=0.1401, over 19346.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3049, pruned_loss=0.07285, ctc_loss=0.1357, over 3859314.25 frames. ], batch size: 67, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:44:42,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=79232.0, ans=0.2
+2024-08-26 16:44:44,188 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=79285.33333333333, ans=0.2
+2024-08-26 16:45:06,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=79392.0, ans=0.125
+2024-08-26 16:45:12,831 INFO [train.py:1114] (0/4) Epoch 6, batch 2450, loss[loss=0.3439, simple_loss=0.3484, pruned_loss=0.1258, ctc_loss=0.2196, over 13494.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3099, pruned_loss=0.07687, ctc_loss=0.1432, over 3735937.20 frames. ], batch size: 140, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:45:13,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=79445.33333333333, ans=15.0
+2024-08-26 16:45:17,839 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=16.37 vs. limit=15.0
+2024-08-26 16:45:22,416 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.95 vs. limit=15.0
+2024-08-26 16:45:23,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=79498.66666666667, ans=0.0
+2024-08-26 16:45:26,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=79498.66666666667, ans=0.125
+2024-08-26 16:45:26,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=79498.66666666667, ans=0.125
+2024-08-26 16:45:34,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=79552.0, ans=0.125
+2024-08-26 16:45:35,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=79552.0, ans=0.0
+2024-08-26 16:45:40,096 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.744e+02 1.902e+02 2.066e+02 3.652e+02, threshold=3.804e+02, percent-clipped=1.0
+2024-08-26 16:45:41,136 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=79605.33333333333, ans=0.125
+2024-08-26 16:45:44,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=79605.33333333333, ans=0.125
+2024-08-26 16:45:48,561 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-6.pt
+2024-08-26 16:48:16,413 INFO [train.py:1114] (0/4) Epoch 7, batch 0, loss[loss=0.2426, simple_loss=0.2927, pruned_loss=0.07011, ctc_loss=0.1307, over 19812.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.2927, pruned_loss=0.07011, ctc_loss=0.1307, over 19812.00 frames. ], batch size: 49, lr: 2.14e-02, grad_scale: 32.0
+2024-08-26 16:48:16,415 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 16:48:29,568 INFO [train.py:1146] (0/4) Epoch 7, validation: loss=0.2068, simple_loss=0.2958, pruned_loss=0.04327, ctc_loss=0.07811, over 944034.00 frames.
+2024-08-26 16:48:29,570 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 16:48:35,791 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.20 vs. limit=22.5
+2024-08-26 16:48:45,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.60 vs. limit=6.0
+2024-08-26 16:49:02,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=79818.66666666667, ans=0.0
+2024-08-26 16:49:08,969 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.54 vs. limit=15.0
+2024-08-26 16:49:19,279 INFO [train.py:1114] (0/4) Epoch 7, batch 50, loss[loss=0.213, simple_loss=0.2767, pruned_loss=0.05369, ctc_loss=0.1046, over 19723.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3061, pruned_loss=0.07314, ctc_loss=0.1374, over 844189.99 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-26 16:49:24,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=79925.33333333333, ans=0.125
+2024-08-26 16:49:30,277 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.69 vs. limit=12.0
+2024-08-26 16:49:42,031 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.54 vs. limit=10.0
+2024-08-26 16:49:50,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=80085.33333333333, ans=0.025
+2024-08-26 16:49:57,479 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.282e+02 1.584e+02 1.822e+02 2.089e+02 3.575e+02, threshold=3.645e+02, percent-clipped=0.0
+2024-08-26 16:50:07,020 INFO [train.py:1114] (0/4) Epoch 7, batch 100, loss[loss=0.2194, simple_loss=0.2846, pruned_loss=0.05637, ctc_loss=0.1035, over 19727.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3063, pruned_loss=0.07236, ctc_loss=0.136, over 1500395.15 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:50:11,877 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=80192.0, ans=0.2
+2024-08-26 16:50:34,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=80298.66666666667, ans=0.125
+2024-08-26 16:50:52,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=80405.33333333333, ans=0.1
+2024-08-26 16:50:58,409 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.04 vs. limit=22.5
+2024-08-26 16:51:01,442 INFO [train.py:1114] (0/4) Epoch 7, batch 150, loss[loss=0.2096, simple_loss=0.2623, pruned_loss=0.05717, ctc_loss=0.1065, over 19726.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3032, pruned_loss=0.07065, ctc_loss=0.1325, over 2029556.49 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:51:32,194 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.97 vs. limit=15.0
+2024-08-26 16:51:33,942 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.20 vs. limit=12.0
+2024-08-26 16:51:39,008 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.222e+02 1.525e+02 1.667e+02 1.863e+02 2.878e+02, threshold=3.334e+02, percent-clipped=0.0
+2024-08-26 16:51:41,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=80672.0, ans=0.0
+2024-08-26 16:51:46,783 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=80672.0, ans=0.125
+2024-08-26 16:51:47,146 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.37 vs. limit=22.5
+2024-08-26 16:51:48,551 INFO [train.py:1114] (0/4) Epoch 7, batch 200, loss[loss=0.2648, simple_loss=0.3214, pruned_loss=0.07601, ctc_loss=0.1404, over 18351.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3016, pruned_loss=0.07031, ctc_loss=0.1319, over 2436499.74 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:51:49,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=80725.33333333333, ans=0.125
+2024-08-26 16:51:49,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=80725.33333333333, ans=0.025
+2024-08-26 16:51:51,841 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.69 vs. limit=22.5
+2024-08-26 16:52:11,299 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.78 vs. limit=12.0
+2024-08-26 16:52:11,819 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=80832.0, ans=0.0
+2024-08-26 16:52:35,146 INFO [train.py:1114] (0/4) Epoch 7, batch 250, loss[loss=0.2396, simple_loss=0.3074, pruned_loss=0.06224, ctc_loss=0.1181, over 19400.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3003, pruned_loss=0.06921, ctc_loss=0.1297, over 2756000.58 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:52:41,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=80992.0, ans=0.125
+2024-08-26 16:52:50,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=81045.33333333333, ans=0.125
+2024-08-26 16:53:01,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=81098.66666666667, ans=0.125
+2024-08-26 16:53:03,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=81152.0, ans=0.0
+2024-08-26 16:53:05,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81152.0, ans=0.1
+2024-08-26 16:53:16,592 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.591e+02 1.729e+02 1.900e+02 5.825e+02, threshold=3.457e+02, percent-clipped=1.0
+2024-08-26 16:53:25,919 INFO [train.py:1114] (0/4) Epoch 7, batch 300, loss[loss=0.2698, simple_loss=0.3268, pruned_loss=0.07686, ctc_loss=0.1477, over 19538.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.2999, pruned_loss=0.0689, ctc_loss=0.1294, over 2999935.01 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-26 16:53:35,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=81258.66666666667, ans=0.1
+2024-08-26 16:54:06,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81418.66666666667, ans=0.1
+2024-08-26 16:54:10,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=81472.0, ans=0.0
+2024-08-26 16:54:18,317 INFO [train.py:1114] (0/4) Epoch 7, batch 350, loss[loss=0.2226, simple_loss=0.2715, pruned_loss=0.06341, ctc_loss=0.1172, over 19755.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.2998, pruned_loss=0.06892, ctc_loss=0.1292, over 3189143.82 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 16.0
+2024-08-26 16:54:20,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=81525.33333333333, ans=0.0
+2024-08-26 16:54:31,494 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:54:51,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=81685.33333333333, ans=0.0
+2024-08-26 16:54:56,446 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.574e+02 1.753e+02 2.022e+02 2.928e+02, threshold=3.506e+02, percent-clipped=0.0
+2024-08-26 16:54:59,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=81738.66666666667, ans=0.0
+2024-08-26 16:55:04,706 INFO [train.py:1114] (0/4) Epoch 7, batch 400, loss[loss=0.2486, simple_loss=0.3059, pruned_loss=0.06984, ctc_loss=0.1291, over 19482.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.2997, pruned_loss=0.06887, ctc_loss=0.1288, over 3340491.50 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-26 16:55:05,821 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=81792.0, ans=0.05
+2024-08-26 16:55:09,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81792.0, ans=0.1
+2024-08-26 16:55:17,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=81845.33333333333, ans=0.0
+2024-08-26 16:55:20,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81845.33333333333, ans=0.1
+2024-08-26 16:55:21,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=81845.33333333333, ans=0.025
+2024-08-26 16:55:36,198 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.74 vs. limit=22.5
+2024-08-26 16:55:43,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=82005.33333333333, ans=0.2
+2024-08-26 16:55:47,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=82005.33333333333, ans=0.0
+2024-08-26 16:55:49,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=82005.33333333333, ans=0.07
+2024-08-26 16:55:51,750 INFO [train.py:1114] (0/4) Epoch 7, batch 450, loss[loss=0.2448, simple_loss=0.3053, pruned_loss=0.06501, ctc_loss=0.1358, over 19601.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.2999, pruned_loss=0.069, ctc_loss=0.1291, over 3447901.07 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:56:11,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=82112.0, ans=0.2
+2024-08-26 16:56:11,463 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=82112.0, ans=0.0
+2024-08-26 16:56:34,680 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.47 vs. limit=15.0
+2024-08-26 16:56:41,733 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 1.485e+02 1.753e+02 2.038e+02 3.855e+02, threshold=3.505e+02, percent-clipped=1.0
+2024-08-26 16:56:43,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82272.0, ans=0.1
+2024-08-26 16:56:44,096 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.72 vs. limit=15.0
+2024-08-26 16:56:48,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=82325.33333333333, ans=0.025
+2024-08-26 16:56:49,043 INFO [train.py:1114] (0/4) Epoch 7, batch 500, loss[loss=0.2535, simple_loss=0.3125, pruned_loss=0.07118, ctc_loss=0.1303, over 19655.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.2982, pruned_loss=0.06803, ctc_loss=0.1273, over 3543511.16 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:57:07,788 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:57:27,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=82538.66666666667, ans=6.0
+2024-08-26 16:57:29,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=82538.66666666667, ans=0.1
+2024-08-26 16:57:34,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=82538.66666666667, ans=0.2
+2024-08-26 16:57:35,769 INFO [train.py:1114] (0/4) Epoch 7, batch 550, loss[loss=0.2527, simple_loss=0.3097, pruned_loss=0.07025, ctc_loss=0.1378, over 19167.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2987, pruned_loss=0.06835, ctc_loss=0.1278, over 3604774.38 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:57:48,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=82645.33333333333, ans=0.125
+2024-08-26 16:57:58,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=82698.66666666667, ans=0.0
+2024-08-26 16:58:00,788 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.21 vs. limit=15.0
+2024-08-26 16:58:16,879 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.530e+02 1.701e+02 1.927e+02 4.407e+02, threshold=3.402e+02, percent-clipped=1.0
+2024-08-26 16:58:30,198 INFO [train.py:1114] (0/4) Epoch 7, batch 600, loss[loss=0.2554, simple_loss=0.3162, pruned_loss=0.0718, ctc_loss=0.1276, over 19322.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2993, pruned_loss=0.06838, ctc_loss=0.128, over 3663939.57 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 17:00:42,894 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82912.0, ans=0.1
+2024-08-26 17:04:51,937 INFO [train.py:1114] (0/4) Epoch 7, batch 650, loss[loss=0.2662, simple_loss=0.3208, pruned_loss=0.07728, ctc_loss=0.1429, over 19746.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2982, pruned_loss=0.06783, ctc_loss=0.127, over 3714209.88 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:05:23,591 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=83232.0, ans=0.125
+2024-08-26 17:05:41,844 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.294e+02 1.502e+02 1.666e+02 1.880e+02 3.682e+02, threshold=3.331e+02, percent-clipped=2.0
+2024-08-26 17:06:19,673 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=83392.0, ans=0.125
+2024-08-26 17:06:20,351 INFO [train.py:1114] (0/4) Epoch 7, batch 700, loss[loss=0.2125, simple_loss=0.2748, pruned_loss=0.05463, ctc_loss=0.102, over 19715.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2988, pruned_loss=0.0682, ctc_loss=0.1276, over 3746801.58 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:06:46,867 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=83498.66666666667, ans=0.2
+2024-08-26 17:07:06,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=83605.33333333333, ans=0.0
+2024-08-26 17:07:08,428 INFO [train.py:1114] (0/4) Epoch 7, batch 750, loss[loss=0.2147, simple_loss=0.2876, pruned_loss=0.05137, ctc_loss=0.09747, over 19505.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2984, pruned_loss=0.06805, ctc_loss=0.1274, over 3773207.45 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:07:10,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=83658.66666666667, ans=0.2
+2024-08-26 17:07:20,132 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.35 vs. limit=10.0
+2024-08-26 17:07:41,293 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=6.0
+2024-08-26 17:07:48,231 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.281e+02 1.533e+02 1.678e+02 1.875e+02 3.166e+02, threshold=3.356e+02, percent-clipped=0.0
+2024-08-26 17:07:50,302 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=83872.0, ans=0.0
+2024-08-26 17:07:56,873 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.51 vs. limit=10.0
+2024-08-26 17:07:58,364 INFO [train.py:1114] (0/4) Epoch 7, batch 800, loss[loss=0.2274, simple_loss=0.2828, pruned_loss=0.06265, ctc_loss=0.1169, over 19801.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2988, pruned_loss=0.06833, ctc_loss=0.1276, over 3794941.03 frames. ], batch size: 49, lr: 2.10e-02, grad_scale: 32.0
+2024-08-26 17:08:11,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=83978.66666666667, ans=0.125
+2024-08-26 17:08:13,556 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.65 vs. limit=22.5
+2024-08-26 17:08:14,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=83978.66666666667, ans=0.1
+2024-08-26 17:08:20,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=84032.0, ans=0.2
+2024-08-26 17:08:27,293 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=84032.0, ans=0.0
+2024-08-26 17:08:36,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=84085.33333333333, ans=0.0
+2024-08-26 17:08:36,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-26 17:08:56,347 INFO [train.py:1114] (0/4) Epoch 7, batch 850, loss[loss=0.2589, simple_loss=0.3179, pruned_loss=0.0728, ctc_loss=0.1356, over 19655.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2982, pruned_loss=0.06799, ctc_loss=0.1272, over 3814006.77 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:08:56,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.76 vs. limit=6.0
+2024-08-26 17:09:04,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff3.min_abs, batch_count=84192.0, ans=0.2
+2024-08-26 17:09:06,673 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=84245.33333333333, ans=0.1
+2024-08-26 17:09:19,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=84298.66666666667, ans=0.1
+2024-08-26 17:09:33,718 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=84298.66666666667, ans=0.125
+2024-08-26 17:09:34,956 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.05 vs. limit=22.5
+2024-08-26 17:09:48,663 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=84298.66666666667, ans=0.1
+2024-08-26 17:10:01,952 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.310e+02 1.545e+02 1.673e+02 1.909e+02 3.259e+02, threshold=3.346e+02, percent-clipped=0.0
+2024-08-26 17:10:09,591 INFO [train.py:1114] (0/4) Epoch 7, batch 900, loss[loss=0.2096, simple_loss=0.2687, pruned_loss=0.05443, ctc_loss=0.1044, over 19440.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2987, pruned_loss=0.06855, ctc_loss=0.1279, over 3817543.60 frames. ], batch size: 48, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:10:11,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=84458.66666666667, ans=0.0
+2024-08-26 17:10:26,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=84512.0, ans=0.5
+2024-08-26 17:10:36,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=84565.33333333333, ans=0.05
+2024-08-26 17:10:58,453 INFO [train.py:1114] (0/4) Epoch 7, batch 950, loss[loss=0.2258, simple_loss=0.2831, pruned_loss=0.06146, ctc_loss=0.1139, over 19510.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2989, pruned_loss=0.06864, ctc_loss=0.1282, over 3819914.51 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:11:03,344 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=84725.33333333333, ans=0.0
+2024-08-26 17:11:14,971 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.76 vs. limit=15.0
+2024-08-26 17:11:19,705 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.22 vs. limit=15.0
+2024-08-26 17:11:21,707 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.08 vs. limit=15.0
+2024-08-26 17:11:22,721 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=12.0
+2024-08-26 17:11:39,653 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:11:39,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=84885.33333333333, ans=0.0
+2024-08-26 17:11:48,307 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.566e+02 1.708e+02 1.976e+02 3.572e+02, threshold=3.415e+02, percent-clipped=1.0
+2024-08-26 17:12:17,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=84992.0, ans=0.95
+2024-08-26 17:12:18,436 INFO [train.py:1114] (0/4) Epoch 7, batch 1000, loss[loss=0.212, simple_loss=0.2755, pruned_loss=0.05384, ctc_loss=0.1021, over 19858.00 frames. ], tot_loss[loss=0.245, simple_loss=0.2999, pruned_loss=0.06918, ctc_loss=0.1292, over 3816644.91 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:13:31,052 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=85098.66666666667, ans=0.0
+2024-08-26 17:13:38,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=85098.66666666667, ans=0.0
+2024-08-26 17:13:57,118 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=85205.33333333333, ans=0.1
+2024-08-26 17:13:59,708 INFO [train.py:1114] (0/4) Epoch 7, batch 1050, loss[loss=0.2513, simple_loss=0.312, pruned_loss=0.07016, ctc_loss=0.1256, over 19864.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.299, pruned_loss=0.0686, ctc_loss=0.1279, over 3823808.53 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:14:10,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=85312.0, ans=0.125
+2024-08-26 17:14:12,366 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-16000.pt
+2024-08-26 17:14:39,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=85312.0, ans=0.0
+2024-08-26 17:14:40,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=85312.0, ans=0.125
+2024-08-26 17:16:32,436 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-26 17:16:37,121 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-26 17:16:40,670 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.203e+02 1.449e+02 1.584e+02 1.768e+02 2.861e+02, threshold=3.169e+02, percent-clipped=0.0
+2024-08-26 17:16:40,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=85472.0, ans=0.125
+2024-08-26 17:16:43,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=85472.0, ans=0.025
+2024-08-26 17:16:44,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85472.0, ans=0.1
+2024-08-26 17:16:44,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=85472.0, ans=0.125
+2024-08-26 17:16:45,993 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.88 vs. limit=12.0
+2024-08-26 17:16:46,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=85472.0, ans=0.125
+2024-08-26 17:16:47,590 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:16:48,398 INFO [train.py:1114] (0/4) Epoch 7, batch 1100, loss[loss=0.2326, simple_loss=0.2912, pruned_loss=0.06286, ctc_loss=0.1207, over 19601.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.298, pruned_loss=0.06787, ctc_loss=0.127, over 3831458.50 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:16:48,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=85525.33333333333, ans=0.125
+2024-08-26 17:17:07,922 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.63 vs. limit=12.0
+2024-08-26 17:17:09,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.21 vs. limit=22.5
+2024-08-26 17:17:12,173 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=85632.0, ans=0.04949747468305833
+2024-08-26 17:17:35,328 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=85738.66666666667, ans=0.125
+2024-08-26 17:17:45,027 INFO [train.py:1114] (0/4) Epoch 7, batch 1150, loss[loss=0.2128, simple_loss=0.2796, pruned_loss=0.05249, ctc_loss=0.1026, over 19588.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2983, pruned_loss=0.06831, ctc_loss=0.1279, over 3830327.90 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-26 17:17:45,240 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=85792.0, ans=0.125
+2024-08-26 17:17:56,295 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=85845.33333333333, ans=0.0
+2024-08-26 17:18:01,596 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=85845.33333333333, ans=0.125
+2024-08-26 17:18:05,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=85845.33333333333, ans=0.125
+2024-08-26 17:18:08,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=85898.66666666667, ans=0.2
+2024-08-26 17:18:16,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=85898.66666666667, ans=0.0
+2024-08-26 17:18:24,551 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.11 vs. limit=6.0
+2024-08-26 17:18:41,958 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.243e+02 1.522e+02 1.667e+02 1.891e+02 3.736e+02, threshold=3.335e+02, percent-clipped=2.0
+2024-08-26 17:18:45,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=86005.33333333333, ans=0.025
+2024-08-26 17:18:48,636 INFO [train.py:1114] (0/4) Epoch 7, batch 1200, loss[loss=0.2522, simple_loss=0.3106, pruned_loss=0.07121, ctc_loss=0.1283, over 19850.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2993, pruned_loss=0.06861, ctc_loss=0.1283, over 3826209.33 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:19:14,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten.whitening_limit, batch_count=86165.33333333333, ans=15.0
+2024-08-26 17:19:26,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=86218.66666666667, ans=0.125
+2024-08-26 17:19:54,879 INFO [train.py:1114] (0/4) Epoch 7, batch 1250, loss[loss=0.2555, simple_loss=0.3106, pruned_loss=0.07243, ctc_loss=0.1387, over 19523.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2994, pruned_loss=0.06828, ctc_loss=0.1278, over 3844367.75 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:19:56,344 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.35 vs. limit=15.0
+2024-08-26 17:20:00,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=86325.33333333333, ans=0.125
+2024-08-26 17:20:02,027 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.39 vs. limit=15.0
+2024-08-26 17:20:08,770 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:20:17,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=86432.0, ans=10.0
+2024-08-26 17:20:21,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=86432.0, ans=0.125
+2024-08-26 17:20:21,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=86432.0, ans=0.125
+2024-08-26 17:20:30,372 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=86485.33333333333, ans=0.125
+2024-08-26 17:20:31,618 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.87 vs. limit=15.0
+2024-08-26 17:20:35,654 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.476e+02 1.609e+02 1.857e+02 3.245e+02, threshold=3.218e+02, percent-clipped=0.0
+2024-08-26 17:20:44,740 INFO [train.py:1114] (0/4) Epoch 7, batch 1300, loss[loss=0.2722, simple_loss=0.3193, pruned_loss=0.08363, ctc_loss=0.1446, over 18932.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2981, pruned_loss=0.06756, ctc_loss=0.1265, over 3847421.45 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:20:49,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=86592.0, ans=0.0
+2024-08-26 17:21:23,229 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=86752.0, ans=0.0
+2024-08-26 17:21:26,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=86752.0, ans=0.125
+2024-08-26 17:21:31,518 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=86805.33333333333, ans=0.125
+2024-08-26 17:21:37,540 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.28 vs. limit=15.0
+2024-08-26 17:21:38,929 INFO [train.py:1114] (0/4) Epoch 7, batch 1350, loss[loss=0.2372, simple_loss=0.2975, pruned_loss=0.06492, ctc_loss=0.1176, over 19771.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2969, pruned_loss=0.06679, ctc_loss=0.1251, over 3858190.88 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:21:44,707 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=86858.66666666667, ans=0.0
+2024-08-26 17:21:48,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=86912.0, ans=0.0
+2024-08-26 17:21:52,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=86912.0, ans=0.125
+2024-08-26 17:21:57,697 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=86965.33333333333, ans=0.1
+2024-08-26 17:22:08,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=87018.66666666667, ans=0.025
+2024-08-26 17:22:15,052 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=87018.66666666667, ans=0.1
+2024-08-26 17:22:19,557 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.495e+02 1.726e+02 1.992e+02 3.104e+02, threshold=3.452e+02, percent-clipped=0.0
+2024-08-26 17:22:26,113 INFO [train.py:1114] (0/4) Epoch 7, batch 1400, loss[loss=0.1916, simple_loss=0.2518, pruned_loss=0.04749, ctc_loss=0.09083, over 19698.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.2963, pruned_loss=0.06677, ctc_loss=0.1247, over 3866044.91 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:22:28,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=87125.33333333333, ans=0.0
+2024-08-26 17:22:28,520 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.95 vs. limit=22.5
+2024-08-26 17:23:02,264 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.66 vs. limit=15.0
+2024-08-26 17:23:24,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=87285.33333333333, ans=0.125
+2024-08-26 17:23:35,695 INFO [train.py:1114] (0/4) Epoch 7, batch 1450, loss[loss=0.2593, simple_loss=0.3118, pruned_loss=0.07483, ctc_loss=0.1427, over 19693.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2976, pruned_loss=0.06746, ctc_loss=0.126, over 3862692.13 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:23:49,129 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.62 vs. limit=22.5
+2024-08-26 17:23:50,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=87445.33333333333, ans=0.125
+2024-08-26 17:24:07,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=87552.0, ans=0.125
+2024-08-26 17:24:13,197 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=87552.0, ans=0.0
+2024-08-26 17:24:20,640 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.540e+02 1.669e+02 1.894e+02 3.453e+02, threshold=3.338e+02, percent-clipped=1.0
+2024-08-26 17:24:23,900 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.67 vs. limit=15.0
+2024-08-26 17:24:29,674 INFO [train.py:1114] (0/4) Epoch 7, batch 1500, loss[loss=0.2585, simple_loss=0.3116, pruned_loss=0.07483, ctc_loss=0.1395, over 19570.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.298, pruned_loss=0.06755, ctc_loss=0.1264, over 3861815.85 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:24:35,484 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:24:39,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=87658.66666666667, ans=0.125
+2024-08-26 17:25:01,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=87818.66666666667, ans=0.1
+2024-08-26 17:25:08,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=87818.66666666667, ans=0.125
+2024-08-26 17:25:08,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=87818.66666666667, ans=0.0
+2024-08-26 17:25:19,519 INFO [train.py:1114] (0/4) Epoch 7, batch 1550, loss[loss=0.2569, simple_loss=0.3173, pruned_loss=0.07051, ctc_loss=0.139, over 19600.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2981, pruned_loss=0.06787, ctc_loss=0.127, over 3847484.92 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:25:21,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=87925.33333333333, ans=0.125
+2024-08-26 17:25:37,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=88032.0, ans=0.1
+2024-08-26 17:25:38,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=88032.0, ans=0.125
+2024-08-26 17:25:47,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.18 vs. limit=15.0
+2024-08-26 17:25:57,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=88085.33333333333, ans=0.0
+2024-08-26 17:26:00,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.84 vs. limit=10.0
+2024-08-26 17:26:03,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=88138.66666666667, ans=0.09899494936611666
+2024-08-26 17:26:04,271 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.559e+02 1.788e+02 2.182e+02 5.116e+02, threshold=3.576e+02, percent-clipped=3.0
+2024-08-26 17:26:06,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=88138.66666666667, ans=0.125
+2024-08-26 17:26:10,964 INFO [train.py:1114] (0/4) Epoch 7, batch 1600, loss[loss=0.2403, simple_loss=0.3043, pruned_loss=0.06305, ctc_loss=0.1252, over 19840.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.2983, pruned_loss=0.06801, ctc_loss=0.1272, over 3836479.69 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:26:12,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=88192.0, ans=0.2
+2024-08-26 17:26:34,800 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=88298.66666666667, ans=0.0
+2024-08-26 17:26:39,208 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=88298.66666666667, ans=0.0
+2024-08-26 17:26:42,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=88352.0, ans=0.0
+2024-08-26 17:26:46,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=88352.0, ans=0.1
+2024-08-26 17:26:53,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=88405.33333333333, ans=0.125
+2024-08-26 17:27:01,934 INFO [train.py:1114] (0/4) Epoch 7, batch 1650, loss[loss=0.2374, simple_loss=0.2998, pruned_loss=0.06294, ctc_loss=0.123, over 19659.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2979, pruned_loss=0.06789, ctc_loss=0.1271, over 3832847.28 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:27:30,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=88458.66666666667, ans=0.0
+2024-08-26 17:27:32,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=88458.66666666667, ans=0.125
+2024-08-26 17:27:47,871 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.76 vs. limit=15.0
+2024-08-26 17:28:05,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=88565.33333333333, ans=0.0
+2024-08-26 17:28:12,761 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.93 vs. limit=10.0
+2024-08-26 17:28:17,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=88565.33333333333, ans=0.125
+2024-08-26 17:28:44,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=88618.66666666667, ans=0.1
+2024-08-26 17:29:23,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=88672.0, ans=0.125
+2024-08-26 17:29:25,554 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.503e+02 1.653e+02 1.809e+02 2.992e+02, threshold=3.307e+02, percent-clipped=0.0
+2024-08-26 17:29:26,126 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.33 vs. limit=22.5
+2024-08-26 17:29:30,694 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=88672.0, ans=0.5
+2024-08-26 17:29:40,063 INFO [train.py:1114] (0/4) Epoch 7, batch 1700, loss[loss=0.2161, simple_loss=0.2677, pruned_loss=0.06006, ctc_loss=0.1113, over 19668.00 frames. ], tot_loss[loss=0.241, simple_loss=0.2974, pruned_loss=0.0671, ctc_loss=0.1258, over 3847638.34 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:30:08,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=88778.66666666667, ans=0.1
+2024-08-26 17:30:12,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=88778.66666666667, ans=0.2
+2024-08-26 17:30:29,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=88885.33333333333, ans=0.1
+2024-08-26 17:30:44,517 INFO [train.py:1114] (0/4) Epoch 7, batch 1750, loss[loss=0.1987, simple_loss=0.2523, pruned_loss=0.05267, ctc_loss=0.09946, over 19664.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.2969, pruned_loss=0.06696, ctc_loss=0.1254, over 3851910.98 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:31:13,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=89152.0, ans=0.125
+2024-08-26 17:31:15,320 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=89152.0, ans=0.125
+2024-08-26 17:31:17,129 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=89152.0, ans=0.0
+2024-08-26 17:31:23,276 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.487e+02 1.622e+02 1.808e+02 3.869e+02, threshold=3.245e+02, percent-clipped=1.0
+2024-08-26 17:31:28,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=89258.66666666667, ans=0.0
+2024-08-26 17:31:29,437 INFO [train.py:1114] (0/4) Epoch 7, batch 1800, loss[loss=0.2397, simple_loss=0.3036, pruned_loss=0.06332, ctc_loss=0.1232, over 19609.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.2969, pruned_loss=0.06688, ctc_loss=0.1253, over 3853707.10 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:31:29,620 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=89258.66666666667, ans=0.025
+2024-08-26 17:31:31,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=89258.66666666667, ans=0.025
+2024-08-26 17:31:39,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=89312.0, ans=0.0
+2024-08-26 17:31:55,325 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=89418.66666666667, ans=0.1
+2024-08-26 17:31:57,091 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:32:14,104 INFO [train.py:1114] (0/4) Epoch 7, batch 1850, loss[loss=0.2514, simple_loss=0.3128, pruned_loss=0.0687, ctc_loss=0.1316, over 19566.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.2964, pruned_loss=0.06647, ctc_loss=0.1243, over 3856339.26 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:32:16,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=89525.33333333333, ans=0.2
+2024-08-26 17:32:19,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=89525.33333333333, ans=0.05
+2024-08-26 17:32:20,481 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=89525.33333333333, ans=0.0
+2024-08-26 17:32:49,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=89685.33333333333, ans=0.0
+2024-08-26 17:32:54,352 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.90 vs. limit=10.0
+2024-08-26 17:32:55,548 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.590e+02 1.759e+02 2.003e+02 3.443e+02, threshold=3.517e+02, percent-clipped=1.0
+2024-08-26 17:33:01,825 INFO [train.py:1114] (0/4) Epoch 7, batch 1900, loss[loss=0.2351, simple_loss=0.2992, pruned_loss=0.0614, ctc_loss=0.1204, over 19679.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2971, pruned_loss=0.06669, ctc_loss=0.1247, over 3861764.73 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:35:00,640 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.09 vs. limit=15.0
+2024-08-26 17:35:13,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90005.33333333333, ans=0.1
+2024-08-26 17:35:17,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=90005.33333333333, ans=0.1
+2024-08-26 17:35:20,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=90005.33333333333, ans=0.07
+2024-08-26 17:35:23,453 INFO [train.py:1114] (0/4) Epoch 7, batch 1950, loss[loss=0.2478, simple_loss=0.3069, pruned_loss=0.06723, ctc_loss=0.1359, over 19594.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2979, pruned_loss=0.06665, ctc_loss=0.1249, over 3871132.26 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:35:23,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.13 vs. limit=22.5
+2024-08-26 17:35:45,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=90165.33333333333, ans=0.0
+2024-08-26 17:35:45,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=90165.33333333333, ans=0.0
+2024-08-26 17:35:53,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=90218.66666666667, ans=0.0
+2024-08-26 17:35:57,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=90218.66666666667, ans=0.1
+2024-08-26 17:36:03,243 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.531e+02 1.657e+02 1.854e+02 3.915e+02, threshold=3.314e+02, percent-clipped=1.0
+2024-08-26 17:36:05,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=90272.0, ans=0.1
+2024-08-26 17:36:09,475 INFO [train.py:1114] (0/4) Epoch 7, batch 2000, loss[loss=0.2074, simple_loss=0.2626, pruned_loss=0.05567, ctc_loss=0.102, over 19665.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2986, pruned_loss=0.06743, ctc_loss=0.1262, over 3856701.82 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:36:16,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=90325.33333333333, ans=0.0
+2024-08-26 17:36:18,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=90378.66666666667, ans=0.0
+2024-08-26 17:36:19,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=90378.66666666667, ans=0.0
+2024-08-26 17:36:22,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=90378.66666666667, ans=0.125
+2024-08-26 17:36:22,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=90378.66666666667, ans=0.125
+2024-08-26 17:36:26,167 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.20 vs. limit=10.0
+2024-08-26 17:36:27,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90432.0, ans=0.1
+2024-08-26 17:36:33,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=90432.0, ans=0.125
+2024-08-26 17:36:42,016 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90485.33333333333, ans=0.1
+2024-08-26 17:36:44,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=90538.66666666667, ans=0.125
+2024-08-26 17:36:51,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=90538.66666666667, ans=0.025
+2024-08-26 17:36:53,970 INFO [train.py:1114] (0/4) Epoch 7, batch 2050, loss[loss=0.2141, simple_loss=0.2736, pruned_loss=0.05704, ctc_loss=0.1014, over 19720.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.2971, pruned_loss=0.06693, ctc_loss=0.1252, over 3852774.23 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:36:54,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=90592.0, ans=0.2
+2024-08-26 17:36:56,789 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:36:56,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=90592.0, ans=0.125
+2024-08-26 17:37:02,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=90645.33333333333, ans=0.125
+2024-08-26 17:37:15,956 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=90698.66666666667, ans=0.0
+2024-08-26 17:37:16,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=90698.66666666667, ans=0.0
+2024-08-26 17:37:21,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=90752.0, ans=0.0
+2024-08-26 17:37:26,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.92 vs. limit=6.0
+2024-08-26 17:37:27,602 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.45 vs. limit=12.0
+2024-08-26 17:37:28,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=90752.0, ans=0.1
+2024-08-26 17:37:29,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=90805.33333333333, ans=0.125
+2024-08-26 17:37:32,308 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.477e+02 1.642e+02 1.962e+02 4.346e+02, threshold=3.284e+02, percent-clipped=3.0
+2024-08-26 17:37:38,468 INFO [train.py:1114] (0/4) Epoch 7, batch 2100, loss[loss=0.2338, simple_loss=0.2969, pruned_loss=0.06311, ctc_loss=0.1111, over 19765.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.2967, pruned_loss=0.0665, ctc_loss=0.1244, over 3859772.11 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:37:52,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=90912.0, ans=0.09899494936611666
+2024-08-26 17:37:55,133 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.88 vs. limit=15.0
+2024-08-26 17:38:00,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=90965.33333333333, ans=0.2
+2024-08-26 17:38:08,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=91018.66666666667, ans=0.1
+2024-08-26 17:38:09,934 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=91018.66666666667, ans=0.0
+2024-08-26 17:38:11,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-26 17:38:13,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-26 17:38:25,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=91125.33333333333, ans=0.0
+2024-08-26 17:38:25,985 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=11.18 vs. limit=15.0
+2024-08-26 17:38:26,419 INFO [train.py:1114] (0/4) Epoch 7, batch 2150, loss[loss=0.2261, simple_loss=0.2851, pruned_loss=0.06036, ctc_loss=0.1156, over 19586.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.2955, pruned_loss=0.06587, ctc_loss=0.123, over 3870090.82 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:38:37,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-26 17:38:38,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=91178.66666666667, ans=0.1
+2024-08-26 17:38:51,347 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=91232.0, ans=0.0
+2024-08-26 17:38:52,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91285.33333333333, ans=0.1
+2024-08-26 17:39:04,135 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.485e+02 1.702e+02 1.931e+02 2.999e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-26 17:39:10,353 INFO [train.py:1114] (0/4) Epoch 7, batch 2200, loss[loss=0.2246, simple_loss=0.2904, pruned_loss=0.05788, ctc_loss=0.1078, over 19566.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.2955, pruned_loss=0.06575, ctc_loss=0.1229, over 3867872.96 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:39:14,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=91392.0, ans=0.125
+2024-08-26 17:39:18,921 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.89 vs. limit=22.5
+2024-08-26 17:39:23,866 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=91445.33333333333, ans=0.025
+2024-08-26 17:39:25,644 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:39:33,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=91498.66666666667, ans=0.0
+2024-08-26 17:39:40,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=91552.0, ans=0.125
+2024-08-26 17:39:47,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=91605.33333333333, ans=0.125
+2024-08-26 17:39:54,464 INFO [train.py:1114] (0/4) Epoch 7, batch 2250, loss[loss=0.2515, simple_loss=0.3058, pruned_loss=0.07216, ctc_loss=0.1324, over 19606.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.2956, pruned_loss=0.06587, ctc_loss=0.1233, over 3867908.29 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:39:57,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=91658.66666666667, ans=0.125
+2024-08-26 17:40:21,462 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.79 vs. limit=22.5
+2024-08-26 17:40:23,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=91818.66666666667, ans=0.125
+2024-08-26 17:40:32,444 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.538e+02 1.708e+02 1.997e+02 3.315e+02, threshold=3.416e+02, percent-clipped=0.0
+2024-08-26 17:40:38,568 INFO [train.py:1114] (0/4) Epoch 7, batch 2300, loss[loss=0.2082, simple_loss=0.2687, pruned_loss=0.05355, ctc_loss=0.1014, over 19477.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.2946, pruned_loss=0.06563, ctc_loss=0.1228, over 3861624.74 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:40:45,216 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=9.72 vs. limit=15.0
+2024-08-26 17:40:48,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=91978.66666666667, ans=0.125
+2024-08-26 17:40:57,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=92032.0, ans=0.2
+2024-08-26 17:41:02,236 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.40 vs. limit=22.5
+2024-08-26 17:41:06,365 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:41:08,499 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.59 vs. limit=6.0
+2024-08-26 17:41:11,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=92085.33333333333, ans=0.125
+2024-08-26 17:41:22,879 INFO [train.py:1114] (0/4) Epoch 7, batch 2350, loss[loss=0.258, simple_loss=0.3096, pruned_loss=0.07606, ctc_loss=0.1356, over 19667.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.2949, pruned_loss=0.06601, ctc_loss=0.1233, over 3864539.95 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:41:40,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=92298.66666666667, ans=0.125
+2024-08-26 17:41:42,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=92298.66666666667, ans=0.125
+2024-08-26 17:41:43,971 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-26 17:41:45,673 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=92298.66666666667, ans=0.125
+2024-08-26 17:42:01,691 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.248e+02 1.515e+02 1.664e+02 1.862e+02 3.479e+02, threshold=3.327e+02, percent-clipped=1.0
+2024-08-26 17:42:06,884 INFO [train.py:1114] (0/4) Epoch 7, batch 2400, loss[loss=0.2392, simple_loss=0.3025, pruned_loss=0.06449, ctc_loss=0.1174, over 19393.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2971, pruned_loss=0.06677, ctc_loss=0.1245, over 3858854.77 frames. ], batch size: 67, lr: 2.01e-02, grad_scale: 32.0
+2024-08-26 17:42:09,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=92458.66666666667, ans=0.125
+2024-08-26 17:42:09,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=92458.66666666667, ans=0.125
+2024-08-26 17:42:13,549 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=15.0
+2024-08-26 17:42:16,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=92512.0, ans=22.5
+2024-08-26 17:42:19,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=92512.0, ans=0.0
+2024-08-26 17:42:44,444 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=92672.0, ans=0.125
+2024-08-26 17:42:48,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=92672.0, ans=0.95
+2024-08-26 17:42:50,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=92672.0, ans=0.0
+2024-08-26 17:42:56,044 INFO [train.py:1114] (0/4) Epoch 7, batch 2450, loss[loss=0.3343, simple_loss=0.348, pruned_loss=0.1158, ctc_loss=0.2228, over 14230.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3026, pruned_loss=0.07097, ctc_loss=0.1327, over 3733452.56 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 16.0
+2024-08-26 17:43:10,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=92778.66666666667, ans=0.0
+2024-08-26 17:43:19,044 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=92832.0, ans=0.125
+2024-08-26 17:43:24,372 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=92885.33333333333, ans=0.0
+2024-08-26 17:43:31,083 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-7.pt
+2024-08-26 17:44:23,145 INFO [train.py:1114] (0/4) Epoch 8, batch 0, loss[loss=0.2292, simple_loss=0.2851, pruned_loss=0.06487, ctc_loss=0.1089, over 19412.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.2851, pruned_loss=0.06487, ctc_loss=0.1089, over 19412.00 frames. ], batch size: 48, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:44:23,146 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 17:44:49,256 INFO [train.py:1146] (0/4) Epoch 8, validation: loss=0.2003, simple_loss=0.2903, pruned_loss=0.04062, ctc_loss=0.07268, over 944034.00 frames.
+2024-08-26 17:44:49,257 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 17:44:55,033 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.675e+02 1.918e+02 2.084e+02 4.365e+02, threshold=3.836e+02, percent-clipped=1.0
+2024-08-26 17:45:17,325 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=7.06 vs. limit=12.0
+2024-08-26 17:45:37,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=93093.33333333333, ans=0.2
+2024-08-26 17:45:38,086 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.52 vs. limit=15.0
+2024-08-26 17:45:51,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=93146.66666666667, ans=0.0
+2024-08-26 17:45:54,276 INFO [train.py:1114] (0/4) Epoch 8, batch 50, loss[loss=0.2166, simple_loss=0.2756, pruned_loss=0.05781, ctc_loss=0.105, over 19734.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.301, pruned_loss=0.06807, ctc_loss=0.128, over 844483.73 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:45:58,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=93200.0, ans=0.125
+2024-08-26 17:46:04,867 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.81 vs. limit=22.5
+2024-08-26 17:46:16,039 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.32 vs. limit=22.5
+2024-08-26 17:46:23,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=93360.0, ans=0.2
+2024-08-26 17:46:29,419 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.89 vs. limit=12.0
+2024-08-26 17:46:42,910 INFO [train.py:1114] (0/4) Epoch 8, batch 100, loss[loss=0.2104, simple_loss=0.2772, pruned_loss=0.05147, ctc_loss=0.1017, over 19716.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2993, pruned_loss=0.06722, ctc_loss=0.1264, over 1499106.47 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:46:48,506 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.574e+02 1.749e+02 2.053e+02 3.512e+02, threshold=3.498e+02, percent-clipped=0.0
+2024-08-26 17:47:03,880 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.00 vs. limit=12.0
+2024-08-26 17:47:04,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=93573.33333333333, ans=0.0
+2024-08-26 17:47:26,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=93680.0, ans=0.125
+2024-08-26 17:47:32,162 INFO [train.py:1114] (0/4) Epoch 8, batch 150, loss[loss=0.2096, simple_loss=0.2654, pruned_loss=0.0567, ctc_loss=0.101, over 19697.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2963, pruned_loss=0.06603, ctc_loss=0.1237, over 2026666.89 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:47:36,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=93733.33333333333, ans=0.125
+2024-08-26 17:47:41,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=93786.66666666667, ans=0.125
+2024-08-26 17:47:50,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=93786.66666666667, ans=0.125
+2024-08-26 17:47:50,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=93840.0, ans=0.0
+2024-08-26 17:48:11,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=93946.66666666667, ans=0.125
+2024-08-26 17:48:20,174 INFO [train.py:1114] (0/4) Epoch 8, batch 200, loss[loss=0.2606, simple_loss=0.3157, pruned_loss=0.07429, ctc_loss=0.1424, over 18453.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.2939, pruned_loss=0.06484, ctc_loss=0.1218, over 2434139.91 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:48:24,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=94000.0, ans=0.0
+2024-08-26 17:48:25,116 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.22 vs. limit=6.0
+2024-08-26 17:48:25,564 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.434e+02 1.574e+02 1.787e+02 2.973e+02, threshold=3.148e+02, percent-clipped=0.0
+2024-08-26 17:48:31,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff2.min_abs, batch_count=94053.33333333333, ans=0.1
+2024-08-26 17:48:32,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=94053.33333333333, ans=0.0
+2024-08-26 17:48:38,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=94053.33333333333, ans=0.1
+2024-08-26 17:48:43,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=94106.66666666667, ans=0.125
+2024-08-26 17:48:46,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=94106.66666666667, ans=0.125
+2024-08-26 17:48:49,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=94106.66666666667, ans=0.1
+2024-08-26 17:48:49,685 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=94106.66666666667, ans=0.125
+2024-08-26 17:48:52,851 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.44 vs. limit=6.0
+2024-08-26 17:48:53,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=94160.0, ans=0.025
+2024-08-26 17:49:12,242 INFO [train.py:1114] (0/4) Epoch 8, batch 250, loss[loss=0.2511, simple_loss=0.3075, pruned_loss=0.07046, ctc_loss=0.1346, over 19396.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.2935, pruned_loss=0.06448, ctc_loss=0.1211, over 2754429.15 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:49:26,520 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=94320.0, ans=0.125
+2024-08-26 17:49:44,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=94426.66666666667, ans=0.2
+2024-08-26 17:49:47,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.08 vs. limit=15.0
+2024-08-26 17:49:48,743 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=94426.66666666667, ans=0.0
+2024-08-26 17:49:54,388 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=94480.0, ans=0.1
+2024-08-26 17:49:59,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=94480.0, ans=0.05
+2024-08-26 17:50:03,379 INFO [train.py:1114] (0/4) Epoch 8, batch 300, loss[loss=0.2624, simple_loss=0.3169, pruned_loss=0.0752, ctc_loss=0.1439, over 19547.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.2929, pruned_loss=0.06406, ctc_loss=0.1202, over 2999392.35 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:50:09,200 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.482e+02 1.652e+02 1.879e+02 4.693e+02, threshold=3.305e+02, percent-clipped=1.0
+2024-08-26 17:50:19,091 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=94586.66666666667, ans=0.125
+2024-08-26 17:50:23,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=94640.0, ans=0.07
+2024-08-26 17:50:39,278 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:50:50,298 INFO [train.py:1114] (0/4) Epoch 8, batch 350, loss[loss=0.2147, simple_loss=0.2673, pruned_loss=0.06011, ctc_loss=0.1046, over 19784.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.2933, pruned_loss=0.06406, ctc_loss=0.1199, over 3189369.02 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:51:02,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-26 17:51:15,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=94906.66666666667, ans=0.125
+2024-08-26 17:51:31,770 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.34 vs. limit=15.0
+2024-08-26 17:51:34,259 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=95013.33333333333, ans=0.125
+2024-08-26 17:51:59,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=95013.33333333333, ans=0.025
+2024-08-26 17:52:04,761 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=95013.33333333333, ans=0.025
+2024-08-26 17:52:19,173 INFO [train.py:1114] (0/4) Epoch 8, batch 400, loss[loss=0.2201, simple_loss=0.2921, pruned_loss=0.05373, ctc_loss=0.1014, over 19501.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.2924, pruned_loss=0.06356, ctc_loss=0.1187, over 3341730.71 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:52:23,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=95066.66666666667, ans=0.125
+2024-08-26 17:52:24,638 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 1.574e+02 1.829e+02 2.059e+02 4.627e+02, threshold=3.659e+02, percent-clipped=2.0
+2024-08-26 17:52:25,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=95066.66666666667, ans=0.125
+2024-08-26 17:53:02,322 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.97 vs. limit=22.5
+2024-08-26 17:53:04,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=95280.0, ans=0.0
+2024-08-26 17:53:08,502 INFO [train.py:1114] (0/4) Epoch 8, batch 450, loss[loss=0.2217, simple_loss=0.2896, pruned_loss=0.05655, ctc_loss=0.1018, over 19589.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2929, pruned_loss=0.06383, ctc_loss=0.1195, over 3449127.76 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:53:11,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=95333.33333333333, ans=0.125
+2024-08-26 17:53:11,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=95333.33333333333, ans=0.0
+2024-08-26 17:53:14,689 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.06 vs. limit=22.5
+2024-08-26 17:53:21,596 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=95386.66666666667, ans=0.2
+2024-08-26 17:53:27,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95386.66666666667, ans=0.1
+2024-08-26 17:53:42,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=95493.33333333333, ans=0.025
+2024-08-26 17:53:50,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=95546.66666666667, ans=0.125
+2024-08-26 17:53:56,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=95546.66666666667, ans=0.125
+2024-08-26 17:53:58,070 INFO [train.py:1114] (0/4) Epoch 8, batch 500, loss[loss=0.2472, simple_loss=0.3119, pruned_loss=0.06644, ctc_loss=0.1243, over 19623.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2918, pruned_loss=0.06312, ctc_loss=0.1183, over 3545194.71 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:54:03,655 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.468e+02 1.609e+02 1.778e+02 4.606e+02, threshold=3.218e+02, percent-clipped=1.0
+2024-08-26 17:54:07,813 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=95653.33333333333, ans=0.0
+2024-08-26 17:54:42,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=95760.0, ans=0.125
+2024-08-26 17:55:44,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=95760.0, ans=0.5
+2024-08-26 17:55:49,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.63 vs. limit=15.0
+2024-08-26 17:56:20,608 INFO [train.py:1114] (0/4) Epoch 8, batch 550, loss[loss=0.2465, simple_loss=0.3037, pruned_loss=0.06903, ctc_loss=0.1282, over 19305.00 frames. ], tot_loss[loss=0.233, simple_loss=0.2917, pruned_loss=0.0634, ctc_loss=0.1186, over 3607776.32 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:56:25,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=95866.66666666667, ans=0.125
+2024-08-26 17:57:14,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=95866.66666666667, ans=0.025
+2024-08-26 17:57:22,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=95920.0, ans=0.07
+2024-08-26 17:57:32,759 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.06 vs. limit=15.0
+2024-08-26 17:57:58,504 INFO [train.py:1114] (0/4) Epoch 8, batch 600, loss[loss=0.235, simple_loss=0.3025, pruned_loss=0.06099, ctc_loss=0.1135, over 19373.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.2926, pruned_loss=0.06378, ctc_loss=0.1193, over 3665938.20 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:58:05,961 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.508e+02 1.654e+02 1.896e+02 3.415e+02, threshold=3.309e+02, percent-clipped=1.0
+2024-08-26 17:58:42,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=96346.66666666667, ans=0.1
+2024-08-26 17:58:49,408 INFO [train.py:1114] (0/4) Epoch 8, batch 650, loss[loss=0.2238, simple_loss=0.2851, pruned_loss=0.05913, ctc_loss=0.1106, over 19765.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2916, pruned_loss=0.06327, ctc_loss=0.1185, over 3715955.42 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 17:58:51,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=96400.0, ans=0.1
+2024-08-26 17:59:03,579 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=96453.33333333333, ans=0.0
+2024-08-26 17:59:04,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=96453.33333333333, ans=0.125
+2024-08-26 17:59:33,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=96613.33333333333, ans=15.0
+2024-08-26 17:59:36,106 INFO [train.py:1114] (0/4) Epoch 8, batch 700, loss[loss=0.2169, simple_loss=0.281, pruned_loss=0.05562, ctc_loss=0.1041, over 19717.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2918, pruned_loss=0.06324, ctc_loss=0.1184, over 3747733.37 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 17:59:41,810 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.481e+02 1.644e+02 1.817e+02 3.294e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-26 17:59:46,689 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=96720.0, ans=0.125
+2024-08-26 18:00:20,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=96880.0, ans=0.0
+2024-08-26 18:00:27,685 INFO [train.py:1114] (0/4) Epoch 8, batch 750, loss[loss=0.2217, simple_loss=0.2924, pruned_loss=0.05496, ctc_loss=0.1029, over 19505.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.2917, pruned_loss=0.06306, ctc_loss=0.1181, over 3773504.01 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 18:00:38,121 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=96986.66666666667, ans=0.125
+2024-08-26 18:00:44,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=96986.66666666667, ans=0.025
+2024-08-26 18:00:57,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=97093.33333333333, ans=0.125
+2024-08-26 18:01:10,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=97146.66666666667, ans=0.125
+2024-08-26 18:01:19,066 INFO [train.py:1114] (0/4) Epoch 8, batch 800, loss[loss=0.2112, simple_loss=0.2648, pruned_loss=0.0577, ctc_loss=0.1053, over 19402.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2912, pruned_loss=0.06269, ctc_loss=0.1173, over 3795054.21 frames. ], batch size: 48, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 18:01:24,567 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.172e+02 1.524e+02 1.729e+02 2.039e+02 3.596e+02, threshold=3.457e+02, percent-clipped=1.0
+2024-08-26 18:01:34,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=97253.33333333333, ans=0.07
+2024-08-26 18:01:36,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=97306.66666666667, ans=0.125
+2024-08-26 18:01:37,319 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.66 vs. limit=6.0
+2024-08-26 18:01:56,872 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=97413.33333333333, ans=0.125
+2024-08-26 18:01:58,848 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:01:59,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=97413.33333333333, ans=0.0
+2024-08-26 18:02:00,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=97413.33333333333, ans=0.0
+2024-08-26 18:02:02,692 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:02:06,324 INFO [train.py:1114] (0/4) Epoch 8, batch 850, loss[loss=0.2471, simple_loss=0.3088, pruned_loss=0.06769, ctc_loss=0.1252, over 19658.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2909, pruned_loss=0.0627, ctc_loss=0.1173, over 3814413.34 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-26 18:02:08,626 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.24 vs. limit=15.0
+2024-08-26 18:02:22,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=97520.0, ans=0.125
+2024-08-26 18:02:24,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=97573.33333333333, ans=0.5
+2024-08-26 18:02:27,664 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=97573.33333333333, ans=0.0
+2024-08-26 18:02:34,603 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.76 vs. limit=15.0
+2024-08-26 18:02:47,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=97680.0, ans=0.125
+2024-08-26 18:02:54,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=97680.0, ans=0.125
+2024-08-26 18:02:58,340 INFO [train.py:1114] (0/4) Epoch 8, batch 900, loss[loss=0.1981, simple_loss=0.2645, pruned_loss=0.04831, ctc_loss=0.08783, over 19397.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2917, pruned_loss=0.06328, ctc_loss=0.1184, over 3817837.83 frames. ], batch size: 48, lr: 1.85e-02, grad_scale: 32.0
+2024-08-26 18:03:03,997 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.578e+02 1.704e+02 2.106e+02 3.434e+02, threshold=3.409e+02, percent-clipped=0.0
+2024-08-26 18:03:09,084 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=97786.66666666667, ans=10.0
+2024-08-26 18:03:13,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=97786.66666666667, ans=0.125
+2024-08-26 18:03:16,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=97840.0, ans=0.125
+2024-08-26 18:03:19,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=97840.0, ans=0.125
+2024-08-26 18:03:45,502 INFO [train.py:1114] (0/4) Epoch 8, batch 950, loss[loss=0.2247, simple_loss=0.2864, pruned_loss=0.05996, ctc_loss=0.1079, over 19485.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.292, pruned_loss=0.06355, ctc_loss=0.119, over 3818995.90 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:03:50,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=98000.0, ans=0.125
+2024-08-26 18:04:07,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=98053.33333333333, ans=0.0
+2024-08-26 18:04:08,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=98106.66666666667, ans=0.125
+2024-08-26 18:04:17,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=98106.66666666667, ans=0.0
+2024-08-26 18:04:37,651 INFO [train.py:1114] (0/4) Epoch 8, batch 1000, loss[loss=0.2051, simple_loss=0.2694, pruned_loss=0.05104, ctc_loss=0.09669, over 19854.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.2927, pruned_loss=0.06382, ctc_loss=0.1196, over 3815625.27 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:04:44,380 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.497e+02 1.652e+02 1.874e+02 4.992e+02, threshold=3.305e+02, percent-clipped=2.0
+2024-08-26 18:04:51,619 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.07 vs. limit=6.0
+2024-08-26 18:04:54,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=98320.0, ans=0.025
+2024-08-26 18:05:12,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=98426.66666666667, ans=0.125
+2024-08-26 18:05:14,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=98480.0, ans=0.125
+2024-08-26 18:05:22,319 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.51 vs. limit=22.5
+2024-08-26 18:05:24,681 INFO [train.py:1114] (0/4) Epoch 8, batch 1050, loss[loss=0.221, simple_loss=0.2895, pruned_loss=0.05493, ctc_loss=0.1065, over 19837.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2915, pruned_loss=0.06328, ctc_loss=0.1185, over 3820525.10 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:05:28,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=98533.33333333333, ans=0.125
+2024-08-26 18:05:54,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=98640.0, ans=0.0
+2024-08-26 18:06:03,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=98693.33333333333, ans=0.125
+2024-08-26 18:06:09,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=98746.66666666667, ans=0.125
+2024-08-26 18:06:18,164 INFO [train.py:1114] (0/4) Epoch 8, batch 1100, loss[loss=0.2454, simple_loss=0.3018, pruned_loss=0.06859, ctc_loss=0.1296, over 19597.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2912, pruned_loss=0.06304, ctc_loss=0.118, over 3829314.28 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 16.0
+2024-08-26 18:06:24,659 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.469e+02 1.560e+02 1.744e+02 3.443e+02, threshold=3.121e+02, percent-clipped=2.0
+2024-08-26 18:06:27,661 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:07:10,093 INFO [train.py:1114] (0/4) Epoch 8, batch 1150, loss[loss=0.2146, simple_loss=0.2796, pruned_loss=0.05449, ctc_loss=0.1014, over 19589.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2912, pruned_loss=0.06314, ctc_loss=0.1183, over 3827681.85 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 16.0
+2024-08-26 18:07:14,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=99066.66666666667, ans=0.0
+2024-08-26 18:07:23,808 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.14 vs. limit=15.0
+2024-08-26 18:07:45,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=99226.66666666667, ans=0.0
+2024-08-26 18:07:57,677 INFO [train.py:1114] (0/4) Epoch 8, batch 1200, loss[loss=0.2346, simple_loss=0.2996, pruned_loss=0.06167, ctc_loss=0.1154, over 19848.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.2924, pruned_loss=0.06362, ctc_loss=0.1193, over 3824393.63 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-26 18:07:57,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=99333.33333333333, ans=0.0
+2024-08-26 18:07:58,309 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.65 vs. limit=15.0
+2024-08-26 18:08:04,254 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.256e+02 1.491e+02 1.608e+02 2.003e+02 2.840e+02, threshold=3.216e+02, percent-clipped=0.0
+2024-08-26 18:08:10,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=99386.66666666667, ans=0.125
+2024-08-26 18:08:23,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=99440.0, ans=0.125
+2024-08-26 18:08:24,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=99493.33333333333, ans=0.025
+2024-08-26 18:08:34,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=99493.33333333333, ans=0.1
+2024-08-26 18:08:49,185 INFO [train.py:1114] (0/4) Epoch 8, batch 1250, loss[loss=0.2788, simple_loss=0.3285, pruned_loss=0.08354, ctc_loss=0.1552, over 19533.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2931, pruned_loss=0.06379, ctc_loss=0.1193, over 3842788.18 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-26 18:09:35,673 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.88 vs. limit=15.0
+2024-08-26 18:09:40,616 INFO [train.py:1114] (0/4) Epoch 8, batch 1300, loss[loss=0.2744, simple_loss=0.3232, pruned_loss=0.0826, ctc_loss=0.1511, over 18859.00 frames. ], tot_loss[loss=0.233, simple_loss=0.2923, pruned_loss=0.06321, ctc_loss=0.1182, over 3845901.22 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:09:47,138 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.481e+02 1.661e+02 1.866e+02 3.142e+02, threshold=3.323e+02, percent-clipped=0.0
+2024-08-26 18:09:59,820 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.58 vs. limit=22.5
+2024-08-26 18:10:13,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=100026.66666666667, ans=0.015
+2024-08-26 18:10:14,466 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=100026.66666666667, ans=0.0
+2024-08-26 18:10:15,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=100026.66666666667, ans=0.125
+2024-08-26 18:10:27,285 INFO [train.py:1114] (0/4) Epoch 8, batch 1350, loss[loss=0.2237, simple_loss=0.2856, pruned_loss=0.0588, ctc_loss=0.1102, over 19769.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.2913, pruned_loss=0.06234, ctc_loss=0.1166, over 3857801.95 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:10:29,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100133.33333333333, ans=0.1
+2024-08-26 18:10:30,595 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.03 vs. limit=22.5
+2024-08-26 18:10:31,167 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:10:39,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=100186.66666666667, ans=0.125
+2024-08-26 18:10:48,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=100240.0, ans=0.0
+2024-08-26 18:10:53,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100240.0, ans=0.1
+2024-08-26 18:10:57,957 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.78 vs. limit=22.5
+2024-08-26 18:11:11,909 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=100346.66666666667, ans=0.95
+2024-08-26 18:11:14,663 INFO [train.py:1114] (0/4) Epoch 8, batch 1400, loss[loss=0.2128, simple_loss=0.2652, pruned_loss=0.05859, ctc_loss=0.1081, over 19669.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2909, pruned_loss=0.06233, ctc_loss=0.1165, over 3864552.82 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:11:15,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=100400.0, ans=10.0
+2024-08-26 18:11:23,745 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.577e+02 1.859e+02 2.331e+02 3.237e+02, threshold=3.718e+02, percent-clipped=0.0
+2024-08-26 18:11:33,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=100453.33333333333, ans=0.2
+2024-08-26 18:11:39,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100506.66666666667, ans=0.1
+2024-08-26 18:11:39,578 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=100506.66666666667, ans=0.2
+2024-08-26 18:11:47,198 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=100506.66666666667, ans=0.125
+2024-08-26 18:11:48,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100506.66666666667, ans=0.125
+2024-08-26 18:11:55,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=100560.0, ans=0.0
+2024-08-26 18:12:00,744 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=100613.33333333333, ans=0.95
+2024-08-26 18:12:01,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=100613.33333333333, ans=0.125
+2024-08-26 18:12:09,366 INFO [train.py:1114] (0/4) Epoch 8, batch 1450, loss[loss=0.2491, simple_loss=0.3086, pruned_loss=0.06987, ctc_loss=0.1244, over 19703.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.2915, pruned_loss=0.06265, ctc_loss=0.117, over 3862989.12 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:12:20,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100720.0, ans=0.125
+2024-08-26 18:12:42,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=100826.66666666667, ans=0.125
+2024-08-26 18:12:53,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=100880.0, ans=0.04949747468305833
+2024-08-26 18:12:54,432 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.73 vs. limit=15.0
+2024-08-26 18:13:00,705 INFO [train.py:1114] (0/4) Epoch 8, batch 1500, loss[loss=0.238, simple_loss=0.2999, pruned_loss=0.06451, ctc_loss=0.1175, over 19578.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.292, pruned_loss=0.06278, ctc_loss=0.1173, over 3862350.18 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:13:05,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=100933.33333333333, ans=0.125
+2024-08-26 18:13:07,551 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.450e+02 1.594e+02 1.806e+02 5.150e+02, threshold=3.189e+02, percent-clipped=1.0
+2024-08-26 18:13:34,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=101093.33333333333, ans=0.125
+2024-08-26 18:13:37,414 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.95 vs. limit=22.5
+2024-08-26 18:13:41,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=101146.66666666667, ans=0.125
+2024-08-26 18:13:48,291 INFO [train.py:1114] (0/4) Epoch 8, batch 1550, loss[loss=0.2496, simple_loss=0.3038, pruned_loss=0.07135, ctc_loss=0.1318, over 19607.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.2919, pruned_loss=0.06301, ctc_loss=0.118, over 3846891.21 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:13:59,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=101253.33333333333, ans=0.025
+2024-08-26 18:14:04,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101253.33333333333, ans=0.1
+2024-08-26 18:14:14,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101306.66666666667, ans=0.1
+2024-08-26 18:14:22,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101360.0, ans=0.125
+2024-08-26 18:14:30,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=101413.33333333333, ans=0.025
+2024-08-26 18:14:34,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=101413.33333333333, ans=0.125
+2024-08-26 18:14:35,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101413.33333333333, ans=0.125
+2024-08-26 18:14:40,875 INFO [train.py:1114] (0/4) Epoch 8, batch 1600, loss[loss=0.2287, simple_loss=0.2942, pruned_loss=0.05954, ctc_loss=0.1102, over 19829.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2919, pruned_loss=0.06313, ctc_loss=0.1182, over 3836792.40 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:14:47,311 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.562e+02 1.716e+02 2.059e+02 3.797e+02, threshold=3.431e+02, percent-clipped=2.0
+2024-08-26 18:14:55,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=101520.0, ans=0.2
+2024-08-26 18:15:20,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=101626.66666666667, ans=0.125
+2024-08-26 18:15:26,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=101680.0, ans=0.025
+2024-08-26 18:15:26,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=101680.0, ans=0.125
+2024-08-26 18:15:32,093 INFO [train.py:1114] (0/4) Epoch 8, batch 1650, loss[loss=0.2362, simple_loss=0.2991, pruned_loss=0.06271, ctc_loss=0.1196, over 19644.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2918, pruned_loss=0.06322, ctc_loss=0.1183, over 3834185.61 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:15:42,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=101786.66666666667, ans=0.125
+2024-08-26 18:15:50,366 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.08 vs. limit=15.0
+2024-08-26 18:15:57,884 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=1.95 vs. limit=15.0
+2024-08-26 18:15:58,959 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.51 vs. limit=15.0
+2024-08-26 18:16:01,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn1.whiten.whitening_limit, batch_count=101893.33333333333, ans=22.5
+2024-08-26 18:16:05,207 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:16:13,693 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.84 vs. limit=22.5
+2024-08-26 18:16:14,457 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=101946.66666666667, ans=0.0
+2024-08-26 18:16:16,951 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=101946.66666666667, ans=0.125
+2024-08-26 18:16:18,712 INFO [train.py:1114] (0/4) Epoch 8, batch 1700, loss[loss=0.1996, simple_loss=0.2579, pruned_loss=0.05172, ctc_loss=0.09463, over 19678.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2911, pruned_loss=0.06265, ctc_loss=0.1172, over 3848314.27 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:16:25,307 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.495e+02 1.737e+02 2.089e+02 3.401e+02, threshold=3.475e+02, percent-clipped=0.0
+2024-08-26 18:17:03,791 INFO [train.py:1114] (0/4) Epoch 8, batch 1750, loss[loss=0.2016, simple_loss=0.2611, pruned_loss=0.05278, ctc_loss=0.09118, over 19684.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.2903, pruned_loss=0.06231, ctc_loss=0.1165, over 3851911.08 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:17:05,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=102266.66666666667, ans=0.125
+2024-08-26 18:17:10,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=102266.66666666667, ans=0.95
+2024-08-26 18:17:13,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102320.0, ans=0.125
+2024-08-26 18:17:19,666 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.62 vs. limit=15.0
+2024-08-26 18:17:27,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102373.33333333333, ans=0.1
+2024-08-26 18:17:35,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=102426.66666666667, ans=0.2
+2024-08-26 18:17:48,529 INFO [train.py:1114] (0/4) Epoch 8, batch 1800, loss[loss=0.2378, simple_loss=0.3049, pruned_loss=0.06223, ctc_loss=0.1153, over 19612.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.2905, pruned_loss=0.06222, ctc_loss=0.1161, over 3854379.38 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 32.0
+2024-08-26 18:17:56,848 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.517e+02 1.665e+02 1.949e+02 3.105e+02, threshold=3.330e+02, percent-clipped=0.0
+2024-08-26 18:18:00,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102586.66666666667, ans=0.125
+2024-08-26 18:18:15,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=102640.0, ans=0.1
+2024-08-26 18:18:18,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=102693.33333333333, ans=0.125
+2024-08-26 18:18:36,732 INFO [train.py:1114] (0/4) Epoch 8, batch 1850, loss[loss=0.2607, simple_loss=0.3131, pruned_loss=0.07619, ctc_loss=0.1397, over 19594.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2907, pruned_loss=0.06243, ctc_loss=0.1167, over 3857135.20 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 32.0
+2024-08-26 18:19:01,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=102906.66666666667, ans=0.125
+2024-08-26 18:19:18,825 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=103013.33333333333, ans=0.5
+2024-08-26 18:19:21,226 INFO [train.py:1114] (0/4) Epoch 8, batch 1900, loss[loss=0.2423, simple_loss=0.3125, pruned_loss=0.06328, ctc_loss=0.1137, over 19649.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.291, pruned_loss=0.0624, ctc_loss=0.1166, over 3861782.54 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:19:27,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=103066.66666666667, ans=0.025
+2024-08-26 18:19:28,165 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.533e+02 1.714e+02 2.014e+02 3.062e+02, threshold=3.427e+02, percent-clipped=0.0
+2024-08-26 18:19:29,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=103120.0, ans=0.025
+2024-08-26 18:19:32,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=103120.0, ans=0.125
+2024-08-26 18:19:39,866 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.95 vs. limit=15.0
+2024-08-26 18:19:49,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=103226.66666666667, ans=0.0
+2024-08-26 18:19:54,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=103226.66666666667, ans=0.2
+2024-08-26 18:20:04,889 INFO [train.py:1114] (0/4) Epoch 8, batch 1950, loss[loss=0.2213, simple_loss=0.2831, pruned_loss=0.05867, ctc_loss=0.1054, over 19589.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.2921, pruned_loss=0.06249, ctc_loss=0.1166, over 3870478.34 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:20:12,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=103333.33333333333, ans=0.2
+2024-08-26 18:20:13,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=103386.66666666667, ans=0.125
+2024-08-26 18:20:36,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=103493.33333333333, ans=0.2
+2024-08-26 18:20:37,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=103493.33333333333, ans=0.125
+2024-08-26 18:20:44,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=103546.66666666667, ans=0.0
+2024-08-26 18:20:51,122 INFO [train.py:1114] (0/4) Epoch 8, batch 2000, loss[loss=0.2035, simple_loss=0.2584, pruned_loss=0.05414, ctc_loss=0.101, over 19643.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.2931, pruned_loss=0.0633, ctc_loss=0.118, over 3855829.59 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:21:00,304 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.619e+02 1.835e+02 2.136e+02 5.632e+02, threshold=3.670e+02, percent-clipped=2.0
+2024-08-26 18:21:09,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=103706.66666666667, ans=0.0
+2024-08-26 18:21:21,587 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.30 vs. limit=15.0
+2024-08-26 18:21:26,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=103813.33333333333, ans=0.125
+2024-08-26 18:21:36,072 INFO [train.py:1114] (0/4) Epoch 8, batch 2050, loss[loss=0.2165, simple_loss=0.2719, pruned_loss=0.05924, ctc_loss=0.1065, over 19688.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2922, pruned_loss=0.06313, ctc_loss=0.1175, over 3851400.97 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:21:37,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=103866.66666666667, ans=0.2
+2024-08-26 18:21:42,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=103866.66666666667, ans=0.025
+2024-08-26 18:21:48,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=103920.0, ans=0.0
+2024-08-26 18:21:52,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=103920.0, ans=0.125
+2024-08-26 18:21:57,971 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=103973.33333333333, ans=0.125
+2024-08-26 18:22:00,034 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.80 vs. limit=15.0
+2024-08-26 18:22:07,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104026.66666666667, ans=0.1
+2024-08-26 18:22:10,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104080.0, ans=0.125
+2024-08-26 18:22:12,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=104080.0, ans=0.125
+2024-08-26 18:22:19,592 INFO [train.py:1114] (0/4) Epoch 8, batch 2100, loss[loss=0.2367, simple_loss=0.2933, pruned_loss=0.0651, ctc_loss=0.1249, over 19772.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.2907, pruned_loss=0.06211, ctc_loss=0.116, over 3858466.81 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:22:27,467 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.502e+02 1.673e+02 2.007e+02 2.886e+02, threshold=3.346e+02, percent-clipped=0.0
+2024-08-26 18:22:37,150 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=104240.0, ans=0.125
+2024-08-26 18:22:39,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=104240.0, ans=0.125
+2024-08-26 18:22:47,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=104293.33333333333, ans=0.2
+2024-08-26 18:22:49,573 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.05 vs. limit=22.5
+2024-08-26 18:22:52,926 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.55 vs. limit=15.0
+2024-08-26 18:23:02,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=104400.0, ans=0.2
+2024-08-26 18:23:03,063 INFO [train.py:1114] (0/4) Epoch 8, batch 2150, loss[loss=0.2215, simple_loss=0.2876, pruned_loss=0.05669, ctc_loss=0.1048, over 19585.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.29, pruned_loss=0.06171, ctc_loss=0.1153, over 3869125.10 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:23:46,687 INFO [train.py:1114] (0/4) Epoch 8, batch 2200, loss[loss=0.2468, simple_loss=0.3026, pruned_loss=0.07042, ctc_loss=0.1253, over 19594.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.2902, pruned_loss=0.06173, ctc_loss=0.1154, over 3867395.04 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:23:54,540 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.596e+02 1.839e+02 2.214e+02 3.376e+02, threshold=3.678e+02, percent-clipped=1.0
+2024-08-26 18:24:00,954 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=104720.0, ans=0.09899494936611666
+2024-08-26 18:24:16,131 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.43 vs. limit=10.0
+2024-08-26 18:24:21,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=104880.0, ans=0.125
+2024-08-26 18:24:25,883 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.57 vs. limit=15.0
+2024-08-26 18:24:30,564 INFO [train.py:1114] (0/4) Epoch 8, batch 2250, loss[loss=0.2304, simple_loss=0.2986, pruned_loss=0.05865, ctc_loss=0.1123, over 19628.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2903, pruned_loss=0.06179, ctc_loss=0.1156, over 3867405.77 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:24:42,293 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.50 vs. limit=6.0
+2024-08-26 18:24:48,781 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=105040.0, ans=0.07
+2024-08-26 18:24:55,208 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.13 vs. limit=15.0
+2024-08-26 18:24:57,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=105093.33333333333, ans=0.125
+2024-08-26 18:25:02,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=105093.33333333333, ans=0.0
+2024-08-26 18:25:03,955 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=105093.33333333333, ans=0.0
+2024-08-26 18:25:08,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=105146.66666666667, ans=0.125
+2024-08-26 18:25:14,129 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.43 vs. limit=6.0
+2024-08-26 18:25:16,092 INFO [train.py:1114] (0/4) Epoch 8, batch 2300, loss[loss=0.1967, simple_loss=0.2624, pruned_loss=0.04811, ctc_loss=0.08696, over 19513.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2892, pruned_loss=0.06186, ctc_loss=0.1156, over 3860937.02 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:25:17,934 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=105200.0, ans=0.2
+2024-08-26 18:25:23,766 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.553e+02 1.767e+02 2.002e+02 4.280e+02, threshold=3.534e+02, percent-clipped=3.0
+2024-08-26 18:25:24,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=105253.33333333333, ans=0.125
+2024-08-26 18:25:27,351 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=105253.33333333333, ans=0.125
+2024-08-26 18:25:35,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105306.66666666667, ans=0.1
+2024-08-26 18:25:37,620 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=105306.66666666667, ans=0.0
+2024-08-26 18:25:52,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=105413.33333333333, ans=0.2
+2024-08-26 18:25:58,624 INFO [train.py:1114] (0/4) Epoch 8, batch 2350, loss[loss=0.2613, simple_loss=0.3197, pruned_loss=0.07499, ctc_loss=0.1324, over 19669.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2891, pruned_loss=0.06181, ctc_loss=0.1154, over 3863603.50 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:25:59,044 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.47 vs. limit=10.0
+2024-08-26 18:26:18,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-26 18:26:24,883 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.11 vs. limit=22.5
+2024-08-26 18:26:33,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=105680.0, ans=0.125
+2024-08-26 18:26:42,919 INFO [train.py:1114] (0/4) Epoch 8, batch 2400, loss[loss=0.2614, simple_loss=0.3146, pruned_loss=0.07596, ctc_loss=0.1409, over 19477.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2916, pruned_loss=0.06294, ctc_loss=0.1173, over 3858382.89 frames. ], batch size: 67, lr: 1.79e-02, grad_scale: 32.0
+2024-08-26 18:26:50,599 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.526e+02 1.733e+02 1.998e+02 3.354e+02, threshold=3.467e+02, percent-clipped=0.0
+2024-08-26 18:27:10,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=105893.33333333333, ans=0.125
+2024-08-26 18:27:18,224 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.76 vs. limit=15.0
+2024-08-26 18:27:27,047 INFO [train.py:1114] (0/4) Epoch 8, batch 2450, loss[loss=0.3059, simple_loss=0.3309, pruned_loss=0.1038, ctc_loss=0.1831, over 13958.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.2966, pruned_loss=0.067, ctc_loss=0.1247, over 3736317.57 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:27:28,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=106000.0, ans=0.125
+2024-08-26 18:27:29,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=106000.0, ans=0.125
+2024-08-26 18:27:32,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=106000.0, ans=0.125
+2024-08-26 18:27:36,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=106053.33333333333, ans=0.125
+2024-08-26 18:27:36,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106053.33333333333, ans=0.1
+2024-08-26 18:27:45,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=106106.66666666667, ans=0.2
+2024-08-26 18:27:51,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=106106.66666666667, ans=0.2
+2024-08-26 18:27:55,594 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.13 vs. limit=10.0
+2024-08-26 18:27:58,700 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=106160.0, ans=0.125
+2024-08-26 18:27:59,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=106160.0, ans=0.5
+2024-08-26 18:28:01,684 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-8.pt
+2024-08-26 18:28:46,566 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:28:47,198 INFO [train.py:1114] (0/4) Epoch 9, batch 0, loss[loss=0.2132, simple_loss=0.2692, pruned_loss=0.05737, ctc_loss=0.1062, over 19818.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2692, pruned_loss=0.05737, ctc_loss=0.1062, over 19818.00 frames. ], batch size: 49, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:28:47,199 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 18:28:56,819 INFO [train.py:1146] (0/4) Epoch 9, validation: loss=0.1927, simple_loss=0.2844, pruned_loss=0.03737, ctc_loss=0.06585, over 944034.00 frames.
+2024-08-26 18:28:56,819 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 18:29:13,951 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=106261.33333333333, ans=0.125
+2024-08-26 18:29:16,435 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 1.688e+02 1.849e+02 2.025e+02 3.204e+02, threshold=3.698e+02, percent-clipped=0.0
+2024-08-26 18:29:43,038 INFO [train.py:1114] (0/4) Epoch 9, batch 50, loss[loss=0.1846, simple_loss=0.2546, pruned_loss=0.0414, ctc_loss=0.07968, over 19734.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.2922, pruned_loss=0.06228, ctc_loss=0.1179, over 844043.89 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:29:51,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106474.66666666667, ans=0.1
+2024-08-26 18:29:52,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=7.89 vs. limit=15.0
+2024-08-26 18:30:12,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=106634.66666666667, ans=0.07
+2024-08-26 18:30:17,650 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-20000.pt
+2024-08-26 18:30:33,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=106688.0, ans=0.2
+2024-08-26 18:30:39,522 INFO [train.py:1114] (0/4) Epoch 9, batch 100, loss[loss=0.2083, simple_loss=0.274, pruned_loss=0.05117, ctc_loss=0.1008, over 19724.00 frames. ], tot_loss[loss=0.233, simple_loss=0.2934, pruned_loss=0.06257, ctc_loss=0.1187, over 1497202.38 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:30:44,284 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106741.33333333333, ans=0.125
+2024-08-26 18:30:57,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=106794.66666666667, ans=0.2
+2024-08-26 18:31:02,331 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.554e+02 1.735e+02 2.126e+02 3.416e+02, threshold=3.470e+02, percent-clipped=0.0
+2024-08-26 18:31:03,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106848.0, ans=0.0
+2024-08-26 18:31:07,375 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.84 vs. limit=15.0
+2024-08-26 18:31:07,601 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=7.25 vs. limit=15.0
+2024-08-26 18:31:09,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=106901.33333333333, ans=0.125
+2024-08-26 18:31:21,224 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=106954.66666666667, ans=0.125
+2024-08-26 18:31:21,497 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.94 vs. limit=22.5
+2024-08-26 18:31:25,034 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.67 vs. limit=15.0
+2024-08-26 18:31:28,284 INFO [train.py:1114] (0/4) Epoch 9, batch 150, loss[loss=0.1952, simple_loss=0.2607, pruned_loss=0.04782, ctc_loss=0.08532, over 19693.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2903, pruned_loss=0.06141, ctc_loss=0.1158, over 2027503.00 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 16.0
+2024-08-26 18:31:35,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=107008.0, ans=22.5
+2024-08-26 18:32:14,112 INFO [train.py:1114] (0/4) Epoch 9, batch 200, loss[loss=0.2469, simple_loss=0.3017, pruned_loss=0.0696, ctc_loss=0.1323, over 18274.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.2887, pruned_loss=0.06064, ctc_loss=0.1145, over 2435063.29 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:32:14,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=107274.66666666667, ans=0.0
+2024-08-26 18:32:16,534 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.35 vs. limit=22.5
+2024-08-26 18:32:22,547 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=107328.0, ans=0.2
+2024-08-26 18:32:36,049 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 1.442e+02 1.571e+02 1.787e+02 2.800e+02, threshold=3.143e+02, percent-clipped=0.0
+2024-08-26 18:32:40,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=107381.33333333333, ans=0.125
+2024-08-26 18:32:45,379 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=107434.66666666667, ans=0.125
+2024-08-26 18:32:54,127 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.02 vs. limit=15.0
+2024-08-26 18:33:01,987 INFO [train.py:1114] (0/4) Epoch 9, batch 250, loss[loss=0.2612, simple_loss=0.3183, pruned_loss=0.07498, ctc_loss=0.1355, over 19360.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.2885, pruned_loss=0.06026, ctc_loss=0.1135, over 2754598.94 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:33:06,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-08-26 18:33:26,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=107594.66666666667, ans=0.125
+2024-08-26 18:33:40,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107701.33333333333, ans=0.1
+2024-08-26 18:33:43,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=107701.33333333333, ans=0.2
+2024-08-26 18:33:53,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=107754.66666666667, ans=0.125
+2024-08-26 18:33:54,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=107754.66666666667, ans=0.2
+2024-08-26 18:34:01,016 INFO [train.py:1114] (0/4) Epoch 9, batch 300, loss[loss=0.2371, simple_loss=0.2972, pruned_loss=0.06411, ctc_loss=0.1217, over 19553.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2875, pruned_loss=0.05977, ctc_loss=0.1121, over 2999865.94 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:34:12,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.37 vs. limit=22.5
+2024-08-26 18:34:12,803 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=107861.33333333333, ans=0.0
+2024-08-26 18:34:24,464 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.264e+02 1.498e+02 1.681e+02 1.999e+02 2.633e+02, threshold=3.363e+02, percent-clipped=0.0
+2024-08-26 18:34:29,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.95 vs. limit=15.0
+2024-08-26 18:34:30,605 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=107914.66666666667, ans=22.5
+2024-08-26 18:34:35,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-26 18:34:36,817 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-26 18:34:36,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-26 18:34:50,531 INFO [train.py:1114] (0/4) Epoch 9, batch 350, loss[loss=0.2072, simple_loss=0.266, pruned_loss=0.05353, ctc_loss=0.1034, over 19747.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2876, pruned_loss=0.05972, ctc_loss=0.1117, over 3189846.95 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:34:58,533 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=11.22 vs. limit=15.0
+2024-08-26 18:35:02,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=108128.0, ans=0.125
+2024-08-26 18:35:15,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=108181.33333333333, ans=0.125
+2024-08-26 18:35:25,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=108234.66666666667, ans=0.0
+2024-08-26 18:35:29,112 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.21 vs. limit=10.0
+2024-08-26 18:35:40,759 INFO [train.py:1114] (0/4) Epoch 9, batch 400, loss[loss=0.2062, simple_loss=0.2881, pruned_loss=0.04398, ctc_loss=0.09103, over 19514.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2871, pruned_loss=0.05933, ctc_loss=0.1111, over 3342190.73 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:35:54,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=108394.66666666667, ans=0.125
+2024-08-26 18:35:55,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.48 vs. limit=15.0
+2024-08-26 18:36:02,029 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 1.489e+02 1.712e+02 1.995e+02 4.778e+02, threshold=3.424e+02, percent-clipped=1.0
+2024-08-26 18:36:02,573 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.04 vs. limit=15.0
+2024-08-26 18:36:05,518 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.98 vs. limit=22.5
+2024-08-26 18:36:23,485 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:36:26,321 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:36:27,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=108554.66666666667, ans=0.0
+2024-08-26 18:36:32,710 INFO [train.py:1114] (0/4) Epoch 9, batch 450, loss[loss=0.1995, simple_loss=0.2791, pruned_loss=0.04258, ctc_loss=0.08658, over 19624.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.287, pruned_loss=0.05912, ctc_loss=0.1106, over 3450818.45 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:36:36,683 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:37:09,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108768.0, ans=0.1
+2024-08-26 18:37:19,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=108821.33333333333, ans=0.025
+2024-08-26 18:37:19,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108821.33333333333, ans=0.125
+2024-08-26 18:37:21,536 INFO [train.py:1114] (0/4) Epoch 9, batch 500, loss[loss=0.2416, simple_loss=0.3059, pruned_loss=0.06526, ctc_loss=0.1169, over 19701.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2861, pruned_loss=0.05876, ctc_loss=0.1101, over 3545802.40 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:37:24,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=108874.66666666667, ans=0.125
+2024-08-26 18:37:31,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=108928.0, ans=0.0
+2024-08-26 18:37:35,692 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=108928.0, ans=0.05
+2024-08-26 18:37:42,874 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.480e+02 1.660e+02 1.957e+02 3.087e+02, threshold=3.320e+02, percent-clipped=0.0
+2024-08-26 18:37:46,340 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.58 vs. limit=12.0
+2024-08-26 18:37:59,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=109088.0, ans=0.125
+2024-08-26 18:38:05,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=109088.0, ans=0.0
+2024-08-26 18:38:07,950 INFO [train.py:1114] (0/4) Epoch 9, batch 550, loss[loss=0.2229, simple_loss=0.2877, pruned_loss=0.05799, ctc_loss=0.1051, over 19392.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2868, pruned_loss=0.05921, ctc_loss=0.1111, over 3607826.60 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:38:09,119 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=109141.33333333333, ans=0.0
+2024-08-26 18:38:25,754 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=109194.66666666667, ans=0.09899494936611666
+2024-08-26 18:38:38,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=109301.33333333333, ans=0.125
+2024-08-26 18:38:50,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=109354.66666666667, ans=0.2
+2024-08-26 18:38:55,941 INFO [train.py:1114] (0/4) Epoch 9, batch 600, loss[loss=0.2389, simple_loss=0.2979, pruned_loss=0.06521, ctc_loss=0.1238, over 19429.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2876, pruned_loss=0.0595, ctc_loss=0.1115, over 3665188.56 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:38:58,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=109408.0, ans=0.125
+2024-08-26 18:39:21,961 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.496e+02 1.658e+02 1.980e+02 4.382e+02, threshold=3.316e+02, percent-clipped=1.0
+2024-08-26 18:39:32,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=109568.0, ans=0.125
+2024-08-26 18:39:34,869 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=109568.0, ans=0.2
+2024-08-26 18:39:35,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109568.0, ans=0.1
+2024-08-26 18:39:49,347 INFO [train.py:1114] (0/4) Epoch 9, batch 650, loss[loss=0.2102, simple_loss=0.2788, pruned_loss=0.05193, ctc_loss=0.09442, over 19758.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2867, pruned_loss=0.05893, ctc_loss=0.1105, over 3715812.23 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:39:51,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=109674.66666666667, ans=0.025
+2024-08-26 18:39:58,325 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.70 vs. limit=15.0
+2024-08-26 18:40:29,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109834.66666666667, ans=0.1
+2024-08-26 18:40:36,288 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.59 vs. limit=6.0
+2024-08-26 18:40:40,276 INFO [train.py:1114] (0/4) Epoch 9, batch 700, loss[loss=0.2349, simple_loss=0.2891, pruned_loss=0.06613, ctc_loss=0.1214, over 19723.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2869, pruned_loss=0.05922, ctc_loss=0.111, over 3748214.42 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:40:42,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=109941.33333333333, ans=0.1
+2024-08-26 18:41:01,793 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.271e+02 1.503e+02 1.748e+02 2.321e+02 3.813e+02, threshold=3.497e+02, percent-clipped=1.0
+2024-08-26 18:41:04,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=110048.0, ans=0.125
+2024-08-26 18:41:22,509 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=110154.66666666667, ans=0.0
+2024-08-26 18:41:28,640 INFO [train.py:1114] (0/4) Epoch 9, batch 750, loss[loss=0.2169, simple_loss=0.287, pruned_loss=0.05304, ctc_loss=0.1019, over 19483.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2867, pruned_loss=0.05913, ctc_loss=0.1106, over 3774614.02 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 16.0
+2024-08-26 18:41:38,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=110261.33333333333, ans=0.025
+2024-08-26 18:41:39,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=110261.33333333333, ans=0.125
+2024-08-26 18:41:53,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=110314.66666666667, ans=0.0
+2024-08-26 18:42:09,833 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:42:22,143 INFO [train.py:1114] (0/4) Epoch 9, batch 800, loss[loss=0.1923, simple_loss=0.2477, pruned_loss=0.04961, ctc_loss=0.09423, over 19841.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2868, pruned_loss=0.05924, ctc_loss=0.1107, over 3796448.51 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:42:24,629 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=15.09 vs. limit=15.0
+2024-08-26 18:42:33,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=110528.0, ans=0.125
+2024-08-26 18:42:35,074 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.67 vs. limit=15.0
+2024-08-26 18:42:38,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=110528.0, ans=0.2
+2024-08-26 18:42:43,921 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 1.427e+02 1.539e+02 1.792e+02 3.382e+02, threshold=3.078e+02, percent-clipped=0.0
+2024-08-26 18:43:09,192 INFO [train.py:1114] (0/4) Epoch 9, batch 850, loss[loss=0.24, simple_loss=0.3005, pruned_loss=0.06452, ctc_loss=0.1263, over 19657.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2863, pruned_loss=0.05899, ctc_loss=0.1102, over 3815637.27 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:43:10,719 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.76 vs. limit=15.0
+2024-08-26 18:43:21,520 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.33 vs. limit=15.0
+2024-08-26 18:43:30,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=110848.0, ans=0.0
+2024-08-26 18:43:41,738 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=110901.33333333333, ans=0.125
+2024-08-26 18:43:52,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=110954.66666666667, ans=0.125
+2024-08-26 18:43:55,669 INFO [train.py:1114] (0/4) Epoch 9, batch 900, loss[loss=0.1884, simple_loss=0.257, pruned_loss=0.04258, ctc_loss=0.08659, over 19805.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2866, pruned_loss=0.05927, ctc_loss=0.1108, over 3820448.07 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:44:05,200 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.14 vs. limit=15.0
+2024-08-26 18:45:38,160 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.519e+02 1.752e+02 2.077e+02 5.433e+02, threshold=3.505e+02, percent-clipped=5.0
+2024-08-26 18:45:39,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=111114.66666666667, ans=0.0
+2024-08-26 18:45:58,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=111221.33333333333, ans=0.0
+2024-08-26 18:46:05,599 INFO [train.py:1114] (0/4) Epoch 9, batch 950, loss[loss=0.189, simple_loss=0.2586, pruned_loss=0.04362, ctc_loss=0.08024, over 19520.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2873, pruned_loss=0.05965, ctc_loss=0.1115, over 3822695.00 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:46:16,725 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.43 vs. limit=6.0
+2024-08-26 18:46:20,884 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.16 vs. limit=15.0
+2024-08-26 18:46:27,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=111328.0, ans=10.0
+2024-08-26 18:46:38,231 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111434.66666666667, ans=0.1
+2024-08-26 18:46:39,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=111434.66666666667, ans=0.125
+2024-08-26 18:46:50,386 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=111488.0, ans=0.0
+2024-08-26 18:46:57,416 INFO [train.py:1114] (0/4) Epoch 9, batch 1000, loss[loss=0.1983, simple_loss=0.2683, pruned_loss=0.04634, ctc_loss=0.08893, over 19829.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.2881, pruned_loss=0.06002, ctc_loss=0.1122, over 3819280.03 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:47:09,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=111594.66666666667, ans=0.0
+2024-08-26 18:47:12,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=111594.66666666667, ans=0.125
+2024-08-26 18:47:19,853 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.461e+02 1.756e+02 2.077e+02 6.803e+02, threshold=3.513e+02, percent-clipped=1.0
+2024-08-26 18:47:43,906 INFO [train.py:1114] (0/4) Epoch 9, batch 1050, loss[loss=0.2242, simple_loss=0.2909, pruned_loss=0.05801, ctc_loss=0.1036, over 19859.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2872, pruned_loss=0.05963, ctc_loss=0.1116, over 3823747.95 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:47:45,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=111808.0, ans=0.0
+2024-08-26 18:48:00,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111861.33333333333, ans=0.1
+2024-08-26 18:48:31,756 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112074.66666666667, ans=0.1
+2024-08-26 18:48:32,538 INFO [train.py:1114] (0/4) Epoch 9, batch 1100, loss[loss=0.2207, simple_loss=0.2855, pruned_loss=0.05667, ctc_loss=0.1062, over 19592.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2865, pruned_loss=0.05921, ctc_loss=0.1108, over 3831698.90 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:48:32,809 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=112074.66666666667, ans=0.125
+2024-08-26 18:48:41,128 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:48:43,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=112128.0, ans=0.125
+2024-08-26 18:48:59,878 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.444e+02 1.690e+02 2.009e+02 4.396e+02, threshold=3.380e+02, percent-clipped=1.0
+2024-08-26 18:49:05,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112181.33333333333, ans=0.0
+2024-08-26 18:49:41,001 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.75 vs. limit=22.5
+2024-08-26 18:49:51,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112288.0, ans=0.1
+2024-08-26 18:49:53,113 INFO [train.py:1114] (0/4) Epoch 9, batch 1150, loss[loss=0.1977, simple_loss=0.2705, pruned_loss=0.04602, ctc_loss=0.08183, over 19600.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2859, pruned_loss=0.05911, ctc_loss=0.1105, over 3830700.83 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:50:00,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=112341.33333333333, ans=0.125
+2024-08-26 18:50:24,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112448.0, ans=0.125
+2024-08-26 18:50:32,481 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=112448.0, ans=0.125
+2024-08-26 18:50:39,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112501.33333333333, ans=0.1
+2024-08-26 18:50:40,216 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.95 vs. limit=15.0
+2024-08-26 18:50:54,160 INFO [train.py:1114] (0/4) Epoch 9, batch 1200, loss[loss=0.2151, simple_loss=0.2879, pruned_loss=0.05152, ctc_loss=0.09815, over 19834.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2864, pruned_loss=0.05905, ctc_loss=0.1105, over 3825832.07 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-26 18:51:04,079 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.27 vs. limit=15.0
+2024-08-26 18:51:09,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112661.33333333333, ans=0.1
+2024-08-26 18:51:15,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=112714.66666666667, ans=0.05
+2024-08-26 18:51:16,811 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.431e+02 1.600e+02 1.807e+02 3.201e+02, threshold=3.201e+02, percent-clipped=0.0
+2024-08-26 18:51:18,048 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112714.66666666667, ans=0.0
+2024-08-26 18:51:18,999 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112714.66666666667, ans=0.1
+2024-08-26 18:51:33,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112821.33333333333, ans=0.1
+2024-08-26 18:51:42,791 INFO [train.py:1114] (0/4) Epoch 9, batch 1250, loss[loss=0.2535, simple_loss=0.3079, pruned_loss=0.07346, ctc_loss=0.1303, over 19548.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2868, pruned_loss=0.05905, ctc_loss=0.1103, over 3844137.32 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-26 18:51:51,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=112928.0, ans=0.0
+2024-08-26 18:51:58,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=112928.0, ans=0.125
+2024-08-26 18:52:01,654 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.43 vs. limit=6.0
+2024-08-26 18:52:26,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=113088.0, ans=0.2
+2024-08-26 18:52:31,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=113088.0, ans=0.1
+2024-08-26 18:52:36,294 INFO [train.py:1114] (0/4) Epoch 9, batch 1300, loss[loss=0.2433, simple_loss=0.3045, pruned_loss=0.06583, ctc_loss=0.1262, over 18806.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.286, pruned_loss=0.05872, ctc_loss=0.1097, over 3847673.27 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:52:50,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=113194.66666666667, ans=0.0
+2024-08-26 18:52:56,294 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.46 vs. limit=15.0
+2024-08-26 18:52:58,752 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.498e+02 1.743e+02 2.034e+02 3.430e+02, threshold=3.487e+02, percent-clipped=2.0
+2024-08-26 18:53:16,869 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=113354.66666666667, ans=0.09899494936611666
+2024-08-26 18:53:19,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113354.66666666667, ans=0.1
+2024-08-26 18:53:22,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=113408.0, ans=0.025
+2024-08-26 18:53:23,262 INFO [train.py:1114] (0/4) Epoch 9, batch 1350, loss[loss=0.1992, simple_loss=0.2718, pruned_loss=0.0451, ctc_loss=0.09085, over 19737.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2854, pruned_loss=0.05831, ctc_loss=0.1089, over 3859306.16 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:53:33,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=113461.33333333333, ans=0.125
+2024-08-26 18:53:34,815 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.78 vs. limit=15.0
+2024-08-26 18:53:46,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=113514.66666666667, ans=0.125
+2024-08-26 18:53:49,825 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:53:53,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=113568.0, ans=0.125
+2024-08-26 18:53:55,466 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=113568.0, ans=0.125
+2024-08-26 18:54:06,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=113621.33333333333, ans=0.125
+2024-08-26 18:54:09,868 INFO [train.py:1114] (0/4) Epoch 9, batch 1400, loss[loss=0.2051, simple_loss=0.263, pruned_loss=0.05334, ctc_loss=0.1012, over 19662.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2855, pruned_loss=0.05827, ctc_loss=0.1088, over 3866130.34 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:54:20,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=113728.0, ans=0.2
+2024-08-26 18:54:33,079 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.492e+02 1.644e+02 1.948e+02 2.802e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-26 18:54:33,460 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113781.33333333333, ans=0.125
+2024-08-26 18:54:37,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113781.33333333333, ans=0.1
+2024-08-26 18:54:38,907 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:54:54,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=113888.0, ans=0.125
+2024-08-26 18:54:59,233 INFO [train.py:1114] (0/4) Epoch 9, batch 1450, loss[loss=0.2486, simple_loss=0.3004, pruned_loss=0.07228, ctc_loss=0.1308, over 19679.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2859, pruned_loss=0.05847, ctc_loss=0.1093, over 3864108.67 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:55:04,416 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.85 vs. limit=15.0
+2024-08-26 18:55:09,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=113941.33333333333, ans=0.025
+2024-08-26 18:55:20,079 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=113994.66666666667, ans=0.0
+2024-08-26 18:55:25,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=114048.0, ans=10.0
+2024-08-26 18:55:28,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=114048.0, ans=0.125
+2024-08-26 18:55:29,753 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.91 vs. limit=15.0
+2024-08-26 18:55:41,702 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=114101.33333333333, ans=0.125
+2024-08-26 18:55:54,510 INFO [train.py:1114] (0/4) Epoch 9, batch 1500, loss[loss=0.239, simple_loss=0.3014, pruned_loss=0.06477, ctc_loss=0.1177, over 19600.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2863, pruned_loss=0.05856, ctc_loss=0.1095, over 3863198.78 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:55:55,865 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=114208.0, ans=0.125
+2024-08-26 18:56:01,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=114208.0, ans=0.125
+2024-08-26 18:56:16,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=114314.66666666667, ans=0.0
+2024-08-26 18:56:18,314 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.542e+02 1.688e+02 1.884e+02 2.711e+02, threshold=3.377e+02, percent-clipped=0.0
+2024-08-26 18:56:32,447 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:56:41,347 INFO [train.py:1114] (0/4) Epoch 9, batch 1550, loss[loss=0.2519, simple_loss=0.311, pruned_loss=0.06993, ctc_loss=0.1323, over 19597.00 frames. ], tot_loss[loss=0.225, simple_loss=0.2872, pruned_loss=0.05927, ctc_loss=0.1109, over 3847363.39 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:56:42,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=114474.66666666667, ans=0.07
+2024-08-26 18:56:43,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=114474.66666666667, ans=0.125
+2024-08-26 18:56:47,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=114474.66666666667, ans=0.1
+2024-08-26 18:56:55,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=114528.0, ans=0.0
+2024-08-26 18:56:59,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.44 vs. limit=15.0
+2024-08-26 18:57:08,732 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.33 vs. limit=15.0
+2024-08-26 18:57:11,217 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=114634.66666666667, ans=0.0
+2024-08-26 18:57:12,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-26 18:57:16,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-26 18:57:17,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-26 18:57:29,653 INFO [train.py:1114] (0/4) Epoch 9, batch 1600, loss[loss=0.2364, simple_loss=0.2947, pruned_loss=0.06485, ctc_loss=0.1211, over 19838.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2875, pruned_loss=0.05971, ctc_loss=0.1116, over 3835919.64 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-26 18:57:35,632 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.25 vs. limit=12.0
+2024-08-26 18:57:42,895 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=114794.66666666667, ans=0.125
+2024-08-26 18:57:44,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=114794.66666666667, ans=0.125
+2024-08-26 18:57:56,394 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.15 vs. limit=15.0
+2024-08-26 18:57:57,607 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.291e+02 1.549e+02 1.720e+02 1.979e+02 3.573e+02, threshold=3.441e+02, percent-clipped=1.0
+2024-08-26 18:58:11,372 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=114901.33333333333, ans=0.0
+2024-08-26 18:58:36,384 INFO [train.py:1114] (0/4) Epoch 9, batch 1650, loss[loss=0.2455, simple_loss=0.3067, pruned_loss=0.06755, ctc_loss=0.1228, over 19639.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2873, pruned_loss=0.05955, ctc_loss=0.1111, over 3833183.94 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 32.0
+2024-08-26 18:59:46,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=115114.66666666667, ans=0.2
+2024-08-26 18:59:46,775 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.00 vs. limit=10.0
+2024-08-26 18:59:57,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=115168.0, ans=0.2
+2024-08-26 19:00:07,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=115221.33333333333, ans=0.125
+2024-08-26 19:00:11,416 INFO [train.py:1114] (0/4) Epoch 9, batch 1700, loss[loss=0.194, simple_loss=0.2575, pruned_loss=0.04751, ctc_loss=0.08855, over 19661.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2867, pruned_loss=0.0587, ctc_loss=0.1098, over 3847890.26 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:00:11,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=115274.66666666667, ans=0.125
+2024-08-26 19:00:19,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=115328.0, ans=0.0
+2024-08-26 19:00:34,656 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.433e+02 1.619e+02 1.844e+02 2.581e+02, threshold=3.239e+02, percent-clipped=0.0
+2024-08-26 19:00:35,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115381.33333333333, ans=0.1
+2024-08-26 19:00:36,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=115381.33333333333, ans=0.2
+2024-08-26 19:00:39,310 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=115434.66666666667, ans=0.125
+2024-08-26 19:00:53,077 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=9.72 vs. limit=22.5
+2024-08-26 19:00:56,875 INFO [train.py:1114] (0/4) Epoch 9, batch 1750, loss[loss=0.2125, simple_loss=0.2659, pruned_loss=0.0578, ctc_loss=0.1088, over 19683.00 frames. ], tot_loss[loss=0.223, simple_loss=0.2857, pruned_loss=0.0583, ctc_loss=0.109, over 3852696.95 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:01:05,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=115594.66666666667, ans=0.07
+2024-08-26 19:01:05,119 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=115594.66666666667, ans=0.0
+2024-08-26 19:01:10,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=115594.66666666667, ans=0.125
+2024-08-26 19:01:30,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=115701.33333333333, ans=0.2
+2024-08-26 19:01:33,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=115754.66666666667, ans=0.125
+2024-08-26 19:01:43,101 INFO [train.py:1114] (0/4) Epoch 9, batch 1800, loss[loss=0.2098, simple_loss=0.2886, pruned_loss=0.04745, ctc_loss=0.09012, over 19616.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.2859, pruned_loss=0.05839, ctc_loss=0.1091, over 3852814.33 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:01:43,604 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.03 vs. limit=15.0
+2024-08-26 19:01:51,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=115861.33333333333, ans=0.95
+2024-08-26 19:01:59,404 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.42 vs. limit=15.0
+2024-08-26 19:02:02,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=115914.66666666667, ans=0.2
+2024-08-26 19:02:06,016 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.500e+02 1.645e+02 1.953e+02 3.789e+02, threshold=3.290e+02, percent-clipped=1.0
+2024-08-26 19:02:06,206 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=115914.66666666667, ans=0.125
+2024-08-26 19:02:27,274 INFO [train.py:1114] (0/4) Epoch 9, batch 1850, loss[loss=0.238, simple_loss=0.3074, pruned_loss=0.06102, ctc_loss=0.1163, over 19591.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.2857, pruned_loss=0.05851, ctc_loss=0.109, over 3856726.08 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:02:49,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=116181.33333333333, ans=0.95
+2024-08-26 19:03:01,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=116234.66666666667, ans=0.2
+2024-08-26 19:03:08,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=116288.0, ans=0.0
+2024-08-26 19:03:13,217 INFO [train.py:1114] (0/4) Epoch 9, batch 1900, loss[loss=0.2309, simple_loss=0.3, pruned_loss=0.05942, ctc_loss=0.1076, over 19640.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2862, pruned_loss=0.0586, ctc_loss=0.1092, over 3862410.33 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:03:17,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=116341.33333333333, ans=0.0
+2024-08-26 19:03:18,988 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.80 vs. limit=12.0
+2024-08-26 19:03:33,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=116448.0, ans=0.125
+2024-08-26 19:03:35,874 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.270e+02 1.509e+02 1.695e+02 1.935e+02 3.320e+02, threshold=3.390e+02, percent-clipped=1.0
+2024-08-26 19:03:49,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=116554.66666666667, ans=0.125
+2024-08-26 19:03:49,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=116554.66666666667, ans=0.125
+2024-08-26 19:03:56,674 INFO [train.py:1114] (0/4) Epoch 9, batch 1950, loss[loss=0.2239, simple_loss=0.2904, pruned_loss=0.05713, ctc_loss=0.1078, over 19575.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2872, pruned_loss=0.05869, ctc_loss=0.1095, over 3871997.96 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:03:57,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=116608.0, ans=0.0
+2024-08-26 19:03:59,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=116608.0, ans=0.1
+2024-08-26 19:04:08,437 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.67 vs. limit=15.0
+2024-08-26 19:04:09,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=116661.33333333333, ans=0.125
+2024-08-26 19:04:13,463 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:04:17,896 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.04 vs. limit=15.0
+2024-08-26 19:04:26,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=116768.0, ans=0.125
+2024-08-26 19:04:30,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=116768.0, ans=0.125
+2024-08-26 19:04:34,536 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.35 vs. limit=22.5
+2024-08-26 19:04:41,242 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=116821.33333333333, ans=0.125
+2024-08-26 19:04:45,336 INFO [train.py:1114] (0/4) Epoch 9, batch 2000, loss[loss=0.1812, simple_loss=0.2426, pruned_loss=0.04352, ctc_loss=0.08177, over 19696.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2873, pruned_loss=0.05896, ctc_loss=0.11, over 3856507.86 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-26 19:05:09,040 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.518e+02 1.711e+02 1.998e+02 4.316e+02, threshold=3.422e+02, percent-clipped=2.0
+2024-08-26 19:05:11,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=117034.66666666667, ans=0.0
+2024-08-26 19:05:12,844 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=117034.66666666667, ans=0.1
+2024-08-26 19:05:14,536 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=117034.66666666667, ans=0.125
+2024-08-26 19:05:16,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=117034.66666666667, ans=0.125
+2024-08-26 19:05:29,284 INFO [train.py:1114] (0/4) Epoch 9, batch 2050, loss[loss=0.1871, simple_loss=0.2554, pruned_loss=0.04269, ctc_loss=0.08361, over 19686.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2863, pruned_loss=0.05863, ctc_loss=0.1093, over 3852956.16 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:05:40,955 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=117194.66666666667, ans=0.125
+2024-08-26 19:05:45,241 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=117194.66666666667, ans=0.125
+2024-08-26 19:05:45,481 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.24 vs. limit=15.0
+2024-08-26 19:05:51,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=117248.0, ans=0.125
+2024-08-26 19:05:51,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=117248.0, ans=0.125
+2024-08-26 19:06:08,281 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.17 vs. limit=12.0
+2024-08-26 19:06:12,997 INFO [train.py:1114] (0/4) Epoch 9, batch 2100, loss[loss=0.2273, simple_loss=0.2904, pruned_loss=0.06034, ctc_loss=0.1087, over 19767.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2857, pruned_loss=0.05834, ctc_loss=0.1087, over 3859079.64 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:06:19,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=117408.0, ans=0.125
+2024-08-26 19:06:36,666 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.488e+02 1.695e+02 1.945e+02 3.088e+02, threshold=3.391e+02, percent-clipped=0.0
+2024-08-26 19:06:46,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=117621.33333333333, ans=0.0
+2024-08-26 19:06:55,554 INFO [train.py:1114] (0/4) Epoch 9, batch 2150, loss[loss=0.1918, simple_loss=0.258, pruned_loss=0.04564, ctc_loss=0.08569, over 19589.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2849, pruned_loss=0.05803, ctc_loss=0.1081, over 3869879.56 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 8.0
+2024-08-26 19:07:02,222 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.01 vs. limit=15.0
+2024-08-26 19:07:03,716 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.48 vs. limit=15.0
+2024-08-26 19:07:05,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=117728.0, ans=0.0
+2024-08-26 19:07:06,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=117728.0, ans=0.125
+2024-08-26 19:07:15,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=117781.33333333333, ans=0.2
+2024-08-26 19:07:15,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=117781.33333333333, ans=0.0
+2024-08-26 19:07:15,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=117781.33333333333, ans=0.0
+2024-08-26 19:07:23,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=117834.66666666667, ans=0.125
+2024-08-26 19:07:25,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=117834.66666666667, ans=0.0
+2024-08-26 19:07:38,969 INFO [train.py:1114] (0/4) Epoch 9, batch 2200, loss[loss=0.226, simple_loss=0.2902, pruned_loss=0.05903, ctc_loss=0.1093, over 19579.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2854, pruned_loss=0.05839, ctc_loss=0.1089, over 3869284.31 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:07:40,955 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=117941.33333333333, ans=0.025
+2024-08-26 19:07:48,172 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.14 vs. limit=22.5
+2024-08-26 19:07:56,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=118048.0, ans=0.0
+2024-08-26 19:07:56,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=118048.0, ans=0.125
+2024-08-26 19:08:03,128 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.528e+02 1.792e+02 2.132e+02 3.306e+02, threshold=3.583e+02, percent-clipped=0.0
+2024-08-26 19:08:19,936 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=118154.66666666667, ans=0.2
+2024-08-26 19:08:34,323 INFO [train.py:1114] (0/4) Epoch 9, batch 2250, loss[loss=0.2057, simple_loss=0.278, pruned_loss=0.04791, ctc_loss=0.09361, over 19627.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2854, pruned_loss=0.05806, ctc_loss=0.1085, over 3868578.45 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:08:56,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=118314.66666666667, ans=0.125
+2024-08-26 19:09:02,465 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.86 vs. limit=6.0
+2024-08-26 19:09:17,302 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.53 vs. limit=12.0
+2024-08-26 19:09:17,826 INFO [train.py:1114] (0/4) Epoch 9, batch 2300, loss[loss=0.1863, simple_loss=0.2521, pruned_loss=0.04392, ctc_loss=0.08149, over 19510.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2838, pruned_loss=0.05753, ctc_loss=0.1076, over 3861868.07 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:09:33,795 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.38 vs. limit=12.0
+2024-08-26 19:09:42,035 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 1.479e+02 1.669e+02 2.317e+02 3.988e+02, threshold=3.338e+02, percent-clipped=3.0
+2024-08-26 19:09:51,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=118634.66666666667, ans=0.1
+2024-08-26 19:09:54,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=118688.0, ans=0.125
+2024-08-26 19:10:01,389 INFO [train.py:1114] (0/4) Epoch 9, batch 2350, loss[loss=0.2364, simple_loss=0.2997, pruned_loss=0.0623, ctc_loss=0.1215, over 19635.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2837, pruned_loss=0.05742, ctc_loss=0.1075, over 3864113.15 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:10:05,978 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=118741.33333333333, ans=0.125
+2024-08-26 19:10:06,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=118741.33333333333, ans=0.2
+2024-08-26 19:10:10,022 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=118794.66666666667, ans=0.125
+2024-08-26 19:10:10,199 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.23 vs. limit=10.0
+2024-08-26 19:11:00,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=118848.0, ans=0.125
+2024-08-26 19:11:01,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=118848.0, ans=0.1
+2024-08-26 19:11:01,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=118848.0, ans=0.125
+2024-08-26 19:11:06,994 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=118848.0, ans=0.04949747468305833
+2024-08-26 19:11:10,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=118901.33333333333, ans=15.0
+2024-08-26 19:11:18,144 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=118954.66666666667, ans=0.1
+2024-08-26 19:11:23,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=118954.66666666667, ans=0.125
+2024-08-26 19:11:29,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=118954.66666666667, ans=0.125
+2024-08-26 19:11:31,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=118954.66666666667, ans=0.125
+2024-08-26 19:11:32,749 INFO [train.py:1114] (0/4) Epoch 9, batch 2400, loss[loss=0.2491, simple_loss=0.3036, pruned_loss=0.07143, ctc_loss=0.1294, over 19405.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.286, pruned_loss=0.05855, ctc_loss=0.1093, over 3857848.50 frames. ], batch size: 67, lr: 1.61e-02, grad_scale: 16.0
+2024-08-26 19:11:36,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=119008.0, ans=0.125
+2024-08-26 19:11:44,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=119008.0, ans=0.07
+2024-08-26 19:11:52,179 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.56 vs. limit=15.0
+2024-08-26 19:12:03,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=119114.66666666667, ans=0.0
+2024-08-26 19:12:04,696 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.526e+02 1.714e+02 1.892e+02 3.175e+02, threshold=3.427e+02, percent-clipped=0.0
+2024-08-26 19:12:23,660 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.75 vs. limit=15.0
+2024-08-26 19:12:24,795 INFO [train.py:1114] (0/4) Epoch 9, batch 2450, loss[loss=0.309, simple_loss=0.3321, pruned_loss=0.102, ctc_loss=0.205, over 13679.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.2905, pruned_loss=0.06217, ctc_loss=0.1166, over 3728798.97 frames. ], batch size: 140, lr: 1.61e-02, grad_scale: 16.0
+2024-08-26 19:12:32,052 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=119274.66666666667, ans=0.125
+2024-08-26 19:13:11,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=119434.66666666667, ans=0.125
+2024-08-26 19:13:15,623 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-9.pt
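+
+`checkpoint.py` writes a full training state at the end of each epoch (`epoch-9.pt` here) and, further down the log, periodic mid-epoch snapshots such as `checkpoint-24000.pt`. A minimal sketch of what such a checkpoint typically bundles is below; the exact keys icefall saves (sampler state, grad scaler state, etc.) may differ.
+
+```python
+import torch
+
+def save_checkpoint(path, model, optimizer, scheduler, epoch, batch_idx):
+    """Bundle everything needed to resume training. Key names here are
+    illustrative, not necessarily the ones icefall's checkpoint.py uses."""
+    torch.save(
+        {
+            "epoch": epoch,
+            "batch_idx": batch_idx,
+            "model": model.state_dict(),
+            "optimizer": optimizer.state_dict(),
+            "scheduler": scheduler.state_dict(),
+        },
+        path,
+    )
+```
+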
+2024-08-26 19:14:15,882 INFO [train.py:1114] (0/4) Epoch 10, batch 0, loss[loss=0.2146, simple_loss=0.2727, pruned_loss=0.05705, ctc_loss=0.1061, over 19806.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2727, pruned_loss=0.05705, ctc_loss=0.1061, over 19806.00 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 16.0
+2024-08-26 19:14:15,883 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 19:14:48,059 INFO [train.py:1146] (0/4) Epoch 10, validation: loss=0.1896, simple_loss=0.2813, pruned_loss=0.03622, ctc_loss=0.0637, over 944034.00 frames.
+2024-08-26 19:14:48,061 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
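+
+Each `train.py:1114` line reports two losses: `loss[... over N frames]` for the current batch, and `tot_loss[... over M frames]`, an average over the frames seen so far in the epoch (note how at `batch 0` above the two coincide, and the frame count resets at each epoch start). A sketch of that frame-weighted accumulation follows; the fractional frame counts in the log suggest icefall's tracker also applies some decay or scaling that this sketch omits.
+
+```python
+class FrameWeightedLoss:
+    """Accumulate per-batch losses weighted by their frame counts, so long
+    utterances count proportionally more (a sketch of the tot_loss[...]
+    bookkeeping, not icefall's MetricsTracker itself)."""
+
+    def __init__(self):
+        self.loss_sum = 0.0
+        self.frames = 0.0
+
+    def update(self, loss: float, num_frames: float) -> None:
+        self.loss_sum += loss * num_frames
+        self.frames += num_frames
+
+    @property
+    def average(self) -> float:
+        return self.loss_sum / max(self.frames, 1.0)
+
+tracker = FrameWeightedLoss()
+tracker.update(0.2146, 19806.0)  # batch 0 above: tot_loss equals the batch loss
+print(tracker.average)
+```
+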
+2024-08-26 19:14:52,767 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=119482.66666666667, ans=0.125
+2024-08-26 19:15:03,339 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=119536.0, ans=0.1
+2024-08-26 19:15:25,083 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.696e+02 1.867e+02 2.057e+02 3.331e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-26 19:15:25,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=119696.0, ans=0.0
+2024-08-26 19:15:34,236 INFO [train.py:1114] (0/4) Epoch 10, batch 50, loss[loss=0.1856, simple_loss=0.2487, pruned_loss=0.04434, ctc_loss=0.08443, over 19711.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2884, pruned_loss=0.05847, ctc_loss=0.1104, over 843213.08 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 16.0
+2024-08-26 19:15:48,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=119802.66666666667, ans=0.125
+2024-08-26 19:15:48,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=119802.66666666667, ans=0.2
+2024-08-26 19:16:03,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=119909.33333333333, ans=0.125
+2024-08-26 19:16:14,244 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=119962.66666666667, ans=0.125
+2024-08-26 19:16:20,477 INFO [train.py:1114] (0/4) Epoch 10, batch 100, loss[loss=0.2038, simple_loss=0.273, pruned_loss=0.0482, ctc_loss=0.0954, over 19710.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2889, pruned_loss=0.0585, ctc_loss=0.1099, over 1497783.82 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 8.0
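+
+The `grad_scale` value at the end of each `train.py:1114` line (32.0, 16.0, 8.0 at different points in this log) behaves like a dynamic mixed-precision loss scale: halved when a step overflows in fp16, grown back after a run of clean steps. Whether train.py uses PyTorch's `GradScaler` directly or its own variant is not visible from the log alone; the standard PyTorch pattern looks like this (toy model and data included so the loop runs on a CUDA machine):
+
+```python
+import torch
+
+model = torch.nn.Linear(80, 500).cuda()
+opt = torch.optim.Adam(model.parameters(), lr=1e-3)
+scaler = torch.cuda.amp.GradScaler(init_scale=16.0)
+
+for step in range(5):
+    x = torch.randn(8, 80, device="cuda")
+    opt.zero_grad(set_to_none=True)
+    with torch.cuda.amp.autocast():
+        loss = model(x).pow(2).mean()
+    scaler.scale(loss).backward()  # scale up so fp16 grads don't underflow
+    scaler.step(opt)               # unscales; skips the step on inf/nan grads
+    scaler.update()                # halves the scale on overflow, else grows it
+    print(step, scaler.get_scale())
+```
+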
+2024-08-26 19:16:21,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=120016.0, ans=0.125
+2024-08-26 19:16:52,904 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.07 vs. limit=6.0
+2024-08-26 19:16:53,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=120176.0, ans=0.0
+2024-08-26 19:16:55,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=120176.0, ans=0.0
+2024-08-26 19:17:03,447 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.471e+02 1.633e+02 1.792e+02 2.780e+02, threshold=3.265e+02, percent-clipped=0.0
+2024-08-26 19:17:09,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=120229.33333333333, ans=0.125
+2024-08-26 19:17:11,629 INFO [train.py:1114] (0/4) Epoch 10, batch 150, loss[loss=0.1958, simple_loss=0.2671, pruned_loss=0.04555, ctc_loss=0.08362, over 19725.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.287, pruned_loss=0.05807, ctc_loss=0.1085, over 2026659.98 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:17:20,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-26 19:17:20,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-26 19:17:48,611 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=120442.66666666667, ans=0.0
+2024-08-26 19:18:04,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=120496.0, ans=0.125
+2024-08-26 19:18:04,511 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=120496.0, ans=0.5
+2024-08-26 19:18:07,157 INFO [train.py:1114] (0/4) Epoch 10, batch 200, loss[loss=0.2401, simple_loss=0.2925, pruned_loss=0.06806, ctc_loss=0.1288, over 18272.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2851, pruned_loss=0.05768, ctc_loss=0.1079, over 2434216.39 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:18:30,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=120549.33333333333, ans=0.125
+2024-08-26 19:18:45,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=120656.0, ans=0.125
+2024-08-26 19:18:47,214 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=120656.0, ans=0.0
+2024-08-26 19:18:52,833 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=120709.33333333333, ans=0.1
+2024-08-26 19:19:08,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=120709.33333333333, ans=0.125
+2024-08-26 19:19:12,212 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.459e+02 1.596e+02 1.815e+02 3.041e+02, threshold=3.193e+02, percent-clipped=0.0
+2024-08-26 19:19:44,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=120762.66666666667, ans=0.2
+2024-08-26 19:19:48,326 INFO [train.py:1114] (0/4) Epoch 10, batch 250, loss[loss=0.2287, simple_loss=0.2993, pruned_loss=0.05737, ctc_loss=0.1085, over 19445.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2844, pruned_loss=0.05689, ctc_loss=0.1067, over 2755282.56 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:19:49,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=120816.0, ans=0.0
+2024-08-26 19:19:51,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=120816.0, ans=0.0
+2024-08-26 19:19:51,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.80 vs. limit=10.0
+2024-08-26 19:19:57,295 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.00 vs. limit=15.0
+2024-08-26 19:20:05,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-26 19:20:15,295 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.79 vs. limit=6.0
+2024-08-26 19:20:17,406 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=120922.66666666667, ans=0.125
+2024-08-26 19:20:28,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=120976.0, ans=0.0
+2024-08-26 19:20:45,510 INFO [train.py:1114] (0/4) Epoch 10, batch 300, loss[loss=0.2353, simple_loss=0.3013, pruned_loss=0.06156, ctc_loss=0.1152, over 19529.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2833, pruned_loss=0.05643, ctc_loss=0.1058, over 3000652.65 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:21:03,532 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.87 vs. limit=15.0
+2024-08-26 19:21:14,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=121189.33333333333, ans=0.035
+2024-08-26 19:21:15,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=121189.33333333333, ans=0.0
+2024-08-26 19:21:22,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=121242.66666666667, ans=0.125
+2024-08-26 19:21:29,978 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.480e+02 1.641e+02 1.981e+02 3.456e+02, threshold=3.281e+02, percent-clipped=2.0
+2024-08-26 19:21:33,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=121296.0, ans=0.0
+2024-08-26 19:21:34,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=121296.0, ans=0.0
+2024-08-26 19:21:38,273 INFO [train.py:1114] (0/4) Epoch 10, batch 350, loss[loss=0.208, simple_loss=0.2641, pruned_loss=0.05642, ctc_loss=0.09755, over 19747.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2837, pruned_loss=0.05676, ctc_loss=0.1066, over 3191619.59 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:21:41,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=121349.33333333333, ans=0.125
+2024-08-26 19:21:45,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=121349.33333333333, ans=0.125
+2024-08-26 19:21:52,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=121402.66666666667, ans=0.05
+2024-08-26 19:21:58,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=121456.0, ans=0.125
+2024-08-26 19:22:24,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=121616.0, ans=0.09899494936611666
+2024-08-26 19:22:24,844 INFO [train.py:1114] (0/4) Epoch 10, batch 400, loss[loss=0.2177, simple_loss=0.2849, pruned_loss=0.05535, ctc_loss=0.09919, over 19862.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2827, pruned_loss=0.05617, ctc_loss=0.1054, over 3342903.28 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:22:27,183 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.09 vs. limit=6.0
+2024-08-26 19:22:28,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=121616.0, ans=0.0
+2024-08-26 19:22:30,208 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=121616.0, ans=0.2
+2024-08-26 19:22:34,052 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=121616.0, ans=0.125
+2024-08-26 19:22:35,865 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=121669.33333333333, ans=0.1
+2024-08-26 19:22:40,388 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=121669.33333333333, ans=0.125
+2024-08-26 19:23:09,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=121776.0, ans=0.1
+2024-08-26 19:23:18,032 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.471e+02 1.735e+02 2.020e+02 3.245e+02, threshold=3.470e+02, percent-clipped=0.0
+2024-08-26 19:23:23,150 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.04 vs. limit=22.5
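+
+The `Whitening` lines report a per-module statistic that is 1.0 when the activations' covariance is proportional to the identity ("white") and grows as the eigenvalue spectrum becomes lopsided; the module logs it against a limit and, when the limit is exceeded (as in the `metric=23.04 vs. limit=22.5` line just above), pushes the activations back toward whiteness. Below is a hedged proxy for such a metric, one standard statistic with the fixed point the log implies; `scaling.py`'s exact formula (including its per-group handling) may differ.
+
+```python
+import torch
+
+def whitening_metric(x: torch.Tensor) -> float:
+    """Mean squared eigenvalue of the feature covariance divided by the
+    squared mean eigenvalue: ~1.0 for white features, larger when a few
+    directions dominate. Groups are ignored in this sketch."""
+    x = x.reshape(-1, x.shape[-1]).float()   # (frames, channels)
+    x = x - x.mean(dim=0, keepdim=True)
+    cov = (x.T @ x) / x.shape[0]
+    eigs = torch.linalg.eigvalsh(cov).clamp(min=0.0)
+    return float((eigs ** 2).mean() / eigs.mean() ** 2)
+
+white = torch.randn(2000, 384)
+correlated = white @ torch.randn(384, 384)
+print(whitening_metric(white))       # near 1
+print(whitening_metric(correlated))  # well above 1
+```
+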
+2024-08-26 19:23:26,370 INFO [train.py:1114] (0/4) Epoch 10, batch 450, loss[loss=0.1946, simple_loss=0.2747, pruned_loss=0.04033, ctc_loss=0.08458, over 19603.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2831, pruned_loss=0.05656, ctc_loss=0.1058, over 3450466.04 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:23:49,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=121989.33333333333, ans=0.0
+2024-08-26 19:23:56,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=122042.66666666667, ans=0.125
+2024-08-26 19:24:06,418 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:24:19,331 INFO [train.py:1114] (0/4) Epoch 10, batch 500, loss[loss=0.2184, simple_loss=0.2846, pruned_loss=0.05528, ctc_loss=0.1043, over 19671.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2819, pruned_loss=0.0561, ctc_loss=0.1049, over 3546117.80 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:24:26,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=122149.33333333333, ans=0.0
+2024-08-26 19:24:26,729 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.18 vs. limit=15.0
+2024-08-26 19:24:28,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=122149.33333333333, ans=0.05
+2024-08-26 19:24:33,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=122149.33333333333, ans=0.1
+2024-08-26 19:25:07,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=122309.33333333333, ans=0.125
+2024-08-26 19:25:10,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-26 19:25:10,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-26 19:25:11,346 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.449e+02 1.637e+02 1.959e+02 3.375e+02, threshold=3.275e+02, percent-clipped=0.0
+2024-08-26 19:25:17,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-26 19:25:19,722 INFO [train.py:1114] (0/4) Epoch 10, batch 550, loss[loss=0.2392, simple_loss=0.3049, pruned_loss=0.06326, ctc_loss=0.1173, over 19236.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2821, pruned_loss=0.0563, ctc_loss=0.1052, over 3606988.23 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:25:20,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=122416.0, ans=0.125
+2024-08-26 19:25:33,161 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:25:34,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=122469.33333333333, ans=0.0
+2024-08-26 19:25:43,920 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.35 vs. limit=15.0
+2024-08-26 19:26:00,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=122629.33333333333, ans=0.1
+2024-08-26 19:26:10,298 INFO [train.py:1114] (0/4) Epoch 10, batch 600, loss[loss=0.2492, simple_loss=0.3069, pruned_loss=0.06866, ctc_loss=0.1353, over 19406.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2824, pruned_loss=0.05637, ctc_loss=0.1054, over 3665624.41 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:26:50,249 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.480e+02 1.661e+02 1.846e+02 3.271e+02, threshold=3.322e+02, percent-clipped=0.0
+2024-08-26 19:26:50,507 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=122896.0, ans=0.0
+2024-08-26 19:26:58,408 INFO [train.py:1114] (0/4) Epoch 10, batch 650, loss[loss=0.213, simple_loss=0.2789, pruned_loss=0.05328, ctc_loss=0.1014, over 19767.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2817, pruned_loss=0.05589, ctc_loss=0.1047, over 3716191.05 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:27:43,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=123162.66666666667, ans=0.04949747468305833
+2024-08-26 19:27:49,792 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:27:51,538 INFO [train.py:1114] (0/4) Epoch 10, batch 700, loss[loss=0.205, simple_loss=0.2765, pruned_loss=0.04828, ctc_loss=0.0922, over 19721.00 frames. ], tot_loss[loss=0.218, simple_loss=0.282, pruned_loss=0.05603, ctc_loss=0.1048, over 3747909.39 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:27:54,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=123216.0, ans=0.125
+2024-08-26 19:28:04,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=123269.33333333333, ans=0.125
+2024-08-26 19:28:08,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=123269.33333333333, ans=0.125
+2024-08-26 19:28:20,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=123376.0, ans=0.125
+2024-08-26 19:28:29,133 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.526e+02 1.912e+02 2.394e+02 4.336e+02, threshold=3.825e+02, percent-clipped=8.0
+2024-08-26 19:28:36,473 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.05 vs. limit=15.0
+2024-08-26 19:28:38,776 INFO [train.py:1114] (0/4) Epoch 10, batch 750, loss[loss=0.2271, simple_loss=0.2954, pruned_loss=0.05779, ctc_loss=0.1081, over 19860.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2822, pruned_loss=0.05615, ctc_loss=0.105, over 3774351.17 frames. ], batch size: 55, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:28:57,586 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.36 vs. limit=15.0
+2024-08-26 19:29:23,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=123696.0, ans=0.1
+2024-08-26 19:29:27,355 INFO [train.py:1114] (0/4) Epoch 10, batch 800, loss[loss=0.1818, simple_loss=0.2556, pruned_loss=0.03836, ctc_loss=0.07814, over 19816.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2822, pruned_loss=0.05615, ctc_loss=0.1049, over 3795624.09 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-26 19:29:32,959 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=123749.33333333333, ans=0.0
+2024-08-26 19:29:47,105 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=123856.0, ans=0.1
+2024-08-26 19:29:54,923 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=123856.0, ans=0.025
+2024-08-26 19:29:58,631 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=123909.33333333333, ans=0.125
+2024-08-26 19:30:07,521 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.505e+02 1.745e+02 2.038e+02 4.368e+02, threshold=3.490e+02, percent-clipped=1.0
+2024-08-26 19:30:08,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=123962.66666666667, ans=0.125
+2024-08-26 19:30:17,637 INFO [train.py:1114] (0/4) Epoch 10, batch 850, loss[loss=0.2168, simple_loss=0.2857, pruned_loss=0.05335, ctc_loss=0.1028, over 19633.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2817, pruned_loss=0.05592, ctc_loss=0.1044, over 3815807.51 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-26 19:30:30,338 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=124069.33333333333, ans=0.125
+2024-08-26 19:30:32,217 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=124069.33333333333, ans=0.125
+2024-08-26 19:30:34,054 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=124069.33333333333, ans=0.125
+2024-08-26 19:30:38,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=124122.66666666667, ans=0.125
+2024-08-26 19:30:42,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_na.min_abs, batch_count=124122.66666666667, ans=0.02
+2024-08-26 19:30:50,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=124176.0, ans=0.0
+2024-08-26 19:31:08,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.27 vs. limit=6.0
+2024-08-26 19:31:14,610 INFO [train.py:1114] (0/4) Epoch 10, batch 900, loss[loss=0.209, simple_loss=0.2612, pruned_loss=0.05724, ctc_loss=0.1055, over 19398.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2818, pruned_loss=0.05648, ctc_loss=0.1052, over 3820191.06 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:31:17,585 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=124282.66666666667, ans=0.0
+2024-08-26 19:31:18,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=124282.66666666667, ans=0.05
+2024-08-26 19:31:23,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=124282.66666666667, ans=0.125
+2024-08-26 19:31:23,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=124282.66666666667, ans=0.0
+2024-08-26 19:32:20,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=124389.33333333333, ans=0.125
+2024-08-26 19:32:23,575 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.28 vs. limit=15.0
+2024-08-26 19:32:29,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=124442.66666666667, ans=0.0
+2024-08-26 19:32:32,474 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=124496.0, ans=0.0
+2024-08-26 19:32:35,085 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.525e+02 1.733e+02 2.036e+02 4.140e+02, threshold=3.466e+02, percent-clipped=3.0
+2024-08-26 19:32:42,447 INFO [train.py:1114] (0/4) Epoch 10, batch 950, loss[loss=0.2078, simple_loss=0.2738, pruned_loss=0.05093, ctc_loss=0.09959, over 19493.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2823, pruned_loss=0.05665, ctc_loss=0.1055, over 3821394.20 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:32:43,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=124549.33333333333, ans=0.0
+2024-08-26 19:32:48,509 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.46 vs. limit=22.5
+2024-08-26 19:33:17,449 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.11 vs. limit=15.0
+2024-08-26 19:33:18,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=124709.33333333333, ans=0.1
+2024-08-26 19:33:18,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff3.min_abs, batch_count=124709.33333333333, ans=0.2
+2024-08-26 19:33:33,302 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=124762.66666666667, ans=0.125
+2024-08-26 19:33:33,563 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.41 vs. limit=10.0
+2024-08-26 19:33:35,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=124762.66666666667, ans=0.1
+2024-08-26 19:33:36,604 INFO [train.py:1114] (0/4) Epoch 10, batch 1000, loss[loss=0.2046, simple_loss=0.2751, pruned_loss=0.04792, ctc_loss=0.09569, over 19839.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2827, pruned_loss=0.05681, ctc_loss=0.1058, over 3818105.96 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:33:40,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=124816.0, ans=0.1
+2024-08-26 19:33:51,177 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=124869.33333333333, ans=0.0
+2024-08-26 19:33:51,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=124869.33333333333, ans=0.2
+2024-08-26 19:33:56,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=124869.33333333333, ans=0.2
+2024-08-26 19:34:18,409 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=125029.33333333333, ans=0.0
+2024-08-26 19:34:19,956 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.433e+02 1.580e+02 1.832e+02 3.141e+02, threshold=3.159e+02, percent-clipped=0.0
+2024-08-26 19:34:24,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=125029.33333333333, ans=0.125
+2024-08-26 19:34:27,369 INFO [train.py:1114] (0/4) Epoch 10, batch 1050, loss[loss=0.2098, simple_loss=0.2878, pruned_loss=0.04756, ctc_loss=0.09201, over 19856.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2821, pruned_loss=0.05643, ctc_loss=0.1053, over 3822872.28 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:34:51,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=125082.66666666667, ans=0.07
+2024-08-26 19:35:21,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=125242.66666666667, ans=0.125
+2024-08-26 19:35:26,107 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:35:28,929 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=125296.0, ans=0.1
+2024-08-26 19:35:36,345 INFO [train.py:1114] (0/4) Epoch 10, batch 1100, loss[loss=0.2071, simple_loss=0.2752, pruned_loss=0.05049, ctc_loss=0.0952, over 19586.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2819, pruned_loss=0.05619, ctc_loss=0.1048, over 3830875.15 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 8.0
+2024-08-26 19:35:50,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=125402.66666666667, ans=0.025
+2024-08-26 19:35:55,278 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.64 vs. limit=15.0
+2024-08-26 19:36:03,647 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.91 vs. limit=6.0
+2024-08-26 19:36:06,426 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.04 vs. limit=15.0
+2024-08-26 19:36:07,304 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.51 vs. limit=22.5
+2024-08-26 19:36:16,686 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=9.43 vs. limit=15.0
+2024-08-26 19:36:18,875 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.433e+02 1.605e+02 1.841e+02 2.779e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-26 19:36:25,415 INFO [train.py:1114] (0/4) Epoch 10, batch 1150, loss[loss=0.1883, simple_loss=0.2623, pruned_loss=0.04146, ctc_loss=0.07853, over 19602.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2819, pruned_loss=0.05616, ctc_loss=0.1048, over 3828605.14 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 8.0
+2024-08-26 19:36:28,481 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=125616.0, ans=0.0
+2024-08-26 19:36:30,466 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=125616.0, ans=0.0
+2024-08-26 19:36:48,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=125722.66666666667, ans=10.0
+2024-08-26 19:37:11,121 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=125829.33333333333, ans=0.125
+2024-08-26 19:37:17,645 INFO [train.py:1114] (0/4) Epoch 10, batch 1200, loss[loss=0.2189, simple_loss=0.2897, pruned_loss=0.05231, ctc_loss=0.1085, over 19844.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2825, pruned_loss=0.05627, ctc_loss=0.1054, over 3823900.47 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:37:34,450 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=125936.0, ans=0.2
+2024-08-26 19:37:48,505 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.89 vs. limit=22.5
+2024-08-26 19:37:56,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=126096.0, ans=0.0
+2024-08-26 19:37:57,392 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.466e+02 1.608e+02 1.824e+02 2.979e+02, threshold=3.216e+02, percent-clipped=0.0
+2024-08-26 19:37:57,620 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=126096.0, ans=0.0
+2024-08-26 19:37:57,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.78 vs. limit=15.0
+2024-08-26 19:38:04,053 INFO [train.py:1114] (0/4) Epoch 10, batch 1250, loss[loss=0.2253, simple_loss=0.2902, pruned_loss=0.05759, ctc_loss=0.1131, over 19528.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2825, pruned_loss=0.05593, ctc_loss=0.1047, over 3842182.81 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:38:33,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=126256.0, ans=0.125
+2024-08-26 19:38:39,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=126256.0, ans=0.125
+2024-08-26 19:38:46,393 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.32 vs. limit=22.5
+2024-08-26 19:39:04,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=126309.33333333333, ans=0.05
+2024-08-26 19:40:04,495 INFO [train.py:1114] (0/4) Epoch 10, batch 1300, loss[loss=0.2237, simple_loss=0.2919, pruned_loss=0.05593, ctc_loss=0.1088, over 18915.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2816, pruned_loss=0.05554, ctc_loss=0.1039, over 3846635.47 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:40:12,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=126416.0, ans=0.0
+2024-08-26 19:40:18,206 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=126469.33333333333, ans=0.0
+2024-08-26 19:40:24,090 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=126469.33333333333, ans=0.1
+2024-08-26 19:40:25,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=126469.33333333333, ans=0.125
+2024-08-26 19:40:41,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=126576.0, ans=0.025
+2024-08-26 19:40:54,253 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.480e+02 1.716e+02 1.981e+02 3.061e+02, threshold=3.432e+02, percent-clipped=0.0
+2024-08-26 19:41:00,864 INFO [train.py:1114] (0/4) Epoch 10, batch 1350, loss[loss=0.209, simple_loss=0.2739, pruned_loss=0.05248, ctc_loss=0.09775, over 19767.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2814, pruned_loss=0.05532, ctc_loss=0.1034, over 3858173.24 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:41:02,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=126682.66666666667, ans=0.0
+2024-08-26 19:41:12,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=126736.0, ans=0.1
+2024-08-26 19:41:16,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=126736.0, ans=0.125
+2024-08-26 19:41:19,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=126736.0, ans=0.125
+2024-08-26 19:41:20,567 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.02 vs. limit=15.0
+2024-08-26 19:41:24,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=126789.33333333333, ans=0.2
+2024-08-26 19:41:24,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=126789.33333333333, ans=0.015
+2024-08-26 19:41:30,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=126789.33333333333, ans=0.1
+2024-08-26 19:41:33,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=126842.66666666667, ans=0.125
+2024-08-26 19:41:39,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=126842.66666666667, ans=0.07
+2024-08-26 19:41:39,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=126842.66666666667, ans=0.125
+2024-08-26 19:41:52,414 INFO [train.py:1114] (0/4) Epoch 10, batch 1400, loss[loss=0.1955, simple_loss=0.2528, pruned_loss=0.05004, ctc_loss=0.09512, over 19682.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2808, pruned_loss=0.05494, ctc_loss=0.1029, over 3865225.95 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:42:25,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=127056.0, ans=0.09899494936611666
+2024-08-26 19:42:26,140 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.35 vs. limit=15.0
+2024-08-26 19:42:34,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=127109.33333333333, ans=0.125
+2024-08-26 19:42:38,302 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.62 vs. limit=12.0
+2024-08-26 19:42:41,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=127162.66666666667, ans=0.0
+2024-08-26 19:42:41,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=127162.66666666667, ans=0.0
+2024-08-26 19:42:43,189 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.452e+02 1.585e+02 1.952e+02 4.788e+02, threshold=3.170e+02, percent-clipped=2.0
+2024-08-26 19:42:47,675 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.28 vs. limit=8.0
+2024-08-26 19:42:49,762 INFO [train.py:1114] (0/4) Epoch 10, batch 1450, loss[loss=0.2366, simple_loss=0.3058, pruned_loss=0.061, ctc_loss=0.1134, over 19636.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.282, pruned_loss=0.05547, ctc_loss=0.1037, over 3863599.03 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:43:07,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=127322.66666666667, ans=0.125
+2024-08-26 19:43:14,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=127322.66666666667, ans=0.0
+2024-08-26 19:43:15,489 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=127322.66666666667, ans=0.0
+2024-08-26 19:43:40,835 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:43:44,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=127429.33333333333, ans=0.125
+2024-08-26 19:43:48,225 INFO [train.py:1114] (0/4) Epoch 10, batch 1500, loss[loss=0.2431, simple_loss=0.3034, pruned_loss=0.06687, ctc_loss=0.1227, over 19587.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2824, pruned_loss=0.05551, ctc_loss=0.1038, over 3862952.85 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:43:48,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.78 vs. limit=12.0
+2024-08-26 19:43:58,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=127482.66666666667, ans=0.05
+2024-08-26 19:44:03,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=127536.0, ans=0.125
+2024-08-26 19:44:25,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=127642.66666666667, ans=0.1
+2024-08-26 19:44:28,536 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=127642.66666666667, ans=0.1
+2024-08-26 19:44:37,667 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.427e+02 1.587e+02 1.794e+02 3.285e+02, threshold=3.174e+02, percent-clipped=1.0
+2024-08-26 19:44:51,738 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=127749.33333333333, ans=0.0
+2024-08-26 19:44:52,062 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.87 vs. limit=15.0
+2024-08-26 19:44:52,457 INFO [train.py:1114] (0/4) Epoch 10, batch 1550, loss[loss=0.2252, simple_loss=0.2835, pruned_loss=0.06098, ctc_loss=0.1124, over 19610.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2821, pruned_loss=0.05549, ctc_loss=0.1041, over 3847527.24 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:44:57,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=127749.33333333333, ans=0.125
+2024-08-26 19:45:18,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=127856.0, ans=0.125
+2024-08-26 19:45:35,905 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-24000.pt
+2024-08-26 19:45:43,642 INFO [train.py:1114] (0/4) Epoch 10, batch 1600, loss[loss=0.2236, simple_loss=0.2875, pruned_loss=0.05772, ctc_loss=0.1108, over 19839.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.282, pruned_loss=0.0555, ctc_loss=0.1041, over 3836742.72 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:45:49,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=128016.0, ans=0.125
+2024-08-26 19:45:51,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=128016.0, ans=0.0
+2024-08-26 19:46:02,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=128122.66666666667, ans=0.125
+2024-08-26 19:46:04,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=128122.66666666667, ans=0.125
+2024-08-26 19:46:06,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=128122.66666666667, ans=0.95
+2024-08-26 19:46:18,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=128176.0, ans=0.04949747468305833
+2024-08-26 19:46:26,520 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.210e+02 1.460e+02 1.671e+02 2.068e+02 2.984e+02, threshold=3.342e+02, percent-clipped=0.0
+2024-08-26 19:46:33,068 INFO [train.py:1114] (0/4) Epoch 10, batch 1650, loss[loss=0.2198, simple_loss=0.2864, pruned_loss=0.05611, ctc_loss=0.1026, over 19641.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2819, pruned_loss=0.05566, ctc_loss=0.1043, over 3834161.07 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:46:35,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=128282.66666666667, ans=0.0
+2024-08-26 19:46:59,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=128389.33333333333, ans=0.5
+2024-08-26 19:47:03,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128389.33333333333, ans=0.1
+2024-08-26 19:47:05,456 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=128442.66666666667, ans=0.125
+2024-08-26 19:47:28,686 INFO [train.py:1114] (0/4) Epoch 10, batch 1700, loss[loss=0.1978, simple_loss=0.2581, pruned_loss=0.05019, ctc_loss=0.09276, over 19696.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2816, pruned_loss=0.0553, ctc_loss=0.1034, over 3847940.54 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:47:29,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=128549.33333333333, ans=0.2
+2024-08-26 19:47:39,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=128602.66666666667, ans=0.2
+2024-08-26 19:47:40,363 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=128602.66666666667, ans=0.125
+2024-08-26 19:47:48,635 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.02 vs. limit=22.5
+2024-08-26 19:47:51,047 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:48:10,531 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.19 vs. limit=15.0
+2024-08-26 19:48:14,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=128709.33333333333, ans=0.0
+2024-08-26 19:48:16,006 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.81 vs. limit=10.0
+2024-08-26 19:48:17,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=128762.66666666667, ans=0.0
+2024-08-26 19:48:18,885 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.440e+02 1.568e+02 1.897e+02 2.765e+02, threshold=3.136e+02, percent-clipped=0.0
+2024-08-26 19:48:25,121 INFO [train.py:1114] (0/4) Epoch 10, batch 1750, loss[loss=0.1924, simple_loss=0.2534, pruned_loss=0.04704, ctc_loss=0.09322, over 19649.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.281, pruned_loss=0.05514, ctc_loss=0.1032, over 3852797.68 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:48:32,286 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=128816.0, ans=0.2
+2024-08-26 19:48:43,348 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.65 vs. limit=8.0
+2024-08-26 19:48:49,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128922.66666666667, ans=0.1
+2024-08-26 19:48:50,679 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=128976.0, ans=0.125
+2024-08-26 19:48:57,129 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.36 vs. limit=12.0
+2024-08-26 19:49:03,117 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.40 vs. limit=10.0
+2024-08-26 19:49:08,981 INFO [train.py:1114] (0/4) Epoch 10, batch 1800, loss[loss=0.2041, simple_loss=0.28, pruned_loss=0.04691, ctc_loss=0.0857, over 19616.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2812, pruned_loss=0.05523, ctc_loss=0.1034, over 3854022.31 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:49:13,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=129082.66666666667, ans=0.0
+2024-08-26 19:49:47,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=129296.0, ans=0.0
+2024-08-26 19:49:49,354 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.466e+02 1.715e+02 2.130e+02 3.505e+02, threshold=3.430e+02, percent-clipped=4.0
+2024-08-26 19:49:54,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=129349.33333333333, ans=0.125
+2024-08-26 19:49:55,605 INFO [train.py:1114] (0/4) Epoch 10, batch 1850, loss[loss=0.2382, simple_loss=0.303, pruned_loss=0.06253, ctc_loss=0.1209, over 19577.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2805, pruned_loss=0.05479, ctc_loss=0.1024, over 3858493.05 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:50:00,154 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=129349.33333333333, ans=0.025
+2024-08-26 19:50:03,880 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.12 vs. limit=10.0
+2024-08-26 19:50:05,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=129402.66666666667, ans=0.025
+2024-08-26 19:50:15,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=129456.0, ans=0.125
+2024-08-26 19:50:41,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=129562.66666666667, ans=0.125
+2024-08-26 19:50:42,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=129562.66666666667, ans=0.2
+2024-08-26 19:50:50,337 INFO [train.py:1114] (0/4) Epoch 10, batch 1900, loss[loss=0.218, simple_loss=0.2864, pruned_loss=0.05337, ctc_loss=0.1071, over 19653.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2813, pruned_loss=0.05503, ctc_loss=0.1027, over 3864230.46 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:50:53,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=129616.0, ans=0.125
+2024-08-26 19:50:56,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=129616.0, ans=0.125
+2024-08-26 19:51:07,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=129722.66666666667, ans=0.125
+2024-08-26 19:51:14,041 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=129722.66666666667, ans=0.1
+2024-08-26 19:51:27,696 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.498e+02 1.655e+02 1.944e+02 4.101e+02, threshold=3.311e+02, percent-clipped=1.0
+2024-08-26 19:51:33,750 INFO [train.py:1114] (0/4) Epoch 10, batch 1950, loss[loss=0.1924, simple_loss=0.2667, pruned_loss=0.04241, ctc_loss=0.08322, over 19591.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2819, pruned_loss=0.05497, ctc_loss=0.1026, over 3872510.94 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:51:37,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=129882.66666666667, ans=0.05
+2024-08-26 19:51:45,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=129936.0, ans=0.2
+2024-08-26 19:51:47,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=129936.0, ans=0.0
+2024-08-26 19:51:53,892 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=129989.33333333333, ans=0.0
+2024-08-26 19:52:51,606 INFO [train.py:1114] (0/4) Epoch 10, batch 2000, loss[loss=0.2174, simple_loss=0.2676, pruned_loss=0.06198, ctc_loss=0.1082, over 19679.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2828, pruned_loss=0.05562, ctc_loss=0.104, over 3857015.78 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:53:02,407 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=130202.66666666667, ans=0.125
+2024-08-26 19:53:18,878 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=130309.33333333333, ans=0.125
+2024-08-26 19:53:29,085 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.467e+02 1.617e+02 1.850e+02 3.299e+02, threshold=3.233e+02, percent-clipped=0.0
+2024-08-26 19:53:31,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=130362.66666666667, ans=0.125
+2024-08-26 19:53:33,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=130362.66666666667, ans=0.5
+2024-08-26 19:53:35,211 INFO [train.py:1114] (0/4) Epoch 10, batch 2050, loss[loss=0.1808, simple_loss=0.2429, pruned_loss=0.04265, ctc_loss=0.08354, over 19736.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2815, pruned_loss=0.05531, ctc_loss=0.1034, over 3851578.47 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:53:45,792 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=130469.33333333333, ans=0.2
+2024-08-26 19:53:46,966 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.12 vs. limit=15.0
+2024-08-26 19:54:18,664 INFO [train.py:1114] (0/4) Epoch 10, batch 2100, loss[loss=0.205, simple_loss=0.2735, pruned_loss=0.04899, ctc_loss=0.09645, over 19749.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2808, pruned_loss=0.05456, ctc_loss=0.1018, over 3858479.51 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:54:19,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=130682.66666666667, ans=0.125
+2024-08-26 19:54:22,238 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=130682.66666666667, ans=0.125
+2024-08-26 19:54:26,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=130736.0, ans=0.2
+2024-08-26 19:54:35,122 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=130789.33333333333, ans=0.0
+2024-08-26 19:54:40,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=130789.33333333333, ans=0.09899494936611666
+2024-08-26 19:54:45,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=130842.66666666667, ans=0.125
+2024-08-26 19:54:53,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=130896.0, ans=0.0
+2024-08-26 19:54:56,941 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.237e+02 1.404e+02 1.614e+02 1.979e+02 3.349e+02, threshold=3.228e+02, percent-clipped=1.0
+2024-08-26 19:55:03,212 INFO [train.py:1114] (0/4) Epoch 10, batch 2150, loss[loss=0.2003, simple_loss=0.2662, pruned_loss=0.0488, ctc_loss=0.09212, over 19579.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2803, pruned_loss=0.0545, ctc_loss=0.1018, over 3869417.32 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:55:23,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=131056.0, ans=0.125
+2024-08-26 19:55:28,034 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.24 vs. limit=15.0
+2024-08-26 19:55:28,442 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=131109.33333333334, ans=0.125
+2024-08-26 19:55:43,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=131162.66666666666, ans=0.0
+2024-08-26 19:55:50,198 INFO [train.py:1114] (0/4) Epoch 10, batch 2200, loss[loss=0.2124, simple_loss=0.2891, pruned_loss=0.05036, ctc_loss=0.08731, over 19596.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2802, pruned_loss=0.05434, ctc_loss=0.1015, over 3867611.73 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:56:22,298 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.73 vs. limit=15.0
+2024-08-26 19:56:22,433 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.77 vs. limit=10.0
+2024-08-26 19:56:22,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=131322.66666666666, ans=0.05
+2024-08-26 19:56:26,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=131376.0, ans=0.125
+2024-08-26 19:56:33,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=131376.0, ans=0.125
+2024-08-26 19:56:38,538 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.505e+02 1.694e+02 1.989e+02 3.015e+02, threshold=3.388e+02, percent-clipped=0.0
+2024-08-26 19:56:43,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=131482.66666666666, ans=0.0
+2024-08-26 19:56:44,637 INFO [train.py:1114] (0/4) Epoch 10, batch 2250, loss[loss=0.2275, simple_loss=0.2891, pruned_loss=0.0601, ctc_loss=0.1141, over 19624.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2806, pruned_loss=0.05464, ctc_loss=0.102, over 3867698.01 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:56:57,282 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.98 vs. limit=22.5
+2024-08-26 19:57:09,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=131589.33333333334, ans=0.05
+2024-08-26 19:57:13,406 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=131642.66666666666, ans=0.1
+2024-08-26 19:57:17,667 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:57:20,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=131696.0, ans=0.0
+2024-08-26 19:57:27,780 INFO [train.py:1114] (0/4) Epoch 10, batch 2300, loss[loss=0.1995, simple_loss=0.266, pruned_loss=0.04889, ctc_loss=0.08791, over 19495.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2796, pruned_loss=0.0546, ctc_loss=0.1017, over 3861259.43 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:57:42,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=131802.66666666666, ans=0.125
+2024-08-26 19:57:45,461 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=131856.0, ans=0.125
+2024-08-26 19:57:51,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=131856.0, ans=0.125
+2024-08-26 19:58:05,769 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.213e+02 1.499e+02 1.709e+02 2.092e+02 3.241e+02, threshold=3.418e+02, percent-clipped=0.0
+2024-08-26 19:58:43,763 INFO [train.py:1114] (0/4) Epoch 10, batch 2350, loss[loss=0.2433, simple_loss=0.3077, pruned_loss=0.0657, ctc_loss=0.1189, over 19695.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.28, pruned_loss=0.05489, ctc_loss=0.1021, over 3864159.33 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-26 19:58:43,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=132016.0, ans=0.1
+2024-08-26 19:58:50,071 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.15 vs. limit=22.5
+2024-08-26 19:58:51,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=132016.0, ans=0.125
+2024-08-26 19:58:54,350 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=132069.33333333334, ans=0.0
+2024-08-26 19:59:22,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=132176.0, ans=0.0
+2024-08-26 19:59:26,768 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.87 vs. limit=22.5
+2024-08-26 19:59:31,148 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=132229.33333333334, ans=0.1
+2024-08-26 19:59:32,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=132282.66666666666, ans=0.0
+2024-08-26 19:59:32,687 INFO [train.py:1114] (0/4) Epoch 10, batch 2400, loss[loss=0.2321, simple_loss=0.2924, pruned_loss=0.06303, ctc_loss=0.1147, over 19387.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.282, pruned_loss=0.05579, ctc_loss=0.1037, over 3858298.54 frames. ], batch size: 67, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:59:54,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=132389.33333333334, ans=0.0
+2024-08-26 20:00:08,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=132389.33333333334, ans=0.05
+2024-08-26 20:00:09,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=132389.33333333334, ans=0.0
+2024-08-26 20:00:24,176 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.05 vs. limit=10.0
+2024-08-26 20:00:25,694 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:00:36,869 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.213e+02 1.532e+02 1.694e+02 1.900e+02 3.260e+02, threshold=3.387e+02, percent-clipped=0.0
+2024-08-26 20:00:38,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=132496.0, ans=0.0
+2024-08-26 20:00:42,855 INFO [train.py:1114] (0/4) Epoch 10, batch 2450, loss[loss=0.311, simple_loss=0.3348, pruned_loss=0.1061, ctc_loss=0.1873, over 14117.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2866, pruned_loss=0.05915, ctc_loss=0.11, over 3732667.83 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 20:00:51,837 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=132549.33333333334, ans=0.05
+2024-08-26 20:00:53,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=132549.33333333334, ans=0.0
+2024-08-26 20:01:02,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=132602.66666666666, ans=0.0
+2024-08-26 20:01:09,641 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=11.02 vs. limit=12.0
+2024-08-26 20:01:19,722 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.67 vs. limit=10.0
+2024-08-26 20:01:24,528 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=132709.33333333334, ans=0.125
+2024-08-26 20:01:31,733 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-10.pt
+2024-08-26 20:03:28,163 INFO [train.py:1114] (0/4) Epoch 11, batch 0, loss[loss=0.2145, simple_loss=0.2768, pruned_loss=0.05604, ctc_loss=0.1003, over 19429.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2768, pruned_loss=0.05604, ctc_loss=0.1003, over 19429.00 frames. ], batch size: 48, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:03:28,164 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 20:03:42,225 INFO [train.py:1146] (0/4) Epoch 11, validation: loss=0.1858, simple_loss=0.2776, pruned_loss=0.03491, ctc_loss=0.06042, over 944034.00 frames.
+2024-08-26 20:03:42,226 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 20:04:01,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=132864.0, ans=0.125
+2024-08-26 20:04:09,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=132864.0, ans=0.125
+2024-08-26 20:04:11,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=132864.0, ans=0.5
+2024-08-26 20:04:14,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.82 vs. limit=15.0
+2024-08-26 20:04:16,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=132917.33333333334, ans=0.1
+2024-08-26 20:04:32,366 INFO [train.py:1114] (0/4) Epoch 11, batch 50, loss[loss=0.1953, simple_loss=0.2584, pruned_loss=0.04806, ctc_loss=0.08989, over 19703.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2855, pruned_loss=0.05715, ctc_loss=0.1079, over 844145.34 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:04:37,949 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.624e+02 1.801e+02 2.017e+02 3.320e+02, threshold=3.603e+02, percent-clipped=0.0
+2024-08-26 20:04:41,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=133077.33333333334, ans=0.125
+2024-08-26 20:04:50,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=133077.33333333334, ans=0.125
+2024-08-26 20:05:00,326 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:05:00,442 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=133130.66666666666, ans=0.0
+2024-08-26 20:05:10,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff3.min_abs, batch_count=133184.0, ans=0.2
+2024-08-26 20:05:10,463 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=133184.0, ans=0.0
+2024-08-26 20:05:21,266 INFO [train.py:1114] (0/4) Epoch 11, batch 100, loss[loss=0.1973, simple_loss=0.2723, pruned_loss=0.04428, ctc_loss=0.08413, over 19719.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.286, pruned_loss=0.05626, ctc_loss=0.106, over 1498581.10 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:05:29,996 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:05:34,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=133344.0, ans=0.0
+2024-08-26 20:05:49,628 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.07 vs. limit=10.0
+2024-08-26 20:05:53,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=133450.66666666666, ans=0.1
+2024-08-26 20:05:55,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=133450.66666666666, ans=0.0
+2024-08-26 20:06:01,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=133504.0, ans=0.125
+2024-08-26 20:06:08,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=133504.0, ans=0.125
+2024-08-26 20:06:10,908 INFO [train.py:1114] (0/4) Epoch 11, batch 150, loss[loss=0.1848, simple_loss=0.2486, pruned_loss=0.04471, ctc_loss=0.07895, over 19738.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2826, pruned_loss=0.05529, ctc_loss=0.1036, over 2028235.97 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:06:16,408 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.457e+02 1.584e+02 1.841e+02 2.561e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-08-26 20:06:21,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=133610.66666666666, ans=0.125
+2024-08-26 20:06:34,900 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:06:34,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=133664.0, ans=0.025
+2024-08-26 20:06:35,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=133664.0, ans=0.0
+2024-08-26 20:06:40,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=133717.33333333334, ans=0.0
+2024-08-26 20:06:51,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=133717.33333333334, ans=0.125
+2024-08-26 20:06:54,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=133770.66666666666, ans=0.5
+2024-08-26 20:06:56,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=133770.66666666666, ans=0.1
+2024-08-26 20:08:06,609 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.97 vs. limit=15.0
+2024-08-26 20:08:08,065 INFO [train.py:1114] (0/4) Epoch 11, batch 200, loss[loss=0.2477, simple_loss=0.3093, pruned_loss=0.06725, ctc_loss=0.1291, over 18128.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2811, pruned_loss=0.05453, ctc_loss=0.1021, over 2435787.07 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:08:32,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=133930.66666666666, ans=0.125
+2024-08-26 20:08:38,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=133984.0, ans=0.1
+2024-08-26 20:09:00,078 INFO [train.py:1114] (0/4) Epoch 11, batch 250, loss[loss=0.2099, simple_loss=0.2804, pruned_loss=0.0508, ctc_loss=0.0948, over 19397.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2797, pruned_loss=0.0537, ctc_loss=0.1005, over 2755511.01 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:09:05,644 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.397e+02 1.518e+02 1.749e+02 2.921e+02, threshold=3.037e+02, percent-clipped=0.0
+2024-08-26 20:09:06,248 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.11 vs. limit=15.0
+2024-08-26 20:09:11,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=134144.0, ans=0.125
+2024-08-26 20:09:13,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=134144.0, ans=0.0
+2024-08-26 20:09:17,447 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=134144.0, ans=0.125
+2024-08-26 20:09:20,437 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=134197.33333333334, ans=0.07
+2024-08-26 20:09:24,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=134197.33333333334, ans=0.125
+2024-08-26 20:09:40,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.58 vs. limit=15.0
+2024-08-26 20:09:43,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=134304.0, ans=0.125
+2024-08-26 20:09:43,460 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=134304.0, ans=0.1
+2024-08-26 20:09:51,430 INFO [train.py:1114] (0/4) Epoch 11, batch 300, loss[loss=0.2428, simple_loss=0.2996, pruned_loss=0.06782, ctc_loss=0.1258, over 19497.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2789, pruned_loss=0.05357, ctc_loss=0.1002, over 2999613.45 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:10:05,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=134410.66666666666, ans=0.125
+2024-08-26 20:10:18,605 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.57 vs. limit=15.0
+2024-08-26 20:10:35,590 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.11 vs. limit=15.0
+2024-08-26 20:10:41,585 INFO [train.py:1114] (0/4) Epoch 11, batch 350, loss[loss=0.1908, simple_loss=0.256, pruned_loss=0.04686, ctc_loss=0.07982, over 19775.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2793, pruned_loss=0.05349, ctc_loss=0.09984, over 3190014.24 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:10:43,875 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=134624.0, ans=0.025
+2024-08-26 20:10:45,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=134624.0, ans=0.125
+2024-08-26 20:10:47,198 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.479e+02 1.637e+02 2.052e+02 3.441e+02, threshold=3.275e+02, percent-clipped=1.0
+2024-08-26 20:11:08,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=134730.66666666666, ans=0.125
+2024-08-26 20:11:20,416 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=134784.0, ans=0.0
+2024-08-26 20:11:29,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=134837.33333333334, ans=0.2
+2024-08-26 20:11:31,284 INFO [train.py:1114] (0/4) Epoch 11, batch 400, loss[loss=0.2012, simple_loss=0.2811, pruned_loss=0.04386, ctc_loss=0.08407, over 19835.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2793, pruned_loss=0.05342, ctc_loss=0.09981, over 3342096.11 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:11:31,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=134890.66666666666, ans=0.1
+2024-08-26 20:11:37,045 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=134890.66666666666, ans=0.0
+2024-08-26 20:11:43,085 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.25 vs. limit=22.5
+2024-08-26 20:11:54,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=134997.33333333334, ans=0.0
+2024-08-26 20:11:58,739 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=134997.33333333334, ans=0.125
+2024-08-26 20:12:02,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=135050.66666666666, ans=0.2
+2024-08-26 20:12:18,512 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-26 20:12:20,804 INFO [train.py:1114] (0/4) Epoch 11, batch 450, loss[loss=0.1891, simple_loss=0.271, pruned_loss=0.03866, ctc_loss=0.07484, over 19612.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2791, pruned_loss=0.0533, ctc_loss=0.09961, over 3450879.35 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:12:29,027 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.489e+02 1.652e+02 2.008e+02 3.634e+02, threshold=3.305e+02, percent-clipped=1.0
+2024-08-26 20:12:32,877 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=135210.66666666666, ans=0.125
+2024-08-26 20:12:42,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=135264.0, ans=0.2
+2024-08-26 20:13:11,615 INFO [train.py:1114] (0/4) Epoch 11, batch 500, loss[loss=0.2131, simple_loss=0.2838, pruned_loss=0.05163, ctc_loss=0.09778, over 19700.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2781, pruned_loss=0.05302, ctc_loss=0.09929, over 3546311.09 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:13:20,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=135477.33333333334, ans=0.1
+2024-08-26 20:13:20,870 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.55 vs. limit=15.0
+2024-08-26 20:13:46,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=135584.0, ans=0.1
+2024-08-26 20:13:49,758 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=135637.33333333334, ans=0.125
+2024-08-26 20:13:58,006 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=135690.66666666666, ans=0.0
+2024-08-26 20:13:58,587 INFO [train.py:1114] (0/4) Epoch 11, batch 550, loss[loss=0.2136, simple_loss=0.2793, pruned_loss=0.05467, ctc_loss=0.09627, over 19187.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2786, pruned_loss=0.05332, ctc_loss=0.09973, over 3609092.84 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:13:58,889 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-26 20:14:04,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=135690.66666666666, ans=0.2
+2024-08-26 20:14:04,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=135690.66666666666, ans=0.2
+2024-08-26 20:14:06,857 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.449e+02 1.695e+02 2.078e+02 4.377e+02, threshold=3.390e+02, percent-clipped=1.0
+2024-08-26 20:14:08,460 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.07 vs. limit=15.0
+2024-08-26 20:14:09,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-26 20:14:09,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-26 20:14:19,627 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.82 vs. limit=12.0
+2024-08-26 20:14:24,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=135797.33333333334, ans=0.125
+2024-08-26 20:14:50,549 INFO [train.py:1114] (0/4) Epoch 11, batch 600, loss[loss=0.2108, simple_loss=0.2875, pruned_loss=0.04841, ctc_loss=0.09331, over 19392.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2786, pruned_loss=0.05317, ctc_loss=0.09962, over 3667260.93 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:15:04,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=136010.66666666666, ans=0.0
+2024-08-26 20:15:07,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=136010.66666666666, ans=0.0
+2024-08-26 20:15:24,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=136117.33333333334, ans=0.1
+2024-08-26 20:15:41,240 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.21 vs. limit=15.0
+2024-08-26 20:15:41,572 INFO [train.py:1114] (0/4) Epoch 11, batch 650, loss[loss=0.2004, simple_loss=0.2771, pruned_loss=0.04506, ctc_loss=0.0841, over 19769.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2778, pruned_loss=0.05301, ctc_loss=0.09915, over 3717034.93 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:15:44,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=136224.0, ans=0.95
+2024-08-26 20:15:47,098 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.457e+02 1.627e+02 2.058e+02 3.143e+02, threshold=3.253e+02, percent-clipped=0.0
+2024-08-26 20:16:01,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=136330.66666666666, ans=0.2
+2024-08-26 20:16:05,721 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=136330.66666666666, ans=0.05
+2024-08-26 20:16:27,822 INFO [train.py:1114] (0/4) Epoch 11, batch 700, loss[loss=0.2036, simple_loss=0.2747, pruned_loss=0.0483, ctc_loss=0.08998, over 19722.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2783, pruned_loss=0.05298, ctc_loss=0.09913, over 3748776.55 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:16:42,070 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=136544.0, ans=0.2
+2024-08-26 20:17:05,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=136650.66666666666, ans=0.125
+2024-08-26 20:17:16,604 INFO [train.py:1114] (0/4) Epoch 11, batch 750, loss[loss=0.2002, simple_loss=0.2743, pruned_loss=0.04658, ctc_loss=0.08225, over 19492.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2776, pruned_loss=0.05265, ctc_loss=0.09834, over 3775041.64 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:17:24,636 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.496e+02 1.727e+02 2.151e+02 3.286e+02, threshold=3.455e+02, percent-clipped=1.0
+2024-08-26 20:17:26,068 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.34 vs. limit=12.0
+2024-08-26 20:17:32,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=136810.66666666666, ans=0.0
+2024-08-26 20:17:40,755 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=136864.0, ans=0.025
+2024-08-26 20:17:51,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=136917.33333333334, ans=0.0
+2024-08-26 20:17:53,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=136917.33333333334, ans=0.125
+2024-08-26 20:18:08,144 INFO [train.py:1114] (0/4) Epoch 11, batch 800, loss[loss=0.1915, simple_loss=0.2512, pruned_loss=0.04856, ctc_loss=0.08692, over 19826.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2778, pruned_loss=0.05278, ctc_loss=0.0986, over 3797332.69 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:18:09,162 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=137024.0, ans=0.125
+2024-08-26 20:18:19,654 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.93 vs. limit=15.0
+2024-08-26 20:18:19,755 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.37 vs. limit=10.0
+2024-08-26 20:18:32,528 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.12 vs. limit=15.0
+2024-08-26 20:18:50,325 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.73 vs. limit=15.0
+2024-08-26 20:19:00,314 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.09 vs. limit=22.5
+2024-08-26 20:19:09,647 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=137237.33333333334, ans=0.0
+2024-08-26 20:19:27,349 INFO [train.py:1114] (0/4) Epoch 11, batch 850, loss[loss=0.2255, simple_loss=0.293, pruned_loss=0.05762, ctc_loss=0.1069, over 19636.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2779, pruned_loss=0.05289, ctc_loss=0.09906, over 3815886.78 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:19:35,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=137290.66666666666, ans=0.1
+2024-08-26 20:19:39,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=137290.66666666666, ans=0.95
+2024-08-26 20:19:39,839 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.453e+02 1.601e+02 1.920e+02 5.497e+02, threshold=3.202e+02, percent-clipped=1.0
+2024-08-26 20:19:47,019 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=137344.0, ans=0.125
+2024-08-26 20:19:50,252 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=137344.0, ans=0.0
+2024-08-26 20:19:51,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=137344.0, ans=0.125
+2024-08-26 20:19:58,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=137344.0, ans=0.0
+2024-08-26 20:20:05,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=137397.33333333334, ans=0.125
+2024-08-26 20:20:25,935 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=137450.66666666666, ans=0.025
+2024-08-26 20:20:44,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=137504.0, ans=0.025
+2024-08-26 20:20:45,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=137504.0, ans=0.125
+2024-08-26 20:20:53,077 INFO [train.py:1114] (0/4) Epoch 11, batch 900, loss[loss=0.1995, simple_loss=0.2605, pruned_loss=0.04966, ctc_loss=0.09763, over 19416.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2787, pruned_loss=0.05357, ctc_loss=0.1002, over 3820477.17 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 16.0
+2024-08-26 20:21:07,367 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=137610.66666666666, ans=10.0
+2024-08-26 20:21:26,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=137664.0, ans=0.025
+2024-08-26 20:21:48,815 INFO [train.py:1114] (0/4) Epoch 11, batch 950, loss[loss=0.2078, simple_loss=0.2712, pruned_loss=0.05265, ctc_loss=0.09788, over 19507.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2787, pruned_loss=0.0536, ctc_loss=0.1001, over 3820471.57 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 16.0
+2024-08-26 20:21:50,885 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:21:52,736 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=137824.0, ans=0.0
+2024-08-26 20:21:52,848 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=137824.0, ans=0.5
+2024-08-26 20:21:55,402 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.468e+02 1.744e+02 2.017e+02 3.816e+02, threshold=3.488e+02, percent-clipped=2.0
+2024-08-26 20:22:14,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137930.66666666666, ans=0.1
+2024-08-26 20:22:16,075 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.38 vs. limit=22.5
+2024-08-26 20:22:27,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=137984.0, ans=0.2
+2024-08-26 20:22:40,793 INFO [train.py:1114] (0/4) Epoch 11, batch 1000, loss[loss=0.1903, simple_loss=0.2622, pruned_loss=0.0428, ctc_loss=0.08225, over 19840.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.279, pruned_loss=0.05373, ctc_loss=0.1003, over 3815736.60 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:22:51,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=138144.0, ans=0.0
+2024-08-26 20:23:01,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=138197.33333333334, ans=0.1
+2024-08-26 20:23:03,344 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.04 vs. limit=15.0
+2024-08-26 20:23:22,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=138304.0, ans=0.125
+2024-08-26 20:23:28,374 INFO [train.py:1114] (0/4) Epoch 11, batch 1050, loss[loss=0.2241, simple_loss=0.2919, pruned_loss=0.05677, ctc_loss=0.1067, over 19837.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2781, pruned_loss=0.05323, ctc_loss=0.09934, over 3821539.10 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:23:29,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.98 vs. limit=22.5
+2024-08-26 20:23:34,918 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.363e+02 1.534e+02 1.839e+02 4.578e+02, threshold=3.069e+02, percent-clipped=1.0
+2024-08-26 20:24:07,715 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=138357.33333333334, ans=0.0
+2024-08-26 20:24:09,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=138410.66666666666, ans=0.2
+2024-08-26 20:24:33,895 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.52 vs. limit=15.0
+2024-08-26 20:25:00,519 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=138570.66666666666, ans=0.2
+2024-08-26 20:25:03,554 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.01 vs. limit=10.0
+2024-08-26 20:25:07,611 INFO [train.py:1114] (0/4) Epoch 11, batch 1100, loss[loss=0.1984, simple_loss=0.2671, pruned_loss=0.04693, ctc_loss=0.08996, over 19566.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2779, pruned_loss=0.05276, ctc_loss=0.09858, over 3828733.98 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:25:13,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=138624.0, ans=0.0
+2024-08-26 20:25:14,901 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.26 vs. limit=12.0
+2024-08-26 20:25:25,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=138677.33333333334, ans=0.125
+2024-08-26 20:25:26,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=138730.66666666666, ans=0.05
+2024-08-26 20:25:30,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=138730.66666666666, ans=0.2
+2024-08-26 20:25:35,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=138784.0, ans=0.1
+2024-08-26 20:25:56,940 INFO [train.py:1114] (0/4) Epoch 11, batch 1150, loss[loss=0.1878, simple_loss=0.2645, pruned_loss=0.03983, ctc_loss=0.07875, over 19585.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2777, pruned_loss=0.05271, ctc_loss=0.09839, over 3827621.95 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:26:03,591 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.470e+02 1.661e+02 1.952e+02 3.516e+02, threshold=3.323e+02, percent-clipped=2.0
+2024-08-26 20:26:03,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=138890.66666666666, ans=0.125
+2024-08-26 20:26:08,383 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=138944.0, ans=0.0
+2024-08-26 20:26:30,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=139050.66666666666, ans=0.07
+2024-08-26 20:26:31,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=139050.66666666666, ans=0.0
+2024-08-26 20:26:33,830 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.73 vs. limit=10.0
+2024-08-26 20:26:45,573 INFO [train.py:1114] (0/4) Epoch 11, batch 1200, loss[loss=0.2326, simple_loss=0.3064, pruned_loss=0.05794, ctc_loss=0.1073, over 19840.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2787, pruned_loss=0.05294, ctc_loss=0.09878, over 3823615.73 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:26:49,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=139157.33333333334, ans=0.1
+2024-08-26 20:26:56,417 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.78 vs. limit=15.0
+2024-08-26 20:27:00,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=139210.66666666666, ans=0.125
+2024-08-26 20:27:03,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=139264.0, ans=0.125
+2024-08-26 20:27:31,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=139370.66666666666, ans=0.1
+2024-08-26 20:27:43,272 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.81 vs. limit=15.0
+2024-08-26 20:27:49,791 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.49 vs. limit=15.0
+2024-08-26 20:28:18,699 INFO [train.py:1114] (0/4) Epoch 11, batch 1250, loss[loss=0.215, simple_loss=0.2792, pruned_loss=0.05355, ctc_loss=0.1089, over 19520.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2786, pruned_loss=0.05272, ctc_loss=0.09841, over 3842516.75 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:28:21,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=139424.0, ans=0.0
+2024-08-26 20:28:27,597 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.224e+02 1.425e+02 1.545e+02 1.729e+02 3.064e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-26 20:28:45,582 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.19 vs. limit=15.0
+2024-08-26 20:28:53,233 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.38 vs. limit=12.0
+2024-08-26 20:29:01,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=139637.33333333334, ans=0.2
+2024-08-26 20:29:12,955 INFO [train.py:1114] (0/4) Epoch 11, batch 1300, loss[loss=0.2193, simple_loss=0.2883, pruned_loss=0.0548, ctc_loss=0.1016, over 18823.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2779, pruned_loss=0.05244, ctc_loss=0.09801, over 3846394.09 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:29:18,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=139690.66666666666, ans=0.125
+2024-08-26 20:29:29,215 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=139744.0, ans=0.125
+2024-08-26 20:29:32,876 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=139744.0, ans=0.0
+2024-08-26 20:33:36,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=139797.33333333334, ans=0.0
+2024-08-26 20:35:35,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=139850.66666666666, ans=0.0
+2024-08-26 20:35:43,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=139904.0, ans=0.125
+2024-08-26 20:35:46,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=139904.0, ans=0.1
+2024-08-26 20:35:47,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=139904.0, ans=0.1
+2024-08-26 20:35:52,056 INFO [train.py:1114] (0/4) Epoch 11, batch 1350, loss[loss=0.2055, simple_loss=0.279, pruned_loss=0.04754, ctc_loss=0.09236, over 19769.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2775, pruned_loss=0.05219, ctc_loss=0.09755, over 3857051.96 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:35:58,559 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.441e+02 1.644e+02 1.919e+02 3.174e+02, threshold=3.287e+02, percent-clipped=1.0
+2024-08-26 20:36:00,939 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.43 vs. limit=15.0
+2024-08-26 20:36:25,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=140117.33333333334, ans=0.125
+2024-08-26 20:36:29,278 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=140117.33333333334, ans=0.125
+2024-08-26 20:36:29,591 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.24 vs. limit=15.0
+2024-08-26 20:36:41,172 INFO [train.py:1114] (0/4) Epoch 11, batch 1400, loss[loss=0.1921, simple_loss=0.2547, pruned_loss=0.04691, ctc_loss=0.0893, over 19656.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2774, pruned_loss=0.05216, ctc_loss=0.09727, over 3863704.71 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:37:27,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=140330.66666666666, ans=0.0
+2024-08-26 20:37:32,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=140330.66666666666, ans=0.025
+2024-08-26 20:37:50,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=140437.33333333334, ans=0.125
+2024-08-26 20:38:01,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=140490.66666666666, ans=0.1
+2024-08-26 20:38:01,757 INFO [train.py:1114] (0/4) Epoch 11, batch 1450, loss[loss=0.2385, simple_loss=0.2931, pruned_loss=0.06642, ctc_loss=0.1277, over 19695.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2783, pruned_loss=0.0528, ctc_loss=0.09849, over 3861968.91 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:38:04,915 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.48 vs. limit=15.0
+2024-08-26 20:38:08,097 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.486e+02 1.636e+02 1.926e+02 3.321e+02, threshold=3.272e+02, percent-clipped=1.0
+2024-08-26 20:38:10,158 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=140544.0, ans=0.125
+2024-08-26 20:38:16,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=140544.0, ans=0.125
+2024-08-26 20:38:29,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=140597.33333333334, ans=0.125
+2024-08-26 20:38:32,239 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.90 vs. limit=12.0
+2024-08-26 20:38:33,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=140650.66666666666, ans=0.05
+2024-08-26 20:38:35,043 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.46 vs. limit=15.0
+2024-08-26 20:38:40,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=140704.0, ans=0.0
+2024-08-26 20:38:43,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=140704.0, ans=0.0
+2024-08-26 20:38:50,495 INFO [train.py:1114] (0/4) Epoch 11, batch 1500, loss[loss=0.2159, simple_loss=0.2928, pruned_loss=0.04984, ctc_loss=0.09854, over 19582.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2787, pruned_loss=0.05291, ctc_loss=0.09882, over 3860681.04 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:39:03,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=140810.66666666666, ans=0.125
+2024-08-26 20:39:10,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=140864.0, ans=0.2
+2024-08-26 20:39:10,860 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.79 vs. limit=6.0
+2024-08-26 20:39:18,415 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.59 vs. limit=15.0
+2024-08-26 20:39:39,002 INFO [train.py:1114] (0/4) Epoch 11, batch 1550, loss[loss=0.2275, simple_loss=0.2888, pruned_loss=0.06045, ctc_loss=0.1131, over 19578.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2789, pruned_loss=0.05332, ctc_loss=0.09957, over 3845422.91 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:39:41,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=141024.0, ans=0.125
+2024-08-26 20:39:45,238 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.401e+02 1.612e+02 1.919e+02 3.103e+02, threshold=3.225e+02, percent-clipped=0.0
+2024-08-26 20:39:49,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=141077.33333333334, ans=0.2
+2024-08-26 20:40:15,825 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=15.98 vs. limit=15.0
+2024-08-26 20:40:21,056 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=141237.33333333334, ans=0.1
+2024-08-26 20:40:29,937 INFO [train.py:1114] (0/4) Epoch 11, batch 1600, loss[loss=0.2199, simple_loss=0.2926, pruned_loss=0.05331, ctc_loss=0.1015, over 19843.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2787, pruned_loss=0.05345, ctc_loss=0.0998, over 3834287.04 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:40:40,698 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:40:49,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=141397.33333333334, ans=0.1
+2024-08-26 20:41:18,740 INFO [train.py:1114] (0/4) Epoch 11, batch 1650, loss[loss=0.2245, simple_loss=0.2897, pruned_loss=0.05903, ctc_loss=0.1033, over 19656.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2789, pruned_loss=0.05378, ctc_loss=0.1003, over 3831344.31 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:41:25,307 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.523e+02 1.726e+02 1.964e+02 3.202e+02, threshold=3.451e+02, percent-clipped=0.0
+2024-08-26 20:41:27,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=141610.66666666666, ans=0.125
+2024-08-26 20:41:29,232 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=141610.66666666666, ans=0.0
+2024-08-26 20:41:36,031 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.15 vs. limit=10.0
+2024-08-26 20:41:36,840 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.20 vs. limit=15.0
+2024-08-26 20:41:43,427 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.54 vs. limit=6.0
+2024-08-26 20:41:44,247 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.56 vs. limit=15.0
+2024-08-26 20:42:00,875 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=141770.66666666666, ans=15.0
+2024-08-26 20:42:07,201 INFO [train.py:1114] (0/4) Epoch 11, batch 1700, loss[loss=0.1848, simple_loss=0.2442, pruned_loss=0.04561, ctc_loss=0.0854, over 19650.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2779, pruned_loss=0.05315, ctc_loss=0.09915, over 3845513.70 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:42:16,190 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.52 vs. limit=15.0
+2024-08-26 20:42:16,780 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=141824.0, ans=0.125
+2024-08-26 20:42:18,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=141824.0, ans=0.125
+2024-08-26 20:42:24,581 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.95 vs. limit=15.0
+2024-08-26 20:42:36,687 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.60 vs. limit=15.0
+2024-08-26 20:42:37,380 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=141930.66666666666, ans=0.125
+2024-08-26 20:42:46,172 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:43:00,060 INFO [train.py:1114] (0/4) Epoch 11, batch 1750, loss[loss=0.1747, simple_loss=0.2421, pruned_loss=0.03829, ctc_loss=0.07687, over 19641.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2777, pruned_loss=0.05309, ctc_loss=0.09898, over 3851280.81 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:43:01,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=142090.66666666666, ans=0.0
+2024-08-26 20:43:06,161 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.441e+02 1.591e+02 1.781e+02 2.526e+02, threshold=3.183e+02, percent-clipped=0.0
+2024-08-26 20:43:06,424 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=142090.66666666666, ans=0.1
+2024-08-26 20:43:17,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=142197.33333333334, ans=0.1
+2024-08-26 20:43:22,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=142197.33333333334, ans=0.125
+2024-08-26 20:43:35,424 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=142250.66666666666, ans=0.125
+2024-08-26 20:43:46,385 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.36 vs. limit=22.5
+2024-08-26 20:43:50,387 INFO [train.py:1114] (0/4) Epoch 11, batch 1800, loss[loss=0.209, simple_loss=0.2846, pruned_loss=0.04885, ctc_loss=0.08933, over 19620.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2777, pruned_loss=0.05288, ctc_loss=0.09882, over 3852829.33 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:43:54,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=142357.33333333334, ans=0.0
+2024-08-26 20:43:55,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=142357.33333333334, ans=0.09899494936611666
+2024-08-26 20:43:56,975 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.55 vs. limit=6.0
+2024-08-26 20:44:03,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=142410.66666666666, ans=0.125
+2024-08-26 20:44:07,617 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=142410.66666666666, ans=0.125
+2024-08-26 20:44:09,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=142464.0, ans=0.0
+2024-08-26 20:44:17,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=142464.0, ans=0.125
+2024-08-26 20:44:31,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=142570.66666666666, ans=0.125
+2024-08-26 20:44:40,950 INFO [train.py:1114] (0/4) Epoch 11, batch 1850, loss[loss=0.2044, simple_loss=0.278, pruned_loss=0.04701, ctc_loss=0.09219, over 19558.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2774, pruned_loss=0.05261, ctc_loss=0.09844, over 3855883.92 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:44:47,995 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.442e+02 1.639e+02 2.043e+02 4.343e+02, threshold=3.277e+02, percent-clipped=6.0
+2024-08-26 20:44:49,470 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.66 vs. limit=10.0
+2024-08-26 20:44:51,351 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.65 vs. limit=15.0
+2024-08-26 20:44:58,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=142677.33333333334, ans=0.0
+2024-08-26 20:45:29,161 INFO [train.py:1114] (0/4) Epoch 11, batch 1900, loss[loss=0.2232, simple_loss=0.2938, pruned_loss=0.05629, ctc_loss=0.1003, over 19643.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2777, pruned_loss=0.05262, ctc_loss=0.09839, over 3860584.81 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:45:44,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=142944.0, ans=0.125
+2024-08-26 20:45:44,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=142944.0, ans=0.125
+2024-08-26 20:45:49,483 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=142997.33333333334, ans=0.07
+2024-08-26 20:45:55,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=142997.33333333334, ans=0.025
+2024-08-26 20:46:23,316 INFO [train.py:1114] (0/4) Epoch 11, batch 1950, loss[loss=0.2068, simple_loss=0.2775, pruned_loss=0.04949, ctc_loss=0.09283, over 19582.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2792, pruned_loss=0.05287, ctc_loss=0.09869, over 3869804.74 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:47:24,646 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.500e+02 1.631e+02 1.894e+02 3.317e+02, threshold=3.262e+02, percent-clipped=1.0
+2024-08-26 20:48:00,975 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.40 vs. limit=15.0
+2024-08-26 20:48:08,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=143317.33333333334, ans=0.125
+2024-08-26 20:48:22,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=143317.33333333334, ans=0.0
+2024-08-26 20:48:24,197 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=143370.66666666666, ans=0.0
+2024-08-26 20:48:33,084 INFO [train.py:1114] (0/4) Epoch 11, batch 2000, loss[loss=0.1976, simple_loss=0.2544, pruned_loss=0.0512, ctc_loss=0.09612, over 19676.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2793, pruned_loss=0.05321, ctc_loss=0.09934, over 3855510.50 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-26 20:48:40,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=143424.0, ans=0.04949747468305833
+2024-08-26 20:48:51,003 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=143530.66666666666, ans=0.0
+2024-08-26 20:48:52,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=143530.66666666666, ans=0.125
+2024-08-26 20:49:00,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=143584.0, ans=0.125
+2024-08-26 20:49:02,763 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=143584.0, ans=0.0
+2024-08-26 20:49:03,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=143584.0, ans=0.125
+2024-08-26 20:49:39,121 INFO [train.py:1114] (0/4) Epoch 11, batch 2050, loss[loss=0.208, simple_loss=0.2694, pruned_loss=0.054, ctc_loss=0.0965, over 19730.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2788, pruned_loss=0.05324, ctc_loss=0.09929, over 3852352.43 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-26 20:49:47,245 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.448e+02 1.585e+02 1.933e+02 3.153e+02, threshold=3.170e+02, percent-clipped=0.0
+2024-08-26 20:49:58,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=143744.0, ans=0.0
+2024-08-26 20:50:00,686 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.03 vs. limit=6.0
+2024-08-26 20:50:01,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=143744.0, ans=0.2
+2024-08-26 20:50:20,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=143850.66666666666, ans=0.0
+2024-08-26 20:50:22,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=143850.66666666666, ans=0.0
+2024-08-26 20:50:33,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten.whitening_limit, batch_count=143904.0, ans=22.5
+2024-08-26 20:50:37,969 INFO [train.py:1114] (0/4) Epoch 11, batch 2100, loss[loss=0.2076, simple_loss=0.2704, pruned_loss=0.05271, ctc_loss=0.09822, over 19789.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2784, pruned_loss=0.05306, ctc_loss=0.09897, over 3859782.18 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:50:38,118 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-26 20:50:39,931 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-26 20:50:45,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=143957.33333333334, ans=0.2
+2024-08-26 20:50:47,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=144010.66666666666, ans=0.0
+2024-08-26 20:50:54,427 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.34 vs. limit=10.0
+2024-08-26 20:50:55,855 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=144064.0, ans=0.125
+2024-08-26 20:50:56,093 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.66 vs. limit=15.0
+2024-08-26 20:51:00,407 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.27 vs. limit=15.0
+2024-08-26 20:51:02,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=144064.0, ans=0.125
+2024-08-26 20:51:06,385 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=144117.33333333334, ans=0.125
+2024-08-26 20:51:09,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=144117.33333333334, ans=0.1
+2024-08-26 20:51:11,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=144117.33333333334, ans=0.0
+2024-08-26 20:51:12,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=144170.66666666666, ans=0.0
+2024-08-26 20:51:22,999 INFO [train.py:1114] (0/4) Epoch 11, batch 2150, loss[loss=0.2042, simple_loss=0.2716, pruned_loss=0.05033, ctc_loss=0.09029, over 19591.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2779, pruned_loss=0.05267, ctc_loss=0.09824, over 3869376.40 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:51:30,822 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.485e+02 1.672e+02 2.037e+02 4.338e+02, threshold=3.345e+02, percent-clipped=7.0
+2024-08-26 20:51:47,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=144330.66666666666, ans=0.0
+2024-08-26 20:51:55,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=144384.0, ans=0.125
+2024-08-26 20:51:58,401 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.11 vs. limit=15.0
+2024-08-26 20:52:02,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=144437.33333333334, ans=0.0
+2024-08-26 20:52:03,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=144437.33333333334, ans=0.07
+2024-08-26 20:52:03,593 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.55 vs. limit=15.0
+2024-08-26 20:52:04,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=144437.33333333334, ans=0.0
+2024-08-26 20:52:06,899 INFO [train.py:1114] (0/4) Epoch 11, batch 2200, loss[loss=0.2243, simple_loss=0.292, pruned_loss=0.05731, ctc_loss=0.105, over 19588.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2774, pruned_loss=0.05233, ctc_loss=0.09767, over 3869041.31 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:52:10,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=144490.66666666666, ans=0.125
+2024-08-26 20:52:19,784 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.89 vs. limit=15.0
+2024-08-26 20:52:22,819 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=144544.0, ans=0.125
+2024-08-26 20:52:23,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=144597.33333333334, ans=0.5
+2024-08-26 20:52:29,269 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.62 vs. limit=10.0
+2024-08-26 20:52:39,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=144650.66666666666, ans=0.0
+2024-08-26 20:52:50,853 INFO [train.py:1114] (0/4) Epoch 11, batch 2250, loss[loss=0.2121, simple_loss=0.2806, pruned_loss=0.05209, ctc_loss=0.09851, over 19619.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2777, pruned_loss=0.05245, ctc_loss=0.09779, over 3868584.66 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:52:58,755 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.461e+02 1.628e+02 1.934e+02 8.673e+02, threshold=3.256e+02, percent-clipped=2.0
+2024-08-26 20:53:02,005 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.45 vs. limit=22.5
+2024-08-26 20:53:03,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=144810.66666666666, ans=0.125
+2024-08-26 20:53:07,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=144864.0, ans=0.125
+2024-08-26 20:53:12,204 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.06 vs. limit=6.0
+2024-08-26 20:53:14,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=144864.0, ans=0.07
+2024-08-26 20:53:22,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=144917.33333333334, ans=0.1
+2024-08-26 20:53:30,705 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.65 vs. limit=15.0
+2024-08-26 20:53:35,365 INFO [train.py:1114] (0/4) Epoch 11, batch 2300, loss[loss=0.1876, simple_loss=0.2569, pruned_loss=0.04336, ctc_loss=0.07903, over 19508.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2768, pruned_loss=0.05243, ctc_loss=0.09779, over 3861329.49 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:53:42,774 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145024.0, ans=0.1
+2024-08-26 20:53:57,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=145130.66666666666, ans=0.0
+2024-08-26 20:54:05,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=145184.0, ans=0.125
+2024-08-26 20:54:17,803 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.04 vs. limit=10.0
+2024-08-26 20:54:20,122 INFO [train.py:1114] (0/4) Epoch 11, batch 2350, loss[loss=0.2356, simple_loss=0.3023, pruned_loss=0.06281, ctc_loss=0.1083, over 19678.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2765, pruned_loss=0.05242, ctc_loss=0.09765, over 3863696.93 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:54:22,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=145290.66666666666, ans=0.0
+2024-08-26 20:54:24,579 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=145290.66666666666, ans=0.125
+2024-08-26 20:54:24,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=145290.66666666666, ans=0.025
+2024-08-26 20:54:28,773 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.482e+02 1.673e+02 1.901e+02 2.829e+02, threshold=3.345e+02, percent-clipped=0.0
+2024-08-26 20:54:31,942 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.19 vs. limit=15.0
+2024-08-26 20:54:34,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=145344.0, ans=0.95
+2024-08-26 20:54:45,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=145397.33333333334, ans=0.0
+2024-08-26 20:54:46,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=145450.66666666666, ans=0.1
+2024-08-26 20:55:01,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145504.0, ans=0.1
+2024-08-26 20:55:04,264 INFO [train.py:1114] (0/4) Epoch 11, batch 2400, loss[loss=0.2675, simple_loss=0.3267, pruned_loss=0.0774, ctc_loss=0.1339, over 19327.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2789, pruned_loss=0.05349, ctc_loss=0.09946, over 3858441.92 frames. ], batch size: 67, lr: 1.33e-02, grad_scale: 32.0
+2024-08-26 20:55:06,027 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145557.33333333334, ans=0.1
+2024-08-26 20:55:09,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=145557.33333333334, ans=0.5
+2024-08-26 20:55:25,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=145664.0, ans=0.125
+2024-08-26 20:55:27,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=145664.0, ans=0.125
+2024-08-26 20:55:27,945 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=145664.0, ans=0.2
+2024-08-26 20:55:49,239 INFO [train.py:1114] (0/4) Epoch 11, batch 2450, loss[loss=0.3206, simple_loss=0.3447, pruned_loss=0.1078, ctc_loss=0.2024, over 13466.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.283, pruned_loss=0.05649, ctc_loss=0.1053, over 3734133.43 frames. ], batch size: 141, lr: 1.33e-02, grad_scale: 32.0
+2024-08-26 20:55:56,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=145824.0, ans=0.0
+2024-08-26 20:55:58,190 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.577e+02 1.748e+02 1.957e+02 3.323e+02, threshold=3.496e+02, percent-clipped=0.0
+2024-08-26 20:56:22,512 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=145984.0, ans=0.125
+2024-08-26 20:56:24,354 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-11.pt
+2024-08-26 21:01:32,163 INFO [train.py:1114] (0/4) Epoch 12, batch 0, loss[loss=0.1915, simple_loss=0.2585, pruned_loss=0.0453, ctc_loss=0.08483, over 19444.00 frames. ], tot_loss[loss=0.1915, simple_loss=0.2585, pruned_loss=0.0453, ctc_loss=0.08483, over 19444.00 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:01:32,164 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 21:01:49,208 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.8352, 5.0595, 5.6308, 5.3060], device='cuda:0')
+2024-08-26 21:01:52,246 INFO [train.py:1146] (0/4) Epoch 12, validation: loss=0.1812, simple_loss=0.274, pruned_loss=0.03284, ctc_loss=0.05683, over 944034.00 frames.
+2024-08-26 21:01:52,247 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 21:01:52,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=146032.0, ans=0.09899494936611666
+2024-08-26 21:02:26,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=146138.66666666666, ans=0.2
+2024-08-26 21:02:37,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=146192.0, ans=0.125
+2024-08-26 21:02:42,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=146245.33333333334, ans=0.0
+2024-08-26 21:02:50,474 INFO [train.py:1114] (0/4) Epoch 12, batch 50, loss[loss=0.1897, simple_loss=0.2481, pruned_loss=0.04839, ctc_loss=0.08598, over 19701.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2809, pruned_loss=0.05423, ctc_loss=0.1023, over 843793.79 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:03:02,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=146352.0, ans=0.0
+2024-08-26 21:03:06,336 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.42 vs. limit=15.0
+2024-08-26 21:03:11,152 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.556e+02 1.742e+02 1.990e+02 3.045e+02, threshold=3.484e+02, percent-clipped=0.0
+2024-08-26 21:03:30,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=146512.0, ans=0.0
+2024-08-26 21:04:07,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=146512.0, ans=0.125
+2024-08-26 21:04:10,876 INFO [train.py:1114] (0/4) Epoch 12, batch 100, loss[loss=0.1905, simple_loss=0.2609, pruned_loss=0.04402, ctc_loss=0.08002, over 19716.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2812, pruned_loss=0.05331, ctc_loss=0.1001, over 1498987.01 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:04:11,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=146565.33333333334, ans=0.0
+2024-08-26 21:04:15,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=146565.33333333334, ans=0.125
+2024-08-26 21:04:42,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=146672.0, ans=0.0
+2024-08-26 21:04:53,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=146725.33333333334, ans=0.1
+2024-08-26 21:04:54,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=146725.33333333334, ans=0.5
+2024-08-26 21:05:04,625 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.55 vs. limit=15.0
+2024-08-26 21:05:05,123 INFO [train.py:1114] (0/4) Epoch 12, batch 150, loss[loss=0.1803, simple_loss=0.2523, pruned_loss=0.03975, ctc_loss=0.07207, over 19734.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2781, pruned_loss=0.05185, ctc_loss=0.09722, over 2027803.58 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:05:25,624 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.421e+02 1.535e+02 1.745e+02 2.429e+02, threshold=3.070e+02, percent-clipped=0.0
+2024-08-26 21:05:29,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=146938.66666666666, ans=0.2
+2024-08-26 21:05:34,263 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:05:34,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=146992.0, ans=0.0
+2024-08-26 21:05:36,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=146992.0, ans=0.125
+2024-08-26 21:05:38,262 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.91 vs. limit=10.0
+2024-08-26 21:05:52,006 INFO [train.py:1114] (0/4) Epoch 12, batch 200, loss[loss=0.2261, simple_loss=0.2913, pruned_loss=0.05862, ctc_loss=0.1093, over 18565.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2765, pruned_loss=0.05127, ctc_loss=0.09627, over 2435789.11 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:05:54,092 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=147098.66666666666, ans=0.125
+2024-08-26 21:06:02,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=147152.0, ans=0.1
+2024-08-26 21:06:22,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.87 vs. limit=6.0
+2024-08-26 21:06:33,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=147312.0, ans=0.0
+2024-08-26 21:06:35,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=147312.0, ans=0.125
+2024-08-26 21:06:35,870 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=147312.0, ans=0.015
+2024-08-26 21:06:38,645 INFO [train.py:1114] (0/4) Epoch 12, batch 250, loss[loss=0.2059, simple_loss=0.285, pruned_loss=0.04626, ctc_loss=0.08554, over 19348.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2765, pruned_loss=0.05069, ctc_loss=0.09543, over 2755129.31 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:06:59,406 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.414e+02 1.495e+02 1.680e+02 4.024e+02, threshold=2.991e+02, percent-clipped=1.0
+2024-08-26 21:06:59,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=147472.0, ans=0.1
+2024-08-26 21:07:00,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=147472.0, ans=0.2
+2024-08-26 21:07:07,077 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=147525.33333333334, ans=0.125
+2024-08-26 21:07:20,652 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=9.78 vs. limit=15.0
+2024-08-26 21:07:26,861 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=147578.66666666666, ans=0.2
+2024-08-26 21:07:34,620 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.39 vs. limit=15.0
+2024-08-26 21:07:35,050 INFO [train.py:1114] (0/4) Epoch 12, batch 300, loss[loss=0.2107, simple_loss=0.282, pruned_loss=0.05093, ctc_loss=0.09364, over 19533.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2758, pruned_loss=0.05049, ctc_loss=0.09483, over 3000376.94 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:07:38,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=147632.0, ans=0.125
+2024-08-26 21:08:30,184 INFO [train.py:1114] (0/4) Epoch 12, batch 350, loss[loss=0.1884, simple_loss=0.2508, pruned_loss=0.04744, ctc_loss=0.0778, over 19738.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.276, pruned_loss=0.05051, ctc_loss=0.09472, over 3190558.33 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:08:36,019 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.48 vs. limit=15.0
+2024-08-26 21:12:05,919 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.26 vs. limit=15.0
+2024-08-26 21:12:10,843 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 1.537e+02 1.863e+02 2.287e+02 4.040e+02, threshold=3.725e+02, percent-clipped=5.0
+2024-08-26 21:12:15,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten.whitening_limit, batch_count=148005.33333333334, ans=15.0
+2024-08-26 21:12:18,540 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=148058.66666666666, ans=0.1
+2024-08-26 21:12:25,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=148058.66666666666, ans=0.0
+2024-08-26 21:12:26,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=148112.0, ans=0.125
+2024-08-26 21:13:47,513 INFO [train.py:1114] (0/4) Epoch 12, batch 400, loss[loss=0.2198, simple_loss=0.2896, pruned_loss=0.05441, ctc_loss=0.1028, over 19522.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2756, pruned_loss=0.0505, ctc_loss=0.09465, over 3341150.43 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-26 21:13:47,739 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:14:15,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=148325.33333333334, ans=0.125
+2024-08-26 21:14:15,864 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=148325.33333333334, ans=0.025
+2024-08-26 21:14:22,567 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=148325.33333333334, ans=0.0
+2024-08-26 21:14:34,560 INFO [train.py:1114] (0/4) Epoch 12, batch 450, loss[loss=0.212, simple_loss=0.2852, pruned_loss=0.04891, ctc_loss=0.1023, over 19609.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2761, pruned_loss=0.0509, ctc_loss=0.09549, over 3450543.39 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:14:53,207 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=148485.33333333334, ans=0.125
+2024-08-26 21:15:05,746 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.502e+02 1.695e+02 2.071e+02 2.894e+02, threshold=3.390e+02, percent-clipped=0.0
+2024-08-26 21:15:16,381 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.48 vs. limit=22.5
+2024-08-26 21:15:21,807 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.94 vs. limit=15.0
+2024-08-26 21:15:24,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=148645.33333333334, ans=0.0
+2024-08-26 21:15:28,929 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=148645.33333333334, ans=0.0
+2024-08-26 21:15:31,500 INFO [train.py:1114] (0/4) Epoch 12, batch 500, loss[loss=0.22, simple_loss=0.2921, pruned_loss=0.05471, ctc_loss=0.09605, over 19667.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.275, pruned_loss=0.05042, ctc_loss=0.09446, over 3546574.60 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:15:34,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=148698.66666666666, ans=0.0
+2024-08-26 21:15:54,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=148805.33333333334, ans=0.125
+2024-08-26 21:16:03,948 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=1.94 vs. limit=15.0
+2024-08-26 21:16:06,055 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.23 vs. limit=12.0
+2024-08-26 21:16:10,406 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=148912.0, ans=0.2
+2024-08-26 21:16:19,315 INFO [train.py:1114] (0/4) Epoch 12, batch 550, loss[loss=0.2243, simple_loss=0.294, pruned_loss=0.05534, ctc_loss=0.1097, over 19334.00 frames. ], tot_loss[loss=0.207, simple_loss=0.275, pruned_loss=0.05054, ctc_loss=0.09481, over 3608185.56 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:16:29,707 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=149018.66666666666, ans=0.125
+2024-08-26 21:16:39,081 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=149072.0, ans=0.0
+2024-08-26 21:16:39,717 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.448e+02 1.617e+02 1.906e+02 3.977e+02, threshold=3.234e+02, percent-clipped=1.0
+2024-08-26 21:16:40,929 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=149072.0, ans=0.125
+2024-08-26 21:16:56,983 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.25 vs. limit=10.0
+2024-08-26 21:17:36,374 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.73 vs. limit=6.0
+2024-08-26 21:17:47,329 INFO [train.py:1114] (0/4) Epoch 12, batch 600, loss[loss=0.2267, simple_loss=0.2966, pruned_loss=0.05804, ctc_loss=0.102, over 19414.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2754, pruned_loss=0.0509, ctc_loss=0.09549, over 3665590.69 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:17:53,207 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.48 vs. limit=15.0
+2024-08-26 21:17:54,154 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.95 vs. limit=15.0
+2024-08-26 21:18:01,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=149285.33333333334, ans=0.025
+2024-08-26 21:18:04,234 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-28000.pt
+2024-08-26 21:18:34,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=149392.0, ans=0.2
+2024-08-26 21:18:40,430 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=6.64 vs. limit=15.0
+2024-08-26 21:18:41,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149445.33333333334, ans=0.1
+2024-08-26 21:18:43,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.58 vs. limit=15.0
+2024-08-26 21:18:44,824 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=149445.33333333334, ans=0.0
+2024-08-26 21:18:46,466 INFO [train.py:1114] (0/4) Epoch 12, batch 650, loss[loss=0.201, simple_loss=0.2773, pruned_loss=0.04567, ctc_loss=0.08356, over 19757.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2745, pruned_loss=0.05042, ctc_loss=0.09451, over 3715431.52 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:18:58,360 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=149498.66666666666, ans=0.1
+2024-08-26 21:19:01,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=149498.66666666666, ans=0.125
+2024-08-26 21:19:06,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=149552.0, ans=0.125
+2024-08-26 21:19:16,418 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.436e+02 1.583e+02 1.844e+02 2.674e+02, threshold=3.165e+02, percent-clipped=0.0
+2024-08-26 21:19:29,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149658.66666666666, ans=0.1
+2024-08-26 21:19:36,490 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=149712.0, ans=0.2
+2024-08-26 21:19:39,708 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.67 vs. limit=15.0
+2024-08-26 21:19:41,522 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=149712.0, ans=0.2
+2024-08-26 21:19:44,345 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_na.min_abs, batch_count=149765.33333333334, ans=0.02
+2024-08-26 21:19:45,029 INFO [train.py:1114] (0/4) Epoch 12, batch 700, loss[loss=0.1929, simple_loss=0.2629, pruned_loss=0.04494, ctc_loss=0.08278, over 19738.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.275, pruned_loss=0.05064, ctc_loss=0.09488, over 3747405.65 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:19:45,361 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=149765.33333333334, ans=0.025
+2024-08-26 21:19:54,844 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.23 vs. limit=15.0
+2024-08-26 21:20:00,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=149818.66666666666, ans=0.09899494936611666
+2024-08-26 21:20:12,095 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149925.33333333334, ans=0.1
+2024-08-26 21:20:26,785 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=149978.66666666666, ans=0.025
+2024-08-26 21:20:27,621 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=149978.66666666666, ans=0.025
+2024-08-26 21:20:31,162 INFO [train.py:1114] (0/4) Epoch 12, batch 750, loss[loss=0.2047, simple_loss=0.2756, pruned_loss=0.04936, ctc_loss=0.08774, over 19491.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2749, pruned_loss=0.05074, ctc_loss=0.09487, over 3774030.89 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:20:37,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=150032.0, ans=0.09899494936611666
+2024-08-26 21:20:43,555 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=150085.33333333334, ans=0.0
+2024-08-26 21:20:51,191 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=150138.66666666666, ans=0.0
+2024-08-26 21:20:51,913 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.592e+02 1.843e+02 2.247e+02 3.979e+02, threshold=3.686e+02, percent-clipped=6.0
+2024-08-26 21:20:57,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=150138.66666666666, ans=0.125
+2024-08-26 21:21:02,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=150192.0, ans=0.125
+2024-08-26 21:21:22,385 INFO [train.py:1114] (0/4) Epoch 12, batch 800, loss[loss=0.2094, simple_loss=0.2722, pruned_loss=0.05367, ctc_loss=0.09821, over 19782.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2751, pruned_loss=0.05109, ctc_loss=0.0956, over 3796126.48 frames. ], batch size: 49, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:21:35,168 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=150352.0, ans=0.0
+2024-08-26 21:21:36,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=150352.0, ans=0.125
+2024-08-26 21:21:39,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=150352.0, ans=0.1
+2024-08-26 21:21:48,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=150405.33333333334, ans=0.125
+2024-08-26 21:21:49,468 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=150405.33333333334, ans=0.125
+2024-08-26 21:22:12,971 INFO [train.py:1114] (0/4) Epoch 12, batch 850, loss[loss=0.2129, simple_loss=0.2811, pruned_loss=0.05203, ctc_loss=0.1015, over 19663.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2744, pruned_loss=0.05074, ctc_loss=0.09471, over 3814370.38 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 16.0
+2024-08-26 21:22:17,852 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=150565.33333333334, ans=0.025
+2024-08-26 21:22:21,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=150618.66666666666, ans=0.0
+2024-08-26 21:22:24,727 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.41 vs. limit=6.0
+2024-08-26 21:22:34,296 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.451e+02 1.599e+02 1.811e+02 2.698e+02, threshold=3.198e+02, percent-clipped=0.0
+2024-08-26 21:22:35,579 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.50 vs. limit=12.0
+2024-08-26 21:22:42,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=150725.33333333334, ans=0.0
+2024-08-26 21:22:46,042 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=150725.33333333334, ans=0.07
+2024-08-26 21:22:49,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.11 vs. limit=22.5
+2024-08-26 21:22:51,067 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:22:53,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=150778.66666666666, ans=0.0
+2024-08-26 21:22:53,838 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=150778.66666666666, ans=0.0
+2024-08-26 21:22:56,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=150778.66666666666, ans=0.035
+2024-08-26 21:23:00,255 INFO [train.py:1114] (0/4) Epoch 12, batch 900, loss[loss=0.1825, simple_loss=0.252, pruned_loss=0.04114, ctc_loss=0.07651, over 19826.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2744, pruned_loss=0.05079, ctc_loss=0.09509, over 3818421.65 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:23:07,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=150832.0, ans=0.125
+2024-08-26 21:23:15,107 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=150885.33333333334, ans=0.025
+2024-08-26 21:23:42,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.85 vs. limit=15.0
+2024-08-26 21:23:47,031 INFO [train.py:1114] (0/4) Epoch 12, batch 950, loss[loss=0.1815, simple_loss=0.251, pruned_loss=0.04075, ctc_loss=0.07609, over 19519.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2748, pruned_loss=0.05095, ctc_loss=0.09553, over 3821068.99 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:23:48,364 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.18 vs. limit=15.0
+2024-08-26 21:23:49,927 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=151098.66666666666, ans=0.0
+2024-08-26 21:24:08,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=151098.66666666666, ans=0.025
+2024-08-26 21:24:10,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=151152.0, ans=0.125
+2024-08-26 21:24:12,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=151152.0, ans=0.95
+2024-08-26 21:24:20,962 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.52 vs. limit=10.0
+2024-08-26 21:24:29,929 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.446e+02 1.609e+02 1.941e+02 6.709e+02, threshold=3.217e+02, percent-clipped=2.0
+2024-08-26 21:24:30,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=151205.33333333334, ans=0.125
+2024-08-26 21:24:37,871 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.33 vs. limit=15.0
+2024-08-26 21:24:56,169 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.90 vs. limit=15.0
+2024-08-26 21:24:57,588 INFO [train.py:1114] (0/4) Epoch 12, batch 1000, loss[loss=0.1883, simple_loss=0.2617, pruned_loss=0.04127, ctc_loss=0.08092, over 19850.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2752, pruned_loss=0.05101, ctc_loss=0.09556, over 3816496.46 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:24:59,126 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.33 vs. limit=15.0
+2024-08-26 21:25:24,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=151418.66666666666, ans=0.125
+2024-08-26 21:25:34,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=151472.0, ans=0.125
+2024-08-26 21:25:59,418 INFO [train.py:1114] (0/4) Epoch 12, batch 1050, loss[loss=0.1921, simple_loss=0.2699, pruned_loss=0.04147, ctc_loss=0.07825, over 19841.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2742, pruned_loss=0.0505, ctc_loss=0.09446, over 3823111.14 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:26:05,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=151632.0, ans=0.125
+2024-08-26 21:26:11,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=151685.33333333334, ans=0.0
+2024-08-26 21:26:13,704 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.36 vs. limit=15.0
+2024-08-26 21:26:20,568 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.384e+02 1.517e+02 1.769e+02 3.938e+02, threshold=3.034e+02, percent-clipped=1.0
+2024-08-26 21:26:21,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=151738.66666666666, ans=0.0
+2024-08-26 21:26:27,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=151792.0, ans=0.0
+2024-08-26 21:26:38,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=151845.33333333334, ans=0.0
+2024-08-26 21:26:43,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=151845.33333333334, ans=0.125
+2024-08-26 21:26:45,785 INFO [train.py:1114] (0/4) Epoch 12, batch 1100, loss[loss=0.1901, simple_loss=0.2593, pruned_loss=0.04412, ctc_loss=0.08146, over 19597.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2739, pruned_loss=0.05029, ctc_loss=0.09383, over 3830721.63 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:26:48,706 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=151898.66666666666, ans=0.0
+2024-08-26 21:26:57,248 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=151952.0, ans=0.2
+2024-08-26 21:27:04,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.41 vs. limit=15.0
+2024-08-26 21:27:05,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=152005.33333333334, ans=0.1
+2024-08-26 21:27:17,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=152058.66666666666, ans=0.125
+2024-08-26 21:27:25,295 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.45 vs. limit=15.0
+2024-08-26 21:27:27,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=152112.0, ans=0.125
+2024-08-26 21:27:27,049 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=152112.0, ans=0.125
+2024-08-26 21:27:34,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=152112.0, ans=0.125
+2024-08-26 21:27:38,458 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=152112.0, ans=0.125
+2024-08-26 21:27:38,862 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.30 vs. limit=15.0
+2024-08-26 21:27:39,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=152112.0, ans=0.0
+2024-08-26 21:27:41,050 INFO [train.py:1114] (0/4) Epoch 12, batch 1150, loss[loss=0.1888, simple_loss=0.2637, pruned_loss=0.04146, ctc_loss=0.07759, over 19581.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.274, pruned_loss=0.05021, ctc_loss=0.09366, over 3829672.44 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:27:44,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=152165.33333333334, ans=0.125
+2024-08-26 21:27:45,197 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:28:02,687 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.513e+02 1.822e+02 2.260e+02 3.131e+02, threshold=3.643e+02, percent-clipped=1.0
+2024-08-26 21:28:02,972 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=152272.0, ans=0.125
+2024-08-26 21:28:03,782 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=152272.0, ans=10.0
+2024-08-26 21:28:07,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=152272.0, ans=0.2
+2024-08-26 21:28:10,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=152325.33333333334, ans=0.125
+2024-08-26 21:28:28,107 INFO [train.py:1114] (0/4) Epoch 12, batch 1200, loss[loss=0.2089, simple_loss=0.2854, pruned_loss=0.04787, ctc_loss=0.09184, over 19837.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2749, pruned_loss=0.05039, ctc_loss=0.09419, over 3826395.84 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
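`grad_scale` in the progress lines is the dynamic loss-scaling factor for mixed-precision training: it doubles from 16.0 to 32.0 at this point after a stretch of stable steps, and later backs off (it is down to 8.0 by batch 1900 before recovering), which is the usual signature of a scaler halving on overflowing fp16 gradients and growing again once steps stay finite. The standard PyTorch mechanism looks like the generic loop below; this is ordinary `GradScaler` usage, not the recipe's actual `train.py`.

```python
import torch

scaler = torch.cuda.amp.GradScaler(init_scale=16.0)   # generic AMP usage

def training_step(model, optimizer, batch):
    optimizer.zero_grad(set_to_none=True)
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        loss = model(batch)              # forward pass in reduced precision
    scaler.scale(loss).backward()        # scale up so fp16 grads don't underflow
    scaler.step(optimizer)               # unscales first; skips the step on inf/nan
    scaler.update()                      # halves the scale on overflow, grows it when stable
    return loss.detach()
```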
+2024-08-26 21:28:31,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=152432.0, ans=0.2
+2024-08-26 21:28:32,112 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=152432.0, ans=0.2
+2024-08-26 21:28:40,630 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=152485.33333333334, ans=0.125
+2024-08-26 21:28:58,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=152592.0, ans=0.1
+2024-08-26 21:29:15,262 INFO [train.py:1114] (0/4) Epoch 12, batch 1250, loss[loss=0.2253, simple_loss=0.294, pruned_loss=0.05759, ctc_loss=0.1033, over 19529.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2754, pruned_loss=0.05036, ctc_loss=0.09404, over 3844268.22 frames. ], batch size: 61, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:29:27,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=152752.0, ans=0.1
+2024-08-26 21:29:36,830 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.211e+02 1.442e+02 1.596e+02 2.011e+02 3.434e+02, threshold=3.192e+02, percent-clipped=0.0
+2024-08-26 21:29:49,570 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.29 vs. limit=15.0
+2024-08-26 21:30:07,695 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.53 vs. limit=15.0
+2024-08-26 21:30:08,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=152912.0, ans=0.125
+2024-08-26 21:30:17,223 INFO [train.py:1114] (0/4) Epoch 12, batch 1300, loss[loss=0.22, simple_loss=0.2839, pruned_loss=0.05664, ctc_loss=0.1074, over 18869.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2747, pruned_loss=0.05025, ctc_loss=0.09399, over 3847329.36 frames. ], batch size: 76, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:30:44,276 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=153072.0, ans=0.125
+2024-08-26 21:31:03,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=153178.66666666666, ans=0.07
+2024-08-26 21:31:06,497 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:31:08,962 INFO [train.py:1114] (0/4) Epoch 12, batch 1350, loss[loss=0.2059, simple_loss=0.2814, pruned_loss=0.04698, ctc_loss=0.09098, over 19756.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2746, pruned_loss=0.05022, ctc_loss=0.09389, over 3858031.15 frames. ], batch size: 54, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:31:10,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=153232.0, ans=0.125
+2024-08-26 21:31:29,983 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.104e+02 1.467e+02 1.650e+02 2.044e+02 3.234e+02, threshold=3.299e+02, percent-clipped=1.0
+2024-08-26 21:31:34,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=153338.66666666666, ans=0.1
+2024-08-26 21:31:34,319 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.22 vs. limit=6.0
+2024-08-26 21:31:36,101 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:31:38,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=153392.0, ans=0.025
+2024-08-26 21:31:45,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=153445.33333333334, ans=0.0
+2024-08-26 21:31:55,315 INFO [train.py:1114] (0/4) Epoch 12, batch 1400, loss[loss=0.18, simple_loss=0.2466, pruned_loss=0.04147, ctc_loss=0.07632, over 19661.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2743, pruned_loss=0.05006, ctc_loss=0.0935, over 3864670.14 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
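The learning rate ticks down from 1.25e-02 to 1.24e-02 here and keeps decaying smoothly with `batch_count`, with a visibly larger drop at each epoch boundary (it lands at 1.18e-02 when epoch 13 starts later in this log). That behaviour is consistent with a scheduler that decays in both the batch and epoch indices, such as icefall's Eden schedule; below is a hedged sketch of that functional form with placeholder constants, since the recipe's actual `base_lr` / `lr_batches` / `lr_epochs` are not visible in the log.

```python
def eden_lr(base_lr: float, batch: float, epoch: float,
            lr_batches: float = 7500.0, lr_epochs: float = 3.5) -> float:
    """Inverse-quarter-power decay in both batch and epoch, in the style of
    icefall's Eden scheduler. All constants here are placeholders."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor

# The epoch-boundary drop seen in this log (1.22e-02 -> 1.18e-02, i.e. ~0.967)
# is close to the epoch factor's ratio between epochs 12 and 13:
print(eden_lr(0.05, 153000, 13) / eden_lr(0.05, 153000, 12))   # ~0.96
```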
+2024-08-26 21:32:02,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=153498.66666666666, ans=0.125
+2024-08-26 21:32:03,788 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=153552.0, ans=0.09899494936611666
+2024-08-26 21:32:14,089 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=153605.33333333334, ans=0.1
+2024-08-26 21:32:44,531 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.37 vs. limit=10.0
+2024-08-26 21:32:47,925 INFO [train.py:1114] (0/4) Epoch 12, batch 1450, loss[loss=0.2282, simple_loss=0.2925, pruned_loss=0.06059, ctc_loss=0.107, over 19681.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2749, pruned_loss=0.05047, ctc_loss=0.09424, over 3862329.37 frames. ], batch size: 63, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:33:08,038 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.53 vs. limit=6.0
+2024-08-26 21:33:12,065 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.245e+02 1.443e+02 1.618e+02 1.909e+02 2.759e+02, threshold=3.236e+02, percent-clipped=0.0
+2024-08-26 21:33:16,990 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.76 vs. limit=22.5
+2024-08-26 21:33:33,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=153978.66666666666, ans=0.125
+2024-08-26 21:33:37,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=153978.66666666666, ans=0.125
+2024-08-26 21:33:42,291 INFO [train.py:1114] (0/4) Epoch 12, batch 1500, loss[loss=0.2115, simple_loss=0.2808, pruned_loss=0.05161, ctc_loss=0.09731, over 19579.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2755, pruned_loss=0.05085, ctc_loss=0.09494, over 3861668.37 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:33:55,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=154085.33333333334, ans=0.0
+2024-08-26 21:34:02,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=154138.66666666666, ans=0.125
+2024-08-26 21:34:03,995 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=154138.66666666666, ans=0.125
+2024-08-26 21:34:06,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=154138.66666666666, ans=0.125
+2024-08-26 21:34:06,235 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=154138.66666666666, ans=0.1
+2024-08-26 21:34:06,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=154138.66666666666, ans=0.125
+2024-08-26 21:34:17,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=154192.0, ans=0.125
+2024-08-26 21:34:29,573 INFO [train.py:1114] (0/4) Epoch 12, batch 1550, loss[loss=0.2235, simple_loss=0.2932, pruned_loss=0.05718, ctc_loss=0.09853, over 19607.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2757, pruned_loss=0.05112, ctc_loss=0.09546, over 3846894.07 frames. ], batch size: 60, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:34:42,470 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.34 vs. limit=12.0
+2024-08-26 21:34:51,386 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.237e+02 1.431e+02 1.666e+02 1.890e+02 5.087e+02, threshold=3.332e+02, percent-clipped=2.0
+2024-08-26 21:34:54,696 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=154405.33333333334, ans=0.0
+2024-08-26 21:34:56,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=154405.33333333334, ans=0.015
+2024-08-26 21:35:15,387 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=154512.0, ans=0.1
+2024-08-26 21:35:15,448 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=154512.0, ans=0.125
+2024-08-26 21:35:17,029 INFO [train.py:1114] (0/4) Epoch 12, batch 1600, loss[loss=0.2216, simple_loss=0.2915, pruned_loss=0.05523, ctc_loss=0.1029, over 19819.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2757, pruned_loss=0.05118, ctc_loss=0.09561, over 3835404.97 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:35:26,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=154618.66666666666, ans=10.0
+2024-08-26 21:35:38,144 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.01 vs. limit=22.5
+2024-08-26 21:35:39,153 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.67 vs. limit=15.0
+2024-08-26 21:35:49,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=154725.33333333334, ans=0.07
+2024-08-26 21:36:03,047 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=154778.66666666666, ans=0.0
+2024-08-26 21:36:08,914 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.93 vs. limit=22.5
+2024-08-26 21:36:11,255 INFO [train.py:1114] (0/4) Epoch 12, batch 1650, loss[loss=0.2153, simple_loss=0.2925, pruned_loss=0.05036, ctc_loss=0.09375, over 19658.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2757, pruned_loss=0.05128, ctc_loss=0.0958, over 3832233.24 frames. ], batch size: 59, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:36:21,753 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=154885.33333333334, ans=0.125
+2024-08-26 21:36:23,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=154885.33333333334, ans=0.125
+2024-08-26 21:36:34,771 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.420e+02 1.592e+02 1.938e+02 3.625e+02, threshold=3.184e+02, percent-clipped=1.0
+2024-08-26 21:36:36,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=154938.66666666666, ans=0.125
+2024-08-26 21:36:45,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=154992.0, ans=0.125
+2024-08-26 21:36:51,121 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=155045.33333333334, ans=0.1
+2024-08-26 21:37:00,216 INFO [train.py:1114] (0/4) Epoch 12, batch 1700, loss[loss=0.1877, simple_loss=0.2435, pruned_loss=0.0489, ctc_loss=0.08536, over 19678.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2752, pruned_loss=0.05084, ctc_loss=0.09477, over 3846671.64 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:37:12,527 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.07 vs. limit=15.0
+2024-08-26 21:37:21,219 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=155205.33333333334, ans=0.1
+2024-08-26 21:37:41,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=155312.0, ans=0.125
+2024-08-26 21:37:44,941 INFO [train.py:1114] (0/4) Epoch 12, batch 1750, loss[loss=0.1904, simple_loss=0.2514, pruned_loss=0.04702, ctc_loss=0.08841, over 19690.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2743, pruned_loss=0.05019, ctc_loss=0.09385, over 3852665.87 frames. ], batch size: 45, lr: 1.24e-02, grad_scale: 16.0
+2024-08-26 21:38:06,161 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.409e+02 1.600e+02 1.878e+02 3.182e+02, threshold=3.201e+02, percent-clipped=0.0
+2024-08-26 21:38:07,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=155472.0, ans=0.125
+2024-08-26 21:38:09,221 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.13 vs. limit=10.0
+2024-08-26 21:38:21,926 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=155578.66666666666, ans=0.125
+2024-08-26 21:38:24,931 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.74 vs. limit=15.0
+2024-08-26 21:38:28,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=155632.0, ans=0.2
+2024-08-26 21:38:28,996 INFO [train.py:1114] (0/4) Epoch 12, batch 1800, loss[loss=0.2204, simple_loss=0.2924, pruned_loss=0.05418, ctc_loss=0.1003, over 19602.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2749, pruned_loss=0.05045, ctc_loss=0.09438, over 3853341.67 frames. ], batch size: 55, lr: 1.24e-02, grad_scale: 16.0
+2024-08-26 21:38:49,376 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=155738.66666666666, ans=0.125
+2024-08-26 21:38:57,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=155792.0, ans=0.2
+2024-08-26 21:38:59,876 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:39:03,393 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=155845.33333333334, ans=0.2
+2024-08-26 21:39:04,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=155845.33333333334, ans=0.125
+2024-08-26 21:39:12,706 INFO [train.py:1114] (0/4) Epoch 12, batch 1850, loss[loss=0.2151, simple_loss=0.2825, pruned_loss=0.05375, ctc_loss=0.1005, over 19579.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2741, pruned_loss=0.04998, ctc_loss=0.09337, over 3854769.03 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 16.0
+2024-08-26 21:39:34,658 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.542e+02 1.764e+02 2.176e+02 3.980e+02, threshold=3.528e+02, percent-clipped=3.0
+2024-08-26 21:39:36,597 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=156005.33333333334, ans=0.2
+2024-08-26 21:39:42,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=156058.66666666666, ans=0.2
+2024-08-26 21:39:48,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=156112.0, ans=0.0
+2024-08-26 21:39:56,684 INFO [train.py:1114] (0/4) Epoch 12, batch 1900, loss[loss=0.2052, simple_loss=0.279, pruned_loss=0.04719, ctc_loss=0.09233, over 19655.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2749, pruned_loss=0.05024, ctc_loss=0.09376, over 3859826.76 frames. ], batch size: 59, lr: 1.23e-02, grad_scale: 8.0
+2024-08-26 21:40:24,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=156325.33333333334, ans=0.125
+2024-08-26 21:40:27,119 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=156325.33333333334, ans=0.0
+2024-08-26 21:40:40,037 INFO [train.py:1114] (0/4) Epoch 12, batch 1950, loss[loss=0.1935, simple_loss=0.266, pruned_loss=0.04403, ctc_loss=0.08231, over 19587.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2758, pruned_loss=0.05021, ctc_loss=0.09371, over 3869081.57 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 8.0
+2024-08-26 21:40:40,472 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.17 vs. limit=15.0
+2024-08-26 21:40:47,660 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.23 vs. limit=15.0
+2024-08-26 21:40:49,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=156485.33333333334, ans=0.125
+2024-08-26 21:40:50,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=156485.33333333334, ans=0.1
+2024-08-26 21:40:56,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=156538.66666666666, ans=0.125
+2024-08-26 21:41:01,728 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 1.443e+02 1.619e+02 1.881e+02 3.638e+02, threshold=3.238e+02, percent-clipped=1.0
+2024-08-26 21:41:05,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=156592.0, ans=0.125
+2024-08-26 21:41:17,401 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=11.41 vs. limit=15.0
+2024-08-26 21:41:25,580 INFO [train.py:1114] (0/4) Epoch 12, batch 2000, loss[loss=0.1897, simple_loss=0.2452, pruned_loss=0.04993, ctc_loss=0.0859, over 19697.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2763, pruned_loss=0.05061, ctc_loss=0.09445, over 3855269.57 frames. ], batch size: 45, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:41:38,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=156752.0, ans=0.125
+2024-08-26 21:42:10,401 INFO [train.py:1114] (0/4) Epoch 12, batch 2050, loss[loss=0.1843, simple_loss=0.2466, pruned_loss=0.04442, ctc_loss=0.08275, over 19728.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2753, pruned_loss=0.05045, ctc_loss=0.09416, over 3851162.19 frames. ], batch size: 47, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:42:12,674 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.63 vs. limit=22.5
+2024-08-26 21:42:24,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=157018.66666666666, ans=0.2
+2024-08-26 21:42:33,045 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.456e+02 1.628e+02 1.934e+02 3.317e+02, threshold=3.256e+02, percent-clipped=1.0
+2024-08-26 21:42:47,520 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.15 vs. limit=6.0
+2024-08-26 21:42:51,131 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.18 vs. limit=15.0
+2024-08-26 21:42:51,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=157178.66666666666, ans=0.2
+2024-08-26 21:42:54,290 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 21:42:54,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 21:42:55,850 INFO [train.py:1114] (0/4) Epoch 12, batch 2100, loss[loss=0.2091, simple_loss=0.2783, pruned_loss=0.05115, ctc_loss=0.09406, over 19782.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2745, pruned_loss=0.04991, ctc_loss=0.09321, over 3859765.02 frames. ], batch size: 54, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:43:11,557 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=157285.33333333334, ans=0.125
+2024-08-26 21:43:22,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157392.0, ans=0.1
+2024-08-26 21:44:02,275 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.97 vs. limit=15.0
+2024-08-26 21:44:03,736 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=157445.33333333334, ans=0.0
+2024-08-26 21:44:06,214 INFO [train.py:1114] (0/4) Epoch 12, batch 2150, loss[loss=0.2013, simple_loss=0.268, pruned_loss=0.04918, ctc_loss=0.09067, over 19584.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.274, pruned_loss=0.04971, ctc_loss=0.09265, over 3870387.59 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:44:14,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=157552.0, ans=0.1
+2024-08-26 21:44:21,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=157552.0, ans=0.125
+2024-08-26 21:44:24,638 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=157605.33333333334, ans=0.125
+2024-08-26 21:44:27,883 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.226e+02 1.483e+02 1.683e+02 2.213e+02 4.687e+02, threshold=3.365e+02, percent-clipped=1.0
+2024-08-26 21:44:50,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=157712.0, ans=0.125
+2024-08-26 21:45:26,353 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:45:26,962 INFO [train.py:1114] (0/4) Epoch 12, batch 2200, loss[loss=0.2148, simple_loss=0.2917, pruned_loss=0.04996, ctc_loss=0.09489, over 19582.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.274, pruned_loss=0.04976, ctc_loss=0.0927, over 3868343.65 frames. ], batch size: 57, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:45:49,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=157872.0, ans=0.0
+2024-08-26 21:46:00,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=157925.33333333334, ans=0.125
+2024-08-26 21:46:01,244 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.25 vs. limit=15.0
+2024-08-26 21:46:01,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=157978.66666666666, ans=0.125
+2024-08-26 21:46:08,716 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=157978.66666666666, ans=0.1
+2024-08-26 21:46:10,351 INFO [train.py:1114] (0/4) Epoch 12, batch 2250, loss[loss=0.1934, simple_loss=0.2771, pruned_loss=0.03904, ctc_loss=0.07888, over 19636.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2738, pruned_loss=0.04964, ctc_loss=0.09267, over 3868134.48 frames. ], batch size: 55, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:46:15,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=158032.0, ans=0.025
+2024-08-26 21:46:31,822 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.542e+02 1.805e+02 2.126e+02 6.638e+02, threshold=3.611e+02, percent-clipped=1.0
+2024-08-26 21:46:41,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=158192.0, ans=0.0
+2024-08-26 21:46:53,558 INFO [train.py:1114] (0/4) Epoch 12, batch 2300, loss[loss=0.185, simple_loss=0.2562, pruned_loss=0.04055, ctc_loss=0.08169, over 19490.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2733, pruned_loss=0.04982, ctc_loss=0.09302, over 3862323.12 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:46:58,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=158298.66666666666, ans=0.025
+2024-08-26 21:47:09,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=158352.0, ans=0.125
+2024-08-26 21:47:15,541 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.68 vs. limit=22.5
+2024-08-26 21:47:25,592 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=158458.66666666666, ans=0.1
+2024-08-26 21:47:36,484 INFO [train.py:1114] (0/4) Epoch 12, batch 2350, loss[loss=0.226, simple_loss=0.2944, pruned_loss=0.05835, ctc_loss=0.1023, over 19671.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2735, pruned_loss=0.05002, ctc_loss=0.09347, over 3864821.42 frames. ], batch size: 63, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:47:44,453 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=158565.33333333334, ans=0.125
+2024-08-26 21:47:58,859 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.457e+02 1.679e+02 1.938e+02 3.188e+02, threshold=3.358e+02, percent-clipped=0.0
+2024-08-26 21:48:10,680 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158725.33333333334, ans=0.1
+2024-08-26 21:48:14,873 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=158778.66666666666, ans=0.2
+2024-08-26 21:48:20,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158832.0, ans=0.1
+2024-08-26 21:48:21,587 INFO [train.py:1114] (0/4) Epoch 12, batch 2400, loss[loss=0.2269, simple_loss=0.2922, pruned_loss=0.05828, ctc_loss=0.1125, over 19390.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2754, pruned_loss=0.05091, ctc_loss=0.09501, over 3859457.85 frames. ], batch size: 67, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 21:48:32,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=158832.0, ans=0.0
+2024-08-26 21:48:32,850 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=158885.33333333334, ans=0.025
+2024-08-26 21:49:25,115 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.10 vs. limit=22.5
+2024-08-26 21:49:28,526 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=158992.0, ans=0.0
+2024-08-26 21:49:29,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158992.0, ans=0.1
+2024-08-26 21:49:33,485 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.32 vs. limit=15.0
+2024-08-26 21:49:36,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=159045.33333333334, ans=0.125
+2024-08-26 21:49:42,231 INFO [train.py:1114] (0/4) Epoch 12, batch 2450, loss[loss=0.2766, simple_loss=0.3147, pruned_loss=0.08497, ctc_loss=0.1712, over 13431.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2794, pruned_loss=0.05382, ctc_loss=0.1005, over 3736242.85 frames. ], batch size: 141, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 21:49:43,681 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.56 vs. limit=22.5
+2024-08-26 21:49:52,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=159152.0, ans=0.1
+2024-08-26 21:49:54,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=159152.0, ans=0.2
+2024-08-26 21:49:57,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff3.min_abs, batch_count=159152.0, ans=0.2
+2024-08-26 21:50:05,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=159205.33333333334, ans=0.0
+2024-08-26 21:50:05,689 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.611e+02 1.857e+02 2.069e+02 3.042e+02, threshold=3.714e+02, percent-clipped=0.0
+2024-08-26 21:50:08,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.20 vs. limit=12.0
+2024-08-26 21:50:18,211 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-12.pt
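At the end of epoch 12, `checkpoint.py` saves the complete training state into the experiment directory (`.../zipformer/ctc/exp/epoch-12.pt`) so the run can be resumed or its epochs averaged later. A generic sketch of what such an epoch checkpoint typically bundles is below; the key names are illustrative, not checkpoint.py's exact schema.

```python
import torch

def save_epoch_checkpoint(path, model, optimizer, scheduler, scaler, epoch, batch_count):
    # Illustrative contents of an epoch checkpoint; the real checkpoint.py
    # schema may differ (it may also store sampler state, params, etc.).
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "scheduler": scheduler.state_dict(),
            "grad_scaler": scaler.state_dict(),
            "epoch": epoch,
            "batch_count": batch_count,
        },
        path,
    )

# e.g. save_epoch_checkpoint("exp/epoch-12.pt", model, optimizer, scheduler,
#                            scaler, epoch=12, batch_count=...)
```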
+2024-08-26 21:51:14,812 INFO [train.py:1114] (0/4) Epoch 13, batch 0, loss[loss=0.1753, simple_loss=0.2462, pruned_loss=0.0377, ctc_loss=0.07276, over 19800.00 frames. ], tot_loss[loss=0.1753, simple_loss=0.2462, pruned_loss=0.0377, ctc_loss=0.07276, over 19800.00 frames. ], batch size: 49, lr: 1.18e-02, grad_scale: 16.0
+2024-08-26 21:51:14,813 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 21:51:27,606 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.5.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.8999, 2.4765, 3.9194, 5.2323], device='cuda:0')
+2024-08-26 21:51:28,894 INFO [train.py:1146] (0/4) Epoch 13, validation: loss=0.1795, simple_loss=0.2723, pruned_loss=0.03226, ctc_loss=0.05568, over 944034.00 frames.
+2024-08-26 21:51:28,894 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
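Epoch 13 opens with a full validation pass: the model is evaluated on a fixed held-out set (944034 frames here), an attention-weights entropy diagnostic is printed from `zipformer.py`, and peak GPU memory on this rank (13201MB) is reported before training resumes. Validation runs without gradient tracking; a generic sketch follows, assuming a `(loss_sum, num_frames)` batch interface that is not the recipe's literal API.

```python
import torch

@torch.no_grad()
def compute_validation_loss(model, valid_loader):
    """Generic eval-loop sketch; the real train.py tracks several components
    (simple_loss / pruned_loss / ctc_loss) with a frame-weighted tracker."""
    model.eval()
    total_loss, total_frames = 0.0, 0.0
    for batch in valid_loader:
        loss_sum, num_frames = model(batch)   # assumed interface, see lead-in
        total_loss += float(loss_sum)
        total_frames += float(num_frames)
    model.train()
    return total_loss / total_frames          # per-frame validation loss
```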
+2024-08-26 21:51:30,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=159306.66666666666, ans=0.0
+2024-08-26 21:51:31,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=159306.66666666666, ans=0.0
+2024-08-26 21:51:33,056 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.74 vs. limit=15.0
+2024-08-26 21:51:41,899 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=159360.0, ans=0.0
+2024-08-26 21:51:50,501 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.59 vs. limit=15.0
+2024-08-26 21:52:03,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=159466.66666666666, ans=0.125
+2024-08-26 21:52:06,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=159466.66666666666, ans=0.125
+2024-08-26 21:52:18,719 INFO [train.py:1114] (0/4) Epoch 13, batch 50, loss[loss=0.1633, simple_loss=0.2369, pruned_loss=0.03251, ctc_loss=0.0618, over 19681.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2754, pruned_loss=0.05073, ctc_loss=0.09541, over 844325.49 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:52:19,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=159573.33333333334, ans=0.2
+2024-08-26 21:52:37,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=159626.66666666666, ans=0.1
+2024-08-26 21:52:38,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=159626.66666666666, ans=0.125
+2024-08-26 21:52:42,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=159680.0, ans=0.0
+2024-08-26 21:52:50,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=159733.33333333334, ans=0.2
+2024-08-26 21:52:56,397 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.509e+02 1.748e+02 2.087e+02 2.763e+02, threshold=3.495e+02, percent-clipped=0.0
+2024-08-26 21:53:03,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=159786.66666666666, ans=0.125
+2024-08-26 21:53:07,757 INFO [train.py:1114] (0/4) Epoch 13, batch 100, loss[loss=0.1815, simple_loss=0.2515, pruned_loss=0.04101, ctc_loss=0.07357, over 19711.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2764, pruned_loss=0.05039, ctc_loss=0.09494, over 1498893.67 frames. ], batch size: 51, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:53:08,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=159840.0, ans=0.0
+2024-08-26 21:53:15,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=159840.0, ans=0.125
+2024-08-26 21:53:23,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=159893.33333333334, ans=0.0
+2024-08-26 21:53:27,286 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=159893.33333333334, ans=0.125
+2024-08-26 21:53:39,905 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.00 vs. limit=15.0
+2024-08-26 21:53:42,490 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=160000.0, ans=0.1
+2024-08-26 21:53:46,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=160000.0, ans=0.125
+2024-08-26 21:54:19,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 21:54:20,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 21:54:20,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=160053.33333333334, ans=0.0
+2024-08-26 21:54:22,687 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=160106.66666666666, ans=0.125
+2024-08-26 21:54:23,454 INFO [train.py:1114] (0/4) Epoch 13, batch 150, loss[loss=0.1914, simple_loss=0.2582, pruned_loss=0.04659, ctc_loss=0.07863, over 19717.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2734, pruned_loss=0.04927, ctc_loss=0.09255, over 2027521.62 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:54:23,596 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160106.66666666666, ans=0.1
+2024-08-26 21:54:50,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=160213.33333333334, ans=0.125
+2024-08-26 21:54:59,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=160266.66666666666, ans=0.0
+2024-08-26 21:55:02,354 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.397e+02 1.535e+02 1.726e+02 2.735e+02, threshold=3.069e+02, percent-clipped=0.0
+2024-08-26 21:55:13,385 INFO [train.py:1114] (0/4) Epoch 13, batch 200, loss[loss=0.2409, simple_loss=0.3019, pruned_loss=0.06578, ctc_loss=0.1206, over 18324.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2727, pruned_loss=0.04898, ctc_loss=0.09186, over 2435886.34 frames. ], batch size: 85, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:55:15,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=160373.33333333334, ans=0.125
+2024-08-26 21:55:20,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=160373.33333333334, ans=0.125
+2024-08-26 21:55:26,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=160426.66666666666, ans=0.125
+2024-08-26 21:55:33,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=160480.0, ans=0.5
+2024-08-26 21:55:35,486 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=160480.0, ans=0.0
+2024-08-26 21:55:47,064 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.39 vs. limit=22.5
+2024-08-26 21:55:50,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.92 vs. limit=15.0
+2024-08-26 21:55:55,657 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.28 vs. limit=15.0
+2024-08-26 21:56:19,213 INFO [train.py:1114] (0/4) Epoch 13, batch 250, loss[loss=0.2023, simple_loss=0.2732, pruned_loss=0.04827, ctc_loss=0.08715, over 19389.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2722, pruned_loss=0.0488, ctc_loss=0.0913, over 2755928.06 frames. ], batch size: 67, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:56:31,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=160693.33333333334, ans=0.0
+2024-08-26 21:56:57,701 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.442e+02 1.721e+02 2.190e+02 3.294e+02, threshold=3.441e+02, percent-clipped=2.0
+2024-08-26 21:56:59,023 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.81 vs. limit=15.0
+2024-08-26 21:56:59,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=160853.33333333334, ans=0.05
+2024-08-26 21:57:07,799 INFO [train.py:1114] (0/4) Epoch 13, batch 300, loss[loss=0.2129, simple_loss=0.2883, pruned_loss=0.05069, ctc_loss=0.09013, over 19506.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2725, pruned_loss=0.04893, ctc_loss=0.09128, over 3000371.21 frames. ], batch size: 61, lr: 1.17e-02, grad_scale: 8.0
+2024-08-26 21:57:08,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=160906.66666666666, ans=0.95
+2024-08-26 21:57:13,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=160906.66666666666, ans=0.125
+2024-08-26 21:57:14,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=160906.66666666666, ans=0.07
+2024-08-26 21:57:25,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=161013.33333333334, ans=0.125
+2024-08-26 21:57:50,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=161120.0, ans=0.125
+2024-08-26 21:57:55,091 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.71 vs. limit=6.0
+2024-08-26 21:57:55,510 INFO [train.py:1114] (0/4) Epoch 13, batch 350, loss[loss=0.1883, simple_loss=0.2443, pruned_loss=0.04864, ctc_loss=0.08739, over 19781.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2728, pruned_loss=0.04916, ctc_loss=0.0916, over 3190650.97 frames. ], batch size: 48, lr: 1.17e-02, grad_scale: 8.0
+2024-08-26 21:58:10,833 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.68 vs. limit=15.0
+2024-08-26 21:58:11,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=161226.66666666666, ans=0.0
+2024-08-26 21:58:33,157 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.400e+02 1.583e+02 1.867e+02 2.908e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-08-26 21:58:33,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=161386.66666666666, ans=10.0
+2024-08-26 21:58:37,342 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.60 vs. limit=15.0
+2024-08-26 21:58:39,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=161386.66666666666, ans=0.125
+2024-08-26 21:58:43,183 INFO [train.py:1114] (0/4) Epoch 13, batch 400, loss[loss=0.2133, simple_loss=0.2819, pruned_loss=0.05353, ctc_loss=0.09392, over 19512.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2723, pruned_loss=0.04893, ctc_loss=0.0912, over 3342951.10 frames. ], batch size: 54, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:58:55,949 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.68 vs. limit=15.0
+2024-08-26 21:58:58,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=161493.33333333334, ans=0.125
+2024-08-26 21:58:59,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=161493.33333333334, ans=0.95
+2024-08-26 21:59:01,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=161493.33333333334, ans=0.025
+2024-08-26 21:59:05,089 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.02 vs. limit=22.5
+2024-08-26 21:59:18,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=161600.0, ans=0.2
+2024-08-26 21:59:19,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=161600.0, ans=0.1
+2024-08-26 21:59:31,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=161706.66666666666, ans=0.0
+2024-08-26 21:59:32,232 INFO [train.py:1114] (0/4) Epoch 13, batch 450, loss[loss=0.1999, simple_loss=0.2813, pruned_loss=0.04285, ctc_loss=0.08182, over 19616.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2732, pruned_loss=0.04928, ctc_loss=0.09169, over 3451082.16 frames. ], batch size: 55, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:59:32,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=161706.66666666666, ans=0.0
+2024-08-26 21:59:51,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=161760.0, ans=0.125
+2024-08-26 21:59:52,261 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=161813.33333333334, ans=0.1
+2024-08-26 21:59:54,123 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=161813.33333333334, ans=0.125
+2024-08-26 21:59:56,173 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.35 vs. limit=10.0
+2024-08-26 22:00:01,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=161866.66666666666, ans=0.1
+2024-08-26 22:00:10,465 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.228e+02 1.449e+02 1.659e+02 1.894e+02 3.083e+02, threshold=3.319e+02, percent-clipped=0.0
+2024-08-26 22:00:20,539 INFO [train.py:1114] (0/4) Epoch 13, batch 500, loss[loss=0.218, simple_loss=0.2862, pruned_loss=0.05476, ctc_loss=0.1008, over 19664.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2723, pruned_loss=0.0489, ctc_loss=0.09107, over 3546593.79 frames. ], batch size: 63, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 22:00:32,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=162026.66666666666, ans=0.1
+2024-08-26 22:00:38,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=162026.66666666666, ans=0.2
+2024-08-26 22:00:51,341 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=162133.33333333334, ans=0.2
+2024-08-26 22:01:03,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=162186.66666666666, ans=0.0
+2024-08-26 22:01:07,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=162186.66666666666, ans=0.125
+2024-08-26 22:01:07,210 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=162186.66666666666, ans=0.1
+2024-08-26 22:01:10,495 INFO [train.py:1114] (0/4) Epoch 13, batch 550, loss[loss=0.2224, simple_loss=0.2922, pruned_loss=0.05588, ctc_loss=0.102, over 19314.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2725, pruned_loss=0.04911, ctc_loss=0.09153, over 3608138.28 frames. ], batch size: 71, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 22:01:17,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=162240.0, ans=0.125
+2024-08-26 22:01:32,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=162346.66666666666, ans=0.2
+2024-08-26 22:01:44,247 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=162400.0, ans=0.0
+2024-08-26 22:01:59,750 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 1.555e+02 1.782e+02 2.360e+02 4.088e+02, threshold=3.564e+02, percent-clipped=3.0
+2024-08-26 22:02:00,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=162453.33333333334, ans=0.0
+2024-08-26 22:02:10,198 INFO [train.py:1114] (0/4) Epoch 13, batch 600, loss[loss=0.2093, simple_loss=0.2878, pruned_loss=0.04718, ctc_loss=0.09094, over 19428.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2725, pruned_loss=0.04889, ctc_loss=0.09118, over 3665328.45 frames. ], batch size: 67, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:02:14,169 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:02:15,813 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=162506.66666666666, ans=0.025
+2024-08-26 22:02:16,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=162506.66666666666, ans=0.1
+2024-08-26 22:02:42,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=162666.66666666666, ans=0.1
+2024-08-26 22:02:43,985 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=162666.66666666666, ans=0.1
+2024-08-26 22:02:45,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=162666.66666666666, ans=0.125
+2024-08-26 22:02:48,429 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=162720.0, ans=0.125
+2024-08-26 22:02:52,109 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=162720.0, ans=0.125
+2024-08-26 22:02:57,644 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:02:58,290 INFO [train.py:1114] (0/4) Epoch 13, batch 650, loss[loss=0.1921, simple_loss=0.27, pruned_loss=0.04131, ctc_loss=0.07878, over 19748.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2719, pruned_loss=0.0485, ctc_loss=0.09041, over 3715452.02 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:03:01,346 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=162773.33333333334, ans=0.125
+2024-08-26 22:03:27,736 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=162826.66666666666, ans=0.125
+2024-08-26 22:03:47,181 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=162933.33333333334, ans=0.025
+2024-08-26 22:03:47,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=162933.33333333334, ans=0.5
+2024-08-26 22:03:57,399 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.372e+02 1.512e+02 1.802e+02 3.637e+02, threshold=3.024e+02, percent-clipped=1.0
+2024-08-26 22:04:09,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=163040.0, ans=0.125
+2024-08-26 22:04:09,866 INFO [train.py:1114] (0/4) Epoch 13, batch 700, loss[loss=0.1803, simple_loss=0.2598, pruned_loss=0.03671, ctc_loss=0.0688, over 19719.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2723, pruned_loss=0.04851, ctc_loss=0.09051, over 3748105.30 frames. ], batch size: 51, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:04:42,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=163146.66666666666, ans=0.025
+2024-08-26 22:04:45,375 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=163200.0, ans=10.0
+2024-08-26 22:04:58,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=163253.33333333334, ans=0.125
+2024-08-26 22:05:04,307 INFO [train.py:1114] (0/4) Epoch 13, batch 750, loss[loss=0.2036, simple_loss=0.2851, pruned_loss=0.04326, ctc_loss=0.08909, over 19514.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2721, pruned_loss=0.04853, ctc_loss=0.09056, over 3775227.81 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:05:09,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=163306.66666666666, ans=0.125
+2024-08-26 22:05:12,155 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.98 vs. limit=12.0
+2024-08-26 22:05:15,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=163360.0, ans=0.125
+2024-08-26 22:05:23,117 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.07 vs. limit=15.0
+2024-08-26 22:05:33,990 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=163466.66666666666, ans=0.125
+2024-08-26 22:05:36,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=163466.66666666666, ans=0.0
+2024-08-26 22:05:41,870 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.233e+02 1.560e+02 1.959e+02 2.402e+02 3.823e+02, threshold=3.919e+02, percent-clipped=10.0
+2024-08-26 22:05:43,913 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=163520.0, ans=0.125
+2024-08-26 22:05:48,778 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=163520.0, ans=0.125
+2024-08-26 22:05:50,688 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:05:51,608 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=163520.0, ans=0.2
+2024-08-26 22:05:56,843 INFO [train.py:1114] (0/4) Epoch 13, batch 800, loss[loss=0.1906, simple_loss=0.2571, pruned_loss=0.04501, ctc_loss=0.08501, over 19405.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2718, pruned_loss=0.04847, ctc_loss=0.09058, over 3796428.28 frames. ], batch size: 48, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 22:05:59,027 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=1.87 vs. limit=15.0
+2024-08-26 22:06:33,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=163733.33333333334, ans=0.125
+2024-08-26 22:06:51,424 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=163786.66666666666, ans=0.0
+2024-08-26 22:06:54,790 INFO [train.py:1114] (0/4) Epoch 13, batch 850, loss[loss=0.2132, simple_loss=0.2856, pruned_loss=0.05134, ctc_loss=0.09528, over 19647.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2713, pruned_loss=0.04823, ctc_loss=0.08998, over 3815611.85 frames. ], batch size: 59, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:07:10,168 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=163893.33333333334, ans=0.0
+2024-08-26 22:07:21,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=163946.66666666666, ans=0.1
+2024-08-26 22:07:28,706 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=164000.0, ans=0.125
+2024-08-26 22:07:36,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=164000.0, ans=0.125
+2024-08-26 22:07:37,694 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.442e+02 1.756e+02 2.038e+02 3.459e+02, threshold=3.512e+02, percent-clipped=0.0
+2024-08-26 22:07:50,315 INFO [train.py:1114] (0/4) Epoch 13, batch 900, loss[loss=0.1726, simple_loss=0.242, pruned_loss=0.03755, ctc_loss=0.07002, over 19817.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2717, pruned_loss=0.04846, ctc_loss=0.09008, over 3819191.02 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:07:57,994 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:07:59,105 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.25 vs. limit=15.0
+2024-08-26 22:08:14,153 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.60 vs. limit=6.0
+2024-08-26 22:08:20,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=164266.66666666666, ans=0.0
+2024-08-26 22:08:25,162 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=164266.66666666666, ans=0.1
+2024-08-26 22:08:32,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=164320.0, ans=0.025
+2024-08-26 22:08:38,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=164320.0, ans=0.125
+2024-08-26 22:08:40,767 INFO [train.py:1114] (0/4) Epoch 13, batch 950, loss[loss=0.1848, simple_loss=0.2555, pruned_loss=0.04142, ctc_loss=0.07819, over 19491.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2724, pruned_loss=0.04897, ctc_loss=0.0912, over 3821463.59 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:08:50,232 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=164426.66666666666, ans=0.025
+2024-08-26 22:09:11,482 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=164533.33333333334, ans=0.125
+2024-08-26 22:09:12,539 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=164533.33333333334, ans=0.125
+2024-08-26 22:09:17,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=164533.33333333334, ans=0.2
+2024-08-26 22:09:20,502 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.559e+02 1.935e+02 2.172e+02 5.830e+02, threshold=3.869e+02, percent-clipped=1.0
+2024-08-26 22:09:29,545 INFO [train.py:1114] (0/4) Epoch 13, batch 1000, loss[loss=0.176, simple_loss=0.2477, pruned_loss=0.0378, ctc_loss=0.07172, over 19854.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2725, pruned_loss=0.04896, ctc_loss=0.09134, over 3817729.86 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:09:32,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=164640.0, ans=0.2
+2024-08-26 22:09:34,397 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=164640.0, ans=0.2
+2024-08-26 22:09:42,857 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=164693.33333333334, ans=0.1
+2024-08-26 22:09:44,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=164693.33333333334, ans=0.125
+2024-08-26 22:10:09,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=164853.33333333334, ans=0.0
+2024-08-26 22:10:19,110 INFO [train.py:1114] (0/4) Epoch 13, batch 1050, loss[loss=0.2017, simple_loss=0.2773, pruned_loss=0.04559, ctc_loss=0.08737, over 19837.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2718, pruned_loss=0.04878, ctc_loss=0.09107, over 3823736.33 frames. ], batch size: 57, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:10:20,298 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=164906.66666666666, ans=0.1
+2024-08-26 22:10:22,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=164906.66666666666, ans=0.125
+2024-08-26 22:10:39,352 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=165013.33333333334, ans=0.2
+2024-08-26 22:10:39,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=165013.33333333334, ans=0.1
+2024-08-26 22:10:40,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=165013.33333333334, ans=0.025
+2024-08-26 22:10:55,509 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.353e+02 1.566e+02 1.889e+02 2.686e+02, threshold=3.131e+02, percent-clipped=0.0
+2024-08-26 22:11:01,581 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=165120.0, ans=0.125
+2024-08-26 22:11:06,687 INFO [train.py:1114] (0/4) Epoch 13, batch 1100, loss[loss=0.1984, simple_loss=0.2721, pruned_loss=0.04468, ctc_loss=0.0883, over 19566.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2716, pruned_loss=0.04848, ctc_loss=0.09053, over 3830771.65 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:11:06,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=165173.33333333334, ans=0.0
+2024-08-26 22:11:22,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=165226.66666666666, ans=0.125
+2024-08-26 22:11:34,549 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=165333.33333333334, ans=0.5
+2024-08-26 22:11:49,344 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:11:55,417 INFO [train.py:1114] (0/4) Epoch 13, batch 1150, loss[loss=0.1933, simple_loss=0.2649, pruned_loss=0.04468, ctc_loss=0.08099, over 19597.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2714, pruned_loss=0.04852, ctc_loss=0.09053, over 3830498.68 frames. ], batch size: 52, lr: 1.15e-02, grad_scale: 16.0
+2024-08-26 22:11:55,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=165440.0, ans=0.125
+2024-08-26 22:11:56,912 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.49 vs. limit=15.0
+2024-08-26 22:12:27,268 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=165600.0, ans=0.1
+2024-08-26 22:12:34,675 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.454e+02 1.639e+02 1.902e+02 3.180e+02, threshold=3.277e+02, percent-clipped=1.0
+2024-08-26 22:12:37,587 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=165653.33333333334, ans=0.2
+2024-08-26 22:12:43,815 INFO [train.py:1114] (0/4) Epoch 13, batch 1200, loss[loss=0.2101, simple_loss=0.2808, pruned_loss=0.05105, ctc_loss=0.09334, over 19841.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2724, pruned_loss=0.04879, ctc_loss=0.09116, over 3825729.86 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:12:45,253 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.51 vs. limit=15.0
+2024-08-26 22:12:49,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=165706.66666666666, ans=0.125
+2024-08-26 22:12:53,428 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=165706.66666666666, ans=0.125
+2024-08-26 22:13:00,978 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.46 vs. limit=22.5
+2024-08-26 22:13:09,496 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=165813.33333333334, ans=0.125
+2024-08-26 22:13:32,332 INFO [train.py:1114] (0/4) Epoch 13, batch 1250, loss[loss=0.227, simple_loss=0.2885, pruned_loss=0.06124, ctc_loss=0.1078, over 19518.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2723, pruned_loss=0.04869, ctc_loss=0.09075, over 3843655.39 frames. ], batch size: 61, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:13:32,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=165973.33333333334, ans=0.125
+2024-08-26 22:13:34,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=165973.33333333334, ans=0.125
+2024-08-26 22:14:01,817 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=166133.33333333334, ans=0.1
+2024-08-26 22:14:11,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=166186.66666666666, ans=0.1
+2024-08-26 22:14:11,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=166186.66666666666, ans=0.125
+2024-08-26 22:14:11,807 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.418e+02 1.637e+02 2.002e+02 4.206e+02, threshold=3.274e+02, percent-clipped=1.0
+2024-08-26 22:14:23,437 INFO [train.py:1114] (0/4) Epoch 13, batch 1300, loss[loss=0.2251, simple_loss=0.2874, pruned_loss=0.05958, ctc_loss=0.1089, over 18789.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2715, pruned_loss=0.04831, ctc_loss=0.08991, over 3847040.83 frames. ], batch size: 76, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:14:40,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=166293.33333333334, ans=0.0
+2024-08-26 22:14:42,372 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.57 vs. limit=15.0
+2024-08-26 22:15:02,622 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.56 vs. limit=12.0
+2024-08-26 22:15:09,553 INFO [train.py:1114] (0/4) Epoch 13, batch 1350, loss[loss=0.1723, simple_loss=0.2553, pruned_loss=0.032, ctc_loss=0.06338, over 19774.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2708, pruned_loss=0.04777, ctc_loss=0.08902, over 3858652.72 frames. ], batch size: 54, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:15:22,675 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=166560.0, ans=0.05
+2024-08-26 22:15:31,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=166613.33333333334, ans=0.025
+2024-08-26 22:15:47,890 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.45 vs. limit=12.0
+2024-08-26 22:15:50,052 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.412e+02 1.605e+02 1.958e+02 2.747e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-26 22:15:59,142 INFO [train.py:1114] (0/4) Epoch 13, batch 1400, loss[loss=0.1825, simple_loss=0.2412, pruned_loss=0.04485, ctc_loss=0.08533, over 19692.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2705, pruned_loss=0.04772, ctc_loss=0.08897, over 3864667.05 frames. ], batch size: 46, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:16:18,917 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=166880.0, ans=0.125
+2024-08-26 22:16:28,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=166933.33333333334, ans=0.1
+2024-08-26 22:16:34,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=166933.33333333334, ans=0.125
+2024-08-26 22:16:47,825 INFO [train.py:1114] (0/4) Epoch 13, batch 1450, loss[loss=0.2049, simple_loss=0.2726, pruned_loss=0.05026, ctc_loss=0.09158, over 19679.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2714, pruned_loss=0.04816, ctc_loss=0.08964, over 3862526.47 frames. ], batch size: 63, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:16:59,131 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=167093.33333333334, ans=0.025
+2024-08-26 22:17:19,396 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.50 vs. limit=22.5
+2024-08-26 22:17:22,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=167200.0, ans=0.125
+2024-08-26 22:17:27,080 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.434e+02 1.640e+02 1.966e+02 4.010e+02, threshold=3.281e+02, percent-clipped=1.0
+2024-08-26 22:17:34,681 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=167253.33333333334, ans=0.0
+2024-08-26 22:17:36,397 INFO [train.py:1114] (0/4) Epoch 13, batch 1500, loss[loss=0.214, simple_loss=0.2874, pruned_loss=0.05098, ctc_loss=0.09685, over 19580.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.272, pruned_loss=0.0482, ctc_loss=0.08968, over 3861682.56 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:18:09,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=167466.66666666666, ans=0.125
+2024-08-26 22:18:14,968 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=167520.0, ans=0.125
+2024-08-26 22:18:15,916 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=167520.0, ans=0.125
+2024-08-26 22:18:26,617 INFO [train.py:1114] (0/4) Epoch 13, batch 1550, loss[loss=0.2223, simple_loss=0.2884, pruned_loss=0.05708, ctc_loss=0.1051, over 19588.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2723, pruned_loss=0.04878, ctc_loss=0.09079, over 3846022.74 frames. ], batch size: 60, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:18:35,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=167626.66666666666, ans=0.2
+2024-08-26 22:18:42,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=167626.66666666666, ans=0.0
+2024-08-26 22:18:55,406 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.20 vs. limit=22.5
+2024-08-26 22:19:03,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=167786.66666666666, ans=0.04949747468305833
+2024-08-26 22:19:04,502 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.500e+02 1.731e+02 2.118e+02 3.338e+02, threshold=3.463e+02, percent-clipped=1.0
+2024-08-26 22:19:11,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=167786.66666666666, ans=0.0
+2024-08-26 22:19:12,912 INFO [train.py:1114] (0/4) Epoch 13, batch 1600, loss[loss=0.2135, simple_loss=0.2863, pruned_loss=0.0517, ctc_loss=0.09325, over 19856.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2723, pruned_loss=0.0488, ctc_loss=0.09078, over 3836438.34 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:19:30,707 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=167893.33333333334, ans=0.125
+2024-08-26 22:19:32,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=167946.66666666666, ans=0.125
+2024-08-26 22:19:33,710 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.59 vs. limit=22.5
+2024-08-26 22:19:59,755 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=167946.66666666666, ans=0.0
+2024-08-26 22:19:59,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=167946.66666666666, ans=0.0
+2024-08-26 22:20:08,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=168000.0, ans=0.0
+2024-08-26 22:20:13,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=168053.33333333334, ans=0.125
+2024-08-26 22:20:19,721 INFO [train.py:1114] (0/4) Epoch 13, batch 1650, loss[loss=0.1998, simple_loss=0.2745, pruned_loss=0.04403, ctc_loss=0.09277, over 19662.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.272, pruned_loss=0.04863, ctc_loss=0.09071, over 3833199.88 frames. ], batch size: 59, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:20:32,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=168160.0, ans=0.1
+2024-08-26 22:20:43,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=168213.33333333334, ans=0.125
+2024-08-26 22:20:46,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.77 vs. limit=6.0
+2024-08-26 22:20:57,545 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.381e+02 1.542e+02 1.780e+02 2.683e+02, threshold=3.084e+02, percent-clipped=0.0
+2024-08-26 22:21:01,545 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=168320.0, ans=0.0
+2024-08-26 22:21:07,572 INFO [train.py:1114] (0/4) Epoch 13, batch 1700, loss[loss=0.1868, simple_loss=0.2492, pruned_loss=0.04408, ctc_loss=0.09073, over 19674.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2716, pruned_loss=0.04817, ctc_loss=0.08991, over 3847041.83 frames. ], batch size: 46, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:21:27,652 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.56 vs. limit=12.0
+2024-08-26 22:21:39,791 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=168533.33333333334, ans=0.125
+2024-08-26 22:21:48,821 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.86 vs. limit=12.0
+2024-08-26 22:21:52,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=168586.66666666666, ans=0.025
+2024-08-26 22:21:53,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=168640.0, ans=0.05
+2024-08-26 22:21:53,902 INFO [train.py:1114] (0/4) Epoch 13, batch 1750, loss[loss=0.1778, simple_loss=0.2449, pruned_loss=0.04031, ctc_loss=0.07492, over 19627.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2707, pruned_loss=0.0478, ctc_loss=0.08911, over 3852460.98 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:23:42,134 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=168800.0, ans=0.0
+2024-08-26 22:23:43,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=168800.0, ans=0.125
+2024-08-26 22:23:50,356 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.22 vs. limit=15.0
+2024-08-26 22:23:51,722 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.438e+02 1.563e+02 1.924e+02 3.851e+02, threshold=3.126e+02, percent-clipped=3.0
+2024-08-26 22:24:01,012 INFO [train.py:1114] (0/4) Epoch 13, batch 1800, loss[loss=0.2176, simple_loss=0.2944, pruned_loss=0.05169, ctc_loss=0.09358, over 19597.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2711, pruned_loss=0.04797, ctc_loss=0.08949, over 3854137.29 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:24:06,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=168906.66666666666, ans=0.025
+2024-08-26 22:24:09,271 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.65 vs. limit=15.0
+2024-08-26 22:24:25,702 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:24:25,823 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=6.85 vs. limit=15.0
+2024-08-26 22:24:44,742 INFO [train.py:1114] (0/4) Epoch 13, batch 1850, loss[loss=0.2041, simple_loss=0.2786, pruned_loss=0.04628, ctc_loss=0.09279, over 19579.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2705, pruned_loss=0.0478, ctc_loss=0.08906, over 3856853.35 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:24:49,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=169173.33333333334, ans=0.2
+2024-08-26 22:24:53,812 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=169226.66666666666, ans=0.125
+2024-08-26 22:24:58,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=169226.66666666666, ans=0.0
+2024-08-26 22:24:59,559 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.84 vs. limit=10.0
+2024-08-26 22:25:01,335 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.47 vs. limit=22.5
+2024-08-26 22:25:09,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=169280.0, ans=0.125
+2024-08-26 22:25:12,190 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=169333.33333333334, ans=0.125
+2024-08-26 22:25:21,909 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.546e+02 1.793e+02 2.323e+02 4.609e+02, threshold=3.586e+02, percent-clipped=7.0
+2024-08-26 22:25:29,826 INFO [train.py:1114] (0/4) Epoch 13, batch 1900, loss[loss=0.1988, simple_loss=0.2768, pruned_loss=0.04347, ctc_loss=0.08437, over 19653.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.271, pruned_loss=0.048, ctc_loss=0.0893, over 3861046.47 frames. ], batch size: 59, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:25:59,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=169600.0, ans=0.0
+2024-08-26 22:26:13,639 INFO [train.py:1114] (0/4) Epoch 13, batch 1950, loss[loss=0.1809, simple_loss=0.2527, pruned_loss=0.03978, ctc_loss=0.07372, over 19587.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2717, pruned_loss=0.04798, ctc_loss=0.08929, over 3870178.17 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:26:31,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=169813.33333333334, ans=0.1
+2024-08-26 22:26:31,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=169813.33333333334, ans=0.125
+2024-08-26 22:26:43,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=169866.66666666666, ans=0.1
+2024-08-26 22:26:52,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=169920.0, ans=0.1
+2024-08-26 22:26:53,524 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.527e+02 1.786e+02 2.093e+02 2.857e+02, threshold=3.573e+02, percent-clipped=0.0
+2024-08-26 22:26:58,064 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=169920.0, ans=0.1
+2024-08-26 22:27:00,498 INFO [train.py:1114] (0/4) Epoch 13, batch 2000, loss[loss=0.1829, simple_loss=0.2528, pruned_loss=0.04129, ctc_loss=0.07569, over 19613.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2727, pruned_loss=0.04861, ctc_loss=0.09044, over 3853235.11 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:27:15,476 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:27:35,787 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.07 vs. limit=22.5
+2024-08-26 22:27:41,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=170186.66666666666, ans=0.125
+2024-08-26 22:27:43,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=170240.0, ans=0.0
+2024-08-26 22:27:44,095 INFO [train.py:1114] (0/4) Epoch 13, batch 2050, loss[loss=0.1872, simple_loss=0.2591, pruned_loss=0.0422, ctc_loss=0.07719, over 19708.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2717, pruned_loss=0.04834, ctc_loss=0.08986, over 3851156.12 frames. ], batch size: 47, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:27:46,900 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=170240.0, ans=0.2
+2024-08-26 22:27:51,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=170240.0, ans=0.025
+2024-08-26 22:28:15,414 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=170400.0, ans=0.1
+2024-08-26 22:28:20,405 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.436e+02 1.652e+02 1.928e+02 2.658e+02, threshold=3.303e+02, percent-clipped=0.0
+2024-08-26 22:28:27,531 INFO [train.py:1114] (0/4) Epoch 13, batch 2100, loss[loss=0.1946, simple_loss=0.2637, pruned_loss=0.04586, ctc_loss=0.08462, over 19762.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2711, pruned_loss=0.0476, ctc_loss=0.08872, over 3858379.74 frames. ], batch size: 54, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:28:42,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=170560.0, ans=0.0
+2024-08-26 22:28:45,298 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.99 vs. limit=15.0
+2024-08-26 22:28:53,131 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-32000.pt
+2024-08-26 22:29:06,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=170666.66666666666, ans=0.0
+2024-08-26 22:29:17,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=170720.0, ans=0.125
+2024-08-26 22:29:17,853 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=170773.33333333334, ans=0.0
+2024-08-26 22:29:18,563 INFO [train.py:1114] (0/4) Epoch 13, batch 2150, loss[loss=0.1784, simple_loss=0.2568, pruned_loss=0.03659, ctc_loss=0.06699, over 19575.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2703, pruned_loss=0.04725, ctc_loss=0.08813, over 3869607.32 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:29:37,145 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=170880.0, ans=0.125
+2024-08-26 22:29:38,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=170880.0, ans=0.1
+2024-08-26 22:29:39,070 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.36 vs. limit=22.5
+2024-08-26 22:29:39,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=170880.0, ans=0.2
+2024-08-26 22:29:44,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=170933.33333333334, ans=0.2
+2024-08-26 22:29:55,023 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.462e+02 1.698e+02 2.269e+02 4.218e+02, threshold=3.397e+02, percent-clipped=7.0
+2024-08-26 22:29:56,170 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=170986.66666666666, ans=0.0
+2024-08-26 22:30:02,085 INFO [train.py:1114] (0/4) Epoch 13, batch 2200, loss[loss=0.2214, simple_loss=0.296, pruned_loss=0.05366, ctc_loss=0.09863, over 19592.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2704, pruned_loss=0.04732, ctc_loss=0.08813, over 3867172.51 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:30:05,600 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=171040.0, ans=0.125
+2024-08-26 22:30:28,143 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.95 vs. limit=10.0
+2024-08-26 22:30:36,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=171253.33333333334, ans=0.5
+2024-08-26 22:30:43,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=171253.33333333334, ans=0.0
+2024-08-26 22:30:46,568 INFO [train.py:1114] (0/4) Epoch 13, batch 2250, loss[loss=0.2306, simple_loss=0.3013, pruned_loss=0.05876, ctc_loss=0.1057, over 19623.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2709, pruned_loss=0.04747, ctc_loss=0.0886, over 3867954.74 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:30:50,487 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=26.85 vs. limit=22.5
+2024-08-26 22:31:07,171 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=171413.33333333334, ans=0.1
+2024-08-26 22:31:07,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=171413.33333333334, ans=0.0
+2024-08-26 22:31:22,557 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.444e+02 1.610e+02 1.869e+02 3.635e+02, threshold=3.220e+02, percent-clipped=1.0
+2024-08-26 22:31:27,047 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:31:29,429 INFO [train.py:1114] (0/4) Epoch 13, batch 2300, loss[loss=0.179, simple_loss=0.2497, pruned_loss=0.03925, ctc_loss=0.07428, over 19511.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2703, pruned_loss=0.04758, ctc_loss=0.08894, over 3862602.56 frames. ], batch size: 49, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 22:31:31,381 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=171573.33333333334, ans=0.125
+2024-08-26 22:31:32,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=171573.33333333334, ans=0.2
+2024-08-26 22:31:55,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=171733.33333333334, ans=0.2
+2024-08-26 22:31:58,149 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=171733.33333333334, ans=0.0
+2024-08-26 22:32:08,562 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=171786.66666666666, ans=0.04949747468305833
+2024-08-26 22:32:10,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=171786.66666666666, ans=0.125
+2024-08-26 22:32:13,415 INFO [train.py:1114] (0/4) Epoch 13, batch 2350, loss[loss=0.2145, simple_loss=0.2786, pruned_loss=0.05559, ctc_loss=0.0979, over 19693.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2705, pruned_loss=0.0477, ctc_loss=0.089, over 3865403.74 frames. ], batch size: 63, lr: 1.13e-02, grad_scale: 16.0
+2024-08-26 22:32:27,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=171893.33333333334, ans=0.2
+2024-08-26 22:32:50,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=171946.66666666666, ans=0.0
+2024-08-26 22:32:58,607 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.11 vs. limit=15.0
+2024-08-26 22:33:04,852 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.652e+02 1.956e+02 2.487e+02 4.828e+02, threshold=3.913e+02, percent-clipped=4.0
+2024-08-26 22:33:10,708 INFO [train.py:1114] (0/4) Epoch 13, batch 2400, loss[loss=0.2309, simple_loss=0.3025, pruned_loss=0.0579, ctc_loss=0.1087, over 19379.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2725, pruned_loss=0.04844, ctc_loss=0.09039, over 3859690.14 frames. ], batch size: 67, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 22:33:10,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=172106.66666666666, ans=0.125
+2024-08-26 22:33:14,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=172106.66666666666, ans=0.125
+2024-08-26 22:33:21,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=172160.0, ans=0.2
+2024-08-26 22:33:34,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=172213.33333333334, ans=0.125
+2024-08-26 22:33:52,110 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=172320.0, ans=0.2
+2024-08-26 22:33:54,748 INFO [train.py:1114] (0/4) Epoch 13, batch 2450, loss[loss=0.2742, simple_loss=0.3102, pruned_loss=0.08615, ctc_loss=0.1648, over 12724.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2766, pruned_loss=0.05146, ctc_loss=0.0963, over 3732461.70 frames. ], batch size: 140, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 22:33:59,027 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.49 vs. limit=22.5
+2024-08-26 22:34:10,675 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.52 vs. limit=15.0
+2024-08-26 22:34:13,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=172480.0, ans=0.0
+2024-08-26 22:34:29,339 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-13.pt
+2024-08-26 22:38:18,460 INFO [train.py:1114] (0/4) Epoch 14, batch 0, loss[loss=0.1888, simple_loss=0.2606, pruned_loss=0.04301, ctc_loss=0.07734, over 19413.00 frames. ], tot_loss[loss=0.1888, simple_loss=0.2606, pruned_loss=0.04301, ctc_loss=0.07734, over 19413.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:38:18,461 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 22:38:36,019 INFO [train.py:1146] (0/4) Epoch 14, validation: loss=0.1777, simple_loss=0.2705, pruned_loss=0.03149, ctc_loss=0.05468, over 944034.00 frames.
+2024-08-26 22:38:36,020 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 22:38:39,678 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.631e+02 1.782e+02 1.968e+02 3.125e+02, threshold=3.565e+02, percent-clipped=0.0
+2024-08-26 22:38:44,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=172634.66666666666, ans=0.0
+2024-08-26 22:39:04,648 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.80 vs. limit=22.5
+2024-08-26 22:39:09,126 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.11 vs. limit=15.0
+2024-08-26 22:39:21,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=172794.66666666666, ans=0.1
+2024-08-26 22:40:03,206 INFO [train.py:1114] (0/4) Epoch 14, batch 50, loss[loss=0.1904, simple_loss=0.2575, pruned_loss=0.04475, ctc_loss=0.08443, over 19713.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2734, pruned_loss=0.04821, ctc_loss=0.09071, over 844800.35 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:40:08,279 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.18 vs. limit=22.5
+2024-08-26 22:40:16,594 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=172848.0, ans=0.1
+2024-08-26 22:41:56,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=172901.33333333334, ans=0.025
+2024-08-26 22:42:01,347 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.79 vs. limit=15.0
+2024-08-26 22:42:41,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-26 22:42:42,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=172901.33333333334, ans=0.125
+2024-08-26 22:42:42,201 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.28 vs. limit=15.0
+2024-08-26 22:42:50,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=172954.66666666666, ans=0.025
+2024-08-26 22:43:09,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=173008.0, ans=0.1
+2024-08-26 22:43:12,227 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=173061.33333333334, ans=0.2
+2024-08-26 22:43:20,288 INFO [train.py:1114] (0/4) Epoch 14, batch 100, loss[loss=0.1827, simple_loss=0.2603, pruned_loss=0.03777, ctc_loss=0.07377, over 19723.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2744, pruned_loss=0.04836, ctc_loss=0.09053, over 1499143.56 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:43:23,801 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.427e+02 1.577e+02 1.836e+02 2.542e+02, threshold=3.153e+02, percent-clipped=0.0
+2024-08-26 22:43:32,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=173168.0, ans=0.125
+2024-08-26 22:44:03,581 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=173328.0, ans=0.1
+2024-08-26 22:44:10,505 INFO [train.py:1114] (0/4) Epoch 14, batch 150, loss[loss=0.1749, simple_loss=0.2436, pruned_loss=0.03821, ctc_loss=0.07451, over 19719.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2723, pruned_loss=0.04759, ctc_loss=0.08894, over 2027452.09 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:44:11,629 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=173381.33333333334, ans=0.125
+2024-08-26 22:44:11,981 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.97 vs. limit=15.0
+2024-08-26 22:44:12,594 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=173381.33333333334, ans=0.0
+2024-08-26 22:44:17,313 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.42 vs. limit=6.0
+2024-08-26 22:44:25,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=173434.66666666666, ans=0.025
+2024-08-26 22:44:29,243 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=173434.66666666666, ans=0.0
+2024-08-26 22:45:00,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=173594.66666666666, ans=0.09899494936611666
+2024-08-26 22:45:10,942 INFO [train.py:1114] (0/4) Epoch 14, batch 200, loss[loss=0.2346, simple_loss=0.2988, pruned_loss=0.06165, ctc_loss=0.1178, over 18353.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2698, pruned_loss=0.04694, ctc_loss=0.08769, over 2435099.22 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:45:14,585 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.393e+02 1.624e+02 1.885e+02 3.247e+02, threshold=3.247e+02, percent-clipped=1.0
+2024-08-26 22:45:17,521 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.12 vs. limit=15.0
+2024-08-26 22:45:18,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=173648.0, ans=0.125
+2024-08-26 22:45:23,732 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=173701.33333333334, ans=0.0
+2024-08-26 22:45:24,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=173701.33333333334, ans=0.125
+2024-08-26 22:45:48,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=173808.0, ans=10.0
+2024-08-26 22:46:04,220 INFO [train.py:1114] (0/4) Epoch 14, batch 250, loss[loss=0.1994, simple_loss=0.272, pruned_loss=0.04652, ctc_loss=0.08452, over 19394.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2697, pruned_loss=0.04673, ctc_loss=0.0873, over 2755806.45 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:46:04,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=173914.66666666666, ans=0.125
+2024-08-26 22:46:11,818 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=173914.66666666666, ans=0.025
+2024-08-26 22:46:12,207 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.71 vs. limit=10.0
+2024-08-26 22:46:25,415 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=174021.33333333334, ans=0.125
+2024-08-26 22:46:25,871 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.33 vs. limit=15.0
+2024-08-26 22:46:32,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=174074.66666666666, ans=0.125
+2024-08-26 22:46:34,548 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=174074.66666666666, ans=0.125
+2024-08-26 22:46:42,162 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=174074.66666666666, ans=0.025
+2024-08-26 22:46:46,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=174128.0, ans=0.04949747468305833
+2024-08-26 22:46:51,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=174128.0, ans=0.0
+2024-08-26 22:46:51,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=174128.0, ans=0.125
+2024-08-26 22:46:52,742 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.17 vs. limit=22.5
+2024-08-26 22:46:54,992 INFO [train.py:1114] (0/4) Epoch 14, batch 300, loss[loss=0.2, simple_loss=0.2709, pruned_loss=0.04745, ctc_loss=0.08583, over 19521.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2691, pruned_loss=0.04652, ctc_loss=0.08681, over 3001481.83 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 16.0
+2024-08-26 22:46:56,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=174181.33333333334, ans=0.125
+2024-08-26 22:46:58,016 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=174181.33333333334, ans=0.0
+2024-08-26 22:46:59,573 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.470e+02 1.728e+02 2.225e+02 3.956e+02, threshold=3.457e+02, percent-clipped=2.0
+2024-08-26 22:47:09,592 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.76 vs. limit=22.5
+2024-08-26 22:47:15,205 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.57 vs. limit=15.0
+2024-08-26 22:47:19,465 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=174288.0, ans=0.0
+2024-08-26 22:47:29,030 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=174341.33333333334, ans=0.0
+2024-08-26 22:47:35,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=174394.66666666666, ans=0.125
+2024-08-26 22:47:38,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=174394.66666666666, ans=0.125
+2024-08-26 22:47:43,369 INFO [train.py:1114] (0/4) Epoch 14, batch 350, loss[loss=0.1752, simple_loss=0.2409, pruned_loss=0.04022, ctc_loss=0.07276, over 19759.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2689, pruned_loss=0.04636, ctc_loss=0.08643, over 3191850.72 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 16.0
+2024-08-26 22:47:48,125 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=174448.0, ans=0.025
+2024-08-26 22:47:51,177 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=174448.0, ans=0.125
+2024-08-26 22:48:21,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=174554.66666666666, ans=0.1
+2024-08-26 22:48:26,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=174554.66666666666, ans=0.2
+2024-08-26 22:48:32,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=174608.0, ans=0.2
+2024-08-26 22:48:37,590 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=174608.0, ans=0.1
+2024-08-26 22:48:48,264 INFO [train.py:1114] (0/4) Epoch 14, batch 400, loss[loss=0.2109, simple_loss=0.2821, pruned_loss=0.05049, ctc_loss=0.09677, over 19496.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2687, pruned_loss=0.04643, ctc_loss=0.0866, over 3342545.93 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:48:52,771 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.491e+02 1.630e+02 1.842e+02 3.705e+02, threshold=3.261e+02, percent-clipped=1.0
+2024-08-26 22:49:10,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=174821.33333333334, ans=0.025
+2024-08-26 22:49:11,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=174821.33333333334, ans=0.125
+2024-08-26 22:49:39,154 INFO [train.py:1114] (0/4) Epoch 14, batch 450, loss[loss=0.1974, simple_loss=0.2767, pruned_loss=0.0429, ctc_loss=0.08052, over 19609.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2692, pruned_loss=0.0465, ctc_loss=0.08682, over 3450915.96 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:49:40,340 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff3.min_abs, batch_count=174981.33333333334, ans=0.2
+2024-08-26 22:49:54,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=175034.66666666666, ans=0.125
+2024-08-26 22:50:17,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=175141.33333333334, ans=0.125
+2024-08-26 22:50:20,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=175194.66666666666, ans=0.1
+2024-08-26 22:50:22,646 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=175194.66666666666, ans=0.07
+2024-08-26 22:50:23,613 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:50:27,214 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=175248.0, ans=0.0
+2024-08-26 22:50:27,954 INFO [train.py:1114] (0/4) Epoch 14, batch 500, loss[loss=0.2251, simple_loss=0.3023, pruned_loss=0.05404, ctc_loss=0.09978, over 19681.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2685, pruned_loss=0.0462, ctc_loss=0.08633, over 3546652.26 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:50:32,529 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.438e+02 1.690e+02 1.988e+02 3.244e+02, threshold=3.379e+02, percent-clipped=0.0
+2024-08-26 22:50:55,860 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=175408.0, ans=0.0
+2024-08-26 22:51:03,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=175408.0, ans=0.125
+2024-08-26 22:51:03,699 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.85 vs. limit=15.0
+2024-08-26 22:51:08,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=175461.33333333334, ans=0.0
+2024-08-26 22:51:08,932 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=175461.33333333334, ans=0.025
+2024-08-26 22:51:11,983 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.29 vs. limit=12.0
+2024-08-26 22:51:15,968 INFO [train.py:1114] (0/4) Epoch 14, batch 550, loss[loss=0.2151, simple_loss=0.2882, pruned_loss=0.05226, ctc_loss=0.09378, over 19273.00 frames. ], tot_loss[loss=0.198, simple_loss=0.269, pruned_loss=0.04624, ctc_loss=0.08655, over 3608532.80 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:51:26,828 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.whiten.whitening_limit, batch_count=175568.0, ans=12.0
+2024-08-26 22:51:27,349 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=175568.0, ans=0.0
+2024-08-26 22:51:56,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=175728.0, ans=10.0
+2024-08-26 22:52:05,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=175728.0, ans=0.125
+2024-08-26 22:52:15,480 INFO [train.py:1114] (0/4) Epoch 14, batch 600, loss[loss=0.2013, simple_loss=0.2832, pruned_loss=0.0435, ctc_loss=0.08111, over 19370.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2694, pruned_loss=0.04623, ctc_loss=0.08666, over 3666425.54 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:52:16,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=175781.33333333334, ans=0.125
+2024-08-26 22:52:20,029 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.101e+02 1.434e+02 1.658e+02 1.951e+02 2.764e+02, threshold=3.317e+02, percent-clipped=0.0
+2024-08-26 22:52:23,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=175781.33333333334, ans=0.04949747468305833
+2024-08-26 22:52:25,880 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.14 vs. limit=10.0
+2024-08-26 22:52:28,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_ff2.min_abs, batch_count=175834.66666666666, ans=0.1
+2024-08-26 22:53:17,362 INFO [train.py:1114] (0/4) Epoch 14, batch 650, loss[loss=0.2037, simple_loss=0.2803, pruned_loss=0.04707, ctc_loss=0.08235, over 19762.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.269, pruned_loss=0.046, ctc_loss=0.08629, over 3716649.43 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:53:20,724 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.92 vs. limit=15.0
+2024-08-26 22:53:27,295 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.88 vs. limit=15.0
+2024-08-26 22:53:34,058 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.70 vs. limit=15.0
+2024-08-26 22:53:47,567 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=176208.0, ans=0.125
+2024-08-26 22:54:20,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=176261.33333333334, ans=0.0
+2024-08-26 22:54:24,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=176261.33333333334, ans=0.0
+2024-08-26 22:54:29,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=176261.33333333334, ans=0.025
+2024-08-26 22:54:41,331 INFO [train.py:1114] (0/4) Epoch 14, batch 700, loss[loss=0.1876, simple_loss=0.2601, pruned_loss=0.04183, ctc_loss=0.07887, over 19729.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2696, pruned_loss=0.0463, ctc_loss=0.08681, over 3748596.56 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:54:41,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=176314.66666666666, ans=0.125
+2024-08-26 22:54:43,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=176314.66666666666, ans=15.0
+2024-08-26 22:54:52,781 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.446e+02 1.597e+02 2.123e+02 3.826e+02, threshold=3.195e+02, percent-clipped=1.0
+2024-08-26 22:55:10,636 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=176368.0, ans=0.125
+2024-08-26 22:55:18,558 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=176368.0, ans=0.2
+2024-08-26 22:55:23,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=176421.33333333334, ans=0.125
+2024-08-26 22:55:30,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176421.33333333334, ans=0.1
+2024-08-26 22:57:53,113 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.18 vs. limit=10.0
+2024-08-26 22:59:23,425 INFO [train.py:1114] (0/4) Epoch 14, batch 750, loss[loss=0.19, simple_loss=0.2664, pruned_loss=0.04161, ctc_loss=0.07573, over 19505.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.269, pruned_loss=0.0461, ctc_loss=0.08641, over 3775051.89 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 16.0
+2024-08-26 23:00:10,282 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=176581.33333333334, ans=0.2
+2024-08-26 23:00:10,406 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 23:00:36,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=176634.66666666666, ans=0.125
+2024-08-26 23:00:42,431 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=176634.66666666666, ans=0.125
+2024-08-26 23:01:09,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=176688.0, ans=0.125
+2024-08-26 23:01:14,076 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.61 vs. limit=15.0
+2024-08-26 23:01:23,460 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.22 vs. limit=22.5
+2024-08-26 23:01:43,796 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=176794.66666666666, ans=0.125
+2024-08-26 23:01:45,017 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.83 vs. limit=15.0
+2024-08-26 23:01:58,980 INFO [train.py:1114] (0/4) Epoch 14, batch 800, loss[loss=0.1796, simple_loss=0.25, pruned_loss=0.03955, ctc_loss=0.07543, over 19409.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2689, pruned_loss=0.04633, ctc_loss=0.08658, over 3795596.12 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:02:00,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=176848.0, ans=0.125
+2024-08-26 23:02:11,891 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.464e+02 1.718e+02 2.120e+02 3.590e+02, threshold=3.437e+02, percent-clipped=3.0
+2024-08-26 23:05:15,130 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=177008.0, ans=0.125
+2024-08-26 23:05:52,830 INFO [train.py:1114] (0/4) Epoch 14, batch 850, loss[loss=0.2166, simple_loss=0.2915, pruned_loss=0.05118, ctc_loss=0.09808, over 19642.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2685, pruned_loss=0.04622, ctc_loss=0.08627, over 3814865.22 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:05:54,399 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=9.93 vs. limit=22.5
+2024-08-26 23:05:54,431 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.26 vs. limit=22.5
+2024-08-26 23:05:58,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=177114.66666666666, ans=0.0
+2024-08-26 23:06:22,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=177274.66666666666, ans=0.025
+2024-08-26 23:06:44,300 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177328.0, ans=0.1
+2024-08-26 23:06:46,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177381.33333333334, ans=0.1
+2024-08-26 23:06:46,761 INFO [train.py:1114] (0/4) Epoch 14, batch 900, loss[loss=0.1824, simple_loss=0.2537, pruned_loss=0.04053, ctc_loss=0.07493, over 19784.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.269, pruned_loss=0.04654, ctc_loss=0.08687, over 3818440.25 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:06:52,135 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.429e+02 1.657e+02 1.986e+02 3.410e+02, threshold=3.315e+02, percent-clipped=0.0
+2024-08-26 23:06:59,972 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.40 vs. limit=15.0
+2024-08-26 23:07:19,036 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.18 vs. limit=15.0
+2024-08-26 23:07:19,979 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=177541.33333333334, ans=6.0
+2024-08-26 23:07:26,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=177594.66666666666, ans=0.125
+2024-08-26 23:07:30,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177594.66666666666, ans=0.1
+2024-08-26 23:07:36,931 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=177594.66666666666, ans=0.2
+2024-08-26 23:07:38,548 INFO [train.py:1114] (0/4) Epoch 14, batch 950, loss[loss=0.1773, simple_loss=0.2506, pruned_loss=0.03777, ctc_loss=0.071, over 19503.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2692, pruned_loss=0.04662, ctc_loss=0.08697, over 3820636.09 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:07:48,111 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-26 23:07:51,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-26 23:08:00,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=177754.66666666666, ans=0.1
+2024-08-26 23:08:08,541 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=177808.0, ans=0.0
+2024-08-26 23:08:08,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=177808.0, ans=0.0
+2024-08-26 23:08:10,430 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=177808.0, ans=0.125
+2024-08-26 23:08:12,140 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=177808.0, ans=0.125
+2024-08-26 23:08:18,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=177861.33333333334, ans=0.125
+2024-08-26 23:08:28,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=177914.66666666666, ans=0.1
+2024-08-26 23:08:35,560 INFO [train.py:1114] (0/4) Epoch 14, batch 1000, loss[loss=0.1804, simple_loss=0.2494, pruned_loss=0.04031, ctc_loss=0.07686, over 19854.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2698, pruned_loss=0.04698, ctc_loss=0.08778, over 3816509.39 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:08:41,161 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.435e+02 1.639e+02 1.944e+02 3.185e+02, threshold=3.279e+02, percent-clipped=0.0
+2024-08-26 23:09:07,967 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=178021.33333333334, ans=0.125
+2024-08-26 23:09:10,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=178074.66666666666, ans=0.2
+2024-08-26 23:09:18,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=178128.0, ans=0.0
+2024-08-26 23:09:20,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=178128.0, ans=0.09899494936611666
+2024-08-26 23:09:20,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=178128.0, ans=0.0
+2024-08-26 23:09:29,120 INFO [train.py:1114] (0/4) Epoch 14, batch 1050, loss[loss=0.199, simple_loss=0.2743, pruned_loss=0.04511, ctc_loss=0.0839, over 19845.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2691, pruned_loss=0.0468, ctc_loss=0.08753, over 3823872.80 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:09:41,061 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.86 vs. limit=22.5
+2024-08-26 23:09:49,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=178288.0, ans=0.2
+2024-08-26 23:09:54,877 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.58 vs. limit=15.0
+2024-08-26 23:10:09,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=178341.33333333334, ans=0.125
+2024-08-26 23:10:11,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=178341.33333333334, ans=0.125
+2024-08-26 23:10:15,053 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=178394.66666666666, ans=0.1
+2024-08-26 23:10:15,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=178394.66666666666, ans=0.125
+2024-08-26 23:10:20,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=178394.66666666666, ans=0.2
+2024-08-26 23:10:47,588 INFO [train.py:1114] (0/4) Epoch 14, batch 1100, loss[loss=0.1744, simple_loss=0.2511, pruned_loss=0.03523, ctc_loss=0.06831, over 19612.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2685, pruned_loss=0.0464, ctc_loss=0.08688, over 3831458.89 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:10:53,001 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.389e+02 1.598e+02 1.774e+02 3.032e+02, threshold=3.197e+02, percent-clipped=0.0
+2024-08-26 23:11:08,951 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=178554.66666666666, ans=0.0
+2024-08-26 23:11:32,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=178661.33333333334, ans=0.0
+2024-08-26 23:11:37,944 INFO [train.py:1114] (0/4) Epoch 14, batch 1150, loss[loss=0.1779, simple_loss=0.255, pruned_loss=0.03585, ctc_loss=0.07306, over 19593.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2685, pruned_loss=0.04657, ctc_loss=0.08712, over 3828830.16 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:11:47,211 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.56 vs. limit=15.0
+2024-08-26 23:11:47,818 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=178768.0, ans=0.0
+2024-08-26 23:11:58,558 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.94 vs. limit=22.5
+2024-08-26 23:11:59,532 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.95 vs. limit=15.0
+2024-08-26 23:12:03,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=178821.33333333334, ans=0.0
+2024-08-26 23:12:04,103 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=178821.33333333334, ans=0.125
+2024-08-26 23:12:31,155 INFO [train.py:1114] (0/4) Epoch 14, batch 1200, loss[loss=0.2212, simple_loss=0.29, pruned_loss=0.05521, ctc_loss=0.105, over 19839.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2696, pruned_loss=0.04664, ctc_loss=0.08752, over 3825820.21 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:12:36,809 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.458e+02 1.687e+02 2.139e+02 4.936e+02, threshold=3.375e+02, percent-clipped=2.0
+2024-08-26 23:12:41,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=179034.66666666666, ans=0.0
+2024-08-26 23:12:46,308 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 23:13:06,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=179141.33333333334, ans=0.1
+2024-08-26 23:13:20,040 INFO [train.py:1114] (0/4) Epoch 14, batch 1250, loss[loss=0.2145, simple_loss=0.2854, pruned_loss=0.05228, ctc_loss=0.0975, over 19531.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2698, pruned_loss=0.04649, ctc_loss=0.08711, over 3843334.00 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:13:22,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=179248.0, ans=10.0
+2024-08-26 23:13:31,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=179301.33333333334, ans=0.1
+2024-08-26 23:13:54,357 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=179408.0, ans=0.015
+2024-08-26 23:13:56,459 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=179408.0, ans=0.04949747468305833
+2024-08-26 23:14:01,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179461.33333333334, ans=0.125
+2024-08-26 23:14:04,764 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.95 vs. limit=12.0
+2024-08-26 23:14:06,682 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=179461.33333333334, ans=0.0
+2024-08-26 23:14:12,513 INFO [train.py:1114] (0/4) Epoch 14, batch 1300, loss[loss=0.2306, simple_loss=0.2948, pruned_loss=0.06118, ctc_loss=0.1101, over 18833.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.269, pruned_loss=0.04617, ctc_loss=0.08628, over 3847330.15 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:14:13,652 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=179514.66666666666, ans=0.0
+2024-08-26 23:14:16,394 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=179514.66666666666, ans=10.0
+2024-08-26 23:14:19,144 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.402e+02 1.628e+02 1.914e+02 2.926e+02, threshold=3.256e+02, percent-clipped=0.0
+2024-08-26 23:14:21,354 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=179568.0, ans=0.125
+2024-08-26 23:14:21,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=179568.0, ans=0.125
+2024-08-26 23:14:30,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=179621.33333333334, ans=0.1
+2024-08-26 23:14:33,576 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.34 vs. limit=10.0
+2024-08-26 23:14:35,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=179621.33333333334, ans=0.125
+2024-08-26 23:14:44,629 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.86 vs. limit=15.0
+2024-08-26 23:14:47,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=179674.66666666666, ans=0.125
+2024-08-26 23:14:48,315 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.24 vs. limit=15.0
+2024-08-26 23:14:52,392 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:14:54,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=179728.0, ans=0.0
+2024-08-26 23:14:58,662 INFO [train.py:1114] (0/4) Epoch 14, batch 1350, loss[loss=0.1834, simple_loss=0.2614, pruned_loss=0.03719, ctc_loss=0.07722, over 19770.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2688, pruned_loss=0.04611, ctc_loss=0.086, over 3858687.41 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:15:01,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=179781.33333333334, ans=0.125
+2024-08-26 23:15:01,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=179781.33333333334, ans=0.025
+2024-08-26 23:16:30,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=179834.66666666666, ans=0.1
+2024-08-26 23:16:38,564 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=179888.0, ans=0.125
+2024-08-26 23:16:41,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.whiten.whitening_limit, batch_count=179941.33333333334, ans=12.0
+2024-08-26 23:16:59,385 INFO [train.py:1114] (0/4) Epoch 14, batch 1400, loss[loss=0.1794, simple_loss=0.2505, pruned_loss=0.03925, ctc_loss=0.07436, over 19674.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2689, pruned_loss=0.04605, ctc_loss=0.08606, over 3864940.55 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:17:07,629 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.482e+02 1.624e+02 2.003e+02 3.142e+02, threshold=3.248e+02, percent-clipped=0.0
+2024-08-26 23:17:16,042 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=180101.33333333334, ans=0.0
+2024-08-26 23:17:18,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=180101.33333333334, ans=0.025
+2024-08-26 23:17:38,667 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.34 vs. limit=6.0
+2024-08-26 23:17:41,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=180261.33333333334, ans=0.125
+2024-08-26 23:17:50,483 INFO [train.py:1114] (0/4) Epoch 14, batch 1450, loss[loss=0.2175, simple_loss=0.2829, pruned_loss=0.05503, ctc_loss=0.1049, over 19668.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2695, pruned_loss=0.04629, ctc_loss=0.08675, over 3862868.32 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:17:54,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=180314.66666666666, ans=0.125
+2024-08-26 23:18:43,550 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=180528.0, ans=0.2
+2024-08-26 23:18:46,184 INFO [train.py:1114] (0/4) Epoch 14, batch 1500, loss[loss=0.2046, simple_loss=0.275, pruned_loss=0.04856, ctc_loss=0.09291, over 19588.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2696, pruned_loss=0.04624, ctc_loss=0.08677, over 3861664.46 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:18:49,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=180581.33333333334, ans=0.125
+2024-08-26 23:18:51,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=180581.33333333334, ans=0.025
+2024-08-26 23:18:52,950 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.115e+02 1.461e+02 1.607e+02 1.928e+02 3.862e+02, threshold=3.214e+02, percent-clipped=2.0
+2024-08-26 23:18:59,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=180634.66666666666, ans=0.125
+2024-08-26 23:19:19,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=180741.33333333334, ans=0.1
+2024-08-26 23:20:21,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=180794.66666666666, ans=0.125
+2024-08-26 23:20:27,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=180794.66666666666, ans=0.125
+2024-08-26 23:20:30,222 INFO [train.py:1114] (0/4) Epoch 14, batch 1550, loss[loss=0.2084, simple_loss=0.2817, pruned_loss=0.04974, ctc_loss=0.0887, over 19602.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2693, pruned_loss=0.04643, ctc_loss=0.08696, over 3847541.67 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:20:36,905 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:20:47,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=180901.33333333334, ans=0.0
+2024-08-26 23:20:54,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=180954.66666666666, ans=0.1
+2024-08-26 23:21:05,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=181008.0, ans=0.1
+2024-08-26 23:21:08,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=181061.33333333334, ans=0.0
+2024-08-26 23:21:12,104 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn2.whiten.whitening_limit, batch_count=181061.33333333334, ans=22.5
+2024-08-26 23:21:12,638 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=181061.33333333334, ans=0.09899494936611666
+2024-08-26 23:21:20,677 INFO [train.py:1114] (0/4) Epoch 14, batch 1600, loss[loss=0.1971, simple_loss=0.2702, pruned_loss=0.0449, ctc_loss=0.08529, over 19828.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2688, pruned_loss=0.04625, ctc_loss=0.0864, over 3837473.80 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:21:23,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.50 vs. limit=15.0
+2024-08-26 23:21:25,498 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:21:27,131 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.461e+02 1.627e+02 1.971e+02 3.033e+02, threshold=3.255e+02, percent-clipped=0.0
+2024-08-26 23:21:32,044 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=181168.0, ans=0.2
+2024-08-26 23:21:32,334 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.78 vs. limit=15.0
+2024-08-26 23:21:35,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=181168.0, ans=0.0
+2024-08-26 23:21:38,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=181168.0, ans=0.2
+2024-08-26 23:21:38,798 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=181168.0, ans=0.125
+2024-08-26 23:23:54,622 INFO [train.py:1114] (0/4) Epoch 14, batch 1650, loss[loss=0.21, simple_loss=0.2857, pruned_loss=0.04878, ctc_loss=0.09204, over 19654.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2688, pruned_loss=0.04636, ctc_loss=0.08655, over 3833134.13 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:24:05,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-26 23:24:17,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=181488.0, ans=0.0
+2024-08-26 23:24:20,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=181488.0, ans=0.1
+2024-08-26 23:24:35,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=181594.66666666666, ans=0.0
+2024-08-26 23:24:38,209 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=181594.66666666666, ans=0.0
+2024-08-26 23:24:40,741 INFO [train.py:1114] (0/4) Epoch 14, batch 1700, loss[loss=0.1667, simple_loss=0.2348, pruned_loss=0.03544, ctc_loss=0.06915, over 19703.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2686, pruned_loss=0.04613, ctc_loss=0.08606, over 3847514.77 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:24:47,146 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.441e+02 1.691e+02 2.079e+02 3.382e+02, threshold=3.381e+02, percent-clipped=3.0
+2024-08-26 23:24:50,836 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=181701.33333333334, ans=0.025
+2024-08-26 23:24:53,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=181701.33333333334, ans=0.0
+2024-08-26 23:25:00,727 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=181754.66666666666, ans=0.2
+2024-08-26 23:25:06,999 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.16 vs. limit=22.5
+2024-08-26 23:25:07,019 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.77 vs. limit=22.5
+2024-08-26 23:25:08,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181808.0, ans=0.1
+2024-08-26 23:25:17,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=181861.33333333334, ans=0.2
+2024-08-26 23:25:19,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=181861.33333333334, ans=0.0
+2024-08-26 23:25:19,935 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:25:24,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=181914.66666666666, ans=0.125
+2024-08-26 23:25:25,111 INFO [train.py:1114] (0/4) Epoch 14, batch 1750, loss[loss=0.1709, simple_loss=0.2306, pruned_loss=0.04075, ctc_loss=0.07453, over 19617.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2683, pruned_loss=0.04604, ctc_loss=0.08583, over 3852353.23 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:25:48,503 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182021.33333333334, ans=0.1
+2024-08-26 23:25:48,901 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.63 vs. limit=6.0
+2024-08-26 23:25:55,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=182074.66666666666, ans=0.125
+2024-08-26 23:26:08,528 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.68 vs. limit=15.0
+2024-08-26 23:26:09,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=182074.66666666666, ans=0.2
+2024-08-26 23:26:19,464 INFO [train.py:1114] (0/4) Epoch 14, batch 1800, loss[loss=0.1731, simple_loss=0.2519, pruned_loss=0.03449, ctc_loss=0.06336, over 19592.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2686, pruned_loss=0.04599, ctc_loss=0.08589, over 3852353.41 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:26:26,559 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.533e+02 1.884e+02 2.505e+02 4.097e+02, threshold=3.767e+02, percent-clipped=5.0
+2024-08-26 23:26:48,533 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=182341.33333333334, ans=0.0
+2024-08-26 23:26:53,111 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.22 vs. limit=15.0
+2024-08-26 23:26:58,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=182394.66666666666, ans=0.125
+2024-08-26 23:27:05,183 INFO [train.py:1114] (0/4) Epoch 14, batch 1850, loss[loss=0.2013, simple_loss=0.2781, pruned_loss=0.04486, ctc_loss=0.08688, over 19596.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2685, pruned_loss=0.04574, ctc_loss=0.0855, over 3854748.92 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:27:05,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=182448.0, ans=0.07
+2024-08-26 23:27:09,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=182448.0, ans=0.0
+2024-08-26 23:28:45,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=182501.33333333334, ans=0.0
+2024-08-26 23:28:56,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=182608.0, ans=0.0
+2024-08-26 23:29:14,708 INFO [train.py:1114] (0/4) Epoch 14, batch 1900, loss[loss=0.2033, simple_loss=0.2866, pruned_loss=0.04396, ctc_loss=0.08026, over 19628.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2692, pruned_loss=0.0462, ctc_loss=0.08607, over 3861097.39 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:29:21,593 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.217e+02 1.441e+02 1.690e+02 2.071e+02 3.452e+02, threshold=3.379e+02, percent-clipped=0.0
+2024-08-26 23:29:21,792 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=182714.66666666666, ans=0.125
+2024-08-26 23:29:22,644 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=182768.0, ans=0.0
+2024-08-26 23:29:27,284 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.88 vs. limit=15.0
+2024-08-26 23:29:33,234 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.35 vs. limit=15.0
+2024-08-26 23:29:57,802 INFO [train.py:1114] (0/4) Epoch 14, batch 1950, loss[loss=0.1791, simple_loss=0.2533, pruned_loss=0.03814, ctc_loss=0.07167, over 19583.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2702, pruned_loss=0.04632, ctc_loss=0.08628, over 3870300.04 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:30:04,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=182981.33333333334, ans=0.0
+2024-08-26 23:30:25,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=183141.33333333334, ans=0.1
+2024-08-26 23:30:25,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=183141.33333333334, ans=0.125
+2024-08-26 23:30:44,845 INFO [train.py:1114] (0/4) Epoch 14, batch 2000, loss[loss=0.1716, simple_loss=0.2422, pruned_loss=0.03659, ctc_loss=0.06964, over 19643.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2706, pruned_loss=0.04661, ctc_loss=0.08682, over 3854479.09 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:30:50,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=183248.0, ans=0.0
+2024-08-26 23:30:52,059 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.411e+02 1.571e+02 1.845e+02 2.838e+02, threshold=3.143e+02, percent-clipped=0.0
+2024-08-26 23:32:08,134 INFO [train.py:1114] (0/4) Epoch 14, batch 2050, loss[loss=0.1835, simple_loss=0.245, pruned_loss=0.04475, ctc_loss=0.08145, over 19725.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2699, pruned_loss=0.04672, ctc_loss=0.08695, over 3850346.95 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:32:15,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=183514.66666666666, ans=0.025
+2024-08-26 23:32:25,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=183621.33333333334, ans=0.1
+2024-08-26 23:32:26,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=183621.33333333334, ans=0.2
+2024-08-26 23:32:27,675 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.29 vs. limit=15.0
+2024-08-26 23:32:51,514 INFO [train.py:1114] (0/4) Epoch 14, batch 2100, loss[loss=0.187, simple_loss=0.2598, pruned_loss=0.04142, ctc_loss=0.07828, over 19758.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2686, pruned_loss=0.04595, ctc_loss=0.08579, over 3857367.71 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:32:58,375 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.491e+02 1.652e+02 1.860e+02 2.729e+02, threshold=3.304e+02, percent-clipped=0.0
+2024-08-26 23:34:03,638 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183941.33333333334, ans=0.1
+2024-08-26 23:34:16,393 INFO [train.py:1114] (0/4) Epoch 14, batch 2150, loss[loss=0.2013, simple_loss=0.2736, pruned_loss=0.04667, ctc_loss=0.08918, over 19570.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2683, pruned_loss=0.04581, ctc_loss=0.08546, over 3868244.08 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:34:42,768 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=184208.0, ans=0.0
+2024-08-26 23:34:47,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=184208.0, ans=0.125
+2024-08-26 23:34:59,941 INFO [train.py:1114] (0/4) Epoch 14, batch 2200, loss[loss=0.197, simple_loss=0.2741, pruned_loss=0.04371, ctc_loss=0.08128, over 19567.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2685, pruned_loss=0.04616, ctc_loss=0.08597, over 3866837.54 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:35:01,844 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:35:01,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=184314.66666666666, ans=0.1
+2024-08-26 23:35:04,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=184314.66666666666, ans=0.125
+2024-08-26 23:35:06,952 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.447e+02 1.750e+02 2.552e+02 4.295e+02, threshold=3.499e+02, percent-clipped=8.0
+2024-08-26 23:35:11,103 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.08 vs. limit=8.0
+2024-08-26 23:35:14,126 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:35:22,749 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=184421.33333333334, ans=0.125
+2024-08-26 23:35:24,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.87 vs. limit=12.0
+2024-08-26 23:35:43,880 INFO [train.py:1114] (0/4) Epoch 14, batch 2250, loss[loss=0.1938, simple_loss=0.2744, pruned_loss=0.04161, ctc_loss=0.07507, over 19595.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2688, pruned_loss=0.04611, ctc_loss=0.08584, over 3866495.61 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:35:46,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=184581.33333333334, ans=0.125
+2024-08-26 23:35:47,460 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=184581.33333333334, ans=0.0
+2024-08-26 23:35:50,967 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.59 vs. limit=15.0
+2024-08-26 23:35:51,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=184634.66666666666, ans=0.0
+2024-08-26 23:36:01,013 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=184688.0, ans=0.125
+2024-08-26 23:36:10,301 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184741.33333333334, ans=0.1
+2024-08-26 23:36:11,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=184741.33333333334, ans=0.025
+2024-08-26 23:36:24,512 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.39 vs. limit=22.5
+2024-08-26 23:36:27,374 INFO [train.py:1114] (0/4) Epoch 14, batch 2300, loss[loss=0.1859, simple_loss=0.2514, pruned_loss=0.043, ctc_loss=0.08609, over 19508.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.268, pruned_loss=0.04608, ctc_loss=0.08583, over 3860452.97 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:36:34,429 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=184848.0, ans=0.125
+2024-08-26 23:36:35,134 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.458e+02 1.662e+02 2.114e+02 3.033e+02, threshold=3.324e+02, percent-clipped=0.0
+2024-08-26 23:36:44,305 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.88 vs. limit=12.0
+2024-08-26 23:36:52,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=184954.66666666666, ans=0.1
+2024-08-26 23:37:04,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=185061.33333333334, ans=0.0
+2024-08-26 23:37:10,950 INFO [train.py:1114] (0/4) Epoch 14, batch 2350, loss[loss=0.2183, simple_loss=0.2851, pruned_loss=0.05581, ctc_loss=0.0996, over 19697.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2679, pruned_loss=0.04623, ctc_loss=0.08598, over 3864029.08 frames. ], batch size: 63, lr: 1.05e-02, grad_scale: 16.0
+2024-08-26 23:37:15,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=185114.66666666666, ans=0.0
+2024-08-26 23:37:16,754 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:37:22,845 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=185168.0, ans=0.0
+2024-08-26 23:37:28,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=185221.33333333334, ans=0.125
+2024-08-26 23:37:34,170 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.47 vs. limit=15.0
+2024-08-26 23:37:42,520 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=185274.66666666666, ans=0.125
+2024-08-26 23:37:55,033 INFO [train.py:1114] (0/4) Epoch 14, batch 2400, loss[loss=0.2109, simple_loss=0.2807, pruned_loss=0.05151, ctc_loss=0.09517, over 19439.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2702, pruned_loss=0.04721, ctc_loss=0.08762, over 3858659.76 frames. ], batch size: 67, lr: 1.05e-02, grad_scale: 32.0
+2024-08-26 23:38:02,848 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.569e+02 1.843e+02 2.357e+02 3.475e+02, threshold=3.685e+02, percent-clipped=2.0
+2024-08-26 23:38:10,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=185434.66666666666, ans=0.0
+2024-08-26 23:38:11,720 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=185488.0, ans=0.0
+2024-08-26 23:38:28,742 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=9.77 vs. limit=15.0
+2024-08-26 23:38:39,316 INFO [train.py:1114] (0/4) Epoch 14, batch 2450, loss[loss=0.2632, simple_loss=0.3066, pruned_loss=0.0791, ctc_loss=0.1543, over 13414.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2736, pruned_loss=0.04949, ctc_loss=0.09229, over 3733724.63 frames. ], batch size: 143, lr: 1.05e-02, grad_scale: 16.0
+2024-08-26 23:38:43,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=185648.0, ans=0.0
+2024-08-26 23:38:45,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=185648.0, ans=0.125
+2024-08-26 23:38:46,890 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.01 vs. limit=22.5
+2024-08-26 23:38:47,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=185701.33333333334, ans=0.07
+2024-08-26 23:38:54,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=185701.33333333334, ans=0.0
+2024-08-26 23:39:13,272 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-14.pt
+2024-08-26 23:40:44,468 INFO [train.py:1114] (0/4) Epoch 15, batch 0, loss[loss=0.196, simple_loss=0.2639, pruned_loss=0.04552, ctc_loss=0.09263, over 19411.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2639, pruned_loss=0.04552, ctc_loss=0.09263, over 19411.00 frames. ], batch size: 48, lr: 1.02e-02, grad_scale: 32.0
+2024-08-26 23:40:46,075 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-26 23:41:25,227 INFO [train.py:1146] (0/4) Epoch 15, validation: loss=0.1751, simple_loss=0.2686, pruned_loss=0.03035, ctc_loss=0.05216, over 944034.00 frames.
+2024-08-26 23:41:25,227 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-26 23:41:30,803 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=185856.0, ans=0.2
+2024-08-26 23:41:31,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=185856.0, ans=0.125
+2024-08-26 23:41:46,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=185856.0, ans=0.125
+2024-08-26 23:42:30,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=185962.66666666666, ans=0.09899494936611666
+2024-08-26 23:42:34,498 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.661e+02 1.811e+02 2.041e+02 3.400e+02, threshold=3.623e+02, percent-clipped=0.0
+2024-08-26 23:42:55,152 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.50 vs. limit=15.0
+2024-08-26 23:45:26,828 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.66 vs. limit=12.0
+2024-08-26 23:49:22,216 INFO [train.py:1114] (0/4) Epoch 15, batch 50, loss[loss=0.1927, simple_loss=0.2565, pruned_loss=0.04794, ctc_loss=0.08278, over 19710.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2715, pruned_loss=0.04669, ctc_loss=0.08744, over 843490.78 frames. ], batch size: 47, lr: 1.02e-02, grad_scale: 16.0
+2024-08-26 23:51:28,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=186122.66666666666, ans=0.07
+2024-08-26 23:51:49,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=186122.66666666666, ans=0.125
+2024-08-26 23:53:59,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=186282.66666666666, ans=0.2
+2024-08-26 23:54:00,160 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=186282.66666666666, ans=0.125
+2024-08-26 23:54:48,879 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.82 vs. limit=15.0
+2024-08-26 23:58:27,660 INFO [train.py:1114] (0/4) Epoch 15, batch 100, loss[loss=0.1851, simple_loss=0.2553, pruned_loss=0.04139, ctc_loss=0.08035, over 19716.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.272, pruned_loss=0.0464, ctc_loss=0.0865, over 1497089.71 frames. ], batch size: 51, lr: 1.02e-02, grad_scale: 16.0
+2024-08-26 23:58:28,351 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.96 vs. limit=15.0
+2024-08-27 00:06:52,641 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.493e+02 1.771e+02 2.166e+02 3.428e+02, threshold=3.543e+02, percent-clipped=0.0
+2024-08-27 00:09:13,627 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.68 vs. limit=15.0
+2024-08-27 00:10:02,702 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.72 vs. limit=15.0
+2024-08-27 00:12:03,631 INFO [train.py:1114] (0/4) Epoch 15, batch 150, loss[loss=0.1768, simple_loss=0.2447, pruned_loss=0.03921, ctc_loss=0.07611, over 19727.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2694, pruned_loss=0.0456, ctc_loss=0.08488, over 2025845.62 frames. ], batch size: 47, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:12:15,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=186656.0, ans=0.125
+2024-08-27 00:12:29,065 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=186709.33333333334, ans=0.2
+2024-08-27 00:14:20,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=186816.0, ans=0.125
+2024-08-27 00:17:05,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=186869.33333333334, ans=0.125
+2024-08-27 00:17:10,133 INFO [train.py:1114] (0/4) Epoch 15, batch 200, loss[loss=0.2088, simple_loss=0.2834, pruned_loss=0.04876, ctc_loss=0.09197, over 18056.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2676, pruned_loss=0.04537, ctc_loss=0.08462, over 2434205.28 frames. ], batch size: 85, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:17:44,750 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.68 vs. limit=10.0
+2024-08-27 00:17:52,108 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.48 vs. limit=15.0
+2024-08-27 00:17:54,377 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=187029.33333333334, ans=0.125
+2024-08-27 00:17:59,322 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.145e+02 1.435e+02 1.602e+02 1.959e+02 3.588e+02, threshold=3.205e+02, percent-clipped=1.0
+2024-08-27 00:18:47,705 INFO [train.py:1114] (0/4) Epoch 15, batch 250, loss[loss=0.2055, simple_loss=0.2837, pruned_loss=0.0469, ctc_loss=0.08378, over 19445.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.268, pruned_loss=0.04526, ctc_loss=0.08446, over 2754227.35 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:19:50,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=187189.33333333334, ans=0.05
+2024-08-27 00:20:05,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=187242.66666666666, ans=0.0
+2024-08-27 00:20:13,643 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=187296.0, ans=0.125
+2024-08-27 00:20:18,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=187349.33333333334, ans=0.5
+2024-08-27 00:20:27,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=187349.33333333334, ans=15.0
+2024-08-27 00:20:34,637 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=187402.66666666666, ans=0.125
+2024-08-27 00:21:10,726 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=187402.66666666666, ans=0.125
+2024-08-27 00:21:12,352 INFO [train.py:1114] (0/4) Epoch 15, batch 300, loss[loss=0.2071, simple_loss=0.2839, pruned_loss=0.04776, ctc_loss=0.08696, over 19512.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2672, pruned_loss=0.04498, ctc_loss=0.084, over 2999797.41 frames. ], batch size: 61, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:21:17,435 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187456.0, ans=0.1
+2024-08-27 00:21:25,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=187509.33333333334, ans=0.125
+2024-08-27 00:22:00,740 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.02 vs. limit=10.0
+2024-08-27 00:22:03,851 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.482e+02 1.757e+02 2.250e+02 4.561e+02, threshold=3.514e+02, percent-clipped=7.0
+2024-08-27 00:22:10,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=187562.66666666666, ans=0.025
+2024-08-27 00:22:31,076 INFO [train.py:1114] (0/4) Epoch 15, batch 350, loss[loss=0.1699, simple_loss=0.2431, pruned_loss=0.03491, ctc_loss=0.06708, over 19746.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2677, pruned_loss=0.04503, ctc_loss=0.08425, over 3189316.06 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:24:54,353 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=187776.0, ans=0.125
+2024-08-27 00:24:59,799 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=187829.33333333334, ans=0.95
+2024-08-27 00:25:03,878 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.43 vs. limit=6.0
+2024-08-27 00:25:09,023 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187882.66666666666, ans=0.1
+2024-08-27 00:25:25,424 INFO [train.py:1114] (0/4) Epoch 15, batch 400, loss[loss=0.1937, simple_loss=0.2722, pruned_loss=0.04201, ctc_loss=0.07773, over 19497.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2672, pruned_loss=0.04494, ctc_loss=0.08406, over 3342205.60 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
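
Editor's note: `grad_scale` in the batch summaries oscillates between 32.0 and 16.0, the signature of dynamic loss scaling under mixed precision: the scale is halved when an fp16 step overflows and grown back after a run of clean steps. A generic sketch of that policy (this mirrors how `torch.cuda.amp.GradScaler` behaves; the constants below are illustrative, not the run's actual settings):

```python
# Generic dynamic loss-scale policy, as implied by grad_scale moving
# between 32.0 and 16.0 in the log. Constants are illustrative.

class DynamicGradScale:
    def __init__(self, init_scale=32.0, growth_factor=2.0,
                 backoff_factor=0.5, growth_interval=2000):
        self.scale = init_scale
        self.growth_factor = growth_factor
        self.backoff_factor = backoff_factor
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, found_inf: bool) -> None:
        if found_inf:
            # Overflow in fp16 grads: shrink the scale and start counting anew.
            self.scale *= self.backoff_factor
            self._good_steps = 0
        else:
            self._good_steps += 1
            if self._good_steps >= self.growth_interval:
                self.scale *= self.growth_factor
                self._good_steps = 0

scaler = DynamicGradScale()
scaler.update(found_inf=True)   # 32.0 -> 16.0, as seen around batch 50
print(scaler.scale)
```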
+2024-08-27 00:25:27,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187989.33333333334, ans=0.1
+2024-08-27 00:25:31,296 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=187989.33333333334, ans=0.05
+2024-08-27 00:25:46,844 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.414e+02 1.733e+02 2.120e+02 3.671e+02, threshold=3.466e+02, percent-clipped=1.0
+2024-08-27 00:26:13,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=188149.33333333334, ans=0.2
+2024-08-27 00:26:33,886 INFO [train.py:1114] (0/4) Epoch 15, batch 450, loss[loss=0.1923, simple_loss=0.2748, pruned_loss=0.03979, ctc_loss=0.07554, over 19609.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2679, pruned_loss=0.04525, ctc_loss=0.08483, over 3449783.93 frames. ], batch size: 55, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:26:34,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=188256.0, ans=0.125
+2024-08-27 00:26:52,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=188309.33333333334, ans=0.2
+2024-08-27 00:27:33,699 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.65 vs. limit=15.0
+2024-08-27 00:27:54,327 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=188469.33333333334, ans=0.125
+2024-08-27 00:27:58,671 INFO [train.py:1114] (0/4) Epoch 15, batch 500, loss[loss=0.1981, simple_loss=0.2808, pruned_loss=0.04277, ctc_loss=0.07459, over 19712.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2671, pruned_loss=0.04488, ctc_loss=0.08399, over 3544980.18 frames. ], batch size: 63, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:28:15,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=188576.0, ans=0.1
+2024-08-27 00:28:25,431 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.423e+02 1.716e+02 2.052e+02 3.766e+02, threshold=3.431e+02, percent-clipped=1.0
+2024-08-27 00:28:25,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=188629.33333333334, ans=0.0
+2024-08-27 00:28:25,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=188629.33333333334, ans=0.125
+2024-08-27 00:28:44,246 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=188682.66666666666, ans=0.125
+2024-08-27 00:28:45,943 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=188682.66666666666, ans=0.125
+2024-08-27 00:28:47,908 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=188736.0, ans=0.025
+2024-08-27 00:28:49,717 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=188736.0, ans=0.1
+2024-08-27 00:28:50,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=188736.0, ans=0.125
+2024-08-27 00:28:56,803 INFO [train.py:1114] (0/4) Epoch 15, batch 550, loss[loss=0.208, simple_loss=0.282, pruned_loss=0.04897, ctc_loss=0.08994, over 19249.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2675, pruned_loss=0.04532, ctc_loss=0.08468, over 3605052.13 frames. ], batch size: 71, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:29:27,150 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=188789.33333333334, ans=0.0
+2024-08-27 00:30:15,104 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=189002.66666666666, ans=0.2
+2024-08-27 00:30:17,657 INFO [train.py:1114] (0/4) Epoch 15, batch 600, loss[loss=0.2181, simple_loss=0.2937, pruned_loss=0.05182, ctc_loss=0.09719, over 19440.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2681, pruned_loss=0.04558, ctc_loss=0.08514, over 3663301.89 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:31:07,355 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.25 vs. limit=22.5
+2024-08-27 00:31:13,707 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=189109.33333333334, ans=0.125
+2024-08-27 00:31:17,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=189162.66666666666, ans=0.125
+2024-08-27 00:31:17,534 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=189162.66666666666, ans=0.125
+2024-08-27 00:31:18,196 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.468e+02 1.719e+02 2.297e+02 4.329e+02, threshold=3.438e+02, percent-clipped=2.0
+2024-08-27 00:31:20,250 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=189162.66666666666, ans=0.125
+2024-08-27 00:31:52,649 INFO [train.py:1114] (0/4) Epoch 15, batch 650, loss[loss=0.1844, simple_loss=0.2629, pruned_loss=0.0385, ctc_loss=0.07236, over 19768.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2673, pruned_loss=0.04517, ctc_loss=0.08448, over 3714569.97 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:31:54,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=189322.66666666666, ans=0.0
+2024-08-27 00:31:59,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=189322.66666666666, ans=0.125
+2024-08-27 00:32:19,695 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=189429.33333333334, ans=0.2
+2024-08-27 00:32:21,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-27 00:33:04,270 INFO [train.py:1114] (0/4) Epoch 15, batch 700, loss[loss=0.1749, simple_loss=0.2461, pruned_loss=0.037, ctc_loss=0.07441, over 19719.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2676, pruned_loss=0.0451, ctc_loss=0.08439, over 3746563.06 frames. ], batch size: 51, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:33:08,447 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.86 vs. limit=10.0
+2024-08-27 00:33:13,688 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=189642.66666666666, ans=0.125
+2024-08-27 00:33:14,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=189642.66666666666, ans=0.125
+2024-08-27 00:33:59,042 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=189642.66666666666, ans=0.0
+2024-08-27 00:34:02,035 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=189696.0, ans=0.125
+2024-08-27 00:34:03,643 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.548e+02 1.878e+02 2.334e+02 4.066e+02, threshold=3.756e+02, percent-clipped=4.0
+2024-08-27 00:34:07,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=189696.0, ans=0.0
+2024-08-27 00:34:08,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=189696.0, ans=0.5
+2024-08-27 00:34:09,333 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=189696.0, ans=0.0
+2024-08-27 00:34:12,202 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=189749.33333333334, ans=0.0
+2024-08-27 00:34:12,443 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.12 vs. limit=22.5
+2024-08-27 00:35:17,144 INFO [train.py:1114] (0/4) Epoch 15, batch 750, loss[loss=0.204, simple_loss=0.2828, pruned_loss=0.04483, ctc_loss=0.08891, over 19502.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2672, pruned_loss=0.04503, ctc_loss=0.08404, over 3773103.81 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:35:17,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=189856.0, ans=0.0
+2024-08-27 00:35:18,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189856.0, ans=0.1
+2024-08-27 00:35:19,768 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.89 vs. limit=22.5
+2024-08-27 00:35:25,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=189856.0, ans=0.125
+2024-08-27 00:35:34,232 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=189909.33333333334, ans=0.025
+2024-08-27 00:35:44,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=190016.0, ans=0.125
+2024-08-27 00:35:45,490 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=190016.0, ans=0.125
+2024-08-27 00:36:06,217 INFO [train.py:1114] (0/4) Epoch 15, batch 800, loss[loss=0.176, simple_loss=0.2451, pruned_loss=0.0387, ctc_loss=0.07349, over 19806.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2666, pruned_loss=0.04477, ctc_loss=0.08359, over 3796038.15 frames. ], batch size: 49, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:36:06,678 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.65 vs. limit=15.0
+2024-08-27 00:36:07,777 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.12 vs. limit=15.0
+2024-08-27 00:36:09,199 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=190122.66666666666, ans=0.025
+2024-08-27 00:36:15,408 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.89 vs. limit=15.0
+2024-08-27 00:36:27,961 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=190229.33333333334, ans=10.0
+2024-08-27 00:36:29,652 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.516e+02 1.778e+02 2.217e+02 3.654e+02, threshold=3.555e+02, percent-clipped=0.0
+2024-08-27 00:36:29,951 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=190229.33333333334, ans=0.0
+2024-08-27 00:36:37,292 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=190282.66666666666, ans=0.0
+2024-08-27 00:36:54,853 INFO [train.py:1114] (0/4) Epoch 15, batch 850, loss[loss=0.2152, simple_loss=0.2901, pruned_loss=0.05132, ctc_loss=0.09399, over 19670.00 frames. ], tot_loss[loss=0.194, simple_loss=0.266, pruned_loss=0.04439, ctc_loss=0.08285, over 3816028.85 frames. ], batch size: 59, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:36:58,738 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=190389.33333333334, ans=0.1
+2024-08-27 00:37:05,069 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=190442.66666666666, ans=0.0
+2024-08-27 00:37:07,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=190442.66666666666, ans=0.125
+2024-08-27 00:37:13,258 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=190442.66666666666, ans=0.125
+2024-08-27 00:37:13,282 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=190442.66666666666, ans=0.1
+2024-08-27 00:37:23,650 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=190549.33333333334, ans=0.0
+2024-08-27 00:37:24,866 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.19 vs. limit=12.0
+2024-08-27 00:37:30,528 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.26 vs. limit=22.5
+2024-08-27 00:37:34,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=190602.66666666666, ans=0.1
+2024-08-27 00:37:42,021 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:37:46,589 INFO [train.py:1114] (0/4) Epoch 15, batch 900, loss[loss=0.1767, simple_loss=0.2484, pruned_loss=0.03854, ctc_loss=0.06981, over 19402.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2662, pruned_loss=0.0446, ctc_loss=0.08323, over 3818918.45 frames. ], batch size: 48, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:37:51,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=190656.0, ans=0.0
+2024-08-27 00:38:12,641 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.206e+02 1.396e+02 1.546e+02 1.855e+02 3.193e+02, threshold=3.091e+02, percent-clipped=0.0
+2024-08-27 00:38:19,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=190816.0, ans=0.2
+2024-08-27 00:38:42,122 INFO [train.py:1114] (0/4) Epoch 15, batch 950, loss[loss=0.1962, simple_loss=0.2658, pruned_loss=0.04649, ctc_loss=0.08392, over 19500.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2665, pruned_loss=0.0448, ctc_loss=0.08342, over 3820506.49 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:38:42,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=190922.66666666666, ans=0.125
+2024-08-27 00:38:42,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=190922.66666666666, ans=0.0
+2024-08-27 00:38:44,339 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=190922.66666666666, ans=0.07
+2024-08-27 00:38:51,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=190922.66666666666, ans=0.0
+2024-08-27 00:38:55,377 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.67 vs. limit=12.0
+2024-08-27 00:39:09,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=191029.33333333334, ans=0.025
+2024-08-27 00:39:12,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=191029.33333333334, ans=0.125
+2024-08-27 00:39:13,215 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.29 vs. limit=15.0
+2024-08-27 00:39:16,780 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=191029.33333333334, ans=0.125
+2024-08-27 00:39:28,038 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:39:31,673 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=191136.0, ans=0.025
+2024-08-27 00:39:37,066 INFO [train.py:1114] (0/4) Epoch 15, batch 1000, loss[loss=0.177, simple_loss=0.2502, pruned_loss=0.03801, ctc_loss=0.06973, over 19842.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2667, pruned_loss=0.04478, ctc_loss=0.08344, over 3817654.75 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:39:41,094 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=191189.33333333334, ans=0.125
+2024-08-27 00:39:44,879 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=191189.33333333334, ans=0.025
+2024-08-27 00:39:49,769 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.39 vs. limit=22.5
+2024-08-27 00:39:56,311 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.87 vs. limit=15.0
+2024-08-27 00:40:00,299 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.403e+02 1.586e+02 1.924e+02 3.101e+02, threshold=3.172e+02, percent-clipped=1.0
+2024-08-27 00:40:09,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=191349.33333333334, ans=0.1
+2024-08-27 00:40:17,599 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.73 vs. limit=15.0
+2024-08-27 00:40:24,713 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=191456.0, ans=0.125
+2024-08-27 00:40:25,468 INFO [train.py:1114] (0/4) Epoch 15, batch 1050, loss[loss=0.1811, simple_loss=0.2605, pruned_loss=0.03635, ctc_loss=0.07246, over 19840.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2659, pruned_loss=0.0446, ctc_loss=0.08319, over 3823923.35 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:40:40,573 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.45 vs. limit=15.0
+2024-08-27 00:40:53,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=191562.66666666666, ans=0.125
+2024-08-27 00:40:53,431 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.26 vs. limit=15.0
+2024-08-27 00:40:56,127 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=191616.0, ans=0.2
+2024-08-27 00:41:12,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=191669.33333333334, ans=0.1
+2024-08-27 00:41:14,670 INFO [train.py:1114] (0/4) Epoch 15, batch 1100, loss[loss=0.1911, simple_loss=0.263, pruned_loss=0.04403, ctc_loss=0.07767, over 19589.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2658, pruned_loss=0.04449, ctc_loss=0.08291, over 3831082.54 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:41:18,940 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.64 vs. limit=22.5
+2024-08-27 00:41:21,392 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=191722.66666666666, ans=10.0
+2024-08-27 00:41:28,920 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=191776.0, ans=0.125
+2024-08-27 00:41:29,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=191776.0, ans=0.2
+2024-08-27 00:41:36,199 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 1.518e+02 1.811e+02 2.066e+02 3.149e+02, threshold=3.622e+02, percent-clipped=0.0
+2024-08-27 00:41:42,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=191882.66666666666, ans=0.125
+2024-08-27 00:42:01,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=191936.0, ans=0.0
+2024-08-27 00:42:07,799 INFO [train.py:1114] (0/4) Epoch 15, batch 1150, loss[loss=0.1952, simple_loss=0.2706, pruned_loss=0.04299, ctc_loss=0.08477, over 19572.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2665, pruned_loss=0.0448, ctc_loss=0.08357, over 3828740.94 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:42:07,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=191989.33333333334, ans=0.07
+2024-08-27 00:42:08,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_na.min_abs, batch_count=191989.33333333334, ans=0.02
+2024-08-27 00:42:08,968 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-36000.pt
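
Editor's note: the `checkpoint.py:75` line shows a mid-epoch checkpoint written at a fixed global-batch interval (here `checkpoint-36000.pt`). A minimal sketch of that pattern with hypothetical argument names; icefall's real helper additionally bundles sampler and grad-scaler state:

```python
# Sketch of periodic mid-epoch checkpointing, as in the checkpoint.py line
# above. Field names are illustrative, not icefall's exact schema.
import torch

def maybe_save_checkpoint(batch_idx_global, save_every, exp_dir,
                          model, optimizer, scheduler):
    if batch_idx_global == 0 or batch_idx_global % save_every != 0:
        return
    path = f"{exp_dir}/checkpoint-{batch_idx_global}.pt"
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "scheduler": scheduler.state_dict(),
            "batch_idx_global": batch_idx_global,
        },
        path,
    )
```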
+2024-08-27 00:42:21,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=191989.33333333334, ans=0.125
+2024-08-27 00:42:21,726 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=8.52 vs. limit=12.0
+2024-08-27 00:42:26,814 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=192042.66666666666, ans=0.125
+2024-08-27 00:42:31,659 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=192042.66666666666, ans=0.1
+2024-08-27 00:43:02,698 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=192202.66666666666, ans=0.125
+2024-08-27 00:43:04,218 INFO [train.py:1114] (0/4) Epoch 15, batch 1200, loss[loss=0.1954, simple_loss=0.2716, pruned_loss=0.04345, ctc_loss=0.08062, over 19835.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2671, pruned_loss=0.0449, ctc_loss=0.08395, over 3822545.46 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-27 00:44:32,769 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.478e+02 1.729e+02 2.216e+02 4.347e+02, threshold=3.458e+02, percent-clipped=1.0
+2024-08-27 00:45:40,577 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=192416.0, ans=0.125
+2024-08-27 00:45:43,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=192416.0, ans=0.125
+2024-08-27 00:45:53,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=192469.33333333334, ans=0.125
+2024-08-27 00:46:05,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=192469.33333333334, ans=0.2
+2024-08-27 00:46:12,657 INFO [train.py:1114] (0/4) Epoch 15, batch 1250, loss[loss=0.2154, simple_loss=0.2857, pruned_loss=0.05331, ctc_loss=0.0959, over 19506.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2676, pruned_loss=0.04494, ctc_loss=0.08374, over 3841818.15 frames. ], batch size: 61, lr: 1.00e-02, grad_scale: 32.0
+2024-08-27 00:46:34,137 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=192522.66666666666, ans=0.125
+2024-08-27 00:46:34,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=192522.66666666666, ans=0.125
+2024-08-27 00:46:50,789 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.95 vs. limit=10.0
+2024-08-27 00:47:08,969 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.68 vs. limit=15.0
+2024-08-27 00:47:22,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=192629.33333333334, ans=0.125
+2024-08-27 00:47:28,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=192629.33333333334, ans=0.125
+2024-08-27 00:47:35,271 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=192629.33333333334, ans=0.1
+2024-08-27 00:47:35,543 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.40 vs. limit=15.0
+2024-08-27 00:48:26,339 INFO [train.py:1114] (0/4) Epoch 15, batch 1300, loss[loss=0.215, simple_loss=0.2871, pruned_loss=0.05115, ctc_loss=0.1014, over 18896.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2669, pruned_loss=0.04452, ctc_loss=0.08313, over 3846791.24 frames. ], batch size: 76, lr: 9.99e-03, grad_scale: 32.0
+2024-08-27 00:48:30,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=192789.33333333334, ans=0.05
+2024-08-27 00:48:42,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=192789.33333333334, ans=0.05
+2024-08-27 00:49:04,480 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=192842.66666666666, ans=0.0
+2024-08-27 00:49:08,504 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=192842.66666666666, ans=0.125
+2024-08-27 00:49:50,649 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.210e+02 1.421e+02 1.669e+02 2.080e+02 3.869e+02, threshold=3.339e+02, percent-clipped=2.0
+2024-08-27 00:50:39,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=193002.66666666666, ans=0.025
+2024-08-27 00:50:43,609 INFO [train.py:1114] (0/4) Epoch 15, batch 1350, loss[loss=0.172, simple_loss=0.247, pruned_loss=0.03578, ctc_loss=0.06367, over 19767.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2663, pruned_loss=0.04433, ctc_loss=0.08273, over 3857434.60 frames. ], batch size: 54, lr: 9.98e-03, grad_scale: 32.0
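
Editor's note: the learning rate in the batch summaries decays gently within the epoch (1.02e-02 at batch 0 down to 9.98e-03 by batch 1350), consistent with a smooth schedule driven by both the batch and epoch counters. The sketch below assumes an Eden-style formula as used in icefall recipes; treat the exponents and constants as assumptions rather than this run's configuration:

```python
# Eden-style learning-rate curve (assumption: a schedule of this shape
# produces the slowly decaying "lr:" values in the log; constants illustrative).

def eden_lr(base_lr: float, batch: int, epoch: float,
            lr_batches: float = 5000.0, lr_epochs: float = 6.0) -> float:
    batch_factor = ((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25
    epoch_factor = ((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25
    return base_lr * batch_factor * epoch_factor

# The decay within one epoch is gentle, as in the log:
for batch in (185856, 186656, 193056):
    print(f"{eden_lr(0.045, batch, epoch=15):.3e}")
```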
+2024-08-27 00:51:41,200 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=193056.0, ans=0.0
+2024-08-27 00:51:57,161 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.61 vs. limit=15.0
+2024-08-27 00:52:04,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=193162.66666666666, ans=0.125
+2024-08-27 00:52:04,671 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.56 vs. limit=15.0
+2024-08-27 00:52:38,712 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.41 vs. limit=6.0
+2024-08-27 00:52:41,342 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.89 vs. limit=10.0
+2024-08-27 00:53:24,225 INFO [train.py:1114] (0/4) Epoch 15, batch 1400, loss[loss=0.1803, simple_loss=0.2385, pruned_loss=0.04392, ctc_loss=0.08546, over 19663.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2662, pruned_loss=0.04454, ctc_loss=0.08301, over 3864045.08 frames. ], batch size: 46, lr: 9.98e-03, grad_scale: 32.0
+2024-08-27 00:53:24,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=193322.66666666666, ans=0.5
+2024-08-27 00:53:27,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=193322.66666666666, ans=0.1
+2024-08-27 00:53:29,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=193322.66666666666, ans=0.07
+2024-08-27 00:53:36,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=193376.0, ans=0.0
+2024-08-27 00:53:57,406 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.449e+02 1.647e+02 2.125e+02 3.032e+02, threshold=3.293e+02, percent-clipped=0.0
+2024-08-27 00:54:19,928 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=193429.33333333334, ans=0.0
+2024-08-27 00:54:22,575 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=193482.66666666666, ans=0.0
+2024-08-27 00:54:48,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=193482.66666666666, ans=0.1
+2024-08-27 00:54:54,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=193482.66666666666, ans=0.2
+2024-08-27 00:54:55,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=193536.0, ans=0.125
+2024-08-27 00:55:08,671 INFO [train.py:1114] (0/4) Epoch 15, batch 1450, loss[loss=0.212, simple_loss=0.2842, pruned_loss=0.05113, ctc_loss=0.09392, over 19645.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.267, pruned_loss=0.04469, ctc_loss=0.08336, over 3861696.22 frames. ], batch size: 63, lr: 9.97e-03, grad_scale: 32.0
+2024-08-27 00:55:21,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=193589.33333333334, ans=0.0
+2024-08-27 00:56:01,040 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.32 vs. limit=6.0
+2024-08-27 00:56:23,592 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.97 vs. limit=22.5
+2024-08-27 00:56:35,044 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=193802.66666666666, ans=0.125
+2024-08-27 00:56:39,603 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.62 vs. limit=8.0
+2024-08-27 00:56:39,792 INFO [train.py:1114] (0/4) Epoch 15, batch 1500, loss[loss=0.2034, simple_loss=0.2821, pruned_loss=0.04524, ctc_loss=0.08538, over 19575.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2675, pruned_loss=0.04474, ctc_loss=0.08353, over 3861285.39 frames. ], batch size: 57, lr: 9.96e-03, grad_scale: 32.0
+2024-08-27 00:56:57,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=193856.0, ans=0.125
+2024-08-27 00:57:53,499 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.81 vs. limit=15.0
+2024-08-27 00:57:55,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=193909.33333333334, ans=0.125
+2024-08-27 00:58:20,410 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.504e+02 1.720e+02 2.138e+02 3.076e+02, threshold=3.439e+02, percent-clipped=0.0
+2024-08-27 00:58:22,772 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=193962.66666666666, ans=0.5
+2024-08-27 00:58:50,637 INFO [train.py:1114] (0/4) Epoch 15, batch 1550, loss[loss=0.2191, simple_loss=0.2838, pruned_loss=0.05694, ctc_loss=0.1012, over 19610.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2672, pruned_loss=0.04481, ctc_loss=0.08353, over 3847267.13 frames. ], batch size: 60, lr: 9.96e-03, grad_scale: 32.0
+2024-08-27 00:59:07,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=194176.0, ans=0.125
+2024-08-27 00:59:11,653 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.14 vs. limit=10.0
+2024-08-27 00:59:23,669 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=194282.66666666666, ans=0.125
+2024-08-27 00:59:25,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=194282.66666666666, ans=0.1
+2024-08-27 00:59:26,185 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=19.09 vs. limit=22.5
+2024-08-27 00:59:36,418 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.11 vs. limit=15.0
+2024-08-27 00:59:37,707 INFO [train.py:1114] (0/4) Epoch 15, batch 1600, loss[loss=0.203, simple_loss=0.2796, pruned_loss=0.04627, ctc_loss=0.08456, over 19833.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2676, pruned_loss=0.04525, ctc_loss=0.08445, over 3836125.80 frames. ], batch size: 57, lr: 9.95e-03, grad_scale: 32.0
+2024-08-27 00:59:52,991 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.91 vs. limit=15.0
+2024-08-27 00:59:56,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=194496.0, ans=0.025
+2024-08-27 00:59:57,866 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=194496.0, ans=0.125
+2024-08-27 01:00:17,423 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.455e+02 1.710e+02 2.060e+02 3.831e+02, threshold=3.419e+02, percent-clipped=3.0
+2024-08-27 01:00:17,751 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:00:26,835 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=194496.0, ans=0.125
+2024-08-27 01:00:40,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=194602.66666666666, ans=0.0
+2024-08-27 01:00:48,596 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.21 vs. limit=6.0
+2024-08-27 01:00:50,779 INFO [train.py:1114] (0/4) Epoch 15, batch 1650, loss[loss=0.1901, simple_loss=0.2719, pruned_loss=0.03903, ctc_loss=0.07549, over 19643.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2672, pruned_loss=0.04499, ctc_loss=0.08392, over 3832052.74 frames. ], batch size: 59, lr: 9.94e-03, grad_scale: 16.0
+2024-08-27 01:01:08,865 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.87 vs. limit=15.0
+2024-08-27 01:01:14,088 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=194709.33333333334, ans=0.125
+2024-08-27 01:01:41,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=194816.0, ans=0.0
+2024-08-27 01:01:49,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=194869.33333333334, ans=0.125
+2024-08-27 01:02:11,998 INFO [train.py:1114] (0/4) Epoch 15, batch 1700, loss[loss=0.1682, simple_loss=0.2347, pruned_loss=0.03721, ctc_loss=0.0682, over 19682.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.267, pruned_loss=0.04451, ctc_loss=0.08307, over 3846484.66 frames. ], batch size: 46, lr: 9.94e-03, grad_scale: 16.0
+2024-08-27 01:02:16,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.42 vs. limit=15.0
+2024-08-27 01:02:26,824 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.19 vs. limit=15.0
+2024-08-27 01:02:27,410 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=194976.0, ans=0.2
+2024-08-27 01:02:30,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=194976.0, ans=0.2
+2024-08-27 01:02:31,572 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=195029.33333333334, ans=0.0
+2024-08-27 01:02:33,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=195029.33333333334, ans=0.125
+2024-08-27 01:02:36,950 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.414e+02 1.817e+02 2.372e+02 3.799e+02, threshold=3.634e+02, percent-clipped=1.0
+2024-08-27 01:02:44,323 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=195082.66666666666, ans=0.025
+2024-08-27 01:02:45,450 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.57 vs. limit=22.5
+2024-08-27 01:03:00,189 INFO [train.py:1114] (0/4) Epoch 15, batch 1750, loss[loss=0.1775, simple_loss=0.2452, pruned_loss=0.03993, ctc_loss=0.07478, over 19659.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2666, pruned_loss=0.04437, ctc_loss=0.08284, over 3851315.28 frames. ], batch size: 45, lr: 9.93e-03, grad_scale: 16.0
+2024-08-27 01:03:00,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=195189.33333333334, ans=0.09899494936611666
+2024-08-27 01:03:01,712 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.27 vs. limit=15.0
+2024-08-27 01:03:05,686 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=195189.33333333334, ans=0.125
+2024-08-27 01:03:19,020 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195296.0, ans=0.1
+2024-08-27 01:03:22,125 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.64 vs. limit=15.0
+2024-08-27 01:03:30,574 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=195349.33333333334, ans=0.125
+2024-08-27 01:03:34,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=195349.33333333334, ans=0.0
+2024-08-27 01:03:45,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=195402.66666666666, ans=0.0
+2024-08-27 01:03:46,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=195402.66666666666, ans=0.0
+2024-08-27 01:03:49,206 INFO [train.py:1114] (0/4) Epoch 15, batch 1800, loss[loss=0.1946, simple_loss=0.2743, pruned_loss=0.04137, ctc_loss=0.08012, over 19617.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2665, pruned_loss=0.04422, ctc_loss=0.08256, over 3852338.26 frames. ], batch size: 55, lr: 9.92e-03, grad_scale: 16.0
+2024-08-27 01:03:55,285 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.77 vs. limit=15.0
+2024-08-27 01:03:57,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=195509.33333333334, ans=0.025
+2024-08-27 01:04:34,452 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.516e+02 1.927e+02 2.557e+02 3.874e+02, threshold=3.854e+02, percent-clipped=2.0
+2024-08-27 01:04:36,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=195562.66666666666, ans=0.125
+2024-08-27 01:05:43,283 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=195669.33333333334, ans=0.0
+2024-08-27 01:05:44,950 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=195669.33333333334, ans=0.0
+2024-08-27 01:05:54,925 INFO [train.py:1114] (0/4) Epoch 15, batch 1850, loss[loss=0.2031, simple_loss=0.2809, pruned_loss=0.04589, ctc_loss=0.0841, over 19592.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2664, pruned_loss=0.04428, ctc_loss=0.08265, over 3856614.83 frames. ], batch size: 57, lr: 9.92e-03, grad_scale: 16.0
+2024-08-27 01:06:10,896 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=195776.0, ans=0.125
+2024-08-27 01:06:22,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=195829.33333333334, ans=0.125
+2024-08-27 01:06:28,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=195829.33333333334, ans=6.0
+2024-08-27 01:06:33,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=195882.66666666666, ans=0.125
+2024-08-27 01:06:39,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=195882.66666666666, ans=0.2
+2024-08-27 01:06:41,033 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=195936.0, ans=0.125
+2024-08-27 01:06:49,449 INFO [train.py:1114] (0/4) Epoch 15, batch 1900, loss[loss=0.204, simple_loss=0.2794, pruned_loss=0.04626, ctc_loss=0.09039, over 19668.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2672, pruned_loss=0.04464, ctc_loss=0.08317, over 3862380.04 frames. ], batch size: 59, lr: 9.91e-03, grad_scale: 16.0
+2024-08-27 01:06:58,889 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.60 vs. limit=22.5
+2024-08-27 01:07:07,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=196042.66666666666, ans=0.0
+2024-08-27 01:07:09,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=196096.0, ans=0.1
+2024-08-27 01:07:43,087 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.422e+02 1.649e+02 2.231e+02 4.535e+02, threshold=3.297e+02, percent-clipped=1.0
+2024-08-27 01:08:04,607 INFO [train.py:1114] (0/4) Epoch 15, batch 1950, loss[loss=0.1836, simple_loss=0.2574, pruned_loss=0.03974, ctc_loss=0.07609, over 19589.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.268, pruned_loss=0.04472, ctc_loss=0.08327, over 3870699.90 frames. ], batch size: 52, lr: 9.90e-03, grad_scale: 16.0
+2024-08-27 01:08:13,934 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.63 vs. limit=15.0
+2024-08-27 01:08:45,066 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.71 vs. limit=15.0
+2024-08-27 01:08:49,896 INFO [train.py:1114] (0/4) Epoch 15, batch 2000, loss[loss=0.1693, simple_loss=0.235, pruned_loss=0.03802, ctc_loss=0.06882, over 19619.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2685, pruned_loss=0.0451, ctc_loss=0.08396, over 3855755.56 frames. ], batch size: 45, lr: 9.90e-03, grad_scale: 32.0
+2024-08-27 01:08:54,614 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=196522.66666666666, ans=0.125
+2024-08-27 01:09:34,014 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=196576.0, ans=0.09899494936611666
+2024-08-27 01:09:39,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=196576.0, ans=0.0
+2024-08-27 01:09:46,678 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.403e+02 1.640e+02 2.044e+02 3.050e+02, threshold=3.279e+02, percent-clipped=0.0
+2024-08-27 01:10:10,625 INFO [train.py:1114] (0/4) Epoch 15, batch 2050, loss[loss=0.1672, simple_loss=0.2386, pruned_loss=0.03454, ctc_loss=0.06685, over 19726.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2675, pruned_loss=0.04486, ctc_loss=0.08355, over 3851699.63 frames. ], batch size: 47, lr: 9.89e-03, grad_scale: 32.0
+2024-08-27 01:10:20,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=196842.66666666666, ans=0.125
+2024-08-27 01:10:36,272 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff3.min_abs, batch_count=196896.0, ans=0.2
+2024-08-27 01:10:54,828 INFO [train.py:1114] (0/4) Epoch 15, batch 2100, loss[loss=0.1998, simple_loss=0.2731, pruned_loss=0.04548, ctc_loss=0.089, over 19763.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2666, pruned_loss=0.04446, ctc_loss=0.08286, over 3858793.46 frames. ], batch size: 54, lr: 9.88e-03, grad_scale: 32.0
+2024-08-27 01:11:10,071 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.07 vs. limit=22.5
+2024-08-27 01:11:19,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=197109.33333333334, ans=0.125
+2024-08-27 01:11:20,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=197109.33333333334, ans=0.125
+2024-08-27 01:11:26,630 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.442e+02 1.703e+02 2.065e+02 4.080e+02, threshold=3.406e+02, percent-clipped=2.0
+2024-08-27 01:11:29,644 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.84 vs. limit=22.5
+2024-08-27 01:11:48,549 INFO [train.py:1114] (0/4) Epoch 15, batch 2150, loss[loss=0.1973, simple_loss=0.2683, pruned_loss=0.04632, ctc_loss=0.08416, over 19595.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2661, pruned_loss=0.04432, ctc_loss=0.0826, over 3869908.97 frames. ], batch size: 52, lr: 9.88e-03, grad_scale: 32.0
+2024-08-27 01:11:54,075 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=197322.66666666666, ans=0.07
+2024-08-27 01:11:56,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=197376.0, ans=0.2
+2024-08-27 01:12:00,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=197376.0, ans=0.0
+2024-08-27 01:12:02,523 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:12:06,903 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=197429.33333333334, ans=0.0
+2024-08-27 01:12:07,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=197429.33333333334, ans=0.125
+2024-08-27 01:12:13,862 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=197482.66666666666, ans=0.125
+2024-08-27 01:12:15,859 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.66 vs. limit=22.5
+2024-08-27 01:12:24,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197536.0, ans=0.1
+2024-08-27 01:12:25,258 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.36 vs. limit=15.0
+2024-08-27 01:12:26,776 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=197536.0, ans=0.0
+2024-08-27 01:12:29,839 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.58 vs. limit=15.0
+2024-08-27 01:12:31,732 INFO [train.py:1114] (0/4) Epoch 15, batch 2200, loss[loss=0.2053, simple_loss=0.2825, pruned_loss=0.04695, ctc_loss=0.08553, over 19603.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2665, pruned_loss=0.04435, ctc_loss=0.08264, over 3866967.17 frames. ], batch size: 57, lr: 9.87e-03, grad_scale: 16.0
+2024-08-27 01:12:34,538 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=197589.33333333334, ans=0.0
+2024-08-27 01:12:40,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=197642.66666666666, ans=0.125
+2024-08-27 01:12:43,044 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=197642.66666666666, ans=0.125
+2024-08-27 01:12:54,929 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.477e+02 1.816e+02 2.262e+02 3.833e+02, threshold=3.631e+02, percent-clipped=4.0
+2024-08-27 01:12:58,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=197749.33333333334, ans=0.1
+2024-08-27 01:12:59,438 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=197749.33333333334, ans=0.125
+2024-08-27 01:13:00,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197749.33333333334, ans=0.1
+2024-08-27 01:13:01,211 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.77 vs. limit=15.0
+2024-08-27 01:13:03,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=197749.33333333334, ans=0.1
+2024-08-27 01:13:15,775 INFO [train.py:1114] (0/4) Epoch 15, batch 2250, loss[loss=0.2114, simple_loss=0.29, pruned_loss=0.0488, ctc_loss=0.08783, over 19614.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.267, pruned_loss=0.0446, ctc_loss=0.08303, over 3866993.22 frames. ], batch size: 55, lr: 9.87e-03, grad_scale: 16.0
+2024-08-27 01:13:18,898 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.76 vs. limit=15.0
+2024-08-27 01:13:23,561 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=197909.33333333334, ans=0.0
+2024-08-27 01:13:24,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=197909.33333333334, ans=0.1
+2024-08-27 01:13:30,376 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=197909.33333333334, ans=0.125
+2024-08-27 01:13:45,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=198016.0, ans=0.025
+2024-08-27 01:13:49,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.74 vs. limit=15.0
+2024-08-27 01:13:52,535 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=198069.33333333334, ans=0.0
+2024-08-27 01:13:58,193 INFO [train.py:1114] (0/4) Epoch 15, batch 2300, loss[loss=0.1637, simple_loss=0.2376, pruned_loss=0.03235, ctc_loss=0.06268, over 19503.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2657, pruned_loss=0.04466, ctc_loss=0.08316, over 3861395.04 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 16.0
+2024-08-27 01:14:04,513 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.71 vs. limit=15.0
+2024-08-27 01:14:13,883 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=198176.0, ans=0.09899494936611666
+2024-08-27 01:14:57,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=198229.33333333334, ans=0.0
+2024-08-27 01:15:02,208 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.441e+02 1.617e+02 1.954e+02 3.129e+02, threshold=3.235e+02, percent-clipped=0.0
+2024-08-27 01:15:13,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=198336.0, ans=0.0
+2024-08-27 01:15:23,096 INFO [train.py:1114] (0/4) Epoch 15, batch 2350, loss[loss=0.2059, simple_loss=0.2841, pruned_loss=0.04615, ctc_loss=0.08841, over 19705.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2659, pruned_loss=0.04503, ctc_loss=0.08351, over 3864267.56 frames. ], batch size: 63, lr: 9.85e-03, grad_scale: 16.0
+2024-08-27 01:15:24,376 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=198389.33333333334, ans=0.125
+2024-08-27 01:15:37,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=198442.66666666666, ans=0.125
+2024-08-27 01:15:43,953 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=198496.0, ans=0.0
+2024-08-27 01:15:49,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=198549.33333333334, ans=0.125
+2024-08-27 01:15:57,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=198549.33333333334, ans=0.125
+2024-08-27 01:16:31,104 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=198656.0, ans=0.2
+2024-08-27 01:16:31,888 INFO [train.py:1114] (0/4) Epoch 15, batch 2400, loss[loss=0.2086, simple_loss=0.2825, pruned_loss=0.04866, ctc_loss=0.09331, over 19363.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2679, pruned_loss=0.04558, ctc_loss=0.08455, over 3858534.45 frames. ], batch size: 67, lr: 9.85e-03, grad_scale: 32.0
+2024-08-27 01:17:19,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=198656.0, ans=0.125
+2024-08-27 01:17:28,157 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=198709.33333333334, ans=0.025
+2024-08-27 01:17:32,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=198762.66666666666, ans=0.0
+2024-08-27 01:17:35,478 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.452e+02 1.605e+02 2.004e+02 3.213e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-27 01:17:39,734 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.67 vs. limit=15.0
+2024-08-27 01:17:41,695 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.79 vs. limit=6.0
+2024-08-27 01:17:57,656 INFO [train.py:1114] (0/4) Epoch 15, batch 2450, loss[loss=0.2385, simple_loss=0.2893, pruned_loss=0.06893, ctc_loss=0.1243, over 13381.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2712, pruned_loss=0.04799, ctc_loss=0.08942, over 3731305.61 frames. ], batch size: 141, lr: 9.84e-03, grad_scale: 32.0
+2024-08-27 01:18:22,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=199029.33333333334, ans=0.09899494936611666
+2024-08-27 01:18:39,445 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-15.pt
+2024-08-27 01:20:20,966 INFO [train.py:1114] (0/4) Epoch 16, batch 0, loss[loss=0.182, simple_loss=0.2472, pruned_loss=0.04261, ctc_loss=0.07891, over 19833.00 frames. ], tot_loss[loss=0.182, simple_loss=0.2472, pruned_loss=0.04261, ctc_loss=0.07891, over 19833.00 frames. ], batch size: 49, lr: 9.52e-03, grad_scale: 32.0
+2024-08-27 01:20:20,967 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-27 01:21:17,376 INFO [train.py:1146] (0/4) Epoch 16, validation: loss=0.1744, simple_loss=0.2673, pruned_loss=0.03034, ctc_loss=0.05204, over 944034.00 frames.
+2024-08-27 01:21:17,378 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-27 01:21:17,579 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=199130.66666666666, ans=0.0
+2024-08-27 01:21:24,980 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=199130.66666666666, ans=0.125
+2024-08-27 01:21:39,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=199237.33333333334, ans=0.125
+2024-08-27 01:21:48,826 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.35 vs. limit=15.0
+2024-08-27 01:21:54,894 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.674e+02 1.811e+02 2.106e+02 3.737e+02, threshold=3.622e+02, percent-clipped=2.0
+2024-08-27 01:21:55,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.38 vs. limit=22.5
+2024-08-27 01:21:57,166 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.06 vs. limit=15.0
+2024-08-27 01:22:02,689 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=199344.0, ans=0.0
+2024-08-27 01:22:03,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=199344.0, ans=0.0
+2024-08-27 01:22:07,225 INFO [train.py:1114] (0/4) Epoch 16, batch 50, loss[loss=0.1601, simple_loss=0.2367, pruned_loss=0.02991, ctc_loss=0.05931, over 19702.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2667, pruned_loss=0.04399, ctc_loss=0.08312, over 845293.61 frames. ], batch size: 47, lr: 9.51e-03, grad_scale: 32.0
+2024-08-27 01:22:07,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=199397.33333333334, ans=0.025
+2024-08-27 01:22:31,922 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=199504.0, ans=0.125
+2024-08-27 01:22:32,192 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.23 vs. limit=12.0
+2024-08-27 01:22:45,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=199610.66666666666, ans=0.0
+2024-08-27 01:22:53,635 INFO [train.py:1114] (0/4) Epoch 16, batch 100, loss[loss=0.1738, simple_loss=0.2524, pruned_loss=0.03444, ctc_loss=0.06588, over 19711.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2695, pruned_loss=0.04487, ctc_loss=0.08438, over 1499669.28 frames. ], batch size: 51, lr: 9.51e-03, grad_scale: 32.0
+2024-08-27 01:23:15,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=199770.66666666666, ans=0.0
+2024-08-27 01:23:33,428 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.159e+02 1.434e+02 1.536e+02 1.885e+02 3.287e+02, threshold=3.072e+02, percent-clipped=0.0
+2024-08-27 01:23:45,318 INFO [train.py:1114] (0/4) Epoch 16, batch 150, loss[loss=0.1781, simple_loss=0.2383, pruned_loss=0.04246, ctc_loss=0.0824, over 19725.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2665, pruned_loss=0.04424, ctc_loss=0.08296, over 2028484.55 frames. ], batch size: 47, lr: 9.50e-03, grad_scale: 32.0
+2024-08-27 01:23:45,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=199930.66666666666, ans=0.2
+2024-08-27 01:23:56,732 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=199984.0, ans=0.2
+2024-08-27 01:24:03,113 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=200037.33333333334, ans=0.0
+2024-08-27 01:24:09,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=200037.33333333334, ans=0.125
+2024-08-27 01:24:26,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=200144.0, ans=0.0
+2024-08-27 01:24:35,669 INFO [train.py:1114] (0/4) Epoch 16, batch 200, loss[loss=0.2032, simple_loss=0.2721, pruned_loss=0.04911, ctc_loss=0.08996, over 18100.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.2658, pruned_loss=0.0441, ctc_loss=0.08227, over 2435806.75 frames. ], batch size: 85, lr: 9.49e-03, grad_scale: 32.0
+2024-08-27 01:24:59,298 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.19 vs. limit=12.0
+2024-08-27 01:25:01,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=200304.0, ans=0.0
+2024-08-27 01:25:08,304 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.63 vs. limit=15.0
+2024-08-27 01:25:12,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=200357.33333333334, ans=0.0
+2024-08-27 01:25:14,234 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.526e+02 1.826e+02 2.235e+02 3.925e+02, threshold=3.652e+02, percent-clipped=6.0
+2024-08-27 01:25:46,116 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=200410.66666666666, ans=0.125
+2024-08-27 01:25:52,467 INFO [train.py:1114] (0/4) Epoch 16, batch 250, loss[loss=0.1881, simple_loss=0.2681, pruned_loss=0.03902, ctc_loss=0.07499, over 19425.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2655, pruned_loss=0.04379, ctc_loss=0.08176, over 2756064.49 frames. ], batch size: 67, lr: 9.49e-03, grad_scale: 32.0
+2024-08-27 01:26:04,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=200464.0, ans=0.0
+2024-08-27 01:26:07,712 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=200517.33333333334, ans=0.125
+2024-08-27 01:26:08,827 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=200517.33333333334, ans=0.125
+2024-08-27 01:26:13,395 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=200517.33333333334, ans=0.025
+2024-08-27 01:26:19,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=200570.66666666666, ans=0.0
+2024-08-27 01:26:19,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.09 vs. limit=15.0
+2024-08-27 01:26:20,000 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=200570.66666666666, ans=0.1
+2024-08-27 01:26:35,661 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=200624.0, ans=0.125
+2024-08-27 01:26:42,098 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=200677.33333333334, ans=0.2
+2024-08-27 01:26:42,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=200677.33333333334, ans=0.125
+2024-08-27 01:26:44,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=200677.33333333334, ans=0.125
+2024-08-27 01:26:46,538 INFO [train.py:1114] (0/4) Epoch 16, batch 300, loss[loss=0.2133, simple_loss=0.2802, pruned_loss=0.05398, ctc_loss=0.09578, over 19542.00 frames. ], tot_loss[loss=0.1925, simple_loss=0.2651, pruned_loss=0.04365, ctc_loss=0.08145, over 3000525.35 frames. ], batch size: 61, lr: 9.48e-03, grad_scale: 32.0
+2024-08-27 01:27:22,584 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.450e+02 1.677e+02 2.025e+02 3.129e+02, threshold=3.354e+02, percent-clipped=0.0
+2024-08-27 01:27:25,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=200944.0, ans=0.5
+2024-08-27 01:27:25,762 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.09 vs. limit=15.0
+2024-08-27 01:27:28,390 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=200944.0, ans=0.0
+2024-08-27 01:27:33,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=200944.0, ans=0.0
+2024-08-27 01:27:36,632 INFO [train.py:1114] (0/4) Epoch 16, batch 350, loss[loss=0.1687, simple_loss=0.2397, pruned_loss=0.03608, ctc_loss=0.06369, over 19764.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2663, pruned_loss=0.04408, ctc_loss=0.08215, over 3189776.14 frames. ], batch size: 48, lr: 9.48e-03, grad_scale: 32.0
+2024-08-27 01:27:37,933 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.56 vs. limit=12.0
+2024-08-27 01:27:48,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=201050.66666666666, ans=0.05
+2024-08-27 01:28:24,286 INFO [train.py:1114] (0/4) Epoch 16, batch 400, loss[loss=0.1812, simple_loss=0.2608, pruned_loss=0.03663, ctc_loss=0.07069, over 19493.00 frames. ], tot_loss[loss=0.1928, simple_loss=0.2656, pruned_loss=0.0437, ctc_loss=0.08163, over 3341541.24 frames. ], batch size: 54, lr: 9.47e-03, grad_scale: 32.0
+2024-08-27 01:28:46,184 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.89 vs. limit=22.5
+2024-08-27 01:28:49,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=201370.66666666666, ans=0.0
+2024-08-27 01:28:52,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=201424.0, ans=0.0
+2024-08-27 01:28:54,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=201424.0, ans=0.95
+2024-08-27 01:28:57,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=201424.0, ans=0.025
+2024-08-27 01:28:58,540 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.444e+02 1.663e+02 2.108e+02 3.293e+02, threshold=3.326e+02, percent-clipped=0.0
+2024-08-27 01:29:06,234 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:29:07,076 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=201477.33333333334, ans=0.125
+2024-08-27 01:29:10,807 INFO [train.py:1114] (0/4) Epoch 16, batch 450, loss[loss=0.1918, simple_loss=0.2747, pruned_loss=0.03946, ctc_loss=0.07521, over 19623.00 frames. ], tot_loss[loss=0.1923, simple_loss=0.2653, pruned_loss=0.04344, ctc_loss=0.08129, over 3449745.03 frames. ], batch size: 55, lr: 9.46e-03, grad_scale: 32.0
+2024-08-27 01:29:10,942 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=201530.66666666666, ans=0.125
+2024-08-27 01:29:11,068 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=201530.66666666666, ans=0.0
+2024-08-27 01:29:53,391 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=201744.0, ans=0.125
+2024-08-27 01:30:01,618 INFO [train.py:1114] (0/4) Epoch 16, batch 500, loss[loss=0.188, simple_loss=0.2675, pruned_loss=0.03903, ctc_loss=0.07602, over 19663.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2641, pruned_loss=0.04311, ctc_loss=0.08061, over 3545183.97 frames. ], batch size: 63, lr: 9.46e-03, grad_scale: 32.0
+2024-08-27 01:30:24,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=201904.0, ans=0.0
+2024-08-27 01:30:30,607 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.67 vs. limit=6.0
+2024-08-27 01:30:34,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=201957.33333333334, ans=0.125
+2024-08-27 01:30:35,562 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.65 vs. limit=10.0
+2024-08-27 01:30:39,486 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.484e+02 1.746e+02 2.096e+02 4.072e+02, threshold=3.492e+02, percent-clipped=1.0
+2024-08-27 01:30:46,187 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202010.66666666666, ans=0.1
+2024-08-27 01:30:51,382 INFO [train.py:1114] (0/4) Epoch 16, batch 550, loss[loss=0.2026, simple_loss=0.2763, pruned_loss=0.04693, ctc_loss=0.08764, over 19241.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.264, pruned_loss=0.04317, ctc_loss=0.08056, over 3606918.73 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 32.0
+2024-08-27 01:30:56,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=202064.0, ans=0.1
+2024-08-27 01:31:08,546 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.12 vs. limit=15.0
+2024-08-27 01:31:10,102 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=202170.66666666666, ans=0.125
+2024-08-27 01:31:12,832 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=202170.66666666666, ans=0.125
+2024-08-27 01:31:16,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202170.66666666666, ans=0.1
+2024-08-27 01:31:19,282 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:31:19,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=202224.0, ans=0.0
+2024-08-27 01:31:25,004 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=202224.0, ans=0.025
+2024-08-27 01:31:34,288 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=202277.33333333334, ans=0.125
+2024-08-27 01:31:36,215 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=202277.33333333334, ans=0.0
+2024-08-27 01:31:37,734 INFO [train.py:1114] (0/4) Epoch 16, batch 600, loss[loss=0.2014, simple_loss=0.2811, pruned_loss=0.04425, ctc_loss=0.08282, over 19441.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2644, pruned_loss=0.04325, ctc_loss=0.08078, over 3664117.11 frames. ], batch size: 67, lr: 9.45e-03, grad_scale: 32.0
+2024-08-27 01:31:45,254 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=202330.66666666666, ans=0.07
+2024-08-27 01:31:50,187 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.47 vs. limit=15.0
+2024-08-27 01:32:09,179 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.30 vs. limit=15.0
+2024-08-27 01:32:14,245 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.474e+02 1.879e+02 2.462e+02 5.922e+02, threshold=3.759e+02, percent-clipped=13.0
+2024-08-27 01:32:19,178 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=202544.0, ans=0.05
+2024-08-27 01:32:26,170 INFO [train.py:1114] (0/4) Epoch 16, batch 650, loss[loss=0.1879, simple_loss=0.2595, pruned_loss=0.0423, ctc_loss=0.07951, over 19769.00 frames. ], tot_loss[loss=0.1907, simple_loss=0.2636, pruned_loss=0.04285, ctc_loss=0.08027, over 3715349.48 frames. ], batch size: 54, lr: 9.44e-03, grad_scale: 32.0
+2024-08-27 01:32:30,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-27 01:32:33,814 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:32:38,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=202650.66666666666, ans=0.1
+2024-08-27 01:32:39,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=202650.66666666666, ans=0.035
+2024-08-27 01:32:47,711 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=202704.0, ans=0.025
+2024-08-27 01:33:03,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=202757.33333333334, ans=0.125
+2024-08-27 01:33:03,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=202757.33333333334, ans=0.0
+2024-08-27 01:33:18,144 INFO [train.py:1114] (0/4) Epoch 16, batch 700, loss[loss=0.1751, simple_loss=0.2471, pruned_loss=0.03707, ctc_loss=0.07261, over 19717.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2642, pruned_loss=0.04301, ctc_loss=0.08072, over 3747773.79 frames. ], batch size: 51, lr: 9.43e-03, grad_scale: 32.0
+2024-08-27 01:33:24,918 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=202864.0, ans=0.0
+2024-08-27 01:33:25,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=202864.0, ans=0.1
+2024-08-27 01:33:27,026 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.83 vs. limit=22.5
+2024-08-27 01:33:36,099 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=202970.66666666666, ans=0.2
+2024-08-27 01:33:41,524 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=202970.66666666666, ans=0.125
+2024-08-27 01:33:44,919 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.22 vs. limit=15.0
+2024-08-27 01:33:50,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=203024.0, ans=0.125
+2024-08-27 01:33:52,578 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.460e+02 1.707e+02 2.152e+02 4.812e+02, threshold=3.413e+02, percent-clipped=3.0
+2024-08-27 01:33:56,529 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=203077.33333333334, ans=0.0
+2024-08-27 01:33:59,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=203077.33333333334, ans=0.0
+2024-08-27 01:34:03,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.43 vs. limit=10.0
+2024-08-27 01:34:03,449 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.08 vs. limit=15.0
+2024-08-27 01:34:04,705 INFO [train.py:1114] (0/4) Epoch 16, batch 750, loss[loss=0.1891, simple_loss=0.2716, pruned_loss=0.03837, ctc_loss=0.07467, over 19493.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2637, pruned_loss=0.04277, ctc_loss=0.08016, over 3772968.21 frames. ], batch size: 54, lr: 9.43e-03, grad_scale: 32.0
+2024-08-27 01:34:09,850 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.68 vs. limit=15.0
+2024-08-27 01:34:12,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=203130.66666666666, ans=0.125
+2024-08-27 01:34:12,773 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.97 vs. limit=15.0
+2024-08-27 01:34:16,921 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=203184.0, ans=0.1
+2024-08-27 01:34:28,230 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=203237.33333333334, ans=0.05
+2024-08-27 01:34:57,324 INFO [train.py:1114] (0/4) Epoch 16, batch 800, loss[loss=0.1826, simple_loss=0.2526, pruned_loss=0.04065, ctc_loss=0.078, over 19828.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2637, pruned_loss=0.04286, ctc_loss=0.08026, over 3794945.14 frames. ], batch size: 49, lr: 9.42e-03, grad_scale: 32.0
+2024-08-27 01:35:49,637 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.508e+02 1.846e+02 2.334e+02 3.502e+02, threshold=3.692e+02, percent-clipped=1.0
+2024-08-27 01:36:01,627 INFO [train.py:1114] (0/4) Epoch 16, batch 850, loss[loss=0.1922, simple_loss=0.2723, pruned_loss=0.04118, ctc_loss=0.07444, over 19643.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2636, pruned_loss=0.04278, ctc_loss=0.08002, over 3814588.77 frames. ], batch size: 59, lr: 9.42e-03, grad_scale: 32.0
+2024-08-27 01:36:19,479 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=203717.33333333334, ans=0.125
+2024-08-27 01:36:45,506 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:36:46,455 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=203877.33333333334, ans=0.2
+2024-08-27 01:36:51,744 INFO [train.py:1114] (0/4) Epoch 16, batch 900, loss[loss=0.1789, simple_loss=0.2569, pruned_loss=0.03662, ctc_loss=0.06928, over 19403.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2644, pruned_loss=0.04328, ctc_loss=0.08095, over 3818681.23 frames. ], batch size: 48, lr: 9.41e-03, grad_scale: 32.0
+2024-08-27 01:37:15,223 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=204037.33333333334, ans=0.0
+2024-08-27 01:37:16,226 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=204037.33333333334, ans=0.2
+2024-08-27 01:37:26,150 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.118e+02 1.398e+02 1.563e+02 1.898e+02 3.698e+02, threshold=3.126e+02, percent-clipped=1.0
+2024-08-27 01:37:35,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=204144.0, ans=0.125
+2024-08-27 01:37:38,110 INFO [train.py:1114] (0/4) Epoch 16, batch 950, loss[loss=0.1774, simple_loss=0.2497, pruned_loss=0.03818, ctc_loss=0.07204, over 19500.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.265, pruned_loss=0.04362, ctc_loss=0.08151, over 3821331.88 frames. ], batch size: 49, lr: 9.40e-03, grad_scale: 32.0
+2024-08-27 01:37:42,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=204197.33333333334, ans=0.125
+2024-08-27 01:37:44,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=204197.33333333334, ans=0.125
+2024-08-27 01:37:56,666 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=204250.66666666666, ans=0.125
+2024-08-27 01:37:58,579 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=204304.0, ans=0.1
+2024-08-27 01:38:08,842 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=204357.33333333334, ans=0.1
+2024-08-27 01:38:15,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=204357.33333333334, ans=0.0
+2024-08-27 01:38:20,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=204410.66666666666, ans=0.125
+2024-08-27 01:38:29,234 INFO [train.py:1114] (0/4) Epoch 16, batch 1000, loss[loss=0.1733, simple_loss=0.2497, pruned_loss=0.03544, ctc_loss=0.06502, over 19863.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2655, pruned_loss=0.04377, ctc_loss=0.08182, over 3817099.88 frames. ], batch size: 52, lr: 9.40e-03, grad_scale: 32.0
+2024-08-27 01:38:33,646 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.61 vs. limit=22.5
+2024-08-27 01:38:47,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=204570.66666666666, ans=0.1
+2024-08-27 01:38:48,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=204570.66666666666, ans=0.0
+2024-08-27 01:38:54,792 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=204570.66666666666, ans=0.125
+2024-08-27 01:39:01,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=204624.0, ans=0.1
+2024-08-27 01:39:07,621 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.409e+02 1.616e+02 2.034e+02 3.159e+02, threshold=3.231e+02, percent-clipped=1.0
+2024-08-27 01:39:08,691 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=204624.0, ans=0.125
+2024-08-27 01:39:19,854 INFO [train.py:1114] (0/4) Epoch 16, batch 1050, loss[loss=0.1898, simple_loss=0.2694, pruned_loss=0.04064, ctc_loss=0.07231, over 19848.00 frames. ], tot_loss[loss=0.1925, simple_loss=0.265, pruned_loss=0.04364, ctc_loss=0.08177, over 3823437.21 frames. ], batch size: 57, lr: 9.39e-03, grad_scale: 32.0
+2024-08-27 01:39:27,550 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=204730.66666666666, ans=0.5
+2024-08-27 01:39:27,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=204730.66666666666, ans=0.0
+2024-08-27 01:39:28,546 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=204784.0, ans=0.125
+2024-08-27 01:39:29,374 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=204784.0, ans=0.125
+2024-08-27 01:39:37,059 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=204784.0, ans=0.0
+2024-08-27 01:39:37,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=204784.0, ans=0.125
+2024-08-27 01:39:39,345 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.06 vs. limit=15.0
+2024-08-27 01:39:40,299 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.78 vs. limit=12.0
+2024-08-27 01:39:43,676 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=204837.33333333334, ans=0.0
+2024-08-27 01:39:57,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=204944.0, ans=0.1
+2024-08-27 01:40:07,054 INFO [train.py:1114] (0/4) Epoch 16, batch 1100, loss[loss=0.183, simple_loss=0.258, pruned_loss=0.03941, ctc_loss=0.07289, over 19578.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2646, pruned_loss=0.04328, ctc_loss=0.08118, over 3830357.68 frames. ], batch size: 52, lr: 9.39e-03, grad_scale: 32.0
+2024-08-27 01:40:14,788 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.79 vs. limit=15.0
+2024-08-27 01:40:25,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=205104.0, ans=0.125
+2024-08-27 01:40:26,815 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.39 vs. limit=15.0
+2024-08-27 01:40:38,198 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.08 vs. limit=22.5
+2024-08-27 01:40:38,874 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=205157.33333333334, ans=0.125
+2024-08-27 01:40:39,912 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=205157.33333333334, ans=0.125
+2024-08-27 01:40:43,607 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=205157.33333333334, ans=0.025
+2024-08-27 01:40:44,426 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.474e+02 1.664e+02 2.002e+02 3.685e+02, threshold=3.328e+02, percent-clipped=2.0
+2024-08-27 01:40:55,297 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=205210.66666666666, ans=0.125
+2024-08-27 01:40:55,524 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.31 vs. limit=12.0
+2024-08-27 01:40:57,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=205210.66666666666, ans=0.0
+2024-08-27 01:40:58,199 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.29 vs. limit=6.0
+2024-08-27 01:40:59,536 INFO [train.py:1114] (0/4) Epoch 16, batch 1150, loss[loss=0.1812, simple_loss=0.2529, pruned_loss=0.03998, ctc_loss=0.07391, over 19586.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2643, pruned_loss=0.04323, ctc_loss=0.08104, over 3828742.07 frames. ], batch size: 52, lr: 9.38e-03, grad_scale: 32.0
+2024-08-27 01:42:51,165 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:43:16,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=205370.66666666666, ans=0.125
+2024-08-27 01:43:18,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=205424.0, ans=0.07
+2024-08-27 01:43:32,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=205477.33333333334, ans=0.125
+2024-08-27 01:43:39,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=205530.66666666666, ans=0.0
+2024-08-27 01:43:40,068 INFO [train.py:1114] (0/4) Epoch 16, batch 1200, loss[loss=0.1881, simple_loss=0.2645, pruned_loss=0.04054, ctc_loss=0.07653, over 19826.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2651, pruned_loss=0.04369, ctc_loss=0.08189, over 3825157.00 frames. ], batch size: 57, lr: 9.38e-03, grad_scale: 32.0
+2024-08-27 01:43:49,542 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=205530.66666666666, ans=0.125
+2024-08-27 01:44:03,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=205637.33333333334, ans=0.125
+2024-08-27 01:44:16,050 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.206e+02 1.520e+02 1.803e+02 2.158e+02 3.897e+02, threshold=3.606e+02, percent-clipped=2.0
+2024-08-27 01:44:27,523 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=205797.33333333334, ans=0.0
+2024-08-27 01:44:28,169 INFO [train.py:1114] (0/4) Epoch 16, batch 1250, loss[loss=0.2045, simple_loss=0.2813, pruned_loss=0.04697, ctc_loss=0.08458, over 19537.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2656, pruned_loss=0.04355, ctc_loss=0.08166, over 3843181.77 frames. ], batch size: 61, lr: 9.37e-03, grad_scale: 32.0
+2024-08-27 01:44:32,025 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=205797.33333333334, ans=0.0
+2024-08-27 01:44:37,220 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.67 vs. limit=15.0
+2024-08-27 01:45:08,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=206010.66666666666, ans=0.0
+2024-08-27 01:45:14,316 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=206010.66666666666, ans=0.025
+2024-08-27 01:45:15,169 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=206010.66666666666, ans=0.125
+2024-08-27 01:45:17,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=206064.0, ans=0.125
+2024-08-27 01:45:17,715 INFO [train.py:1114] (0/4) Epoch 16, batch 1300, loss[loss=0.1951, simple_loss=0.271, pruned_loss=0.04377, ctc_loss=0.07904, over 18946.00 frames. ], tot_loss[loss=0.1915, simple_loss=0.2645, pruned_loss=0.04307, ctc_loss=0.08087, over 3847064.72 frames. ], batch size: 76, lr: 9.36e-03, grad_scale: 32.0
+2024-08-27 01:45:20,586 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=206064.0, ans=0.125
+2024-08-27 01:45:27,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=206117.33333333334, ans=0.0
+2024-08-27 01:45:37,547 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.14 vs. limit=6.0
+2024-08-27 01:45:38,914 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=206170.66666666666, ans=0.125
+2024-08-27 01:45:52,819 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.516e+02 1.773e+02 2.282e+02 3.618e+02, threshold=3.546e+02, percent-clipped=1.0
+2024-08-27 01:45:56,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=206277.33333333334, ans=0.1
+2024-08-27 01:45:58,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=206277.33333333334, ans=0.125
+2024-08-27 01:46:06,818 INFO [train.py:1114] (0/4) Epoch 16, batch 1350, loss[loss=0.182, simple_loss=0.2603, pruned_loss=0.0374, ctc_loss=0.07227, over 19769.00 frames. ], tot_loss[loss=0.1914, simple_loss=0.2645, pruned_loss=0.04303, ctc_loss=0.08061, over 3859016.68 frames. ], batch size: 54, lr: 9.36e-03, grad_scale: 32.0
+2024-08-27 01:46:14,861 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=5.29 vs. limit=15.0
+2024-08-27 01:46:38,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=206490.66666666666, ans=0.1
+2024-08-27 01:46:53,274 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=206544.0, ans=0.0
+2024-08-27 01:46:56,696 INFO [train.py:1114] (0/4) Epoch 16, batch 1400, loss[loss=0.1671, simple_loss=0.2269, pruned_loss=0.03938, ctc_loss=0.07147, over 19644.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.264, pruned_loss=0.04283, ctc_loss=0.08029, over 3865651.80 frames. ], batch size: 46, lr: 9.35e-03, grad_scale: 32.0
+2024-08-27 01:47:01,565 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=206597.33333333334, ans=0.125
+2024-08-27 01:47:11,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=206650.66666666666, ans=0.0
+2024-08-27 01:47:27,210 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=206757.33333333334, ans=0.1
+2024-08-27 01:48:25,294 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.410e+02 1.569e+02 1.892e+02 4.037e+02, threshold=3.138e+02, percent-clipped=1.0
+2024-08-27 01:48:27,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=206810.66666666666, ans=0.2
+2024-08-27 01:48:37,431 INFO [train.py:1114] (0/4) Epoch 16, batch 1450, loss[loss=0.2154, simple_loss=0.2801, pruned_loss=0.05529, ctc_loss=0.1006, over 19666.00 frames. ], tot_loss[loss=0.1911, simple_loss=0.2643, pruned_loss=0.04285, ctc_loss=0.08049, over 3862662.88 frames. ], batch size: 63, lr: 9.35e-03, grad_scale: 32.0
+2024-08-27 01:49:12,114 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=207024.0, ans=0.04949747468305833
+2024-08-27 01:49:20,299 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=207077.33333333334, ans=0.025
+2024-08-27 01:49:23,210 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:49:25,843 INFO [train.py:1114] (0/4) Epoch 16, batch 1500, loss[loss=0.2127, simple_loss=0.2885, pruned_loss=0.04951, ctc_loss=0.09468, over 19605.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2652, pruned_loss=0.04305, ctc_loss=0.08101, over 3862701.02 frames. ], batch size: 57, lr: 9.34e-03, grad_scale: 32.0
+2024-08-27 01:49:52,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=207237.33333333334, ans=0.1
+2024-08-27 01:49:53,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=207237.33333333334, ans=0.5
+2024-08-27 01:50:03,763 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.422e+02 1.666e+02 2.042e+02 4.208e+02, threshold=3.332e+02, percent-clipped=3.0
+2024-08-27 01:50:18,921 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.26 vs. limit=22.5
+2024-08-27 01:50:22,133 INFO [train.py:1114] (0/4) Epoch 16, batch 1550, loss[loss=0.2124, simple_loss=0.2823, pruned_loss=0.05141, ctc_loss=0.09915, over 19591.00 frames. ], tot_loss[loss=0.192, simple_loss=0.2652, pruned_loss=0.04319, ctc_loss=0.0812, over 3846710.12 frames. ], batch size: 60, lr: 9.33e-03, grad_scale: 32.0
+2024-08-27 01:50:37,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=207450.66666666666, ans=0.025
+2024-08-27 01:51:04,486 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.02 vs. limit=22.5
+2024-08-27 01:51:10,018 INFO [train.py:1114] (0/4) Epoch 16, batch 1600, loss[loss=0.1712, simple_loss=0.2528, pruned_loss=0.0332, ctc_loss=0.0583, over 19829.00 frames. ], tot_loss[loss=0.1915, simple_loss=0.2646, pruned_loss=0.04306, ctc_loss=0.08086, over 3835215.71 frames. ], batch size: 57, lr: 9.33e-03, grad_scale: 32.0
+2024-08-27 01:51:26,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=207717.33333333334, ans=0.04949747468305833
+2024-08-27 01:51:26,419 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=207717.33333333334, ans=0.1
+2024-08-27 01:51:32,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=207770.66666666666, ans=0.125
+2024-08-27 01:51:50,486 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.43 vs. limit=15.0
+2024-08-27 01:51:55,662 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.401e+02 1.606e+02 1.975e+02 3.175e+02, threshold=3.213e+02, percent-clipped=0.0
+2024-08-27 01:51:57,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=207877.33333333334, ans=0.125
+2024-08-27 01:52:14,354 INFO [train.py:1114] (0/4) Epoch 16, batch 1650, loss[loss=0.1959, simple_loss=0.2745, pruned_loss=0.0425, ctc_loss=0.08096, over 19647.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2646, pruned_loss=0.04325, ctc_loss=0.08098, over 3832569.16 frames. ], batch size: 59, lr: 9.32e-03, grad_scale: 32.0
+2024-08-27 01:52:16,506 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=207930.66666666666, ans=0.2
+2024-08-27 01:52:31,291 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=207984.0, ans=0.2
+2024-08-27 01:52:41,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=208037.33333333334, ans=0.125
+2024-08-27 01:52:46,303 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.81 vs. limit=15.0
+2024-08-27 01:53:04,346 INFO [train.py:1114] (0/4) Epoch 16, batch 1700, loss[loss=0.1591, simple_loss=0.2274, pruned_loss=0.03332, ctc_loss=0.06052, over 19662.00 frames. ], tot_loss[loss=0.191, simple_loss=0.2642, pruned_loss=0.04287, ctc_loss=0.08024, over 3846213.36 frames. ], batch size: 46, lr: 9.32e-03, grad_scale: 64.0
+2024-08-27 01:53:06,662 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.48 vs. limit=15.0
+2024-08-27 01:53:14,969 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.79 vs. limit=22.5
+2024-08-27 01:53:21,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=208304.0, ans=0.125
+2024-08-27 01:53:42,381 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.468e+02 1.742e+02 2.214e+02 3.607e+02, threshold=3.484e+02, percent-clipped=2.0
+2024-08-27 01:53:53,050 INFO [train.py:1114] (0/4) Epoch 16, batch 1750, loss[loss=0.172, simple_loss=0.237, pruned_loss=0.03927, ctc_loss=0.07118, over 19699.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2638, pruned_loss=0.04284, ctc_loss=0.08008, over 3850569.47 frames. ], batch size: 45, lr: 9.31e-03, grad_scale: 32.0
+2024-08-27 01:53:58,488 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=208464.0, ans=0.1
+2024-08-27 01:54:00,416 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.83 vs. limit=15.0
+2024-08-27 01:54:01,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=208517.33333333334, ans=0.125
+2024-08-27 01:54:11,764 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=208570.66666666666, ans=0.125
+2024-08-27 01:54:26,730 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=208624.0, ans=0.0
+2024-08-27 01:54:28,607 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=208677.33333333334, ans=0.1
+2024-08-27 01:54:32,067 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=208677.33333333334, ans=0.1
+2024-08-27 01:54:37,047 INFO [train.py:1114] (0/4) Epoch 16, batch 1800, loss[loss=0.1879, simple_loss=0.2638, pruned_loss=0.04097, ctc_loss=0.0753, over 19609.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.264, pruned_loss=0.04294, ctc_loss=0.08003, over 3853165.70 frames. ], batch size: 55, lr: 9.31e-03, grad_scale: 32.0
+2024-08-27 01:54:39,352 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.09 vs. limit=22.5
+2024-08-27 01:54:54,628 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=208837.33333333334, ans=0.1
+2024-08-27 01:55:04,525 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.39 vs. limit=15.0
+2024-08-27 01:55:10,169 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.563e+02 1.995e+02 2.578e+02 4.186e+02, threshold=3.991e+02, percent-clipped=7.0
+2024-08-27 01:55:16,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=208944.0, ans=0.1
+2024-08-27 01:55:16,510 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=208944.0, ans=0.025
+2024-08-27 01:55:20,656 INFO [train.py:1114] (0/4) Epoch 16, batch 1850, loss[loss=0.2044, simple_loss=0.2837, pruned_loss=0.04605, ctc_loss=0.08266, over 19587.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2635, pruned_loss=0.04285, ctc_loss=0.07966, over 3856066.70 frames. ], batch size: 57, lr: 9.30e-03, grad_scale: 32.0
+2024-08-27 01:55:20,765 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=208997.33333333334, ans=0.125
+2024-08-27 01:55:25,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=208997.33333333334, ans=0.125
+2024-08-27 01:55:29,790 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.22 vs. limit=15.0
+2024-08-27 01:55:35,231 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=209050.66666666666, ans=0.1
+2024-08-27 01:55:41,543 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.38 vs. limit=15.0
+2024-08-27 01:55:44,723 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=209104.0, ans=0.0
+2024-08-27 01:56:02,058 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=209210.66666666666, ans=0.1
+2024-08-27 01:56:04,479 INFO [train.py:1114] (0/4) Epoch 16, batch 1900, loss[loss=0.2, simple_loss=0.2804, pruned_loss=0.04301, ctc_loss=0.08389, over 19654.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2644, pruned_loss=0.04311, ctc_loss=0.08002, over 3861856.26 frames. ], batch size: 59, lr: 9.29e-03, grad_scale: 32.0
+2024-08-27 01:56:12,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=209317.33333333334, ans=0.0
+2024-08-27 01:56:14,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=209317.33333333334, ans=0.0
+2024-08-27 01:56:20,318 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=209317.33333333334, ans=0.125
+2024-08-27 01:56:29,967 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
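+
+The scaling.py WithLoss lines track auxiliary losses attached directly to tensors such as attention weights; loss-sum=0.000e+00 means the penalty currently contributes nothing. The core trick is an autograd function that is the identity in forward but routes a gradient of one into the scalar auxiliary loss in backward, so the penalty is trained without being added to the printed loss. A sketch of that idea; the name tracking and periodic loss-sum logging of the real module are omitted:
+
+    import torch
+
+    class WithLoss(torch.autograd.Function):
+        @staticmethod
+        def forward(ctx, x, aux):
+            ctx.aux_shape = aux.shape
+            return x
+
+        @staticmethod
+        def backward(ctx, grad_out):
+            ones = torch.ones(ctx.aux_shape, device=grad_out.device,
+                              dtype=grad_out.dtype)
+            return grad_out, ones   # aux receives gradient 1
+
+    # usage: attn_weights = WithLoss.apply(attn_weights, aux_penalty)
+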
+2024-08-27 01:56:37,674 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.418e+02 1.626e+02 2.079e+02 4.675e+02, threshold=3.252e+02, percent-clipped=2.0
+2024-08-27 01:56:43,085 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=209477.33333333334, ans=0.2
+2024-08-27 01:56:48,334 INFO [train.py:1114] (0/4) Epoch 16, batch 1950, loss[loss=0.1775, simple_loss=0.2506, pruned_loss=0.03846, ctc_loss=0.06875, over 19579.00 frames. ], tot_loss[loss=0.1921, simple_loss=0.2655, pruned_loss=0.04327, ctc_loss=0.08042, over 3870276.46 frames. ], batch size: 52, lr: 9.29e-03, grad_scale: 32.0
+2024-08-27 01:56:48,530 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=209530.66666666666, ans=0.0
+2024-08-27 01:56:56,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=209584.0, ans=0.125
+2024-08-27 01:57:04,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=209584.0, ans=0.2
+2024-08-27 01:57:08,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=209637.33333333334, ans=0.95
+2024-08-27 01:57:10,963 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.44 vs. limit=15.0
+2024-08-27 01:57:22,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=209690.66666666666, ans=0.125
+2024-08-27 01:57:25,142 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209744.0, ans=0.1
+2024-08-27 01:57:27,831 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=209744.0, ans=0.0
+2024-08-27 01:57:29,609 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=209744.0, ans=0.125
+2024-08-27 01:57:34,488 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.80 vs. limit=15.0
+2024-08-27 01:57:35,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-27 01:57:35,866 INFO [train.py:1114] (0/4) Epoch 16, batch 2000, loss[loss=0.152, simple_loss=0.2227, pruned_loss=0.0294, ctc_loss=0.05601, over 19680.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.2664, pruned_loss=0.04385, ctc_loss=0.08161, over 3856251.01 frames. ], batch size: 45, lr: 9.28e-03, grad_scale: 32.0
+2024-08-27 01:57:38,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-27 01:57:39,615 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=209797.33333333334, ans=0.125
+2024-08-27 01:57:55,036 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.90 vs. limit=15.0
+2024-08-27 01:58:07,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=209957.33333333334, ans=0.025
+2024-08-27 01:58:07,793 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=209957.33333333334, ans=0.05
+2024-08-27 01:58:09,432 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.401e+02 1.655e+02 2.254e+02 4.011e+02, threshold=3.310e+02, percent-clipped=6.0
+2024-08-27 01:58:11,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=210010.66666666666, ans=0.125
+2024-08-27 01:58:13,126 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=210010.66666666666, ans=0.0
+2024-08-27 01:58:14,156 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=210010.66666666666, ans=0.0
+2024-08-27 01:58:14,975 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=210010.66666666666, ans=0.125
+2024-08-27 01:58:15,478 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.09 vs. limit=15.0
+2024-08-27 01:58:20,006 INFO [train.py:1114] (0/4) Epoch 16, batch 2050, loss[loss=0.164, simple_loss=0.2329, pruned_loss=0.03515, ctc_loss=0.06226, over 19724.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2651, pruned_loss=0.04358, ctc_loss=0.08115, over 3852418.76 frames. ], batch size: 47, lr: 9.28e-03, grad_scale: 32.0
+2024-08-27 01:58:41,858 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=210170.66666666666, ans=0.125
+2024-08-27 01:58:44,576 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=210170.66666666666, ans=0.125
+2024-08-27 01:58:52,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=210224.0, ans=0.1
+2024-08-27 01:59:03,126 INFO [train.py:1114] (0/4) Epoch 16, batch 2100, loss[loss=0.1883, simple_loss=0.2676, pruned_loss=0.03991, ctc_loss=0.07308, over 19785.00 frames. ], tot_loss[loss=0.1915, simple_loss=0.2645, pruned_loss=0.04313, ctc_loss=0.08036, over 3859205.20 frames. ], batch size: 54, lr: 9.27e-03, grad_scale: 32.0
+2024-08-27 01:59:08,578 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.33 vs. limit=15.0
+2024-08-27 01:59:17,993 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=210384.0, ans=0.125
+2024-08-27 01:59:23,871 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=210437.33333333334, ans=0.025
+2024-08-27 01:59:26,708 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.70 vs. limit=22.5
+2024-08-27 01:59:26,967 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.26 vs. limit=10.0
+2024-08-27 01:59:34,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=210490.66666666666, ans=0.0
+2024-08-27 01:59:35,724 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.547e+02 1.892e+02 2.472e+02 4.594e+02, threshold=3.784e+02, percent-clipped=3.0
+2024-08-27 01:59:41,216 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:59:42,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=210544.0, ans=0.125
+2024-08-27 01:59:43,997 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.92 vs. limit=22.5
+2024-08-27 01:59:47,026 INFO [train.py:1114] (0/4) Epoch 16, batch 2150, loss[loss=0.1746, simple_loss=0.2515, pruned_loss=0.03507, ctc_loss=0.06905, over 19604.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2636, pruned_loss=0.04276, ctc_loss=0.0797, over 3869138.34 frames. ], batch size: 52, lr: 9.27e-03, grad_scale: 32.0
+2024-08-27 01:59:59,580 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=210650.66666666666, ans=6.0
+2024-08-27 02:00:02,742 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=210650.66666666666, ans=0.125
+2024-08-27 02:00:13,179 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=210757.33333333334, ans=0.125
+2024-08-27 02:00:30,187 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.96 vs. limit=5.0
+2024-08-27 02:00:30,373 INFO [train.py:1114] (0/4) Epoch 16, batch 2200, loss[loss=0.1957, simple_loss=0.268, pruned_loss=0.04547, ctc_loss=0.08118, over 19590.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2634, pruned_loss=0.04246, ctc_loss=0.07936, over 3866681.74 frames. ], batch size: 57, lr: 9.26e-03, grad_scale: 32.0
+2024-08-27 02:01:06,355 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.493e+02 1.671e+02 2.113e+02 4.070e+02, threshold=3.342e+02, percent-clipped=1.0
+2024-08-27 02:01:11,525 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=211077.33333333334, ans=0.125
+2024-08-27 02:01:17,554 INFO [train.py:1114] (0/4) Epoch 16, batch 2250, loss[loss=0.1968, simple_loss=0.2752, pruned_loss=0.043, ctc_loss=0.08093, over 19629.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2637, pruned_loss=0.04244, ctc_loss=0.0794, over 3866935.30 frames. ], batch size: 55, lr: 9.25e-03, grad_scale: 32.0
+2024-08-27 02:01:31,763 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.81 vs. limit=15.0
+2024-08-27 02:01:39,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=211237.33333333334, ans=0.125
+2024-08-27 02:01:39,396 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.43 vs. limit=6.0
+2024-08-27 02:01:44,297 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:01:49,469 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=211290.66666666666, ans=0.125
+2024-08-27 02:01:58,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=211344.0, ans=0.0
+2024-08-27 02:02:00,434 INFO [train.py:1114] (0/4) Epoch 16, batch 2300, loss[loss=0.1693, simple_loss=0.2442, pruned_loss=0.03427, ctc_loss=0.06488, over 19502.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2628, pruned_loss=0.04242, ctc_loss=0.07917, over 3861191.67 frames. ], batch size: 49, lr: 9.25e-03, grad_scale: 32.0
+2024-08-27 02:02:00,640 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=211397.33333333334, ans=0.0
+2024-08-27 02:02:12,601 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=211450.66666666666, ans=0.04949747468305833
+2024-08-27 02:02:16,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=211450.66666666666, ans=0.0
+2024-08-27 02:02:25,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211557.33333333334, ans=0.1
+2024-08-27 02:02:28,180 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=211557.33333333334, ans=0.1
+2024-08-27 02:02:33,257 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.186e+02 1.480e+02 1.722e+02 2.096e+02 3.640e+02, threshold=3.444e+02, percent-clipped=3.0
+2024-08-27 02:02:41,819 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.35 vs. limit=15.0
+2024-08-27 02:02:44,148 INFO [train.py:1114] (0/4) Epoch 16, batch 2350, loss[loss=0.2232, simple_loss=0.2886, pruned_loss=0.05842, ctc_loss=0.1023, over 19649.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2636, pruned_loss=0.04302, ctc_loss=0.08004, over 3862969.39 frames. ], batch size: 63, lr: 9.24e-03, grad_scale: 32.0
+2024-08-27 02:02:56,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=211717.33333333334, ans=0.0
+2024-08-27 02:03:22,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=211824.0, ans=0.0
+2024-08-27 02:03:25,501 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=211877.33333333334, ans=0.125
+2024-08-27 02:03:27,015 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=211877.33333333334, ans=0.125
+2024-08-27 02:03:30,553 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=211877.33333333334, ans=0.1
+2024-08-27 02:03:33,821 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211930.66666666666, ans=0.1
+2024-08-27 02:03:34,519 INFO [train.py:1114] (0/4) Epoch 16, batch 2400, loss[loss=0.225, simple_loss=0.294, pruned_loss=0.05828, ctc_loss=0.09858, over 19453.00 frames. ], tot_loss[loss=0.1931, simple_loss=0.2659, pruned_loss=0.0439, ctc_loss=0.08143, over 3857514.17 frames. ], batch size: 67, lr: 9.24e-03, grad_scale: 32.0
+2024-08-27 02:03:39,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=211930.66666666666, ans=0.125
+2024-08-27 02:03:43,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=211984.0, ans=0.025
+2024-08-27 02:03:44,189 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:03:48,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=211984.0, ans=0.0
+2024-08-27 02:03:53,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=212037.33333333334, ans=0.125
+2024-08-27 02:03:57,109 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:04:01,452 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=212090.66666666666, ans=0.0
+2024-08-27 02:04:07,966 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.442e+02 1.653e+02 2.239e+02 3.362e+02, threshold=3.307e+02, percent-clipped=0.0
+2024-08-27 02:04:08,294 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=212090.66666666666, ans=0.0
+2024-08-27 02:04:13,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=212144.0, ans=0.2
+2024-08-27 02:04:17,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=212144.0, ans=0.0
+2024-08-27 02:04:18,796 INFO [train.py:1114] (0/4) Epoch 16, batch 2450, loss[loss=0.2435, simple_loss=0.2889, pruned_loss=0.07095, ctc_loss=0.1406, over 13465.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2692, pruned_loss=0.04618, ctc_loss=0.08601, over 3733785.23 frames. ], batch size: 140, lr: 9.23e-03, grad_scale: 32.0
+2024-08-27 02:04:23,989 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.89 vs. limit=15.0
+2024-08-27 02:04:30,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=212250.66666666666, ans=0.025
+2024-08-27 02:04:39,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.60 vs. limit=22.5
+2024-08-27 02:04:42,184 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=212304.0, ans=0.0
+2024-08-27 02:04:52,941 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-16.pt
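+
+checkpoint.py writes both the epoch-N.pt snapshot above and the periodic checkpoint-STEP.pt files seen later in the log. A hedged sketch of the write; the real icefall helper also stores optimizer, scheduler, sampler and grad-scaler state:
+
+    import torch
+
+    def save_checkpoint(filename, model, optimizer, epoch, batch_idx_train):
+        torch.save(
+            {
+                "model": model.state_dict(),
+                "optimizer": optimizer.state_dict(),
+                "epoch": epoch,
+                "batch_idx_train": batch_idx_train,
+            },
+            filename,
+        )
+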
+2024-08-27 02:05:43,525 INFO [train.py:1114] (0/4) Epoch 17, batch 0, loss[loss=0.185, simple_loss=0.2569, pruned_loss=0.04078, ctc_loss=0.07875, over 19800.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2569, pruned_loss=0.04078, ctc_loss=0.07875, over 19800.00 frames. ], batch size: 49, lr: 8.95e-03, grad_scale: 32.0
+2024-08-27 02:05:43,526 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-27 02:05:50,949 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.3685, 4.7511, 5.3221, 5.1868], device='cuda:0')
+2024-08-27 02:05:53,290 INFO [train.py:1146] (0/4) Epoch 17, validation: loss=0.172, simple_loss=0.265, pruned_loss=0.02949, ctc_loss=0.04976, over 944034.00 frames.
+2024-08-27 02:05:53,290 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
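+
+The "Maximum memory allocated" line corresponds to PyTorch's per-device peak-allocation counter, read after the validation pass; a minimal equivalent:
+
+    import torch
+
+    peak_mb = torch.cuda.max_memory_allocated(device=0) // (1024 * 1024)
+    print(f"Maximum memory allocated so far is {peak_mb}MB")
+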
+2024-08-27 02:06:02,521 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=212458.66666666666, ans=0.1
+2024-08-27 02:06:05,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_na.min_abs, batch_count=212458.66666666666, ans=0.02
+2024-08-27 02:06:12,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=212512.0, ans=0.125
+2024-08-27 02:06:12,854 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=212512.0, ans=0.0
+2024-08-27 02:06:40,302 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.204e+02 1.629e+02 1.801e+02 2.001e+02 3.255e+02, threshold=3.602e+02, percent-clipped=0.0
+2024-08-27 02:06:40,336 INFO [train.py:1114] (0/4) Epoch 17, batch 50, loss[loss=0.17, simple_loss=0.2384, pruned_loss=0.03751, ctc_loss=0.06631, over 19711.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2649, pruned_loss=0.0421, ctc_loss=0.07957, over 844772.04 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 16.0
+2024-08-27 02:06:48,627 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=212725.33333333334, ans=0.1
+2024-08-27 02:06:51,696 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.13 vs. limit=15.0
+2024-08-27 02:07:04,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=212778.66666666666, ans=0.125
+2024-08-27 02:07:04,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=212778.66666666666, ans=0.05
+2024-08-27 02:07:13,796 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.31 vs. limit=12.0
+2024-08-27 02:07:21,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=212885.33333333334, ans=10.0
+2024-08-27 02:07:22,597 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212885.33333333334, ans=0.1
+2024-08-27 02:07:29,652 INFO [train.py:1114] (0/4) Epoch 17, batch 100, loss[loss=0.1758, simple_loss=0.2502, pruned_loss=0.03671, ctc_loss=0.06976, over 19727.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2665, pruned_loss=0.043, ctc_loss=0.08084, over 1498273.11 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 16.0
+2024-08-27 02:08:09,499 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=213098.66666666666, ans=0.125
+2024-08-27 02:08:16,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=213152.0, ans=0.125
+2024-08-27 02:08:20,135 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.458e+02 1.665e+02 2.006e+02 3.256e+02, threshold=3.330e+02, percent-clipped=0.0
+2024-08-27 02:08:20,169 INFO [train.py:1114] (0/4) Epoch 17, batch 150, loss[loss=0.1762, simple_loss=0.2435, pruned_loss=0.03967, ctc_loss=0.07384, over 19685.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.2645, pruned_loss=0.04269, ctc_loss=0.08014, over 2027311.53 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 16.0
+2024-08-27 02:08:23,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=213205.33333333334, ans=0.2
+2024-08-27 02:08:41,271 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-40000.pt
+2024-08-27 02:08:57,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=213312.0, ans=0.125
+2024-08-27 02:10:35,319 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=213418.66666666666, ans=10.0
+2024-08-27 02:10:53,153 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=213418.66666666666, ans=0.2
+2024-08-27 02:10:55,458 INFO [train.py:1114] (0/4) Epoch 17, batch 200, loss[loss=0.216, simple_loss=0.2819, pruned_loss=0.05503, ctc_loss=0.1001, over 18082.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2636, pruned_loss=0.04255, ctc_loss=0.07958, over 2434813.02 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 16.0
+2024-08-27 02:10:56,492 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=213472.0, ans=0.0
+2024-08-27 02:11:03,915 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213472.0, ans=0.1
+2024-08-27 02:11:04,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=213472.0, ans=0.0
+2024-08-27 02:11:04,969 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=213472.0, ans=0.95
+2024-08-27 02:11:07,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=213525.33333333334, ans=0.125
+2024-08-27 02:11:19,745 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=213578.66666666666, ans=0.125
+2024-08-27 02:11:23,757 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.13 vs. limit=12.0
+2024-08-27 02:11:41,008 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=213685.33333333334, ans=0.1
+2024-08-27 02:11:48,513 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=213738.66666666666, ans=0.1
+2024-08-27 02:11:49,177 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.468e+02 1.730e+02 2.457e+02 4.645e+02, threshold=3.460e+02, percent-clipped=6.0
+2024-08-27 02:11:49,210 INFO [train.py:1114] (0/4) Epoch 17, batch 250, loss[loss=0.192, simple_loss=0.2647, pruned_loss=0.04364, ctc_loss=0.0801, over 19421.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2626, pruned_loss=0.04198, ctc_loss=0.07852, over 2755282.98 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 16.0
+2024-08-27 02:12:14,665 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:12:20,501 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.31 vs. limit=15.0
+2024-08-27 02:12:25,883 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.32 vs. limit=12.0
+2024-08-27 02:12:39,445 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.12 vs. limit=22.5
+2024-08-27 02:14:34,926 INFO [train.py:1114] (0/4) Epoch 17, batch 300, loss[loss=0.1925, simple_loss=0.2716, pruned_loss=0.04132, ctc_loss=0.07691, over 19530.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2621, pruned_loss=0.04181, ctc_loss=0.07808, over 3000773.43 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 16.0
+2024-08-27 02:14:36,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=214005.33333333334, ans=0.125
+2024-08-27 02:14:40,167 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.58 vs. limit=15.0
+2024-08-27 02:14:46,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=214058.66666666666, ans=0.0
+2024-08-27 02:14:48,255 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=214058.66666666666, ans=0.04949747468305833
+2024-08-27 02:14:59,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=214112.0, ans=0.0
+2024-08-27 02:16:23,327 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.13 vs. limit=15.0
+2024-08-27 02:16:48,693 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.450e+02 1.705e+02 2.074e+02 4.169e+02, threshold=3.410e+02, percent-clipped=2.0
+2024-08-27 02:16:48,727 INFO [train.py:1114] (0/4) Epoch 17, batch 350, loss[loss=0.1651, simple_loss=0.2397, pruned_loss=0.03291, ctc_loss=0.06195, over 19745.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.263, pruned_loss=0.0421, ctc_loss=0.07876, over 3190658.25 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 16.0
+2024-08-27 02:16:49,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214272.0, ans=0.1
+2024-08-27 02:16:57,434 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-27 02:17:03,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-27 02:17:17,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=214432.0, ans=0.2
+2024-08-27 02:17:25,303 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=214432.0, ans=0.125
+2024-08-27 02:17:31,773 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=214485.33333333334, ans=0.05
+2024-08-27 02:17:31,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214485.33333333334, ans=0.1
+2024-08-27 02:17:36,092 INFO [train.py:1114] (0/4) Epoch 17, batch 400, loss[loss=0.1921, simple_loss=0.2719, pruned_loss=0.0401, ctc_loss=0.08008, over 19506.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2628, pruned_loss=0.04204, ctc_loss=0.0786, over 3343295.33 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-27 02:17:39,161 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=214538.66666666666, ans=0.05
+2024-08-27 02:17:42,987 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=214538.66666666666, ans=0.1
+2024-08-27 02:18:09,189 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=214698.66666666666, ans=0.125
+2024-08-27 02:18:15,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=214752.0, ans=0.125
+2024-08-27 02:18:25,568 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.479e+02 1.707e+02 2.031e+02 4.496e+02, threshold=3.413e+02, percent-clipped=2.0
+2024-08-27 02:18:25,601 INFO [train.py:1114] (0/4) Epoch 17, batch 450, loss[loss=0.1788, simple_loss=0.2674, pruned_loss=0.03229, ctc_loss=0.06397, over 19619.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2632, pruned_loss=0.04223, ctc_loss=0.07889, over 3449981.31 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-27 02:18:29,362 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=214805.33333333334, ans=0.015
+2024-08-27 02:18:29,566 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214805.33333333334, ans=0.1
+2024-08-27 02:18:49,425 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.12 vs. limit=22.5
+2024-08-27 02:18:52,685 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:18:53,012 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.00 vs. limit=12.0
+2024-08-27 02:19:11,642 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=215018.66666666666, ans=0.0
+2024-08-27 02:19:18,887 INFO [train.py:1114] (0/4) Epoch 17, batch 500, loss[loss=0.2009, simple_loss=0.2799, pruned_loss=0.04453, ctc_loss=0.08197, over 19663.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2625, pruned_loss=0.04171, ctc_loss=0.0781, over 3545189.32 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-27 02:19:19,023 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:19:20,790 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=215072.0, ans=0.0
+2024-08-27 02:19:24,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=215072.0, ans=0.125
+2024-08-27 02:19:25,606 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=215072.0, ans=0.07
+2024-08-27 02:19:28,441 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=215125.33333333334, ans=0.0
+2024-08-27 02:19:44,269 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=215178.66666666666, ans=0.0
+2024-08-27 02:19:52,625 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=215232.0, ans=0.5
+2024-08-27 02:20:25,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=215285.33333333334, ans=0.2
+2024-08-27 02:20:44,559 INFO [train.py:1114] (0/4) Epoch 17, batch 550, loss[loss=0.2107, simple_loss=0.2823, pruned_loss=0.05038, ctc_loss=0.09573, over 19206.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2627, pruned_loss=0.04193, ctc_loss=0.07835, over 3607486.88 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 16.0
+2024-08-27 02:20:45,394 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.446e+02 1.711e+02 2.254e+02 3.980e+02, threshold=3.422e+02, percent-clipped=2.0
+2024-08-27 02:20:45,750 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=215338.66666666666, ans=0.125
+2024-08-27 02:20:47,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=215338.66666666666, ans=0.0
+2024-08-27 02:21:09,913 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=215445.33333333334, ans=0.125
+2024-08-27 02:21:19,036 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=215498.66666666666, ans=0.025
+2024-08-27 02:21:32,071 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=215605.33333333334, ans=0.0
+2024-08-27 02:21:42,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_na.min_abs, batch_count=215605.33333333334, ans=0.02
+2024-08-27 02:21:43,257 INFO [train.py:1114] (0/4) Epoch 17, batch 600, loss[loss=0.2051, simple_loss=0.2783, pruned_loss=0.04752, ctc_loss=0.09202, over 19446.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.263, pruned_loss=0.04196, ctc_loss=0.07839, over 3664553.91 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 16.0
+2024-08-27 02:21:56,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=215658.66666666666, ans=0.0
+2024-08-27 02:21:57,138 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=215658.66666666666, ans=0.0
+2024-08-27 02:22:27,213 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=215818.66666666666, ans=0.125
+2024-08-27 02:22:35,481 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.71 vs. limit=6.0
+2024-08-27 02:22:35,807 INFO [train.py:1114] (0/4) Epoch 17, batch 650, loss[loss=0.1712, simple_loss=0.2479, pruned_loss=0.03456, ctc_loss=0.06352, over 19762.00 frames. ], tot_loss[loss=0.1881, simple_loss=0.2621, pruned_loss=0.04154, ctc_loss=0.07772, over 3715413.88 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 16.0
+2024-08-27 02:22:36,659 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.454e+02 1.765e+02 2.281e+02 4.784e+02, threshold=3.530e+02, percent-clipped=4.0
+2024-08-27 02:23:10,983 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216032.0, ans=0.1
+2024-08-27 02:23:13,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=216032.0, ans=0.125
+2024-08-27 02:23:24,052 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.46 vs. limit=15.0
+2024-08-27 02:23:25,370 INFO [train.py:1114] (0/4) Epoch 17, batch 700, loss[loss=0.1732, simple_loss=0.2573, pruned_loss=0.03241, ctc_loss=0.06087, over 19728.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2631, pruned_loss=0.04178, ctc_loss=0.07808, over 3747076.79 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 16.0
+2024-08-27 02:23:28,365 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=216138.66666666666, ans=0.125
+2024-08-27 02:23:46,302 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.40 vs. limit=15.0
+2024-08-27 02:23:47,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=216245.33333333334, ans=0.0
+2024-08-27 02:23:50,641 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=216245.33333333334, ans=0.125
+2024-08-27 02:23:53,732 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.79 vs. limit=15.0
+2024-08-27 02:27:37,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=216352.0, ans=0.1
+2024-08-27 02:28:51,343 INFO [train.py:1114] (0/4) Epoch 17, batch 750, loss[loss=0.1876, simple_loss=0.2713, pruned_loss=0.03735, ctc_loss=0.073, over 19495.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2632, pruned_loss=0.04194, ctc_loss=0.0783, over 3773742.85 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 16.0
+2024-08-27 02:29:21,548 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.483e+02 1.820e+02 2.509e+02 4.091e+02, threshold=3.640e+02, percent-clipped=8.0
+2024-08-27 02:32:03,751 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216405.33333333334, ans=0.1
+2024-08-27 02:38:07,121 INFO [train.py:1114] (0/4) Epoch 17, batch 800, loss[loss=0.1737, simple_loss=0.2405, pruned_loss=0.03873, ctc_loss=0.07362, over 19403.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2631, pruned_loss=0.04198, ctc_loss=0.07858, over 3795895.10 frames. ], batch size: 48, lr: 8.86e-03, grad_scale: 32.0
+2024-08-27 02:39:28,315 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216725.33333333334, ans=0.1
+2024-08-27 02:39:36,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=216725.33333333334, ans=0.025
+2024-08-27 02:40:06,846 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=216778.66666666666, ans=0.025
+2024-08-27 02:40:10,062 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.86 vs. limit=15.0
+2024-08-27 02:40:19,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=216832.0, ans=0.125
+2024-08-27 02:40:43,402 INFO [train.py:1114] (0/4) Epoch 17, batch 850, loss[loss=0.1971, simple_loss=0.2769, pruned_loss=0.04329, ctc_loss=0.07703, over 19634.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2629, pruned_loss=0.04183, ctc_loss=0.07816, over 3815942.73 frames. ], batch size: 59, lr: 8.86e-03, grad_scale: 32.0
+2024-08-27 02:40:44,265 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.490e+02 1.788e+02 2.181e+02 3.218e+02, threshold=3.576e+02, percent-clipped=0.0
+2024-08-27 02:40:46,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=216938.66666666666, ans=0.0
+2024-08-27 02:40:56,120 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=216992.0, ans=0.125
+2024-08-27 02:40:57,249 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216992.0, ans=0.1
+2024-08-27 02:41:18,195 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=217045.33333333334, ans=0.1
+2024-08-27 02:41:23,739 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.63 vs. limit=6.0
+2024-08-27 02:41:28,220 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=217098.66666666666, ans=0.025
+2024-08-27 02:41:31,531 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=217098.66666666666, ans=0.025
+2024-08-27 02:41:34,066 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=217098.66666666666, ans=0.07
+2024-08-27 02:41:34,977 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=217098.66666666666, ans=0.1
+2024-08-27 02:41:37,370 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.60 vs. limit=10.0
+2024-08-27 02:41:48,113 INFO [train.py:1114] (0/4) Epoch 17, batch 900, loss[loss=0.1622, simple_loss=0.2363, pruned_loss=0.03176, ctc_loss=0.06143, over 19407.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2633, pruned_loss=0.04218, ctc_loss=0.07889, over 3820019.15 frames. ], batch size: 48, lr: 8.85e-03, grad_scale: 32.0
+2024-08-27 02:41:49,544 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.92 vs. limit=22.5
+2024-08-27 02:41:52,175 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-27 02:42:01,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=217258.66666666666, ans=0.125
+2024-08-27 02:42:03,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=217258.66666666666, ans=0.2
+2024-08-27 02:42:03,902 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=217258.66666666666, ans=0.125
+2024-08-27 02:42:23,128 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=217365.33333333334, ans=0.2
+2024-08-27 02:42:37,989 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=217418.66666666666, ans=0.07
+2024-08-27 02:42:42,348 INFO [train.py:1114] (0/4) Epoch 17, batch 950, loss[loss=0.1934, simple_loss=0.2605, pruned_loss=0.04579, ctc_loss=0.08676, over 19522.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2637, pruned_loss=0.04243, ctc_loss=0.0794, over 3821118.76 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-27 02:42:43,217 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.442e+02 1.596e+02 1.963e+02 3.277e+02, threshold=3.193e+02, percent-clipped=0.0
+2024-08-27 02:43:04,944 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=217525.33333333334, ans=0.1
+2024-08-27 02:44:44,877 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=217632.0, ans=0.1
+2024-08-27 02:45:06,516 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=217685.33333333334, ans=0.2
+2024-08-27 02:45:22,505 INFO [train.py:1114] (0/4) Epoch 17, batch 1000, loss[loss=0.1624, simple_loss=0.2442, pruned_loss=0.02932, ctc_loss=0.05473, over 19843.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.2644, pruned_loss=0.04272, ctc_loss=0.07987, over 3816399.91 frames. ], batch size: 52, lr: 8.84e-03, grad_scale: 32.0
+2024-08-27 02:45:22,760 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=217738.66666666666, ans=0.0
+2024-08-27 02:45:22,910 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=217738.66666666666, ans=0.125
+2024-08-27 02:45:24,515 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=217738.66666666666, ans=0.1
+2024-08-27 02:45:44,711 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.76 vs. limit=15.0
+2024-08-27 02:45:46,338 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=217792.0, ans=0.125
+2024-08-27 02:45:48,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=217845.33333333334, ans=0.1
+2024-08-27 02:45:51,010 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=217845.33333333334, ans=0.5
+2024-08-27 02:46:05,623 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=217845.33333333334, ans=0.2
+2024-08-27 02:46:21,426 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.65 vs. limit=22.5
+2024-08-27 02:46:27,356 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=218005.33333333334, ans=0.125
+2024-08-27 02:46:28,504 INFO [train.py:1114] (0/4) Epoch 17, batch 1050, loss[loss=0.1996, simple_loss=0.2735, pruned_loss=0.04417, ctc_loss=0.09344, over 19845.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2637, pruned_loss=0.04245, ctc_loss=0.07927, over 3821538.54 frames. ], batch size: 57, lr: 8.84e-03, grad_scale: 32.0
+2024-08-27 02:46:29,428 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.403e+02 1.586e+02 2.025e+02 2.959e+02, threshold=3.171e+02, percent-clipped=1.0
+2024-08-27 02:46:29,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=218005.33333333334, ans=0.125
+2024-08-27 02:47:08,201 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=218112.0, ans=0.125
+2024-08-27 02:47:11,323 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.43 vs. limit=12.0
+2024-08-27 02:47:25,232 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=218218.66666666666, ans=0.125
+2024-08-27 02:47:38,618 INFO [train.py:1114] (0/4) Epoch 17, batch 1100, loss[loss=0.1778, simple_loss=0.2591, pruned_loss=0.03525, ctc_loss=0.06508, over 19593.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2631, pruned_loss=0.04199, ctc_loss=0.0785, over 3829762.59 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-27 02:47:39,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=218272.0, ans=0.2
+2024-08-27 02:47:43,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=218272.0, ans=0.125
+2024-08-27 02:47:49,017 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=218325.33333333334, ans=0.125
+2024-08-27 02:48:24,852 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.10 vs. limit=22.5
+2024-08-27 02:48:26,524 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.56 vs. limit=15.0
+2024-08-27 02:48:29,040 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=218325.33333333334, ans=0.125
+2024-08-27 02:48:55,384 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=218378.66666666666, ans=0.1
+2024-08-27 02:49:08,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=218432.0, ans=0.1
+2024-08-27 02:49:09,403 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=218432.0, ans=0.2
+2024-08-27 02:49:09,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=218432.0, ans=0.025
+2024-08-27 02:49:17,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=218485.33333333334, ans=0.125
+2024-08-27 02:49:22,253 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=218485.33333333334, ans=0.025
+2024-08-27 02:49:27,456 INFO [train.py:1114] (0/4) Epoch 17, batch 1150, loss[loss=0.1788, simple_loss=0.2551, pruned_loss=0.03743, ctc_loss=0.06901, over 19563.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2635, pruned_loss=0.04229, ctc_loss=0.07894, over 3829113.45 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-27 02:49:28,308 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.459e+02 1.619e+02 1.965e+02 3.390e+02, threshold=3.239e+02, percent-clipped=1.0
+2024-08-27 02:49:44,094 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.52 vs. limit=15.0
+2024-08-27 02:49:45,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=218645.33333333334, ans=0.2
+2024-08-27 02:49:51,685 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.45 vs. limit=15.0
+2024-08-27 02:49:54,402 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.64 vs. limit=22.5
+2024-08-27 02:49:59,165 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.99 vs. limit=10.0
+2024-08-27 02:49:59,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=218698.66666666666, ans=0.125
+2024-08-27 02:50:09,840 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=218752.0, ans=0.0
+2024-08-27 02:50:14,204 INFO [train.py:1114] (0/4) Epoch 17, batch 1200, loss[loss=0.1969, simple_loss=0.2754, pruned_loss=0.04338, ctc_loss=0.07903, over 19840.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.264, pruned_loss=0.04245, ctc_loss=0.07916, over 3824969.65 frames. ], batch size: 57, lr: 8.82e-03, grad_scale: 32.0
+2024-08-27 02:50:23,076 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.52 vs. limit=10.0
+2024-08-27 02:50:52,251 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:51:02,853 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.39 vs. limit=6.0
+2024-08-27 02:51:28,547 INFO [train.py:1114] (0/4) Epoch 17, batch 1250, loss[loss=0.1974, simple_loss=0.2749, pruned_loss=0.04342, ctc_loss=0.08289, over 19509.00 frames. ], tot_loss[loss=0.19, simple_loss=0.264, pruned_loss=0.04226, ctc_loss=0.07859, over 3843027.80 frames. ], batch size: 61, lr: 8.82e-03, grad_scale: 32.0
+2024-08-27 02:51:29,443 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.488e+02 1.826e+02 2.228e+02 3.440e+02, threshold=3.652e+02, percent-clipped=1.0
+2024-08-27 02:52:40,142 INFO [train.py:1114] (0/4) Epoch 17, batch 1300, loss[loss=0.208, simple_loss=0.2826, pruned_loss=0.0485, ctc_loss=0.09084, over 18867.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2632, pruned_loss=0.04203, ctc_loss=0.07835, over 3845692.99 frames. ], batch size: 76, lr: 8.81e-03, grad_scale: 32.0
+2024-08-27 02:52:45,127 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.09 vs. limit=15.0
+2024-08-27 02:52:52,573 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.59 vs. limit=15.0
+2024-08-27 02:52:54,222 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=219392.0, ans=0.1
+2024-08-27 02:52:59,556 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=219445.33333333334, ans=0.125
+2024-08-27 02:53:01,839 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=219445.33333333334, ans=15.0
+2024-08-27 02:53:49,249 INFO [train.py:1114] (0/4) Epoch 17, batch 1350, loss[loss=0.1768, simple_loss=0.2517, pruned_loss=0.03681, ctc_loss=0.07077, over 19764.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2624, pruned_loss=0.04171, ctc_loss=0.07757, over 3857308.50 frames. ], batch size: 54, lr: 8.81e-03, grad_scale: 32.0
+2024-08-27 02:53:50,128 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.487e+02 1.709e+02 2.118e+02 3.687e+02, threshold=3.418e+02, percent-clipped=1.0
+2024-08-27 02:53:51,355 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=219605.33333333334, ans=0.125
+2024-08-27 02:53:53,204 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=219605.33333333334, ans=0.0
+2024-08-27 02:54:01,705 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.68 vs. limit=15.0
+2024-08-27 02:54:04,901 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=219658.66666666666, ans=0.125
+2024-08-27 02:54:15,373 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.99 vs. limit=15.0
+2024-08-27 02:54:42,972 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.14 vs. limit=15.0
+2024-08-27 02:54:47,053 INFO [train.py:1114] (0/4) Epoch 17, batch 1400, loss[loss=0.1823, simple_loss=0.2458, pruned_loss=0.04298, ctc_loss=0.08201, over 19668.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2622, pruned_loss=0.04178, ctc_loss=0.07771, over 3864290.68 frames. ], batch size: 46, lr: 8.80e-03, grad_scale: 32.0
+2024-08-27 02:54:51,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=219872.0, ans=0.125
+2024-08-27 02:54:53,655 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=219872.0, ans=0.0
+2024-08-27 02:54:57,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=219925.33333333334, ans=0.125
+2024-08-27 02:55:19,690 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=220032.0, ans=0.0
+2024-08-27 02:55:28,982 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=220032.0, ans=0.0
+2024-08-27 02:55:39,016 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:55:41,670 INFO [train.py:1114] (0/4) Epoch 17, batch 1450, loss[loss=0.2155, simple_loss=0.2858, pruned_loss=0.05326, ctc_loss=0.09695, over 19666.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.263, pruned_loss=0.04199, ctc_loss=0.07837, over 3863106.64 frames. ], batch size: 63, lr: 8.80e-03, grad_scale: 32.0
+2024-08-27 02:55:42,532 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.445e+02 1.654e+02 2.032e+02 3.496e+02, threshold=3.307e+02, percent-clipped=1.0
+2024-08-27 02:55:52,789 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=220192.0, ans=0.125
+2024-08-27 02:56:06,332 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=220245.33333333334, ans=0.125
+2024-08-27 02:56:21,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=220298.66666666666, ans=0.035
+2024-08-27 02:56:24,910 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=220352.0, ans=0.0
+2024-08-27 02:56:31,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=220352.0, ans=0.2
+2024-08-27 02:56:35,339 INFO [train.py:1114] (0/4) Epoch 17, batch 1500, loss[loss=0.203, simple_loss=0.2731, pruned_loss=0.04898, ctc_loss=0.08713, over 19599.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.263, pruned_loss=0.04195, ctc_loss=0.07826, over 3863722.97 frames. ], batch size: 57, lr: 8.79e-03, grad_scale: 32.0
+2024-08-27 02:56:40,484 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=220405.33333333334, ans=0.025
+2024-08-27 02:56:48,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=220458.66666666666, ans=0.1
+2024-08-27 02:57:00,369 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=220512.0, ans=0.125
+2024-08-27 02:57:10,728 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=220565.33333333334, ans=0.125
+2024-08-27 02:57:18,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=220618.66666666666, ans=0.0
+2024-08-27 02:57:22,838 INFO [train.py:1114] (0/4) Epoch 17, batch 1550, loss[loss=0.2095, simple_loss=0.2766, pruned_loss=0.05222, ctc_loss=0.09488, over 19627.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2632, pruned_loss=0.04227, ctc_loss=0.07894, over 3848490.77 frames. ], batch size: 60, lr: 8.79e-03, grad_scale: 32.0
+2024-08-27 02:57:23,800 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.186e+02 1.433e+02 1.700e+02 2.311e+02 3.923e+02, threshold=3.401e+02, percent-clipped=1.0
+2024-08-27 02:57:51,118 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:57:51,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=220725.33333333334, ans=0.125
+2024-08-27 02:57:52,264 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=220725.33333333334, ans=0.125
+2024-08-27 02:58:00,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=220778.66666666666, ans=0.0
+2024-08-27 02:58:00,475 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=220778.66666666666, ans=0.1
+2024-08-27 02:58:05,443 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=220778.66666666666, ans=0.0
+2024-08-27 02:58:14,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=220832.0, ans=0.0
+2024-08-27 02:58:14,192 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=220832.0, ans=0.1
+2024-08-27 02:58:16,115 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=220832.0, ans=0.2
+2024-08-27 02:58:16,937 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=220832.0, ans=0.2
+2024-08-27 02:58:18,651 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=220885.33333333334, ans=0.0
+2024-08-27 02:58:27,685 INFO [train.py:1114] (0/4) Epoch 17, batch 1600, loss[loss=0.1898, simple_loss=0.2635, pruned_loss=0.04207, ctc_loss=0.07972, over 19833.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2632, pruned_loss=0.04223, ctc_loss=0.07886, over 3837459.82 frames. ], batch size: 57, lr: 8.78e-03, grad_scale: 32.0
+2024-08-27 02:59:25,398 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=220992.0, ans=0.125
+2024-08-27 03:00:24,032 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=221045.33333333334, ans=0.125
+2024-08-27 03:00:38,536 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.06 vs. limit=15.0
+2024-08-27 03:00:50,757 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=221152.0, ans=0.5
+2024-08-27 03:00:55,413 INFO [train.py:1114] (0/4) Epoch 17, batch 1650, loss[loss=0.1928, simple_loss=0.2767, pruned_loss=0.03943, ctc_loss=0.07504, over 19655.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2624, pruned_loss=0.0418, ctc_loss=0.07824, over 3833286.80 frames. ], batch size: 59, lr: 8.77e-03, grad_scale: 32.0
+2024-08-27 03:00:55,840 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.02 vs. limit=15.0
+2024-08-27 03:00:58,230 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.431e+02 1.952e+02 2.452e+02 3.980e+02, threshold=3.905e+02, percent-clipped=5.0
+2024-08-27 03:01:05,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=221205.33333333334, ans=0.125
+2024-08-27 03:01:12,150 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=221258.66666666666, ans=0.0
+2024-08-27 03:01:25,771 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=221365.33333333334, ans=0.125
+2024-08-27 03:01:49,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=221418.66666666666, ans=0.0
+2024-08-27 03:01:55,528 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=221418.66666666666, ans=0.1
+2024-08-27 03:01:57,109 INFO [train.py:1114] (0/4) Epoch 17, batch 1700, loss[loss=0.1683, simple_loss=0.2352, pruned_loss=0.03726, ctc_loss=0.06737, over 19662.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2623, pruned_loss=0.04164, ctc_loss=0.0778, over 3846857.71 frames. ], batch size: 46, lr: 8.77e-03, grad_scale: 32.0
+2024-08-27 03:02:01,974 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=221472.0, ans=0.125
+2024-08-27 03:02:05,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=221525.33333333334, ans=0.0
+2024-08-27 03:02:07,194 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=221525.33333333334, ans=0.125
+2024-08-27 03:02:43,279 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=221685.33333333334, ans=0.0
+2024-08-27 03:02:48,262 INFO [train.py:1114] (0/4) Epoch 17, batch 1750, loss[loss=0.1787, simple_loss=0.246, pruned_loss=0.04037, ctc_loss=0.07653, over 19628.00 frames. ], tot_loss[loss=0.1881, simple_loss=0.2621, pruned_loss=0.04156, ctc_loss=0.0776, over 3850512.50 frames. ], batch size: 45, lr: 8.76e-03, grad_scale: 16.0
+2024-08-27 03:02:49,970 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.526e+02 1.896e+02 2.459e+02 4.889e+02, threshold=3.791e+02, percent-clipped=1.0
+2024-08-27 03:03:07,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=221738.66666666666, ans=0.0
+2024-08-27 03:03:07,846 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.80 vs. limit=22.5
+2024-08-27 03:03:11,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=221792.0, ans=0.0
+2024-08-27 03:03:13,981 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.64 vs. limit=10.0
+2024-08-27 03:03:18,280 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=221792.0, ans=0.0
+2024-08-27 03:03:20,101 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.34 vs. limit=12.0
+2024-08-27 03:03:27,007 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=221845.33333333334, ans=0.1
+2024-08-27 03:03:40,038 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=221952.0, ans=0.125
+2024-08-27 03:03:45,216 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=221952.0, ans=0.125
+2024-08-27 03:03:46,718 INFO [train.py:1114] (0/4) Epoch 17, batch 1800, loss[loss=0.1835, simple_loss=0.2583, pruned_loss=0.04026, ctc_loss=0.07033, over 19609.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2624, pruned_loss=0.04167, ctc_loss=0.07771, over 3852141.97 frames. ], batch size: 55, lr: 8.76e-03, grad_scale: 16.0
+2024-08-27 03:03:49,490 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 03:03:50,389 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=222005.33333333334, ans=0.2
+2024-08-27 03:03:53,811 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 03:04:18,753 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=222165.33333333334, ans=0.125
+2024-08-27 03:04:24,958 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 03:04:30,759 INFO [train.py:1114] (0/4) Epoch 17, batch 1850, loss[loss=0.2226, simple_loss=0.2937, pruned_loss=0.05569, ctc_loss=0.1005, over 19585.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2624, pruned_loss=0.04171, ctc_loss=0.07775, over 3855966.23 frames. ], batch size: 57, lr: 8.75e-03, grad_scale: 16.0
+2024-08-27 03:04:32,492 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.249e+02 1.484e+02 1.846e+02 2.436e+02 4.218e+02, threshold=3.691e+02, percent-clipped=2.0
+2024-08-27 03:04:58,962 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=222432.0, ans=0.025
+2024-08-27 03:05:08,965 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=5.87 vs. limit=15.0
+2024-08-27 03:05:11,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=222485.33333333334, ans=0.125
+2024-08-27 03:05:14,606 INFO [train.py:1114] (0/4) Epoch 17, batch 1900, loss[loss=0.2084, simple_loss=0.2821, pruned_loss=0.04915, ctc_loss=0.09111, over 19659.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2631, pruned_loss=0.04186, ctc_loss=0.0781, over 3861031.91 frames. ], batch size: 59, lr: 8.75e-03, grad_scale: 16.0
+2024-08-27 03:05:19,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=222538.66666666666, ans=0.125
+2024-08-27 03:05:43,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=222698.66666666666, ans=0.5
+2024-08-27 03:06:00,581 INFO [train.py:1114] (0/4) Epoch 17, batch 1950, loss[loss=0.188, simple_loss=0.2653, pruned_loss=0.04025, ctc_loss=0.07579, over 19559.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2639, pruned_loss=0.04187, ctc_loss=0.07821, over 3869686.70 frames. ], batch size: 52, lr: 8.74e-03, grad_scale: 16.0
+2024-08-27 03:06:02,421 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.462e+02 1.715e+02 2.122e+02 4.504e+02, threshold=3.430e+02, percent-clipped=1.0
+2024-08-27 03:06:09,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=222858.66666666666, ans=0.125
+2024-08-27 03:06:17,358 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=222912.0, ans=0.125
+2024-08-27 03:06:21,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=222912.0, ans=0.125
+2024-08-27 03:06:24,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=222912.0, ans=0.125
+2024-08-27 03:06:31,808 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 03:06:47,289 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=223072.0, ans=0.0
+2024-08-27 03:06:48,016 INFO [train.py:1114] (0/4) Epoch 17, batch 2000, loss[loss=0.1714, simple_loss=0.2371, pruned_loss=0.03882, ctc_loss=0.07016, over 19628.00 frames. ], tot_loss[loss=0.1904, simple_loss=0.2646, pruned_loss=0.04224, ctc_loss=0.07902, over 3854277.82 frames. ], batch size: 45, lr: 8.74e-03, grad_scale: 32.0
+2024-08-27 03:08:01,251 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=223125.33333333334, ans=0.1
+2024-08-27 03:09:25,744 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=223232.0, ans=0.125
+2024-08-27 03:09:27,433 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=223232.0, ans=0.125
+2024-08-27 03:09:30,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=223285.33333333334, ans=0.1
+2024-08-27 03:09:39,984 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=223285.33333333334, ans=0.1
+2024-08-27 03:09:41,594 INFO [train.py:1114] (0/4) Epoch 17, batch 2050, loss[loss=0.1703, simple_loss=0.2399, pruned_loss=0.03715, ctc_loss=0.06598, over 19707.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2632, pruned_loss=0.04199, ctc_loss=0.07848, over 3850647.74 frames. ], batch size: 47, lr: 8.73e-03, grad_scale: 32.0
+2024-08-27 03:09:42,030 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.52 vs. limit=15.0
+2024-08-27 03:09:43,284 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.433e+02 1.718e+02 2.194e+02 3.489e+02, threshold=3.436e+02, percent-clipped=1.0
+2024-08-27 03:09:51,373 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=223338.66666666666, ans=0.125
+2024-08-27 03:09:51,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=223338.66666666666, ans=0.0
+2024-08-27 03:10:20,851 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=223392.0, ans=0.125
+2024-08-27 03:10:33,147 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=223445.33333333334, ans=0.125
+2024-08-27 03:10:34,037 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=223445.33333333334, ans=0.125
+2024-08-27 03:10:35,055 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.49 vs. limit=12.0
+2024-08-27 03:10:46,785 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.70 vs. limit=22.5
+2024-08-27 03:13:17,699 INFO [train.py:1114] (0/4) Epoch 17, batch 2100, loss[loss=0.1946, simple_loss=0.2713, pruned_loss=0.0424, ctc_loss=0.08284, over 19773.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2625, pruned_loss=0.04158, ctc_loss=0.07758, over 3857785.87 frames. ], batch size: 54, lr: 8.73e-03, grad_scale: 32.0
+2024-08-27 03:13:42,005 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=223658.66666666666, ans=0.125
+2024-08-27 03:24:04,537 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=223818.66666666666, ans=0.2
+2024-08-27 03:24:40,106 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=223818.66666666666, ans=0.125
+2024-08-27 03:24:47,082 INFO [train.py:1114] (0/4) Epoch 17, batch 2150, loss[loss=0.1827, simple_loss=0.2563, pruned_loss=0.04018, ctc_loss=0.07176, over 19594.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2619, pruned_loss=0.04147, ctc_loss=0.07715, over 3868200.70 frames. ], batch size: 52, lr: 8.72e-03, grad_scale: 32.0
+2024-08-27 03:24:49,693 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.181e+02 1.464e+02 1.691e+02 2.317e+02 5.931e+02, threshold=3.382e+02, percent-clipped=6.0
+2024-08-27 03:24:56,708 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=223872.0, ans=0.125
+2024-08-27 03:24:57,597 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=223872.0, ans=0.2
+2024-08-27 03:24:57,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.68 vs. limit=22.5
+2024-08-27 03:26:31,804 INFO [train.py:1114] (0/4) Epoch 17, batch 2200, loss[loss=0.2055, simple_loss=0.2824, pruned_loss=0.04749, ctc_loss=0.08373, over 19586.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2621, pruned_loss=0.04148, ctc_loss=0.07725, over 3867184.45 frames. ], batch size: 57, lr: 8.72e-03, grad_scale: 32.0
+2024-08-27 03:26:35,464 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=224138.66666666666, ans=0.125
+2024-08-27 03:26:35,500 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=224138.66666666666, ans=0.0
+2024-08-27 03:27:06,589 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=224298.66666666666, ans=0.125
+2024-08-27 03:27:07,709 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.61 vs. limit=6.0
+2024-08-27 03:27:26,233 INFO [train.py:1114] (0/4) Epoch 17, batch 2250, loss[loss=0.1978, simple_loss=0.2737, pruned_loss=0.04411, ctc_loss=0.08443, over 19615.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2622, pruned_loss=0.04142, ctc_loss=0.07716, over 3867576.77 frames. ], batch size: 55, lr: 8.71e-03, grad_scale: 32.0
+2024-08-27 03:27:29,877 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.517e+02 1.774e+02 2.256e+02 3.791e+02, threshold=3.548e+02, percent-clipped=1.0
+2024-08-27 03:27:42,741 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=224458.66666666666, ans=0.025
+2024-08-27 03:27:44,749 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.75 vs. limit=15.0
+2024-08-27 03:28:04,624 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=224565.33333333334, ans=0.125
+2024-08-27 03:29:04,923 INFO [train.py:1114] (0/4) Epoch 17, batch 2300, loss[loss=0.1692, simple_loss=0.2439, pruned_loss=0.03442, ctc_loss=0.06424, over 19495.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2617, pruned_loss=0.04161, ctc_loss=0.07739, over 3861679.45 frames. ], batch size: 49, lr: 8.71e-03, grad_scale: 16.0
+2024-08-27 03:29:07,567 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=224672.0, ans=0.125
+2024-08-27 03:29:08,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten.whitening_limit, batch_count=224672.0, ans=15.0
+2024-08-27 03:29:11,183 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.73 vs. limit=12.0
+2024-08-27 03:29:39,009 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=224725.33333333334, ans=0.125
+2024-08-27 03:33:09,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=224778.66666666666, ans=0.0
+2024-08-27 03:35:45,129 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.11 vs. limit=22.5
+2024-08-27 03:36:02,820 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=224832.0, ans=0.1
+2024-08-27 03:36:06,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=224832.0, ans=0.125
+2024-08-27 03:36:15,421 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=224885.33333333334, ans=0.2
+2024-08-27 03:36:33,924 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.74 vs. limit=22.5
+2024-08-27 03:36:41,027 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.42 vs. limit=22.5
+2024-08-27 03:36:49,584 INFO [train.py:1114] (0/4) Epoch 17, batch 2350, loss[loss=0.2077, simple_loss=0.2788, pruned_loss=0.05002, ctc_loss=0.0914, over 19653.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2615, pruned_loss=0.04189, ctc_loss=0.07767, over 3864539.45 frames. ], batch size: 63, lr: 8.70e-03, grad_scale: 16.0
+2024-08-27 03:37:01,827 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.439e+02 1.647e+02 2.102e+02 4.091e+02, threshold=3.295e+02, percent-clipped=1.0
+2024-08-27 03:37:22,775 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=224992.0, ans=0.125
+2024-08-27 03:37:43,734 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=225045.33333333334, ans=0.125
+2024-08-27 03:37:53,573 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=225045.33333333334, ans=0.125
+2024-08-27 03:37:57,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=225045.33333333334, ans=0.125
+2024-08-27 03:38:16,267 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=225045.33333333334, ans=0.2
+2024-08-27 03:39:26,219 INFO [train.py:1114] (0/4) Epoch 17, batch 2400, loss[loss=0.2137, simple_loss=0.2848, pruned_loss=0.05229, ctc_loss=0.0948, over 19364.00 frames. ], tot_loss[loss=0.1904, simple_loss=0.2638, pruned_loss=0.04269, ctc_loss=0.07907, over 3858657.20 frames. ], batch size: 67, lr: 8.70e-03, grad_scale: 32.0
+2024-08-27 03:44:22,611 INFO [train.py:1114] (0/4) Epoch 17, batch 2450, loss[loss=0.2636, simple_loss=0.3075, pruned_loss=0.08025, ctc_loss=0.1481, over 13661.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2673, pruned_loss=0.04502, ctc_loss=0.08368, over 3734008.74 frames. ], batch size: 140, lr: 8.69e-03, grad_scale: 32.0
+2024-08-27 03:44:30,553 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.519e+02 1.805e+02 2.064e+02 2.900e+02, threshold=3.609e+02, percent-clipped=0.0
+2024-08-27 03:44:41,239 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=225525.33333333334, ans=0.0
+2024-08-27 03:44:44,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=225525.33333333334, ans=0.0
+2024-08-27 03:45:41,460 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 03:46:21,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=225578.66666666666, ans=0.2
+2024-08-27 03:47:32,420 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=225632.0, ans=10.0
+2024-08-27 03:47:34,393 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.98 vs. limit=15.0
+2024-08-27 03:47:37,196 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-17.pt
+2024-08-27 03:50:08,490 INFO [train.py:1114] (0/4) Epoch 18, batch 0, loss[loss=0.1718, simple_loss=0.2426, pruned_loss=0.03656, ctc_loss=0.06963, over 19813.00 frames. ], tot_loss[loss=0.1718, simple_loss=0.2426, pruned_loss=0.03656, ctc_loss=0.06963, over 19813.00 frames. ], batch size: 49, lr: 8.44e-03, grad_scale: 32.0
+2024-08-27 03:50:08,490 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-27 03:56:59,325 INFO [train.py:1146] (0/4) Epoch 18, validation: loss=0.1731, simple_loss=0.2653, pruned_loss=0.0303, ctc_loss=0.05087, over 944034.00 frames.
+2024-08-27 03:56:59,325 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-27 03:58:10,844 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.68 vs. limit=15.0
+2024-08-27 03:58:29,128 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.52 vs. limit=12.0
+2024-08-27 03:59:06,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-27 03:59:19,748 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.41 vs. limit=15.0
+2024-08-27 03:59:20,580 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.80 vs. limit=15.0
+2024-08-27 03:59:24,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=225840.0, ans=0.125
+2024-08-27 03:59:36,497 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=225893.33333333334, ans=0.2
+2024-08-27 03:59:37,305 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=225893.33333333334, ans=0.125
+2024-08-27 03:59:40,036 INFO [train.py:1114] (0/4) Epoch 18, batch 50, loss[loss=0.1738, simple_loss=0.2453, pruned_loss=0.03652, ctc_loss=0.07347, over 19716.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.265, pruned_loss=0.04201, ctc_loss=0.07871, over 845746.19 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-27 03:59:52,917 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.620e+02 1.870e+02 2.127e+02 3.474e+02, threshold=3.740e+02, percent-clipped=0.0
+2024-08-27 04:00:02,476 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=226053.33333333334, ans=0.125
+2024-08-27 04:00:07,653 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=226053.33333333334, ans=0.125
+2024-08-27 04:00:11,326 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:00:24,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=226160.0, ans=0.125
+2024-08-27 04:00:34,099 INFO [train.py:1114] (0/4) Epoch 18, batch 100, loss[loss=0.177, simple_loss=0.2513, pruned_loss=0.03785, ctc_loss=0.06743, over 19718.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2663, pruned_loss=0.04265, ctc_loss=0.0798, over 1499054.83 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-27 04:00:34,391 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:00:38,842 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.29 vs. limit=15.0
+2024-08-27 04:05:28,412 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=226320.0, ans=0.125
+2024-08-27 04:05:39,100 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=226320.0, ans=0.2
+2024-08-27 04:05:55,651 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.74 vs. limit=5.0
+2024-08-27 04:06:00,496 INFO [train.py:1114] (0/4) Epoch 18, batch 150, loss[loss=0.1743, simple_loss=0.2407, pruned_loss=0.03984, ctc_loss=0.07086, over 19715.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2628, pruned_loss=0.04135, ctc_loss=0.07703, over 2028052.35 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-27 04:06:16,203 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.462e+02 1.764e+02 2.186e+02 3.977e+02, threshold=3.529e+02, percent-clipped=1.0
+2024-08-27 04:06:16,388 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=226533.33333333334, ans=0.0
+2024-08-27 04:06:17,424 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:06:28,418 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226586.66666666666, ans=0.125
+2024-08-27 04:06:29,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=226586.66666666666, ans=0.04949747468305833
+2024-08-27 04:06:35,208 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=226640.0, ans=0.125
+2024-08-27 04:06:38,992 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=226640.0, ans=0.025
+2024-08-27 04:06:44,445 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=226693.33333333334, ans=0.125
+2024-08-27 04:06:49,806 INFO [train.py:1114] (0/4) Epoch 18, batch 200, loss[loss=0.194, simple_loss=0.2707, pruned_loss=0.04355, ctc_loss=0.07536, over 18248.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2619, pruned_loss=0.04105, ctc_loss=0.07649, over 2435754.48 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-27 04:06:56,681 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.96 vs. limit=22.5
+2024-08-27 04:07:07,626 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.60 vs. limit=15.0
+2024-08-27 04:07:24,196 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-27 04:07:24,262 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-27 04:07:35,902 INFO [train.py:1114] (0/4) Epoch 18, batch 250, loss[loss=0.2006, simple_loss=0.2715, pruned_loss=0.048, ctc_loss=0.08435, over 19404.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2621, pruned_loss=0.04113, ctc_loss=0.07669, over 2755802.13 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-27 04:07:46,440 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=227066.66666666666, ans=0.0
+2024-08-27 04:07:48,265 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=227066.66666666666, ans=0.125
+2024-08-27 04:07:50,820 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.244e+02 1.521e+02 1.873e+02 2.606e+02 4.367e+02, threshold=3.746e+02, percent-clipped=8.0
+2024-08-27 04:07:52,021 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=227066.66666666666, ans=0.0
+2024-08-27 04:07:58,451 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227120.0, ans=0.1
+2024-08-27 04:08:18,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=227173.33333333334, ans=6.0
+2024-08-27 04:08:31,214 INFO [train.py:1114] (0/4) Epoch 18, batch 300, loss[loss=0.1841, simple_loss=0.2643, pruned_loss=0.03834, ctc_loss=0.06837, over 19499.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2617, pruned_loss=0.04108, ctc_loss=0.07666, over 3000417.31 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-27 04:08:44,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=227333.33333333334, ans=0.0
+2024-08-27 04:08:53,036 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:09:05,118 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.02 vs. limit=22.5
+2024-08-27 04:09:14,022 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=227493.33333333334, ans=0.2
+2024-08-27 04:09:15,346 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.24 vs. limit=6.0
+2024-08-27 04:09:17,357 INFO [train.py:1114] (0/4) Epoch 18, batch 350, loss[loss=0.1608, simple_loss=0.2306, pruned_loss=0.03272, ctc_loss=0.06409, over 19741.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2616, pruned_loss=0.04096, ctc_loss=0.0767, over 3191008.27 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-27 04:09:17,559 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=227546.66666666666, ans=0.125
+2024-08-27 04:09:26,205 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.19 vs. limit=10.0
+2024-08-27 04:09:28,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=227600.0, ans=0.0
+2024-08-27 04:09:30,331 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.460e+02 1.643e+02 1.956e+02 3.165e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-27 04:10:14,804 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-27 04:10:17,446 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=227706.66666666666, ans=0.125
+2024-08-27 04:10:17,462 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-27 04:10:40,375 INFO [train.py:1114] (0/4) Epoch 18, batch 400, loss[loss=0.1871, simple_loss=0.2651, pruned_loss=0.03948, ctc_loss=0.07522, over 19506.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2616, pruned_loss=0.04072, ctc_loss=0.07638, over 3343005.44 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-27 04:10:49,078 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=227866.66666666666, ans=0.125
+2024-08-27 04:11:58,076 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.71 vs. limit=15.0
+2024-08-27 04:12:21,976 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227973.33333333334, ans=0.1
+2024-08-27 04:12:32,837 INFO [train.py:1114] (0/4) Epoch 18, batch 450, loss[loss=0.1736, simple_loss=0.2559, pruned_loss=0.03283, ctc_loss=0.06406, over 19611.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2611, pruned_loss=0.04042, ctc_loss=0.0759, over 3451708.44 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-27 04:12:34,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=228080.0, ans=0.125
+2024-08-27 04:12:38,345 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.14 vs. limit=10.0
+2024-08-27 04:12:57,342 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.62 vs. limit=15.0
+2024-08-27 04:12:57,364 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.10 vs. limit=22.5
+2024-08-27 04:12:59,749 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.395e+02 1.673e+02 2.305e+02 3.910e+02, threshold=3.347e+02, percent-clipped=3.0
+2024-08-27 04:13:14,657 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=228240.0, ans=0.0
+2024-08-27 04:13:18,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=228240.0, ans=0.125
+2024-08-27 04:13:32,852 INFO [train.py:1114] (0/4) Epoch 18, batch 500, loss[loss=0.1991, simple_loss=0.2671, pruned_loss=0.04699, ctc_loss=0.09291, over 19634.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2604, pruned_loss=0.04017, ctc_loss=0.07541, over 3546079.07 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-27 04:13:45,886 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228400.0, ans=0.1
+2024-08-27 04:13:46,763 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:13:46,904 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=228400.0, ans=0.1
+2024-08-27 04:13:50,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=228400.0, ans=0.0
+2024-08-27 04:14:02,632 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=228506.66666666666, ans=0.0
+2024-08-27 04:14:20,879 INFO [train.py:1114] (0/4) Epoch 18, batch 550, loss[loss=0.1954, simple_loss=0.2718, pruned_loss=0.04365, ctc_loss=0.07923, over 19273.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2609, pruned_loss=0.04048, ctc_loss=0.07588, over 3608445.94 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-27 04:14:28,470 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=228613.33333333334, ans=0.0
+2024-08-27 04:14:34,514 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.436e+02 1.681e+02 2.031e+02 3.505e+02, threshold=3.363e+02, percent-clipped=1.0
+2024-08-27 04:14:46,277 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228720.0, ans=0.1
+2024-08-27 04:14:47,885 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:15:12,275 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=228826.66666666666, ans=0.125
+2024-08-27 04:15:14,800 INFO [train.py:1114] (0/4) Epoch 18, batch 600, loss[loss=0.1967, simple_loss=0.2695, pruned_loss=0.04536, ctc_loss=0.08315, over 19393.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.261, pruned_loss=0.04056, ctc_loss=0.0758, over 3666186.54 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-27 04:15:19,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=228880.0, ans=0.2
+2024-08-27 04:15:33,865 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=228933.33333333334, ans=0.125
+2024-08-27 04:15:43,225 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=228986.66666666666, ans=0.125
+2024-08-27 04:15:54,544 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=229040.0, ans=0.125
+2024-08-27 04:16:56,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=229093.33333333334, ans=0.0
+2024-08-27 04:17:05,577 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.71 vs. limit=10.0
+2024-08-27 04:17:06,968 INFO [train.py:1114] (0/4) Epoch 18, batch 650, loss[loss=0.1875, simple_loss=0.2581, pruned_loss=0.04256, ctc_loss=0.07935, over 19762.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2599, pruned_loss=0.04003, ctc_loss=0.07484, over 3716392.84 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-27 04:17:08,072 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=229146.66666666666, ans=0.07
+2024-08-27 04:17:13,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=229146.66666666666, ans=0.025
+2024-08-27 04:17:15,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=229200.0, ans=0.125
+2024-08-27 04:17:20,097 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.567e+02 1.955e+02 2.726e+02 4.189e+02, threshold=3.909e+02, percent-clipped=6.0
+2024-08-27 04:17:20,558 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.58 vs. limit=6.0
+2024-08-27 04:17:23,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=229200.0, ans=0.0
+2024-08-27 04:17:23,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=229200.0, ans=0.025
+2024-08-27 04:17:32,234 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=229253.33333333334, ans=0.125
+2024-08-27 04:17:34,132 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229306.66666666666, ans=0.1
+2024-08-27 04:17:39,710 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:17:40,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-27 04:18:45,803 INFO [train.py:1114] (0/4) Epoch 18, batch 700, loss[loss=0.1787, simple_loss=0.251, pruned_loss=0.03921, ctc_loss=0.0699, over 19714.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2604, pruned_loss=0.04023, ctc_loss=0.07512, over 3749192.59 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 32.0
+2024-08-27 04:18:54,046 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=229413.33333333334, ans=0.125
+2024-08-27 04:18:57,674 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=229466.66666666666, ans=0.0
+2024-08-27 04:18:58,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229466.66666666666, ans=0.1
+2024-08-27 04:19:02,846 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.08 vs. limit=15.0
+2024-08-27 04:19:08,339 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=229520.0, ans=0.1
+2024-08-27 04:19:13,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=229520.0, ans=0.125
+2024-08-27 04:19:22,107 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.31 vs. limit=15.0
+2024-08-27 04:19:33,234 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.74 vs. limit=15.0
+2024-08-27 04:19:34,990 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.13 vs. limit=15.0
+2024-08-27 04:19:35,533 INFO [train.py:1114] (0/4) Epoch 18, batch 750, loss[loss=0.1828, simple_loss=0.2666, pruned_loss=0.03506, ctc_loss=0.07188, over 19502.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2605, pruned_loss=0.04032, ctc_loss=0.07538, over 3774959.33 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 16.0
+2024-08-27 04:19:36,595 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=229680.0, ans=0.2
+2024-08-27 04:19:37,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=229680.0, ans=0.125
+2024-08-27 04:19:44,130 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.16 vs. limit=6.0
+2024-08-27 04:19:44,744 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=229733.33333333334, ans=0.1
+2024-08-27 04:19:47,584 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229733.33333333334, ans=0.1
+2024-08-27 04:19:49,143 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.399e+02 1.632e+02 2.193e+02 3.721e+02, threshold=3.263e+02, percent-clipped=0.0
+2024-08-27 04:19:56,205 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.20 vs. limit=12.0
+2024-08-27 04:19:57,654 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=229786.66666666666, ans=0.1
+2024-08-27 04:19:57,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=229786.66666666666, ans=0.0
+2024-08-27 04:20:10,937 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.21 vs. limit=22.5
+2024-08-27 04:20:25,620 INFO [train.py:1114] (0/4) Epoch 18, batch 800, loss[loss=0.1671, simple_loss=0.2372, pruned_loss=0.03559, ctc_loss=0.06459, over 19807.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2603, pruned_loss=0.04029, ctc_loss=0.07536, over 3796787.06 frames. ], batch size: 49, lr: 8.37e-03, grad_scale: 32.0
+2024-08-27 04:20:53,580 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.70 vs. limit=12.0
+2024-08-27 04:21:02,001 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=230000.0, ans=0.025
+2024-08-27 04:21:04,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=230053.33333333334, ans=0.125
+2024-08-27 04:21:06,886 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.40 vs. limit=15.0
+2024-08-27 04:21:11,348 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:21:31,534 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=230160.0, ans=0.125
+2024-08-27 04:21:31,588 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-27 04:21:33,150 INFO [train.py:1114] (0/4) Epoch 18, batch 850, loss[loss=0.2081, simple_loss=0.2808, pruned_loss=0.04898, ctc_loss=0.09358, over 19633.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2604, pruned_loss=0.04029, ctc_loss=0.07521, over 3815142.45 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-27 04:21:34,417 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.05 vs. limit=22.5
+2024-08-27 04:21:42,590 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:21:57,957 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.249e+02 1.452e+02 1.736e+02 2.395e+02 3.551e+02, threshold=3.472e+02, percent-clipped=2.0
+2024-08-27 04:22:06,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=230320.0, ans=0.04949747468305833
+2024-08-27 04:22:13,560 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=230373.33333333334, ans=0.125
+2024-08-27 04:22:14,505 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=230373.33333333334, ans=0.125
+2024-08-27 04:22:18,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=230373.33333333334, ans=0.1
+2024-08-27 04:22:26,722 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=230426.66666666666, ans=0.2
+2024-08-27 04:22:30,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=230480.0, ans=0.2
+2024-08-27 04:22:31,213 INFO [train.py:1114] (0/4) Epoch 18, batch 900, loss[loss=0.1716, simple_loss=0.2454, pruned_loss=0.03511, ctc_loss=0.06896, over 19425.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2608, pruned_loss=0.04058, ctc_loss=0.076, over 3819486.86 frames. ], batch size: 48, lr: 8.36e-03, grad_scale: 32.0
+2024-08-27 04:22:37,803 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=230480.0, ans=0.1
+2024-08-27 04:22:39,748 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=230533.33333333334, ans=0.0
+2024-08-27 04:22:42,856 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.65 vs. limit=6.0
+2024-08-27 04:22:56,645 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=230586.66666666666, ans=0.125
+2024-08-27 04:22:57,660 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=230586.66666666666, ans=0.125
+2024-08-27 04:23:17,846 INFO [train.py:1114] (0/4) Epoch 18, batch 950, loss[loss=0.1654, simple_loss=0.244, pruned_loss=0.03092, ctc_loss=0.06262, over 19484.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2615, pruned_loss=0.04098, ctc_loss=0.07669, over 3820910.45 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-27 04:23:20,241 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=8.90 vs. limit=22.5
+2024-08-27 04:23:36,277 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.393e+02 1.674e+02 2.227e+02 4.492e+02, threshold=3.349e+02, percent-clipped=5.0
+2024-08-27 04:23:48,958 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=18.39 vs. limit=15.0
+2024-08-27 04:23:56,571 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.40 vs. limit=6.0
+2024-08-27 04:24:09,117 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=230960.0, ans=0.2
+2024-08-27 04:24:15,454 INFO [train.py:1114] (0/4) Epoch 18, batch 1000, loss[loss=0.1718, simple_loss=0.2499, pruned_loss=0.03334, ctc_loss=0.06736, over 19851.00 frames. ], tot_loss[loss=0.1878, simple_loss=0.2622, pruned_loss=0.04125, ctc_loss=0.07712, over 3815875.19 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-27 04:24:28,177 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.37 vs. limit=6.0
+2024-08-27 04:24:35,645 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.74 vs. limit=22.5
+2024-08-27 04:24:39,266 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=231120.0, ans=0.0
+2024-08-27 04:24:43,152 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.54 vs. limit=15.0
+2024-08-27 04:24:57,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=231226.66666666666, ans=0.0
+2024-08-27 04:24:57,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=231226.66666666666, ans=0.0
+2024-08-27 04:25:01,735 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=231226.66666666666, ans=0.0
+2024-08-27 04:25:01,737 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=231226.66666666666, ans=0.125
+2024-08-27 04:25:11,435 INFO [train.py:1114] (0/4) Epoch 18, batch 1050, loss[loss=0.194, simple_loss=0.2761, pruned_loss=0.04009, ctc_loss=0.07941, over 19852.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2615, pruned_loss=0.04116, ctc_loss=0.0769, over 3822177.37 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-27 04:25:12,570 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=231280.0, ans=10.0
+2024-08-27 04:25:18,952 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=231280.0, ans=0.0
+2024-08-27 04:25:25,220 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.118e+02 1.375e+02 1.549e+02 1.865e+02 3.480e+02, threshold=3.097e+02, percent-clipped=1.0
+2024-08-27 04:25:34,924 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.77 vs. limit=15.0
+2024-08-27 04:25:38,218 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=231440.0, ans=0.0
+2024-08-27 04:25:49,285 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:25:56,806 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=231546.66666666666, ans=0.0
+2024-08-27 04:25:57,488 INFO [train.py:1114] (0/4) Epoch 18, batch 1100, loss[loss=0.192, simple_loss=0.2657, pruned_loss=0.04432, ctc_loss=0.07421, over 19596.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2611, pruned_loss=0.04098, ctc_loss=0.07656, over 3829672.02 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 16.0
+2024-08-27 04:26:01,404 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=231546.66666666666, ans=0.0
+2024-08-27 04:26:36,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=231653.33333333334, ans=0.125
+2024-08-27 04:26:54,405 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=231706.66666666666, ans=0.0
+2024-08-27 04:27:00,508 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.45 vs. limit=15.0
+2024-08-27 04:27:31,637 INFO [train.py:1114] (0/4) Epoch 18, batch 1150, loss[loss=0.183, simple_loss=0.2541, pruned_loss=0.04014, ctc_loss=0.07903, over 19608.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2614, pruned_loss=0.04113, ctc_loss=0.07701, over 3827298.87 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 16.0
+2024-08-27 04:27:44,118 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=231866.66666666666, ans=0.025
+2024-08-27 04:27:47,257 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=231866.66666666666, ans=0.0
+2024-08-27 04:27:50,641 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.153e+02 1.426e+02 1.640e+02 2.078e+02 3.185e+02, threshold=3.280e+02, percent-clipped=3.0
+2024-08-27 04:28:03,182 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=231920.0, ans=0.125
+2024-08-27 04:28:06,823 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=231920.0, ans=0.0
+2024-08-27 04:28:26,369 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.60 vs. limit=22.5
+2024-08-27 04:28:27,951 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=232026.66666666666, ans=0.2
+2024-08-27 04:28:32,341 INFO [train.py:1114] (0/4) Epoch 18, batch 1200, loss[loss=0.1998, simple_loss=0.2789, pruned_loss=0.04406, ctc_loss=0.08161, over 19837.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2626, pruned_loss=0.04135, ctc_loss=0.07752, over 3824043.11 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-27 04:28:37,884 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=232080.0, ans=0.125
+2024-08-27 04:28:46,155 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=232133.33333333334, ans=0.125
+2024-08-27 04:28:57,135 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=232186.66666666666, ans=0.0
+2024-08-27 04:28:58,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=232186.66666666666, ans=0.2
+2024-08-27 04:29:05,423 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=232240.0, ans=0.125
+2024-08-27 04:29:09,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=232240.0, ans=0.125
+2024-08-27 04:29:19,792 INFO [train.py:1114] (0/4) Epoch 18, batch 1250, loss[loss=0.213, simple_loss=0.2771, pruned_loss=0.05452, ctc_loss=0.09965, over 19507.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2631, pruned_loss=0.04135, ctc_loss=0.07741, over 3842402.06 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-27 04:29:19,965 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=232346.66666666666, ans=0.125
+2024-08-27 04:29:29,329 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232400.0, ans=0.1
+2024-08-27 04:29:34,585 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.452e+02 1.815e+02 2.295e+02 4.200e+02, threshold=3.630e+02, percent-clipped=5.0
+2024-08-27 04:29:35,740 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=232400.0, ans=0.125
+2024-08-27 04:29:41,206 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=232453.33333333334, ans=0.2
+2024-08-27 04:29:43,062 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:30:25,683 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232506.66666666666, ans=0.1
+2024-08-27 04:30:39,933 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232560.0, ans=0.1
+2024-08-27 04:30:43,396 INFO [train.py:1114] (0/4) Epoch 18, batch 1300, loss[loss=0.1934, simple_loss=0.2691, pruned_loss=0.04232, ctc_loss=0.08291, over 18824.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2617, pruned_loss=0.04093, ctc_loss=0.07669, over 3845983.72 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 16.0
+2024-08-27 04:30:54,226 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.77 vs. limit=15.0
+2024-08-27 04:30:56,907 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.42 vs. limit=12.0
+2024-08-27 04:31:05,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232720.0, ans=0.1
+2024-08-27 04:31:18,425 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=232773.33333333334, ans=0.0
+2024-08-27 04:31:29,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=232826.66666666666, ans=0.125
+2024-08-27 04:31:33,100 INFO [train.py:1114] (0/4) Epoch 18, batch 1350, loss[loss=0.1789, simple_loss=0.2482, pruned_loss=0.03907, ctc_loss=0.0789, over 19761.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2612, pruned_loss=0.04084, ctc_loss=0.07624, over 3857349.73 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 16.0
+2024-08-27 04:31:45,528 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=232933.33333333334, ans=0.125
+2024-08-27 04:31:48,898 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.387e+02 1.655e+02 2.106e+02 4.022e+02, threshold=3.310e+02, percent-clipped=4.0
+2024-08-27 04:31:49,313 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.03 vs. limit=15.0
+2024-08-27 04:32:19,566 INFO [train.py:1114] (0/4) Epoch 18, batch 1400, loss[loss=0.1616, simple_loss=0.2317, pruned_loss=0.03361, ctc_loss=0.06053, over 19657.00 frames. ], tot_loss[loss=0.1869, simple_loss=0.2613, pruned_loss=0.0409, ctc_loss=0.07643, over 3864164.22 frames. ], batch size: 46, lr: 8.31e-03, grad_scale: 16.0
+2024-08-27 04:32:19,925 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.50 vs. limit=15.0
+2024-08-27 04:32:27,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=233146.66666666666, ans=0.125
+2024-08-27 04:32:45,205 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=233200.0, ans=0.0
+2024-08-27 04:33:00,146 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=233253.33333333334, ans=0.0
+2024-08-27 04:33:40,121 INFO [train.py:1114] (0/4) Epoch 18, batch 1450, loss[loss=0.2024, simple_loss=0.2789, pruned_loss=0.04546, ctc_loss=0.08767, over 19668.00 frames. ], tot_loss[loss=0.1875, simple_loss=0.2618, pruned_loss=0.04116, ctc_loss=0.07687, over 3861721.28 frames. ], batch size: 63, lr: 8.30e-03, grad_scale: 16.0
+2024-08-27 04:34:21,007 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.457e+02 1.713e+02 1.981e+02 3.848e+02, threshold=3.426e+02, percent-clipped=1.0
+2024-08-27 04:34:26,960 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.93 vs. limit=15.0
+2024-08-27 04:35:50,221 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=233626.66666666666, ans=0.125
+2024-08-27 04:35:51,362 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.23 vs. limit=22.5
+2024-08-27 04:36:46,868 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=233680.0, ans=0.125
+2024-08-27 04:36:59,898 INFO [train.py:1114] (0/4) Epoch 18, batch 1500, loss[loss=0.1779, simple_loss=0.2584, pruned_loss=0.03578, ctc_loss=0.06442, over 19584.00 frames. ], tot_loss[loss=0.1876, simple_loss=0.2621, pruned_loss=0.04111, ctc_loss=0.07691, over 3861790.55 frames. ], batch size: 57, lr: 8.30e-03, grad_scale: 16.0
+2024-08-27 04:37:01,051 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=233680.0, ans=0.1
+2024-08-27 04:37:01,450 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.91 vs. limit=10.0
+2024-08-27 04:37:04,180 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=233680.0, ans=0.0
+2024-08-27 04:37:34,923 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:37:34,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=233680.0, ans=0.1
+2024-08-27 04:37:45,024 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=233733.33333333334, ans=0.0
+2024-08-27 04:37:46,996 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=233733.33333333334, ans=0.1
+2024-08-27 04:38:31,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=233840.0, ans=0.125
+2024-08-27 04:39:00,335 INFO [train.py:1114] (0/4) Epoch 18, batch 1550, loss[loss=0.208, simple_loss=0.2825, pruned_loss=0.0489, ctc_loss=0.08943, over 19603.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2617, pruned_loss=0.04099, ctc_loss=0.07671, over 3845633.94 frames. ], batch size: 60, lr: 8.30e-03, grad_scale: 16.0
+2024-08-27 04:39:00,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=233946.66666666666, ans=0.125
+2024-08-27 04:39:02,408 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=233946.66666666666, ans=0.125
+2024-08-27 04:39:35,555 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.24 vs. limit=22.5
+2024-08-27 04:39:37,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=234000.0, ans=0.05
+2024-08-27 04:39:38,958 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:39:50,260 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=234000.0, ans=0.5
+2024-08-27 04:39:51,778 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.454e+02 1.713e+02 2.109e+02 3.815e+02, threshold=3.426e+02, percent-clipped=1.0
+2024-08-27 04:39:51,998 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:40:09,634 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=234106.66666666666, ans=0.0
+2024-08-27 04:40:34,724 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=234213.33333333334, ans=0.125
+2024-08-27 04:40:35,442 INFO [train.py:1114] (0/4) Epoch 18, batch 1600, loss[loss=0.198, simple_loss=0.279, pruned_loss=0.04252, ctc_loss=0.07987, over 19848.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2615, pruned_loss=0.0409, ctc_loss=0.07657, over 3835104.16 frames. ], batch size: 57, lr: 8.29e-03, grad_scale: 32.0
+2024-08-27 04:40:36,635 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=234213.33333333334, ans=0.125
+2024-08-27 04:40:40,316 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=234213.33333333334, ans=0.1
+2024-08-27 04:40:50,336 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=234266.66666666666, ans=0.125
+2024-08-27 04:40:58,857 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:41:01,602 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=234320.0, ans=0.04949747468305833
+2024-08-27 04:41:01,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=234320.0, ans=0.2
+2024-08-27 04:41:24,552 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=234373.33333333334, ans=0.0
+2024-08-27 04:41:26,422 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=234373.33333333334, ans=0.125
+2024-08-27 04:41:42,031 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=234426.66666666666, ans=0.2
+2024-08-27 04:41:44,729 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=234426.66666666666, ans=0.125
+2024-08-27 04:41:48,478 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=234426.66666666666, ans=0.125
+2024-08-27 04:41:53,471 INFO [train.py:1114] (0/4) Epoch 18, batch 1650, loss[loss=0.1956, simple_loss=0.2767, pruned_loss=0.04101, ctc_loss=0.08102, over 19673.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2612, pruned_loss=0.04081, ctc_loss=0.07637, over 3832693.46 frames. ], batch size: 59, lr: 8.29e-03, grad_scale: 32.0
+2024-08-27 04:42:16,286 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.559e+02 1.894e+02 2.296e+02 3.896e+02, threshold=3.788e+02, percent-clipped=3.0
+2024-08-27 04:42:25,613 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=234586.66666666666, ans=0.125
+2024-08-27 04:42:35,873 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/checkpoint-44000.pt
+2024-08-27 04:42:56,000 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.84 vs. limit=15.0
+2024-08-27 04:43:00,728 INFO [train.py:1114] (0/4) Epoch 18, batch 1700, loss[loss=0.1772, simple_loss=0.2399, pruned_loss=0.04169, ctc_loss=0.07774, over 19685.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2608, pruned_loss=0.04043, ctc_loss=0.07573, over 3846869.25 frames. ], batch size: 46, lr: 8.28e-03, grad_scale: 32.0
+2024-08-27 04:43:15,502 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=234800.0, ans=0.125
+2024-08-27 04:43:17,963 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=234800.0, ans=0.95
+2024-08-27 04:43:18,101 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=234800.0, ans=0.125
+2024-08-27 04:43:28,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=234853.33333333334, ans=0.035
+2024-08-27 04:43:32,417 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=234906.66666666666, ans=0.125
+2024-08-27 04:43:46,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=234960.0, ans=0.1
+2024-08-27 04:43:46,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=234960.0, ans=0.2
+2024-08-27 04:43:53,906 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=234960.0, ans=0.2
+2024-08-27 04:43:59,944 INFO [train.py:1114] (0/4) Epoch 18, batch 1750, loss[loss=0.1725, simple_loss=0.2391, pruned_loss=0.0389, ctc_loss=0.07031, over 19659.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2602, pruned_loss=0.04019, ctc_loss=0.07528, over 3850963.92 frames. ], batch size: 45, lr: 8.28e-03, grad_scale: 32.0
+2024-08-27 04:44:05,402 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=235013.33333333334, ans=0.125
+2024-08-27 04:44:14,413 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=235066.66666666666, ans=0.125
+2024-08-27 04:44:16,974 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.479e+02 1.670e+02 2.161e+02 3.908e+02, threshold=3.340e+02, percent-clipped=1.0
+2024-08-27 04:44:18,843 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=235120.0, ans=0.0
+2024-08-27 04:44:20,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=235120.0, ans=0.125
+2024-08-27 04:44:21,110 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.47 vs. limit=15.0
+2024-08-27 04:44:28,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=235173.33333333334, ans=0.025
+2024-08-27 04:44:33,658 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=235173.33333333334, ans=0.125
+2024-08-27 04:44:50,515 INFO [train.py:1114] (0/4) Epoch 18, batch 1800, loss[loss=0.1775, simple_loss=0.2637, pruned_loss=0.03388, ctc_loss=0.05899, over 19610.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2604, pruned_loss=0.04006, ctc_loss=0.07491, over 3852616.49 frames. ], batch size: 55, lr: 8.27e-03, grad_scale: 16.0
+2024-08-27 04:44:56,810 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=235280.0, ans=0.0
+2024-08-27 04:44:56,881 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=235280.0, ans=0.125
+2024-08-27 04:45:06,401 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=235333.33333333334, ans=0.2
+2024-08-27 04:45:10,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=235386.66666666666, ans=0.025
+2024-08-27 04:45:23,495 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=235386.66666666666, ans=0.025
+2024-08-27 04:45:27,082 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=235440.0, ans=0.09899494936611666
+2024-08-27 04:45:30,960 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=235440.0, ans=0.0
+2024-08-27 04:45:35,491 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=235440.0, ans=0.1
+2024-08-27 04:45:36,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=235440.0, ans=0.0
+2024-08-27 04:45:48,623 INFO [train.py:1114] (0/4) Epoch 18, batch 1850, loss[loss=0.2133, simple_loss=0.2846, pruned_loss=0.05188, ctc_loss=0.09554, over 19590.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2605, pruned_loss=0.04005, ctc_loss=0.07488, over 3855800.83 frames. ], batch size: 57, lr: 8.27e-03, grad_scale: 8.0
+2024-08-27 04:45:52,348 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=235546.66666666666, ans=0.125
+2024-08-27 04:46:34,512 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.73 vs. limit=22.5
+2024-08-27 04:46:38,525 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.500e+02 1.800e+02 2.247e+02 4.177e+02, threshold=3.601e+02, percent-clipped=3.0
+2024-08-27 04:46:40,622 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.69 vs. limit=15.0
+2024-08-27 04:46:44,449 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=235653.33333333334, ans=0.125
+2024-08-27 04:46:52,598 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=235706.66666666666, ans=0.025
+2024-08-27 04:46:56,811 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=235760.0, ans=0.125
+2024-08-27 04:47:04,777 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=235760.0, ans=0.125
+2024-08-27 04:47:06,326 INFO [train.py:1114] (0/4) Epoch 18, batch 1900, loss[loss=0.2, simple_loss=0.2776, pruned_loss=0.04488, ctc_loss=0.08143, over 19649.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2611, pruned_loss=0.04029, ctc_loss=0.07527, over 3860635.93 frames. ], batch size: 59, lr: 8.26e-03, grad_scale: 8.0
+2024-08-27 04:47:07,287 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=235813.33333333334, ans=0.035
+2024-08-27 04:47:09,891 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=235813.33333333334, ans=0.0
+2024-08-27 04:47:22,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=235920.0, ans=0.125
+2024-08-27 04:47:29,498 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=235920.0, ans=0.125
+2024-08-27 04:47:44,548 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.04 vs. limit=15.0
+2024-08-27 04:47:46,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=236026.66666666666, ans=0.0
+2024-08-27 04:47:46,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=236026.66666666666, ans=0.125
+2024-08-27 04:47:51,925 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=236026.66666666666, ans=0.125
+2024-08-27 04:47:53,590 INFO [train.py:1114] (0/4) Epoch 18, batch 1950, loss[loss=0.1751, simple_loss=0.2563, pruned_loss=0.03407, ctc_loss=0.0642, over 19579.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.262, pruned_loss=0.04032, ctc_loss=0.07546, over 3870189.79 frames. ], batch size: 52, lr: 8.26e-03, grad_scale: 8.0
+2024-08-27 04:47:54,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=236080.0, ans=0.04949747468305833
+2024-08-27 04:48:06,888 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=236133.33333333334, ans=0.125
+2024-08-27 04:48:07,707 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=236133.33333333334, ans=0.07
+2024-08-27 04:48:09,454 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=236133.33333333334, ans=0.1
+2024-08-27 04:48:12,688 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.245e+02 1.481e+02 1.697e+02 2.159e+02 5.555e+02, threshold=3.394e+02, percent-clipped=1.0
+2024-08-27 04:48:13,282 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.31 vs. limit=15.0
+2024-08-27 04:48:20,709 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=236186.66666666666, ans=0.025
+2024-08-27 04:48:35,599 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=236293.33333333334, ans=0.2
+2024-08-27 04:48:50,294 INFO [train.py:1114] (0/4) Epoch 18, batch 2000, loss[loss=0.1634, simple_loss=0.2308, pruned_loss=0.03579, ctc_loss=0.06111, over 19655.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2625, pruned_loss=0.04063, ctc_loss=0.07603, over 3855253.43 frames. ], batch size: 45, lr: 8.25e-03, grad_scale: 8.0
+2024-08-27 04:48:58,731 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=236346.66666666666, ans=0.125
+2024-08-27 04:49:06,877 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:49:46,594 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=236453.33333333334, ans=0.1
+2024-08-27 04:49:47,057 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.87 vs. limit=12.0
+2024-08-27 04:50:16,429 INFO [train.py:1114] (0/4) Epoch 18, batch 2050, loss[loss=0.1706, simple_loss=0.2381, pruned_loss=0.03808, ctc_loss=0.0674, over 19732.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2616, pruned_loss=0.04063, ctc_loss=0.0761, over 3850587.51 frames. ], batch size: 47, lr: 8.25e-03, grad_scale: 8.0
+2024-08-27 04:50:46,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=236720.0, ans=0.0
+2024-08-27 04:50:47,338 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.468e+02 1.842e+02 2.423e+02 4.039e+02, threshold=3.684e+02, percent-clipped=4.0
+2024-08-27 04:50:53,942 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.99 vs. limit=22.5
+2024-08-27 04:51:07,612 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=236826.66666666666, ans=0.0
+2024-08-27 04:51:13,548 INFO [train.py:1114] (0/4) Epoch 18, batch 2100, loss[loss=0.1773, simple_loss=0.2563, pruned_loss=0.03593, ctc_loss=0.06647, over 19774.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.2614, pruned_loss=0.0405, ctc_loss=0.07575, over 3858150.81 frames. ], batch size: 54, lr: 8.25e-03, grad_scale: 8.0
+2024-08-27 04:51:32,876 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.23 vs. limit=15.0
+2024-08-27 04:51:34,168 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=236933.33333333334, ans=0.1
+2024-08-27 04:52:03,074 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=237040.0, ans=0.2
+2024-08-27 04:52:03,159 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=237040.0, ans=0.1
+2024-08-27 04:52:11,699 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=237093.33333333334, ans=0.125
+2024-08-27 04:52:13,279 INFO [train.py:1114] (0/4) Epoch 18, batch 2150, loss[loss=0.1783, simple_loss=0.2565, pruned_loss=0.03616, ctc_loss=0.06959, over 19594.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2605, pruned_loss=0.04021, ctc_loss=0.07501, over 3868534.95 frames. ], batch size: 52, lr: 8.24e-03, grad_scale: 8.0
+2024-08-27 04:52:14,011 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=237146.66666666666, ans=0.0
+2024-08-27 04:52:26,865 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=237200.0, ans=0.125
+2024-08-27 04:52:30,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=237253.33333333334, ans=0.125
+2024-08-27 04:52:30,463 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=237253.33333333334, ans=0.125
+2024-08-27 04:52:31,101 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.418e+02 1.667e+02 2.145e+02 4.483e+02, threshold=3.333e+02, percent-clipped=3.0
+2024-08-27 04:52:33,026 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=237253.33333333334, ans=0.125
+2024-08-27 04:52:33,060 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=237253.33333333334, ans=0.1
+2024-08-27 04:52:39,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=237306.66666666666, ans=0.125
+2024-08-27 04:52:56,554 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=237413.33333333334, ans=0.09899494936611666
+2024-08-27 04:52:57,215 INFO [train.py:1114] (0/4) Epoch 18, batch 2200, loss[loss=0.1778, simple_loss=0.2534, pruned_loss=0.03787, ctc_loss=0.06581, over 19586.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2603, pruned_loss=0.04011, ctc_loss=0.07486, over 3867468.35 frames. ], batch size: 57, lr: 8.24e-03, grad_scale: 8.0
+2024-08-27 04:53:45,576 INFO [train.py:1114] (0/4) Epoch 18, batch 2250, loss[loss=0.1942, simple_loss=0.276, pruned_loss=0.04037, ctc_loss=0.0793, over 19603.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2609, pruned_loss=0.0402, ctc_loss=0.07514, over 3867383.66 frames. ], batch size: 55, lr: 8.23e-03, grad_scale: 8.0
+2024-08-27 04:58:07,202 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.445e+02 1.673e+02 2.181e+02 3.635e+02, threshold=3.347e+02, percent-clipped=1.0
+2024-08-27 04:59:44,893 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=237893.33333333334, ans=0.125
+2024-08-27 05:00:05,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.93 vs. limit=15.0
+2024-08-27 05:00:07,276 INFO [train.py:1114] (0/4) Epoch 18, batch 2300, loss[loss=0.1878, simple_loss=0.2588, pruned_loss=0.04297, ctc_loss=0.07712, over 19496.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2602, pruned_loss=0.04024, ctc_loss=0.07536, over 3860900.49 frames. ], batch size: 49, lr: 8.23e-03, grad_scale: 8.0
+2024-08-27 05:04:40,698 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.55 vs. limit=15.0
+2024-08-27 05:04:54,667 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.06 vs. limit=10.0
+2024-08-27 05:05:47,816 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=238106.66666666666, ans=0.025
+2024-08-27 05:05:52,863 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.25 vs. limit=22.5
+2024-08-27 05:06:01,801 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=238160.0, ans=0.025
+2024-08-27 05:06:14,066 INFO [train.py:1114] (0/4) Epoch 18, batch 2350, loss[loss=0.2076, simple_loss=0.2815, pruned_loss=0.04885, ctc_loss=0.0899, over 19647.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2605, pruned_loss=0.04063, ctc_loss=0.07596, over 3863844.14 frames. ], batch size: 63, lr: 8.22e-03, grad_scale: 8.0
+2024-08-27 05:09:12,948 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=238266.66666666666, ans=0.025
+2024-08-27 05:09:45,141 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.379e+02 1.605e+02 2.102e+02 3.614e+02, threshold=3.209e+02, percent-clipped=2.0
+2024-08-27 05:10:12,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=238320.0, ans=0.1
+2024-08-27 05:10:20,259 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.19 vs. limit=10.0
+2024-08-27 05:11:02,012 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=238373.33333333334, ans=0.0
+2024-08-27 05:11:47,611 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=238426.66666666666, ans=0.0
+2024-08-27 05:11:47,693 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=238426.66666666666, ans=0.125
+2024-08-27 05:11:49,633 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=238426.66666666666, ans=0.025
+2024-08-27 05:11:54,344 INFO [train.py:1114] (0/4) Epoch 18, batch 2400, loss[loss=0.2085, simple_loss=0.2752, pruned_loss=0.05248, ctc_loss=0.0918, over 19382.00 frames. ], tot_loss[loss=0.188, simple_loss=0.2625, pruned_loss=0.04129, ctc_loss=0.07711, over 3858008.55 frames. ], batch size: 67, lr: 8.22e-03, grad_scale: 16.0
+2024-08-27 05:13:54,665 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=238640.0, ans=0.025
+2024-08-27 05:14:34,817 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.77 vs. limit=10.0
+2024-08-27 05:14:36,108 INFO [train.py:1114] (0/4) Epoch 18, batch 2450, loss[loss=0.2496, simple_loss=0.2951, pruned_loss=0.07462, ctc_loss=0.1374, over 13211.00 frames. ], tot_loss[loss=0.1931, simple_loss=0.2661, pruned_loss=0.04375, ctc_loss=0.08175, over 3733992.69 frames. ], batch size: 140, lr: 8.21e-03, grad_scale: 16.0
+2024-08-27 05:14:39,787 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=238746.66666666666, ans=0.0
+2024-08-27 05:14:45,943 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=9.02 vs. limit=22.5
+2024-08-27 05:15:10,725 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=238800.0, ans=0.2
+2024-08-27 05:15:19,820 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.338e+02 1.631e+02 1.872e+02 2.220e+02 3.951e+02, threshold=3.743e+02, percent-clipped=5.0
+2024-08-27 05:15:29,087 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=238853.33333333334, ans=0.0
+2024-08-27 05:15:33,701 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=238853.33333333334, ans=0.2
+2024-08-27 05:15:46,063 INFO [checkpoint.py:75] (0/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-18.pt
+2024-08-27 05:19:02,739 INFO [train.py:1114] (0/4) Epoch 19, batch 0, loss[loss=0.1853, simple_loss=0.2584, pruned_loss=0.04146, ctc_loss=0.0731, over 19421.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2584, pruned_loss=0.04146, ctc_loss=0.0731, over 19421.00 frames. ], batch size: 48, lr: 7.99e-03, grad_scale: 32.0
+2024-08-27 05:19:02,740 INFO [train.py:1137] (0/4) Computing validation loss
+2024-08-27 05:20:01,889 INFO [zipformer.py:1858] (0/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([4.2820, 3.6340, 4.1929, 4.1283], device='cuda:0')
+2024-08-27 05:20:05,932 INFO [train.py:1146] (0/4) Epoch 19, validation: loss=0.1709, simple_loss=0.2636, pruned_loss=0.02933, ctc_loss=0.04896, over 944034.00 frames.
+2024-08-27 05:20:05,933 INFO [train.py:1147] (0/4) Maximum memory allocated so far is 13201MB
+2024-08-27 05:20:07,143 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=238954.66666666666, ans=0.125
+2024-08-27 05:20:09,786 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=238954.66666666666, ans=0.0
+2024-08-27 05:21:28,805 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=239114.66666666666, ans=0.125
+2024-08-27 05:21:35,407 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:22:54,439 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=239221.33333333334, ans=0.125
+2024-08-27 05:22:55,134 INFO [train.py:1114] (0/4) Epoch 19, batch 50, loss[loss=0.1762, simple_loss=0.2477, pruned_loss=0.03737, ctc_loss=0.07481, over 19679.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2619, pruned_loss=0.03972, ctc_loss=0.07474, over 845033.20 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-27 05:23:20,778 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:23:21,648 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=239221.33333333334, ans=0.07
+2024-08-27 05:23:28,900 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.91 vs. limit=15.0
+2024-08-27 05:23:46,795 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=239328.0, ans=0.0
+2024-08-27 05:23:58,973 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-27 05:23:59,559 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.492e+02 1.734e+02 2.135e+02 3.431e+02, threshold=3.468e+02, percent-clipped=0.0
+2024-08-27 05:24:15,340 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.67 vs. limit=22.5
+2024-08-27 05:24:18,947 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2.whitening_limit, batch_count=239488.0, ans=15.0
+2024-08-27 05:24:19,424 INFO [train.py:1114] (0/4) Epoch 19, batch 100, loss[loss=0.1682, simple_loss=0.2427, pruned_loss=0.03464, ctc_loss=0.06132, over 19728.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2638, pruned_loss=0.04052, ctc_loss=0.07626, over 1498716.65 frames. ], batch size: 51, lr: 7.98e-03, grad_scale: 32.0
+2024-08-27 05:24:26,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=239488.0, ans=0.2
+2024-08-27 05:24:34,359 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=239541.33333333334, ans=0.0
+2024-08-27 05:24:38,957 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=239541.33333333334, ans=0.125
+2024-08-27 05:24:39,798 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=239594.66666666666, ans=0.0
+2024-08-27 05:25:42,663 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.32 vs. limit=12.0
+2024-08-27 05:25:46,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=239648.0, ans=0.0
+2024-08-27 05:25:52,794 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=239648.0, ans=0.0
+2024-08-27 05:25:55,913 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.14 vs. limit=15.0
+2024-08-27 05:26:05,121 INFO [train.py:1114] (0/4) Epoch 19, batch 150, loss[loss=0.167, simple_loss=0.235, pruned_loss=0.03683, ctc_loss=0.06332, over 19754.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2616, pruned_loss=0.0403, ctc_loss=0.07568, over 2027546.02 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-27 05:27:05,022 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=239861.33333333334, ans=0.0
+2024-08-27 05:27:11,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=239861.33333333334, ans=0.125
+2024-08-27 05:27:18,307 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=239914.66666666666, ans=0.1
+2024-08-27 05:27:20,080 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=239914.66666666666, ans=0.0
+2024-08-27 05:27:20,691 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.059e+02 1.500e+02 1.966e+02 2.497e+02 3.604e+02, threshold=3.932e+02, percent-clipped=3.0
+2024-08-27 05:27:27,399 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=239914.66666666666, ans=0.125
+2024-08-27 05:27:47,326 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=239968.0, ans=0.0
+2024-08-27 05:27:47,473 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=239968.0, ans=0.0
+2024-08-27 05:27:59,141 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=239968.0, ans=0.125
+2024-08-27 05:28:10,142 INFO [train.py:1114] (0/4) Epoch 19, batch 200, loss[loss=0.2011, simple_loss=0.27, pruned_loss=0.04855, ctc_loss=0.0874, over 18444.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2598, pruned_loss=0.03992, ctc_loss=0.07469, over 2435779.19 frames. ], batch size: 85, lr: 7.97e-03, grad_scale: 32.0
+2024-08-27 05:28:10,306 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=240021.33333333334, ans=0.0
+2024-08-27 05:28:13,988 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=240021.33333333334, ans=0.125
+2024-08-27 05:28:21,226 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:28:28,517 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=240128.0, ans=0.125
+2024-08-27 05:29:09,746 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_na.min_abs, batch_count=240128.0, ans=0.02
+2024-08-27 05:29:18,108 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=240181.33333333334, ans=0.125
+2024-08-27 05:29:27,151 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=240234.66666666666, ans=0.0
+2024-08-27 05:29:32,168 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.30 vs. limit=15.0
+2024-08-27 05:29:34,217 INFO [train.py:1114] (0/4) Epoch 19, batch 250, loss[loss=0.1965, simple_loss=0.2702, pruned_loss=0.04488, ctc_loss=0.08258, over 19356.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.2588, pruned_loss=0.03931, ctc_loss=0.07378, over 2756817.04 frames. ], batch size: 67, lr: 7.97e-03, grad_scale: 32.0
+2024-08-27 05:29:39,043 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=240288.0, ans=0.0
+2024-08-27 05:29:47,519 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.66 vs. limit=15.0
+2024-08-27 05:29:58,183 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=240394.66666666666, ans=0.0
+2024-08-27 05:30:02,558 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.446e+02 1.683e+02 2.499e+02 4.574e+02, threshold=3.367e+02, percent-clipped=7.0
+2024-08-27 05:30:05,568 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=240448.0, ans=0.125
+2024-08-27 05:30:09,331 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=240448.0, ans=0.2
+2024-08-27 05:30:10,152 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240448.0, ans=0.1
+2024-08-27 05:30:14,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=240501.33333333334, ans=0.025
+2024-08-27 05:30:16,089 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=7.68 vs. limit=15.0
+2024-08-27 05:30:22,802 INFO [train.py:1114] (0/4) Epoch 19, batch 300, loss[loss=0.203, simple_loss=0.2805, pruned_loss=0.04571, ctc_loss=0.08512, over 19482.00 frames. ], tot_loss[loss=0.183, simple_loss=0.2586, pruned_loss=0.03905, ctc_loss=0.07332, over 3000995.93 frames. ], batch size: 61, lr: 7.96e-03, grad_scale: 32.0
+2024-08-27 05:30:43,062 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=240661.33333333334, ans=0.0
+2024-08-27 05:30:55,808 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=240714.66666666666, ans=0.0
+2024-08-27 05:31:00,212 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=240768.0, ans=0.0
+2024-08-27 05:31:07,494 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=240768.0, ans=0.0
+2024-08-27 05:31:09,976 INFO [train.py:1114] (0/4) Epoch 19, batch 350, loss[loss=0.1755, simple_loss=0.2445, pruned_loss=0.03808, ctc_loss=0.07572, over 19748.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2595, pruned_loss=0.03934, ctc_loss=0.07366, over 3190586.01 frames. ], batch size: 48, lr: 7.96e-03, grad_scale: 32.0
+2024-08-27 05:31:25,514 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=240874.66666666666, ans=0.1
+2024-08-27 05:31:26,582 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=240874.66666666666, ans=0.125
+2024-08-27 05:31:39,922 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.453e+02 1.753e+02 2.405e+02 3.677e+02, threshold=3.507e+02, percent-clipped=2.0
+2024-08-27 05:31:51,949 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=241034.66666666666, ans=0.0
+2024-08-27 05:31:53,028 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=241034.66666666666, ans=0.125
+2024-08-27 05:31:56,471 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=241088.0, ans=0.125
+2024-08-27 05:31:57,288 INFO [train.py:1114] (0/4) Epoch 19, batch 400, loss[loss=0.1851, simple_loss=0.265, pruned_loss=0.03779, ctc_loss=0.07396, over 19494.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2592, pruned_loss=0.03924, ctc_loss=0.07358, over 3342052.19 frames. ], batch size: 54, lr: 7.95e-03, grad_scale: 32.0
+2024-08-27 05:32:09,337 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=241088.0, ans=0.2
+2024-08-27 05:32:35,233 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=241194.66666666666, ans=0.125
+2024-08-27 05:32:46,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=241248.0, ans=0.125
+2024-08-27 05:32:53,897 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=241301.33333333334, ans=0.0
+2024-08-27 05:32:56,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=241301.33333333334, ans=0.125
+2024-08-27 05:32:59,289 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:33:00,037 INFO [train.py:1114] (0/4) Epoch 19, batch 450, loss[loss=0.1805, simple_loss=0.2678, pruned_loss=0.03303, ctc_loss=0.06791, over 19626.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2591, pruned_loss=0.03919, ctc_loss=0.07341, over 3450414.33 frames. ], batch size: 55, lr: 7.95e-03, grad_scale: 32.0
+2024-08-27 05:33:10,273 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=241354.66666666666, ans=0.0
+2024-08-27 05:33:10,991 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=241408.0, ans=0.1
+2024-08-27 05:33:11,133 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=241408.0, ans=0.07
+2024-08-27 05:33:12,864 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=241408.0, ans=0.025
+2024-08-27 05:33:20,124 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=241461.33333333334, ans=0.125
+2024-08-27 05:33:20,532 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.90 vs. limit=15.0
+2024-08-27 05:33:25,649 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=241461.33333333334, ans=0.125
+2024-08-27 05:33:26,705 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=241461.33333333334, ans=0.125
+2024-08-27 05:33:30,920 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.397e+02 1.631e+02 2.046e+02 3.175e+02, threshold=3.262e+02, percent-clipped=0.0
+2024-08-27 05:33:49,302 INFO [train.py:1114] (0/4) Epoch 19, batch 500, loss[loss=0.1959, simple_loss=0.2738, pruned_loss=0.04316, ctc_loss=0.07913, over 19656.00 frames. ], tot_loss[loss=0.1829, simple_loss=0.2585, pruned_loss=0.03903, ctc_loss=0.07307, over 3545263.34 frames. ], batch size: 63, lr: 7.95e-03, grad_scale: 32.0
+2024-08-27 05:34:12,528 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=241728.0, ans=0.125
+2024-08-27 05:34:20,050 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=241781.33333333334, ans=0.125
+2024-08-27 05:34:20,986 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=241781.33333333334, ans=0.125
+2024-08-27 05:34:23,671 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=241781.33333333334, ans=0.2
+2024-08-27 05:34:24,929 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.83 vs. limit=15.0
+2024-08-27 05:34:33,779 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-27 05:34:34,590 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=241834.66666666666, ans=0.1
+2024-08-27 05:34:39,019 INFO [train.py:1114] (0/4) Epoch 19, batch 550, loss[loss=0.1888, simple_loss=0.2617, pruned_loss=0.04219, ctc_loss=0.07897, over 19368.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2592, pruned_loss=0.03938, ctc_loss=0.07376, over 3607835.04 frames. ], batch size: 71, lr: 7.94e-03, grad_scale: 32.0
+2024-08-27 05:34:40,086 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=241888.0, ans=0.2
+2024-08-27 05:35:07,797 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=242048.0, ans=0.0
+2024-08-27 05:35:09,357 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.385e+02 1.667e+02 1.980e+02 3.512e+02, threshold=3.334e+02, percent-clipped=2.0
+2024-08-27 05:35:18,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=242101.33333333334, ans=0.0
+2024-08-27 05:35:24,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=242101.33333333334, ans=0.1
+2024-08-27 05:35:27,094 INFO [train.py:1114] (0/4) Epoch 19, batch 600, loss[loss=0.1877, simple_loss=0.269, pruned_loss=0.0388, ctc_loss=0.07183, over 19400.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2591, pruned_loss=0.03922, ctc_loss=0.07339, over 3666587.04 frames. ], batch size: 67, lr: 7.94e-03, grad_scale: 32.0
+2024-08-27 05:35:42,946 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=242208.0, ans=0.125
+2024-08-27 05:35:43,863 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=242208.0, ans=0.1
+2024-08-27 05:36:18,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=242368.0, ans=0.025
+2024-08-27 05:36:19,762 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=242368.0, ans=0.0
+2024-08-27 05:36:23,049 INFO [train.py:1114] (0/4) Epoch 19, batch 650, loss[loss=0.1642, simple_loss=0.239, pruned_loss=0.03217, ctc_loss=0.06266, over 19773.00 frames. ], tot_loss[loss=0.1828, simple_loss=0.2583, pruned_loss=0.03905, ctc_loss=0.0731, over 3716546.96 frames. ], batch size: 54, lr: 7.93e-03, grad_scale: 32.0
+2024-08-27 05:36:23,370 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=242421.33333333334, ans=0.04949747468305833
+2024-08-27 05:36:39,870 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.49 vs. limit=22.5
+2024-08-27 05:36:53,251 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.470e+02 1.907e+02 2.471e+02 4.129e+02, threshold=3.814e+02, percent-clipped=9.0
+2024-08-27 05:36:57,434 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.67 vs. limit=15.0
+2024-08-27 05:37:00,970 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=242634.66666666666, ans=0.0
+2024-08-27 05:37:05,065 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.02 vs. limit=12.0
+2024-08-27 05:37:30,670 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=242634.66666666666, ans=0.2
+2024-08-27 05:37:33,297 INFO [train.py:1114] (0/4) Epoch 19, batch 700, loss[loss=0.1823, simple_loss=0.2555, pruned_loss=0.04009, ctc_loss=0.07244, over 19733.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2586, pruned_loss=0.03933, ctc_loss=0.07347, over 3748806.62 frames. ], batch size: 51, lr: 7.93e-03, grad_scale: 32.0
+2024-08-27 05:37:35,342 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=242688.0, ans=0.125
+2024-08-27 05:37:44,343 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=242741.33333333334, ans=0.95
+2024-08-27 05:37:49,866 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=242741.33333333334, ans=0.125
+2024-08-27 05:37:55,791 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.39 vs. limit=6.0
+2024-08-27 05:37:59,856 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=242794.66666666666, ans=0.125
+2024-08-27 05:38:08,427 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=242848.0, ans=0.0
+2024-08-27 05:38:11,167 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=242901.33333333334, ans=0.0
+2024-08-27 05:38:23,006 INFO [train.py:1114] (0/4) Epoch 19, batch 750, loss[loss=0.1847, simple_loss=0.2737, pruned_loss=0.03533, ctc_loss=0.06268, over 19496.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2588, pruned_loss=0.03949, ctc_loss=0.07379, over 3774571.12 frames. ], batch size: 54, lr: 7.92e-03, grad_scale: 32.0
+2024-08-27 05:38:25,913 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:38:27,949 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.14 vs. limit=10.0
+2024-08-27 05:38:31,582 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.28 vs. limit=15.0
+2024-08-27 05:38:34,386 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.89 vs. limit=15.0
+2024-08-27 05:38:37,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=243008.0, ans=0.1
+2024-08-27 05:38:51,402 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.489e+02 1.823e+02 2.314e+02 3.772e+02, threshold=3.647e+02, percent-clipped=0.0
+2024-08-27 05:38:52,508 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=243114.66666666666, ans=0.0
+2024-08-27 05:38:55,428 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.63 vs. limit=10.0
+2024-08-27 05:39:04,770 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=243168.0, ans=0.125
+2024-08-27 05:39:11,744 INFO [train.py:1114] (0/4) Epoch 19, batch 800, loss[loss=0.1659, simple_loss=0.2387, pruned_loss=0.03408, ctc_loss=0.06263, over 19802.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2591, pruned_loss=0.03975, ctc_loss=0.07419, over 3795407.26 frames. ], batch size: 49, lr: 7.92e-03, grad_scale: 32.0
+2024-08-27 05:39:23,802 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=243274.66666666666, ans=0.05
+2024-08-27 05:39:40,703 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=243381.33333333334, ans=0.2
+2024-08-27 05:39:58,025 INFO [train.py:1114] (0/4) Epoch 19, batch 850, loss[loss=0.1802, simple_loss=0.2673, pruned_loss=0.03375, ctc_loss=0.064, over 19670.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2589, pruned_loss=0.03959, ctc_loss=0.07383, over 3815217.60 frames. ], batch size: 59, lr: 7.92e-03, grad_scale: 32.0
+2024-08-27 05:40:07,485 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=243488.0, ans=0.2
+2024-08-27 05:40:12,029 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=243541.33333333334, ans=0.125
+2024-08-27 05:40:28,699 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.388e+02 1.609e+02 2.074e+02 4.897e+02, threshold=3.218e+02, percent-clipped=1.0
+2024-08-27 05:40:33,896 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.60 vs. limit=15.0
+2024-08-27 05:40:42,590 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=243701.33333333334, ans=0.125
+2024-08-27 05:40:49,412 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.34 vs. limit=6.0
+2024-08-27 05:40:51,593 INFO [train.py:1114] (0/4) Epoch 19, batch 900, loss[loss=0.1698, simple_loss=0.2452, pruned_loss=0.0344, ctc_loss=0.06385, over 19806.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2591, pruned_loss=0.03971, ctc_loss=0.07401, over 3819233.74 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-27 05:41:48,622 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=243861.33333333334, ans=0.125
+2024-08-27 05:45:20,784 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=243914.66666666666, ans=0.125
+2024-08-27 05:45:28,371 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=243914.66666666666, ans=0.125
+2024-08-27 05:45:29,312 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=243914.66666666666, ans=0.125
+2024-08-27 05:46:16,458 INFO [train.py:1114] (0/4) Epoch 19, batch 950, loss[loss=0.1629, simple_loss=0.2371, pruned_loss=0.03277, ctc_loss=0.058, over 19501.00 frames. ], tot_loss[loss=0.1845, simple_loss=0.2595, pruned_loss=0.0399, ctc_loss=0.07434, over 3820559.24 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-27 05:46:40,176 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=244074.66666666666, ans=0.125
+2024-08-27 05:46:49,949 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:46:54,759 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=244128.0, ans=0.0
+2024-08-27 05:47:00,337 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.59 vs. limit=12.0
+2024-08-27 05:47:02,964 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=244128.0, ans=0.125
+2024-08-27 05:47:05,282 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.465e+02 1.729e+02 2.037e+02 3.385e+02, threshold=3.459e+02, percent-clipped=1.0
+2024-08-27 05:47:07,400 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=244181.33333333334, ans=0.07
+2024-08-27 05:47:23,106 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.10 vs. limit=15.0
+2024-08-27 05:47:24,467 INFO [train.py:1114] (0/4) Epoch 19, batch 1000, loss[loss=0.1667, simple_loss=0.2533, pruned_loss=0.0293, ctc_loss=0.05386, over 19856.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2606, pruned_loss=0.04036, ctc_loss=0.07521, over 3815653.69 frames. ], batch size: 52, lr: 7.90e-03, grad_scale: 32.0
+2024-08-27 05:47:36,882 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=244341.33333333334, ans=0.125
+2024-08-27 05:47:38,079 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.91 vs. limit=22.5
+2024-08-27 05:47:44,184 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=244394.66666666666, ans=0.09899494936611666
+2024-08-27 05:47:44,338 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.47 vs. limit=6.0
+2024-08-27 05:48:05,603 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=244501.33333333334, ans=0.5
+2024-08-27 05:48:10,237 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=244501.33333333334, ans=0.125
+2024-08-27 05:48:12,792 INFO [train.py:1114] (0/4) Epoch 19, batch 1050, loss[loss=0.1901, simple_loss=0.2712, pruned_loss=0.03948, ctc_loss=0.07512, over 19834.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2597, pruned_loss=0.04007, ctc_loss=0.07483, over 3822306.47 frames. ], batch size: 57, lr: 7.90e-03, grad_scale: 32.0
+2024-08-27 05:48:18,410 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=244554.66666666666, ans=0.1
+2024-08-27 05:48:27,639 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=244608.0, ans=0.95
+2024-08-27 05:48:35,822 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=244661.33333333334, ans=0.125
+2024-08-27 05:48:36,266 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.28 vs. limit=15.0
+2024-08-27 05:48:42,874 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.407e+02 1.559e+02 1.901e+02 2.565e+02, threshold=3.118e+02, percent-clipped=0.0
+2024-08-27 05:48:47,766 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=244714.66666666666, ans=0.0
+2024-08-27 05:49:02,407 INFO [train.py:1114] (0/4) Epoch 19, batch 1100, loss[loss=0.1714, simple_loss=0.2462, pruned_loss=0.03549, ctc_loss=0.0642, over 19574.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2593, pruned_loss=0.03969, ctc_loss=0.0743, over 3830280.28 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-27 05:49:12,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=244874.66666666666, ans=0.0
+2024-08-27 05:49:34,245 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=244981.33333333334, ans=0.125
+2024-08-27 05:49:51,755 INFO [train.py:1114] (0/4) Epoch 19, batch 1150, loss[loss=0.1547, simple_loss=0.2325, pruned_loss=0.02783, ctc_loss=0.05308, over 19596.00 frames. ], tot_loss[loss=0.1845, simple_loss=0.2594, pruned_loss=0.03982, ctc_loss=0.07464, over 3828089.38 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-27 05:49:58,563 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=245088.0, ans=0.125
+2024-08-27 05:51:13,107 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=245141.33333333334, ans=0.2
+2024-08-27 05:51:15,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=245141.33333333334, ans=0.0
+2024-08-27 05:51:16,050 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.02 vs. limit=15.0
+2024-08-27 05:51:19,939 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.51 vs. limit=15.0
+2024-08-27 05:51:34,211 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=245194.66666666666, ans=0.125
+2024-08-27 05:52:27,619 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.437e+02 1.648e+02 2.100e+02 3.411e+02, threshold=3.296e+02, percent-clipped=3.0
+2024-08-27 05:52:31,396 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=245248.0, ans=0.05
+2024-08-27 05:52:42,600 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=245301.33333333334, ans=0.125
+2024-08-27 05:52:44,364 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=245301.33333333334, ans=0.125
+2024-08-27 05:52:46,998 INFO [train.py:1114] (0/4) Epoch 19, batch 1200, loss[loss=0.1918, simple_loss=0.2639, pruned_loss=0.04264, ctc_loss=0.08591, over 19828.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2598, pruned_loss=0.03966, ctc_loss=0.07437, over 3823389.80 frames. ], batch size: 57, lr: 7.89e-03, grad_scale: 32.0
+2024-08-27 05:52:52,164 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.11 vs. limit=22.5
+2024-08-27 05:53:13,867 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.53 vs. limit=15.0
+2024-08-27 05:53:20,551 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=245514.66666666666, ans=0.1
+2024-08-27 05:53:24,281 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=245514.66666666666, ans=0.1
+2024-08-27 05:53:26,164 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=245568.0, ans=0.0
+2024-08-27 05:53:35,242 INFO [train.py:1114] (0/4) Epoch 19, batch 1250, loss[loss=0.1928, simple_loss=0.2655, pruned_loss=0.04439, ctc_loss=0.0784, over 19532.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2608, pruned_loss=0.04011, ctc_loss=0.07497, over 3841958.57 frames. ], batch size: 61, lr: 7.88e-03, grad_scale: 32.0
+2024-08-27 05:54:02,330 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=245728.0, ans=0.0
+2024-08-27 05:54:04,174 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=245781.33333333334, ans=0.125
+2024-08-27 05:54:05,256 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=245781.33333333334, ans=0.0
+2024-08-27 05:54:05,854 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.471e+02 1.735e+02 2.173e+02 3.319e+02, threshold=3.470e+02, percent-clipped=1.0
+2024-08-27 05:54:06,172 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=245781.33333333334, ans=0.04949747468305833
+2024-08-27 05:54:26,192 INFO [train.py:1114] (0/4) Epoch 19, batch 1300, loss[loss=0.1884, simple_loss=0.2646, pruned_loss=0.04173, ctc_loss=0.07173, over 18889.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2601, pruned_loss=0.03989, ctc_loss=0.0745, over 3846145.41 frames. ], batch size: 76, lr: 7.88e-03, grad_scale: 32.0
+2024-08-27 05:54:30,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=245888.0, ans=0.07
+2024-08-27 05:54:37,911 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=245941.33333333334, ans=0.125
+2024-08-27 05:54:46,093 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=245994.66666666666, ans=0.0
+2024-08-27 05:54:55,616 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=246048.0, ans=0.1
+2024-08-27 05:55:10,493 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=246101.33333333334, ans=0.0
+2024-08-27 05:55:13,165 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=246154.66666666666, ans=0.1
+2024-08-27 05:55:13,889 INFO [train.py:1114] (0/4) Epoch 19, batch 1350, loss[loss=0.1675, simple_loss=0.2512, pruned_loss=0.03072, ctc_loss=0.05604, over 19769.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2594, pruned_loss=0.0395, ctc_loss=0.07374, over 3856948.23 frames. ], batch size: 54, lr: 7.87e-03, grad_scale: 16.0
+2024-08-27 05:55:14,934 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=246154.66666666666, ans=0.125
+2024-08-27 05:55:17,796 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=246154.66666666666, ans=0.125
+2024-08-27 05:55:26,365 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.40 vs. limit=22.5
+2024-08-27 05:55:27,228 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=246208.0, ans=0.125
+2024-08-27 05:55:28,378 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.96 vs. limit=22.5
+2024-08-27 05:55:32,667 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=246208.0, ans=0.125
+2024-08-27 05:55:45,525 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.133e+02 1.414e+02 1.634e+02 2.144e+02 3.359e+02, threshold=3.268e+02, percent-clipped=0.0
+2024-08-27 05:56:03,857 INFO [train.py:1114] (0/4) Epoch 19, batch 1400, loss[loss=0.1426, simple_loss=0.2195, pruned_loss=0.02395, ctc_loss=0.04458, over 19657.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2593, pruned_loss=0.03957, ctc_loss=0.07378, over 3863914.06 frames. ], batch size: 46, lr: 7.87e-03, grad_scale: 16.0
+2024-08-27 05:56:14,706 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.76 vs. limit=12.0
+2024-08-27 05:56:15,185 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=246474.66666666666, ans=0.0
+2024-08-27 05:56:18,919 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=246474.66666666666, ans=0.125
+2024-08-27 05:56:23,153 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.14 vs. limit=6.0
+2024-08-27 05:56:24,529 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=246528.0, ans=0.0
+2024-08-27 05:56:34,361 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.76 vs. limit=15.0
+2024-08-27 05:56:37,697 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=246581.33333333334, ans=0.09899494936611666
+2024-08-27 05:56:42,401 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.26 vs. limit=6.0
+2024-08-27 05:56:44,073 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=246634.66666666666, ans=0.2
+2024-08-27 05:56:45,890 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246634.66666666666, ans=0.1
+2024-08-27 05:56:49,593 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=246634.66666666666, ans=0.125
+2024-08-27 05:56:53,085 INFO [train.py:1114] (0/4) Epoch 19, batch 1450, loss[loss=0.1965, simple_loss=0.2697, pruned_loss=0.04469, ctc_loss=0.08484, over 19667.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2596, pruned_loss=0.03984, ctc_loss=0.07412, over 3862670.75 frames. ], batch size: 63, lr: 7.87e-03, grad_scale: 16.0
+2024-08-27 05:57:23,747 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=246848.0, ans=0.2
+2024-08-27 05:57:25,417 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.422e+02 1.608e+02 1.963e+02 3.546e+02, threshold=3.216e+02, percent-clipped=4.0
+2024-08-27 05:57:32,139 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246901.33333333334, ans=0.1
+2024-08-27 05:57:34,859 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=246901.33333333334, ans=0.125
+2024-08-27 05:57:35,940 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=246901.33333333334, ans=0.0
+2024-08-27 05:57:37,656 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=246901.33333333334, ans=0.1
+2024-08-27 05:57:42,296 INFO [train.py:1114] (0/4) Epoch 19, batch 1500, loss[loss=0.1876, simple_loss=0.2658, pruned_loss=0.03902, ctc_loss=0.07827, over 19578.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2596, pruned_loss=0.03955, ctc_loss=0.07378, over 3862431.58 frames. ], batch size: 57, lr: 7.86e-03, grad_scale: 16.0
+2024-08-27 05:57:53,677 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=247008.0, ans=0.1
+2024-08-27 05:58:44,792 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.10 vs. limit=10.0
+2024-08-27 05:58:59,238 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=247168.0, ans=0.5
+2024-08-27 05:59:01,744 INFO [train.py:1114] (0/4) Epoch 19, batch 1550, loss[loss=0.2011, simple_loss=0.2785, pruned_loss=0.04441, ctc_loss=0.08739, over 19613.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2595, pruned_loss=0.03958, ctc_loss=0.07402, over 3847641.78 frames. ], batch size: 60, lr: 7.86e-03, grad_scale: 16.0
+2024-08-27 05:59:16,981 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=247274.66666666666, ans=0.0
+2024-08-27 05:59:32,807 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=247328.0, ans=0.1
+2024-08-27 05:59:38,411 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=247328.0, ans=0.125
+2024-08-27 05:59:43,861 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.416e+02 1.634e+02 2.007e+02 4.215e+02, threshold=3.267e+02, percent-clipped=2.0
+2024-08-27 05:59:45,263 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=247381.33333333334, ans=0.0
+2024-08-27 05:59:46,485 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.44 vs. limit=15.0
+2024-08-27 05:59:51,059 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.54 vs. limit=15.0
+2024-08-27 05:59:59,599 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.77 vs. limit=15.0
+2024-08-27 06:00:02,731 INFO [train.py:1114] (0/4) Epoch 19, batch 1600, loss[loss=0.1812, simple_loss=0.2652, pruned_loss=0.03435, ctc_loss=0.0712, over 19846.00 frames. ], tot_loss[loss=0.1845, simple_loss=0.2596, pruned_loss=0.03979, ctc_loss=0.07431, over 3836418.62 frames. ], batch size: 57, lr: 7.85e-03, grad_scale: 32.0
+2024-08-27 06:00:06,875 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=247488.0, ans=22.5
+2024-08-27 06:00:15,206 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=247541.33333333334, ans=0.1
+2024-08-27 06:00:16,002 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=247541.33333333334, ans=0.1
+2024-08-27 06:00:16,455 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.41 vs. limit=15.0
+2024-08-27 06:00:21,847 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=247594.66666666666, ans=0.0
+2024-08-27 06:00:51,748 INFO [train.py:1114] (0/4) Epoch 19, batch 1650, loss[loss=0.2031, simple_loss=0.2822, pruned_loss=0.04464, ctc_loss=0.087, over 19628.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2597, pruned_loss=0.03999, ctc_loss=0.07454, over 3833472.39 frames. ], batch size: 59, lr: 7.85e-03, grad_scale: 32.0
+2024-08-27 06:01:00,186 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=247808.0, ans=0.125
+2024-08-27 06:01:21,512 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.147e+02 1.539e+02 1.985e+02 2.467e+02 4.637e+02, threshold=3.969e+02, percent-clipped=10.0
+2024-08-27 06:01:35,618 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=247968.0, ans=0.1
+2024-08-27 06:01:39,973 INFO [train.py:1114] (0/4) Epoch 19, batch 1700, loss[loss=0.1847, simple_loss=0.2463, pruned_loss=0.04505, ctc_loss=0.08268, over 19657.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2594, pruned_loss=0.03966, ctc_loss=0.07406, over 3847350.29 frames. ], batch size: 46, lr: 7.84e-03, grad_scale: 32.0
+2024-08-27 06:01:58,752 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=248128.0, ans=0.125
+2024-08-27 06:02:16,321 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=248234.66666666666, ans=0.025
+2024-08-27 06:02:19,226 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.49 vs. limit=15.0
+2024-08-27 06:02:23,585 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.53 vs. limit=15.0
+2024-08-27 06:02:23,934 INFO [train.py:1114] (0/4) Epoch 19, batch 1750, loss[loss=0.1827, simple_loss=0.2444, pruned_loss=0.04414, ctc_loss=0.08169, over 19654.00 frames. ], tot_loss[loss=0.1839, simple_loss=0.2591, pruned_loss=0.03954, ctc_loss=0.07395, over 3852186.62 frames. ], batch size: 45, lr: 7.84e-03, grad_scale: 32.0
+2024-08-27 06:02:31,206 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=248288.0, ans=0.0
+2024-08-27 06:02:57,022 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.179e+02 1.492e+02 1.808e+02 2.313e+02 3.735e+02, threshold=3.616e+02, percent-clipped=0.0
+2024-08-27 06:02:59,018 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=248448.0, ans=0.125
+2024-08-27 06:03:00,291 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.86 vs. limit=22.5
+2024-08-27 06:03:01,610 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=248448.0, ans=0.1
+2024-08-27 06:03:18,745 INFO [train.py:1114] (0/4) Epoch 19, batch 1800, loss[loss=0.18, simple_loss=0.2624, pruned_loss=0.036, ctc_loss=0.06371, over 19604.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2594, pruned_loss=0.03932, ctc_loss=0.07377, over 3854361.58 frames. ], batch size: 55, lr: 7.84e-03, grad_scale: 16.0
+2024-08-27 06:03:22,313 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=248554.66666666666, ans=0.125
+2024-08-27 06:03:23,314 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=248554.66666666666, ans=0.2
+2024-08-27 06:03:30,643 INFO [scaling.py:1024] (0/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=7.72 vs. limit=15.0
+2024-08-27 06:03:55,513 INFO [scaling.py:1024] (0/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.55 vs. limit=8.0
+2024-08-27 06:03:56,849 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=248768.0, ans=0.0
+2024-08-27 06:04:02,716 INFO [train.py:1114] (0/4) Epoch 19, batch 1850, loss[loss=0.1949, simple_loss=0.2738, pruned_loss=0.04257, ctc_loss=0.07726, over 19595.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2595, pruned_loss=0.03943, ctc_loss=0.07381, over 3857567.59 frames. ], batch size: 57, lr: 7.83e-03, grad_scale: 16.0
+2024-08-27 06:04:32,741 WARNING [optim.py:487] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.498e+02 2.037e+02 3.063e+02 6.275e+02, threshold=4.074e+02, percent-clipped=13.0
+2024-08-27 06:04:37,382 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=248981.33333333334, ans=0.1
+2024-08-27 06:04:47,719 INFO [train.py:1114] (0/4) Epoch 19, batch 1900, loss[loss=0.1938, simple_loss=0.2807, pruned_loss=0.0391, ctc_loss=0.07161, over 19668.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2603, pruned_loss=0.03948, ctc_loss=0.07386, over 3862216.50 frames. ], batch size: 59, lr: 7.83e-03, grad_scale: 16.0
+2024-08-27 06:04:47,832 INFO [scaling.py:1120] (0/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 06:04:51,309 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=249088.0, ans=0.1
+2024-08-27 06:05:03,543 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=249141.33333333334, ans=0.0
+2024-08-27 06:05:06,769 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=249194.66666666666, ans=0.125
+2024-08-27 06:05:13,619 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=249194.66666666666, ans=0.125
+2024-08-27 06:05:13,710 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=249194.66666666666, ans=0.025
+2024-08-27 06:05:16,684 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=249248.0, ans=10.0
+2024-08-27 06:05:47,907 INFO [scaling.py:214] (0/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=249248.0, ans=0.125
+2024-08-27 06:05:58,944 INFO [train.py:1114] (0/4) Epoch 19, batch 1950, loss[loss=0.1674, simple_loss=0.2453, pruned_loss=0.03269, ctc_loss=0.06049, over 19586.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.261, pruned_loss=0.03981, ctc_loss=0.07441, over 3871303.88 frames. ], batch size: 52, lr: 7.82e-03, grad_scale: 16.0
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-03-1 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-03-1
new file mode 100644
index 0000000000000000000000000000000000000000..25a77e5c2feca414713fb3d228ea48ff9f0776eb
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-03-1
@@ -0,0 +1,5424 @@
+2024-08-26 14:14:06,055 INFO [train.py:1182] (1/4) Training started
+2024-08-26 14:14:09,228 INFO [train.py:1192] (1/4) Device: cuda:1
+2024-08-26 14:14:11,784 INFO [train.py:1210] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2652.int.cedar.computecanada.ca', 'IP address': '172.16.146.89'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 4, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-26 14:14:11,784 INFO [train.py:1212] (1/4) About to create model
+2024-08-26 14:14:12,458 INFO [train.py:1216] (1/4) Number of model parameters: 65805511
+2024-08-26 14:14:12,459 INFO [checkpoint.py:112] (1/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-3.pt
+2024-08-26 14:14:20,052 INFO [train.py:1231] (1/4) Using DDP
+2024-08-26 14:14:24,078 INFO [train.py:1243] (1/4) Loading optimizer state dict
+2024-08-26 14:14:24,266 INFO [train.py:1251] (1/4) Loading scheduler state dict
+2024-08-26 14:14:24,266 INFO [asr_datamodule.py:894] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:696] (1/4) Disable MUSAN
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:714] (1/4) Enable SpecAugment
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:715] (1/4) Time warp factor: 80
+2024-08-26 14:14:27,297 INFO [asr_datamodule.py:725] (1/4) Num frame mask: 10
+2024-08-26 14:14:27,297 INFO [asr_datamodule.py:738] (1/4) About to create train dataset
+2024-08-26 14:14:27,297 INFO [asr_datamodule.py:765] (1/4) Using DynamicBucketingSampler.
+2024-08-26 14:14:28,822 INFO [asr_datamodule.py:782] (1/4) About to create train dataloader
+2024-08-26 14:14:28,829 INFO [asr_datamodule.py:911] (1/4) About to get dev-clean cuts
+2024-08-26 14:14:31,125 INFO [asr_datamodule.py:918] (1/4) About to get dev-other cuts
+2024-08-26 14:14:32,027 INFO [asr_datamodule.py:814] (1/4) About to create dev dataset
+2024-08-26 14:14:32,333 INFO [asr_datamodule.py:831] (1/4) About to create dev dataloader
+2024-08-26 14:14:32,333 INFO [train.py:1435] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-26 14:18:38,801 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=4, num_channels=128, metric=4.99 vs. limit=3.0
+2024-08-26 14:18:40,629 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:18:41,873 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:18:49,645 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:18:50,847 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:19:04,362 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=384, metric=23.10 vs. limit=7.5
+2024-08-26 14:19:04,871 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:19:05,853 INFO [scaling.py:1024] (1/4) Whitening: name=None, num_groups=1, num_channels=256, metric=4.08 vs. limit=7.5
+2024-08-26 14:19:06,160 INFO [train.py:1463] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:19:06,178 INFO [train.py:1344] (1/4) Loading grad scaler state dict
+2024-08-26 14:19:52,361 INFO [train.py:1114] (1/4) Epoch 4, batch 0, loss[loss=0.2966, simple_loss=0.3244, pruned_loss=0.09742, ctc_loss=0.1849, over 19425.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.3244, pruned_loss=0.09742, ctc_loss=0.1849, over 19425.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-26 14:19:52,361 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 14:20:25,789 INFO [train.py:1146] (1/4) Epoch 4, validation: loss=0.2421, simple_loss=0.3218, pruned_loss=0.05945, ctc_loss=0.1086, over 944034.00 frames.
+2024-08-26 14:20:25,790 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12881MB
+2024-08-26 14:20:26,377 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.26 vs. limit=6.0
+2024-08-26 14:22:31,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=39989.333333333336, ans=0.00217623188405797
+2024-08-26 14:22:36,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=39989.333333333336, ans=0.2
+2024-08-26 14:23:04,536 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.845e+02 2.126e+02 2.642e+02 4.004e+02, threshold=4.252e+02, percent-clipped=0.0
+2024-08-26 14:23:26,401 INFO [train.py:1114] (1/4) Epoch 4, batch 50, loss[loss=0.2348, simple_loss=0.2851, pruned_loss=0.06712, ctc_loss=0.1255, over 19702.00 frames. ], tot_loss[loss=0.3012, simple_loss=0.3361, pruned_loss=0.09664, ctc_loss=0.1827, over 845181.07 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-26 14:24:00,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=40149.333333333336, ans=0.125
+2024-08-26 14:24:23,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40202.666666666664, ans=0.1
+2024-08-26 14:25:32,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=40362.666666666664, ans=0.125
+2024-08-26 14:25:33,107 INFO [train.py:1114] (1/4) Epoch 4, batch 100, loss[loss=0.2742, simple_loss=0.3215, pruned_loss=0.08177, ctc_loss=0.1584, over 19702.00 frames. ], tot_loss[loss=0.2983, simple_loss=0.3349, pruned_loss=0.09513, ctc_loss=0.1788, over 1499126.13 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-26 14:25:39,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=40362.666666666664, ans=0.0
+2024-08-26 14:25:44,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=40362.666666666664, ans=0.0
+2024-08-26 14:26:17,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40469.333333333336, ans=0.1
+2024-08-26 14:26:40,719 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.662e+02 1.906e+02 2.226e+02 3.245e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-26 14:26:48,538 INFO [train.py:1114] (1/4) Epoch 4, batch 150, loss[loss=0.238, simple_loss=0.2837, pruned_loss=0.07037, ctc_loss=0.1291, over 19700.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3302, pruned_loss=0.0928, ctc_loss=0.1741, over 2026958.37 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-26 14:26:48,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40629.333333333336, ans=0.1
+2024-08-26 14:26:57,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=40629.333333333336, ans=0.04949747468305833
+2024-08-26 14:27:53,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=40842.666666666664, ans=0.1
+2024-08-26 14:28:03,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=40842.666666666664, ans=0.125
+2024-08-26 14:28:04,924 INFO [train.py:1114] (1/4) Epoch 4, batch 200, loss[loss=0.3169, simple_loss=0.3491, pruned_loss=0.1033, ctc_loss=0.1952, over 18345.00 frames. ], tot_loss[loss=0.2912, simple_loss=0.3289, pruned_loss=0.09221, ctc_loss=0.1726, over 2434725.20 frames. ], batch size: 85, lr: 3.28e-02, grad_scale: 32.0
+2024-08-26 14:28:07,018 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40896.0, ans=0.1
+2024-08-26 14:28:16,617 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.89 vs. limit=15.0
+2024-08-26 14:28:21,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.25 vs. limit=22.5
+2024-08-26 14:28:29,037 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=41002.666666666664, ans=0.09899494936611666
+2024-08-26 14:28:31,921 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=41002.666666666664, ans=0.025
+2024-08-26 14:28:43,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.75 vs. limit=22.5
+2024-08-26 14:28:49,770 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.824e+02 2.102e+02 2.533e+02 3.992e+02, threshold=4.203e+02, percent-clipped=3.0
+2024-08-26 14:28:55,763 INFO [train.py:1114] (1/4) Epoch 4, batch 250, loss[loss=0.2998, simple_loss=0.3381, pruned_loss=0.09599, ctc_loss=0.1738, over 19353.00 frames. ], tot_loss[loss=0.2901, simple_loss=0.3287, pruned_loss=0.09152, ctc_loss=0.1713, over 2754787.26 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-26 14:29:02,258 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.53 vs. limit=22.5
+2024-08-26 14:29:08,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=41216.0, ans=0.125
+2024-08-26 14:29:08,688 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=41216.0, ans=0.125
+2024-08-26 14:29:19,417 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.30 vs. limit=15.0
+2024-08-26 14:29:22,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=41269.333333333336, ans=0.1
+2024-08-26 14:29:45,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=41376.0, ans=0.2
+2024-08-26 14:29:46,780 INFO [train.py:1114] (1/4) Epoch 4, batch 300, loss[loss=0.2771, simple_loss=0.3248, pruned_loss=0.08269, ctc_loss=0.1602, over 19508.00 frames. ], tot_loss[loss=0.2896, simple_loss=0.3281, pruned_loss=0.09133, ctc_loss=0.1711, over 3000076.96 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-26 14:30:10,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-26 14:30:16,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-26 14:30:20,022 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.58 vs. limit=15.0
+2024-08-26 14:30:32,082 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.674e+02 1.880e+02 2.161e+02 3.950e+02, threshold=3.761e+02, percent-clipped=0.0
+2024-08-26 14:30:37,853 INFO [train.py:1114] (1/4) Epoch 4, batch 350, loss[loss=0.2618, simple_loss=0.2982, pruned_loss=0.08233, ctc_loss=0.1519, over 19757.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3282, pruned_loss=0.09139, ctc_loss=0.1711, over 3188636.47 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-26 14:30:43,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=41696.0, ans=0.04949747468305833
+2024-08-26 14:30:44,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=41696.0, ans=10.0
+2024-08-26 14:30:53,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=41749.333333333336, ans=0.025
+2024-08-26 14:31:13,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=41856.0, ans=0.0
+2024-08-26 14:31:35,389 INFO [train.py:1114] (1/4) Epoch 4, batch 400, loss[loss=0.3054, simple_loss=0.3384, pruned_loss=0.09967, ctc_loss=0.1827, over 19504.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3278, pruned_loss=0.09071, ctc_loss=0.1699, over 3340991.85 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-26 14:31:51,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=42016.0, ans=0.0
+2024-08-26 14:32:05,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=42122.666666666664, ans=0.125
+2024-08-26 14:32:14,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=42176.0, ans=0.1
+2024-08-26 14:32:19,258 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 1.828e+02 2.157e+02 2.598e+02 8.551e+02, threshold=4.314e+02, percent-clipped=2.0
+2024-08-26 14:32:21,714 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.60 vs. limit=15.0
+2024-08-26 14:32:23,142 INFO [train.py:1114] (1/4) Epoch 4, batch 450, loss[loss=0.3272, simple_loss=0.3642, pruned_loss=0.1059, ctc_loss=0.1961, over 19623.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3285, pruned_loss=0.09159, ctc_loss=0.1709, over 3449441.54 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 8.0
+2024-08-26 14:32:33,548 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.12 vs. limit=15.0
+2024-08-26 14:32:40,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=42282.666666666664, ans=0.125
+2024-08-26 14:33:01,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=42389.333333333336, ans=0.0016544927536231869
+2024-08-26 14:33:12,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=42442.666666666664, ans=0.125
+2024-08-26 14:33:14,191 INFO [train.py:1114] (1/4) Epoch 4, batch 500, loss[loss=0.3097, simple_loss=0.3475, pruned_loss=0.09887, ctc_loss=0.1854, over 19692.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3268, pruned_loss=0.09066, ctc_loss=0.1695, over 3545718.72 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 8.0
+2024-08-26 14:33:43,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=42602.666666666664, ans=0.125
+2024-08-26 14:34:07,924 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.676e+02 1.857e+02 2.171e+02 5.331e+02, threshold=3.714e+02, percent-clipped=2.0
+2024-08-26 14:34:11,744 INFO [train.py:1114] (1/4) Epoch 4, batch 550, loss[loss=0.3025, simple_loss=0.3411, pruned_loss=0.09523, ctc_loss=0.1838, over 19214.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3269, pruned_loss=0.09079, ctc_loss=0.1698, over 3606988.10 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 8.0
+2024-08-26 14:34:26,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=42816.0, ans=0.125
+2024-08-26 14:34:28,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=42816.0, ans=0.125
+2024-08-26 14:34:29,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=42816.0, ans=0.0
+2024-08-26 14:34:37,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=42869.333333333336, ans=0.125
+2024-08-26 14:34:41,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=42869.333333333336, ans=0.125
+2024-08-26 14:34:42,563 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.19 vs. limit=10.0
+2024-08-26 14:34:52,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=42976.0, ans=0.0015269565217391305
+2024-08-26 14:35:03,318 INFO [train.py:1114] (1/4) Epoch 4, batch 600, loss[loss=0.3208, simple_loss=0.3571, pruned_loss=0.1041, ctc_loss=0.1909, over 19407.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3269, pruned_loss=0.09073, ctc_loss=0.1698, over 3664793.90 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 8.0
+2024-08-26 14:35:04,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=43029.333333333336, ans=0.1
+2024-08-26 14:35:05,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=43029.333333333336, ans=0.125
+2024-08-26 14:35:11,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=43029.333333333336, ans=0.00151536231884058
+2024-08-26 14:35:15,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=43082.666666666664, ans=0.125
+2024-08-26 14:35:27,721 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=43136.0, ans=0.125
+2024-08-26 14:35:45,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=43242.666666666664, ans=0.125
+2024-08-26 14:35:45,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=43242.666666666664, ans=0.1
+2024-08-26 14:35:47,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=43242.666666666664, ans=0.0014689855072463776
+2024-08-26 14:35:48,738 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:35:50,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.whiten.whitening_limit, batch_count=43242.666666666664, ans=12.0
+2024-08-26 14:35:50,404 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 1.699e+02 1.953e+02 2.270e+02 5.390e+02, threshold=3.906e+02, percent-clipped=1.0
+2024-08-26 14:35:54,190 INFO [train.py:1114] (1/4) Epoch 4, batch 650, loss[loss=0.2951, simple_loss=0.335, pruned_loss=0.09274, ctc_loss=0.1743, over 19790.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3255, pruned_loss=0.08973, ctc_loss=0.1681, over 3715390.33 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 8.0
+2024-08-26 14:36:29,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=43456.0, ans=0.125
+2024-08-26 14:36:41,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=43509.333333333336, ans=0.0
+2024-08-26 14:36:48,327 INFO [train.py:1114] (1/4) Epoch 4, batch 700, loss[loss=0.2722, simple_loss=0.3143, pruned_loss=0.08368, ctc_loss=0.157, over 19727.00 frames. ], tot_loss[loss=0.287, simple_loss=0.3264, pruned_loss=0.09007, ctc_loss=0.1688, over 3747440.16 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 8.0
+2024-08-26 14:37:00,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=43616.0, ans=0.125
+2024-08-26 14:37:17,714 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=43722.666666666664, ans=0.0013646376811594207
+2024-08-26 14:37:18,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=43722.666666666664, ans=0.0
+2024-08-26 14:37:32,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=43776.0, ans=0.0
+2024-08-26 14:37:33,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=43776.0, ans=0.0
+2024-08-26 14:37:36,033 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.717e+02 1.974e+02 2.287e+02 3.794e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 14:37:37,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=43776.0, ans=0.125
+2024-08-26 14:37:39,965 INFO [train.py:1114] (1/4) Epoch 4, batch 750, loss[loss=0.2841, simple_loss=0.3351, pruned_loss=0.08525, ctc_loss=0.1566, over 19483.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3255, pruned_loss=0.08965, ctc_loss=0.1681, over 3772264.34 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 8.0
+2024-08-26 14:37:54,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=43882.666666666664, ans=0.125
+2024-08-26 14:38:01,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=43936.0, ans=0.125
+2024-08-26 14:38:05,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=43936.0, ans=0.0
+2024-08-26 14:38:06,055 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.24 vs. limit=15.0
+2024-08-26 14:38:15,887 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.87 vs. limit=10.0
+2024-08-26 14:38:15,914 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=9.90 vs. limit=15.0
+2024-08-26 14:38:25,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=44042.666666666664, ans=0.2
+2024-08-26 14:38:31,794 INFO [train.py:1114] (1/4) Epoch 4, batch 800, loss[loss=0.2753, simple_loss=0.3089, pruned_loss=0.08905, ctc_loss=0.159, over 19810.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3249, pruned_loss=0.08924, ctc_loss=0.1674, over 3794731.81 frames. ], batch size: 49, lr: 3.21e-02, grad_scale: 16.0
+2024-08-26 14:38:43,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.97 vs. limit=5.0
+2024-08-26 14:38:49,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=44149.333333333336, ans=0.05
+2024-08-26 14:38:54,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=44202.666666666664, ans=0.125
+2024-08-26 14:39:16,257 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.706e+02 1.876e+02 2.197e+02 5.470e+02, threshold=3.751e+02, percent-clipped=2.0
+2024-08-26 14:39:19,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=44309.333333333336, ans=0.125
+2024-08-26 14:39:22,913 INFO [train.py:1114] (1/4) Epoch 4, batch 850, loss[loss=0.2964, simple_loss=0.3423, pruned_loss=0.09203, ctc_loss=0.1663, over 19670.00 frames. ], tot_loss[loss=0.284, simple_loss=0.324, pruned_loss=0.08871, ctc_loss=0.1665, over 3815241.96 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 16.0
+2024-08-26 14:39:23,502 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=15.16 vs. limit=15.0
+2024-08-26 14:39:52,972 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=44522.666666666664, ans=0.125
+2024-08-26 14:39:53,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=44522.666666666664, ans=0.125
+2024-08-26 14:40:11,458 INFO [train.py:1114] (1/4) Epoch 4, batch 900, loss[loss=0.2626, simple_loss=0.3021, pruned_loss=0.08108, ctc_loss=0.1523, over 19799.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3248, pruned_loss=0.08926, ctc_loss=0.1672, over 3819422.89 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 16.0
+2024-08-26 14:40:16,023 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.02 vs. limit=6.0
+2024-08-26 14:40:16,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=44629.333333333336, ans=0.025
+2024-08-26 14:40:28,020 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=44682.666666666664, ans=0.025
+2024-08-26 14:40:28,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=44682.666666666664, ans=0.1
+2024-08-26 14:40:37,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=44736.0, ans=0.0011443478260869562
+2024-08-26 14:40:37,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=44736.0, ans=0.0
+2024-08-26 14:40:44,039 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=6.90 vs. limit=15.0
+2024-08-26 14:40:59,413 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.686e+02 1.871e+02 2.157e+02 4.639e+02, threshold=3.742e+02, percent-clipped=1.0
+2024-08-26 14:41:03,418 INFO [train.py:1114] (1/4) Epoch 4, batch 950, loss[loss=0.2498, simple_loss=0.2969, pruned_loss=0.07345, ctc_loss=0.1396, over 19496.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3255, pruned_loss=0.08965, ctc_loss=0.1679, over 3821051.77 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 16.0
+2024-08-26 14:41:20,514 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.13 vs. limit=6.0
+2024-08-26 14:41:35,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=45056.0, ans=0.125
+2024-08-26 14:41:35,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=45056.0, ans=0.2
+2024-08-26 14:41:54,729 INFO [train.py:1114] (1/4) Epoch 4, batch 1000, loss[loss=0.2509, simple_loss=0.3063, pruned_loss=0.07159, ctc_loss=0.1308, over 19857.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3266, pruned_loss=0.09035, ctc_loss=0.1688, over 3817432.80 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 16.0
+2024-08-26 14:42:00,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=45162.666666666664, ans=0.125
+2024-08-26 14:42:38,237 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.78 vs. limit=22.5
+2024-08-26 14:42:42,497 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.702e+02 1.844e+02 2.187e+02 3.225e+02, threshold=3.689e+02, percent-clipped=0.0
+2024-08-26 14:42:46,501 INFO [train.py:1114] (1/4) Epoch 4, batch 1050, loss[loss=0.2833, simple_loss=0.3327, pruned_loss=0.08504, ctc_loss=0.1596, over 19835.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3252, pruned_loss=0.08942, ctc_loss=0.1671, over 3823387.51 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-26 14:43:01,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=45482.666666666664, ans=0.125
+2024-08-26 14:43:07,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=45536.0, ans=0.0
+2024-08-26 14:43:38,138 INFO [train.py:1114] (1/4) Epoch 4, batch 1100, loss[loss=0.2729, simple_loss=0.3239, pruned_loss=0.08106, ctc_loss=0.1492, over 19585.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3242, pruned_loss=0.08888, ctc_loss=0.1662, over 3829412.18 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-26 14:43:38,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=45696.0, ans=0.2
+2024-08-26 14:43:40,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=45696.0, ans=0.05
+2024-08-26 14:43:47,153 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45749.333333333336, ans=0.1
+2024-08-26 14:43:52,022 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.94 vs. limit=22.5
+2024-08-26 14:43:53,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=45749.333333333336, ans=0.125
+2024-08-26 14:43:59,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=45802.666666666664, ans=0.2
+2024-08-26 14:44:15,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=45856.0, ans=0.125
+2024-08-26 14:44:23,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=45909.333333333336, ans=0.09899494936611666
+2024-08-26 14:44:25,680 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.748e+02 1.997e+02 2.350e+02 6.199e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-26 14:44:29,534 INFO [train.py:1114] (1/4) Epoch 4, batch 1150, loss[loss=0.2663, simple_loss=0.3141, pruned_loss=0.0802, ctc_loss=0.145, over 19587.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3247, pruned_loss=0.08922, ctc_loss=0.1666, over 3827714.63 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-26 14:44:34,052 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.71 vs. limit=15.0
+2024-08-26 14:44:37,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=45962.666666666664, ans=0.125
+2024-08-26 14:45:30,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=45962.666666666664, ans=0.1
+2024-08-26 14:45:35,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=46016.0, ans=0.125
+2024-08-26 14:46:05,912 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=46069.333333333336, ans=0.0008544927536231883
+2024-08-26 14:46:20,314 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.81 vs. limit=22.5
+2024-08-26 14:46:32,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=46176.0, ans=0.1
+2024-08-26 14:46:38,928 INFO [train.py:1114] (1/4) Epoch 4, batch 1200, loss[loss=0.2996, simple_loss=0.3416, pruned_loss=0.09333, ctc_loss=0.1774, over 19854.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3258, pruned_loss=0.08954, ctc_loss=0.1671, over 3824320.42 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-26 14:46:45,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=46229.333333333336, ans=0.0008197101449275365
+2024-08-26 14:46:59,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46336.0, ans=0.1
+2024-08-26 14:47:02,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=46336.0, ans=0.0007965217391304354
+2024-08-26 14:47:05,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=46336.0, ans=0.0
+2024-08-26 14:47:16,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=46442.666666666664, ans=0.2
+2024-08-26 14:47:23,216 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.767e+02 1.944e+02 2.283e+02 5.479e+02, threshold=3.889e+02, percent-clipped=1.0
+2024-08-26 14:47:29,952 INFO [train.py:1114] (1/4) Epoch 4, batch 1250, loss[loss=0.3007, simple_loss=0.3422, pruned_loss=0.09498, ctc_loss=0.1729, over 19492.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.3255, pruned_loss=0.08884, ctc_loss=0.1657, over 3842481.00 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-26 14:47:48,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=46602.666666666664, ans=0.035
+2024-08-26 14:47:51,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=46602.666666666664, ans=0.125
+2024-08-26 14:47:56,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=46602.666666666664, ans=0.0007385507246376825
+2024-08-26 14:48:21,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=46762.666666666664, ans=0.125
+2024-08-26 14:48:22,038 INFO [train.py:1114] (1/4) Epoch 4, batch 1300, loss[loss=0.316, simple_loss=0.3506, pruned_loss=0.1029, ctc_loss=0.1891, over 18872.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3238, pruned_loss=0.0877, ctc_loss=0.1639, over 3847173.41 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-26 14:48:22,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=46762.666666666664, ans=0.2
+2024-08-26 14:48:23,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=46762.666666666664, ans=0.125
+2024-08-26 14:49:06,440 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.633e+02 1.793e+02 2.136e+02 4.035e+02, threshold=3.586e+02, percent-clipped=1.0
+2024-08-26 14:49:08,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=46976.0, ans=0.125
+2024-08-26 14:49:09,700 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.04 vs. limit=15.0
+2024-08-26 14:49:10,201 INFO [train.py:1114] (1/4) Epoch 4, batch 1350, loss[loss=0.2515, simple_loss=0.3032, pruned_loss=0.0744, ctc_loss=0.1275, over 19767.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3234, pruned_loss=0.08721, ctc_loss=0.1629, over 3856426.63 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-26 14:49:10,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=47029.333333333336, ans=0.1
+2024-08-26 14:49:19,045 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=47029.333333333336, ans=0.125
+2024-08-26 14:49:23,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=47082.666666666664, ans=0.1
+2024-08-26 14:49:30,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=47082.666666666664, ans=0.09899494936611666
+2024-08-26 14:49:34,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=47136.0, ans=0.125
+2024-08-26 14:49:35,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=47136.0, ans=0.2
+2024-08-26 14:49:43,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=47189.333333333336, ans=10.0
+2024-08-26 14:50:01,602 INFO [train.py:1114] (1/4) Epoch 4, batch 1400, loss[loss=0.2417, simple_loss=0.2781, pruned_loss=0.07456, ctc_loss=0.1404, over 19673.00 frames. ], tot_loss[loss=0.281, simple_loss=0.3228, pruned_loss=0.08707, ctc_loss=0.1627, over 3863554.18 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-26 14:50:01,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=47296.0, ans=0.1
+2024-08-26 14:50:02,072 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=1.99 vs. limit=15.0
+2024-08-26 14:50:49,033 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.701e+02 1.930e+02 2.137e+02 5.469e+02, threshold=3.859e+02, percent-clipped=2.0
+2024-08-26 14:50:53,061 INFO [train.py:1114] (1/4) Epoch 4, batch 1450, loss[loss=0.3054, simple_loss=0.3469, pruned_loss=0.09723, ctc_loss=0.1734, over 19649.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3234, pruned_loss=0.08718, ctc_loss=0.1629, over 3859954.86 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-26 14:51:28,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.31 vs. limit=15.0
+2024-08-26 14:51:30,548 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.18 vs. limit=15.0
+2024-08-26 14:51:31,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=47722.666666666664, ans=0.1
+2024-08-26 14:51:32,316 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:51:43,460 INFO [train.py:1114] (1/4) Epoch 4, batch 1500, loss[loss=0.3142, simple_loss=0.3495, pruned_loss=0.1018, ctc_loss=0.1883, over 19579.00 frames. ], tot_loss[loss=0.2818, simple_loss=0.3238, pruned_loss=0.08726, ctc_loss=0.1633, over 3860060.18 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 32.0
+2024-08-26 14:51:47,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=47829.333333333336, ans=0.0
+2024-08-26 14:52:11,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=47936.0, ans=0.2
+2024-08-26 14:52:18,504 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=47989.333333333336, ans=0.125
+2024-08-26 14:52:34,701 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.743e+02 1.956e+02 2.243e+02 3.928e+02, threshold=3.912e+02, percent-clipped=1.0
+2024-08-26 14:52:38,429 INFO [train.py:1114] (1/4) Epoch 4, batch 1550, loss[loss=0.2944, simple_loss=0.3339, pruned_loss=0.09281, ctc_loss=0.173, over 19613.00 frames. ], tot_loss[loss=0.2827, simple_loss=0.3241, pruned_loss=0.08776, ctc_loss=0.1642, over 3844874.58 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 32.0
+2024-08-26 14:52:42,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=48096.0, ans=10.0
+2024-08-26 14:52:43,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=48096.0, ans=0.125
+2024-08-26 14:53:21,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:23,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:25,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:29,865 INFO [train.py:1114] (1/4) Epoch 4, batch 1600, loss[loss=0.278, simple_loss=0.3292, pruned_loss=0.08255, ctc_loss=0.154, over 19836.00 frames. ], tot_loss[loss=0.2823, simple_loss=0.3236, pruned_loss=0.08776, ctc_loss=0.1639, over 3834288.08 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-26 14:53:31,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=48362.666666666664, ans=0.125
+2024-08-26 14:53:42,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.whiten.whitening_limit, batch_count=48416.0, ans=12.0
+2024-08-26 14:53:42,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=48416.0, ans=0.125
+2024-08-26 14:53:48,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=48469.333333333336, ans=0.2
+2024-08-26 14:54:15,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=48576.0, ans=0.2
+2024-08-26 14:54:18,015 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.701e+02 1.882e+02 2.341e+02 4.982e+02, threshold=3.764e+02, percent-clipped=3.0
+2024-08-26 14:54:21,786 INFO [train.py:1114] (1/4) Epoch 4, batch 1650, loss[loss=0.2992, simple_loss=0.3368, pruned_loss=0.09492, ctc_loss=0.1795, over 19651.00 frames. ], tot_loss[loss=0.2813, simple_loss=0.3227, pruned_loss=0.08731, ctc_loss=0.1632, over 3829513.74 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-26 14:54:25,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=48629.333333333336, ans=0.07
+2024-08-26 14:54:40,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=48736.0, ans=0.5
+2024-08-26 14:54:43,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=48736.0, ans=0.1
+2024-08-26 14:55:03,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=48789.333333333336, ans=0.0
+2024-08-26 14:55:17,034 INFO [train.py:1114] (1/4) Epoch 4, batch 1700, loss[loss=0.2409, simple_loss=0.2838, pruned_loss=0.07336, ctc_loss=0.1281, over 19694.00 frames. ], tot_loss[loss=0.2806, simple_loss=0.3224, pruned_loss=0.08687, ctc_loss=0.1624, over 3843496.50 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-26 14:55:22,117 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:55:30,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.19 vs. limit=15.0
+2024-08-26 14:55:36,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=49002.666666666664, ans=0.125
+2024-08-26 14:55:36,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=49002.666666666664, ans=0.125
+2024-08-26 14:55:45,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=49056.0, ans=0.125
+2024-08-26 14:55:52,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=49056.0, ans=0.125
+2024-08-26 14:55:55,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=49109.333333333336, ans=0.00019362318840579658
+2024-08-26 14:55:55,466 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.91 vs. limit=22.5
+2024-08-26 14:55:56,377 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.56 vs. limit=22.5
+2024-08-26 14:55:57,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=49109.333333333336, ans=0.125
+2024-08-26 14:55:59,539 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.770e+02 1.975e+02 2.193e+02 4.882e+02, threshold=3.950e+02, percent-clipped=1.0
+2024-08-26 14:55:59,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=49109.333333333336, ans=0.125
+2024-08-26 14:56:00,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=49109.333333333336, ans=0.125
+2024-08-26 14:56:03,227 INFO [train.py:1114] (1/4) Epoch 4, batch 1750, loss[loss=0.2361, simple_loss=0.2814, pruned_loss=0.06967, ctc_loss=0.1286, over 19641.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3219, pruned_loss=0.08659, ctc_loss=0.162, over 3848065.76 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-26 14:56:19,289 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=49216.0, ans=0.2
+2024-08-26 14:56:35,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=49322.666666666664, ans=0.125
+2024-08-26 14:56:42,826 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.43 vs. limit=15.0
+2024-08-26 14:56:48,547 INFO [train.py:1114] (1/4) Epoch 4, batch 1800, loss[loss=0.2779, simple_loss=0.326, pruned_loss=0.08356, ctc_loss=0.1565, over 19620.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3221, pruned_loss=0.08685, ctc_loss=0.1623, over 3849760.61 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-26 14:56:56,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=49429.333333333336, ans=0.1
+2024-08-26 14:56:58,239 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.15 vs. limit=15.0
+2024-08-26 14:57:03,514 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.89 vs. limit=15.0
+2024-08-26 14:57:28,685 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=49642.666666666664, ans=0.125
+2024-08-26 14:57:30,226 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.664e+02 1.898e+02 2.172e+02 3.982e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 14:57:31,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=49642.666666666664, ans=0.0
+2024-08-26 14:57:33,984 INFO [train.py:1114] (1/4) Epoch 4, batch 1850, loss[loss=0.3059, simple_loss=0.3405, pruned_loss=0.09873, ctc_loss=0.1845, over 19583.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3218, pruned_loss=0.08668, ctc_loss=0.1619, over 3853486.93 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-26 14:57:52,792 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.19 vs. limit=15.0
+2024-08-26 14:58:11,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=49909.333333333336, ans=0.1
+2024-08-26 14:58:19,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=49909.333333333336, ans=0.125
+2024-08-26 14:58:20,551 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=49962.666666666664, ans=0.0
+2024-08-26 14:58:21,303 INFO [train.py:1114] (1/4) Epoch 4, batch 1900, loss[loss=0.2698, simple_loss=0.317, pruned_loss=0.07952, ctc_loss=0.1591, over 19639.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3224, pruned_loss=0.08651, ctc_loss=0.1614, over 3858891.13 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 16.0
+2024-08-26 14:58:22,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=49962.666666666664, ans=0.025
+2024-08-26 14:58:42,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=50069.333333333336, ans=0.125
+2024-08-26 14:58:48,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=50122.666666666664, ans=0.0
+2024-08-26 14:58:59,795 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:59:03,248 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.687e+02 1.820e+02 2.228e+02 3.741e+02, threshold=3.639e+02, percent-clipped=0.0
+2024-08-26 14:59:05,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=50229.333333333336, ans=0.125
+2024-08-26 14:59:06,127 INFO [train.py:1114] (1/4) Epoch 4, batch 1950, loss[loss=0.2398, simple_loss=0.2959, pruned_loss=0.06618, ctc_loss=0.1285, over 19581.00 frames. ], tot_loss[loss=0.2808, simple_loss=0.3236, pruned_loss=0.08664, ctc_loss=0.1618, over 3868493.40 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 16.0
+2024-08-26 14:59:37,449 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:59:53,431 INFO [train.py:1114] (1/4) Epoch 4, batch 2000, loss[loss=0.2458, simple_loss=0.2834, pruned_loss=0.07606, ctc_loss=0.1401, over 19608.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.324, pruned_loss=0.08714, ctc_loss=0.1627, over 3853087.84 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-26 15:00:08,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=50549.333333333336, ans=0.2
+2024-08-26 15:00:12,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=50602.666666666664, ans=0.2
+2024-08-26 15:00:16,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=50602.666666666664, ans=0.07
+2024-08-26 15:00:32,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=50709.333333333336, ans=0.125
+2024-08-26 15:00:33,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=50709.333333333336, ans=0.025
+2024-08-26 15:00:35,430 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.722e+02 2.023e+02 2.377e+02 8.657e+02, threshold=4.047e+02, percent-clipped=4.0
+2024-08-26 15:00:38,082 INFO [train.py:1114] (1/4) Epoch 4, batch 2050, loss[loss=0.2286, simple_loss=0.2818, pruned_loss=0.06365, ctc_loss=0.1205, over 19738.00 frames. ], tot_loss[loss=0.2808, simple_loss=0.3228, pruned_loss=0.0869, ctc_loss=0.1622, over 3850142.93 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-26 15:00:39,240 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=50762.666666666664, ans=0.025
+2024-08-26 15:00:40,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.whiten.whitening_limit, batch_count=50762.666666666664, ans=12.0
+2024-08-26 15:00:45,464 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=50762.666666666664, ans=0.2
+2024-08-26 15:00:50,789 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:01:05,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=50922.666666666664, ans=0.0
+2024-08-26 15:01:21,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=51029.333333333336, ans=0.07
+2024-08-26 15:01:22,459 INFO [train.py:1114] (1/4) Epoch 4, batch 2100, loss[loss=0.2692, simple_loss=0.3129, pruned_loss=0.08336, ctc_loss=0.147, over 19799.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3219, pruned_loss=0.08611, ctc_loss=0.1609, over 3857068.53 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-26 15:01:34,525 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.50 vs. limit=15.0
+2024-08-26 15:01:35,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=51082.666666666664, ans=0.0
+2024-08-26 15:01:38,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=51082.666666666664, ans=0.125
+2024-08-26 15:01:39,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=51136.0, ans=0.0
+2024-08-26 15:01:49,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=51189.333333333336, ans=0.025
+2024-08-26 15:01:55,501 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:02:04,152 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.626e+02 1.780e+02 1.895e+02 2.709e+02, threshold=3.561e+02, percent-clipped=0.0
+2024-08-26 15:02:07,170 INFO [train.py:1114] (1/4) Epoch 4, batch 2150, loss[loss=0.2678, simple_loss=0.3126, pruned_loss=0.0819, ctc_loss=0.1479, over 19577.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3204, pruned_loss=0.08531, ctc_loss=0.1595, over 3867589.37 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-26 15:02:08,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=51296.0, ans=0.125
+2024-08-26 15:02:10,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=51296.0, ans=0.1
+2024-08-26 15:02:13,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=51296.0, ans=0.125
+2024-08-26 15:02:28,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.91 vs. limit=22.5
+2024-08-26 15:02:31,153 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.35 vs. limit=10.0
+2024-08-26 15:02:41,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=51456.0, ans=0.125
+2024-08-26 15:02:44,046 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:02:44,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=51509.333333333336, ans=0.0
+2024-08-26 15:02:54,358 INFO [train.py:1114] (1/4) Epoch 4, batch 2200, loss[loss=0.2911, simple_loss=0.341, pruned_loss=0.08751, ctc_loss=0.1652, over 19581.00 frames. ], tot_loss[loss=0.2779, simple_loss=0.3206, pruned_loss=0.08564, ctc_loss=0.1598, over 3866382.53 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-26 15:03:00,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=51562.666666666664, ans=0.125
+2024-08-26 15:03:03,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=51616.0, ans=0.0
+2024-08-26 15:03:04,232 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=51616.0, ans=0.125
+2024-08-26 15:03:21,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=51722.666666666664, ans=0.125
+2024-08-26 15:03:32,287 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=51776.0, ans=0.0
+2024-08-26 15:03:36,249 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.54 vs. limit=22.5
+2024-08-26 15:03:36,542 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 1.687e+02 1.993e+02 2.251e+02 9.209e+02, threshold=3.987e+02, percent-clipped=2.0
+2024-08-26 15:03:39,208 INFO [train.py:1114] (1/4) Epoch 4, batch 2250, loss[loss=0.2827, simple_loss=0.3329, pruned_loss=0.08425, ctc_loss=0.16, over 19607.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3207, pruned_loss=0.08589, ctc_loss=0.1603, over 3866626.85 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-26 15:03:54,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=51882.666666666664, ans=0.1
+2024-08-26 15:03:55,886 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=51936.0, ans=0.0
+2024-08-26 15:03:56,340 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.39 vs. limit=15.0
+2024-08-26 15:04:02,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=51936.0, ans=0.125
+2024-08-26 15:04:13,939 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.70 vs. limit=6.0
+2024-08-26 15:04:18,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=52042.666666666664, ans=0.125
+2024-08-26 15:04:23,334 INFO [train.py:1114] (1/4) Epoch 4, batch 2300, loss[loss=0.2687, simple_loss=0.3106, pruned_loss=0.08111, ctc_loss=0.1617, over 19509.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3203, pruned_loss=0.08606, ctc_loss=0.1607, over 3861051.19 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-26 15:04:24,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=52096.0, ans=0.0
+2024-08-26 15:04:36,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=52149.333333333336, ans=0.125
+2024-08-26 15:04:37,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=52149.333333333336, ans=0.0
+2024-08-26 15:04:37,753 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=52149.333333333336, ans=0.2
+2024-08-26 15:04:39,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=52149.333333333336, ans=10.0
+2024-08-26 15:04:39,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=52149.333333333336, ans=0.1
+2024-08-26 15:04:41,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=52202.666666666664, ans=0.125
+2024-08-26 15:04:43,069 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=13.08 vs. limit=15.0
+2024-08-26 15:04:49,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=52202.666666666664, ans=0.0
+2024-08-26 15:05:06,728 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.800e+02 1.978e+02 2.376e+02 5.904e+02, threshold=3.955e+02, percent-clipped=2.0
+2024-08-26 15:05:09,381 INFO [train.py:1114] (1/4) Epoch 4, batch 2350, loss[loss=0.3076, simple_loss=0.3397, pruned_loss=0.1014, ctc_loss=0.182, over 19643.00 frames. ], tot_loss[loss=0.278, simple_loss=0.32, pruned_loss=0.08596, ctc_loss=0.1604, over 3864614.61 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-26 15:05:10,821 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.32 vs. limit=10.0
+2024-08-26 15:05:16,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=52362.666666666664, ans=0.2
+2024-08-26 15:05:21,703 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:05:25,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=52469.333333333336, ans=0.125
+2024-08-26 15:05:26,840 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=52469.333333333336, ans=0.125
+2024-08-26 15:05:32,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=52469.333333333336, ans=0.125
+2024-08-26 15:05:41,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=52522.666666666664, ans=0.0
+2024-08-26 15:05:45,188 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:05:50,552 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.49 vs. limit=22.5
+2024-08-26 15:06:03,786 INFO [train.py:1114] (1/4) Epoch 4, batch 2400, loss[loss=0.3068, simple_loss=0.3446, pruned_loss=0.09733, ctc_loss=0.1861, over 19266.00 frames. ], tot_loss[loss=0.2806, simple_loss=0.3225, pruned_loss=0.08696, ctc_loss=0.162, over 3858684.65 frames. ], batch size: 71, lr: 3.05e-02, grad_scale: 32.0
+2024-08-26 15:06:12,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=52682.666666666664, ans=0.125
+2024-08-26 15:06:21,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=52682.666666666664, ans=0.125
+2024-08-26 15:06:24,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=52682.666666666664, ans=0.025
+2024-08-26 15:06:24,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=52682.666666666664, ans=0.0
+2024-08-26 15:06:53,227 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.824e+02 2.127e+02 2.398e+02 5.215e+02, threshold=4.254e+02, percent-clipped=1.0
+2024-08-26 15:06:55,090 INFO [train.py:1114] (1/4) Epoch 4, batch 2450, loss[loss=0.3596, simple_loss=0.361, pruned_loss=0.129, ctc_loss=0.2503, over 13884.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.328, pruned_loss=0.09151, ctc_loss=0.1708, over 3734050.30 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-26 15:06:56,622 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.80 vs. limit=15.0
+2024-08-26 15:06:57,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=52896.0, ans=0.09899494936611666
+2024-08-26 15:06:58,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=52896.0, ans=10.0
+2024-08-26 15:07:07,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=52949.333333333336, ans=0.125
+2024-08-26 15:07:14,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=53002.666666666664, ans=0.125
+2024-08-26 15:07:19,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=53002.666666666664, ans=0.125
+2024-08-26 15:07:22,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=53056.0, ans=0.0
+2024-08-26 15:07:26,182 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.59 vs. limit=6.0
+2024-08-26 15:09:12,285 INFO [train.py:1114] (1/4) Epoch 5, batch 0, loss[loss=0.277, simple_loss=0.3033, pruned_loss=0.09095, ctc_loss=0.172, over 19837.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3033, pruned_loss=0.09095, ctc_loss=0.172, over 19837.00 frames. ], batch size: 49, lr: 2.83e-02, grad_scale: 32.0
+2024-08-26 15:09:12,286 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 15:09:22,083 INFO [train.py:1146] (1/4) Epoch 5, validation: loss=0.2289, simple_loss=0.3118, pruned_loss=0.05352, ctc_loss=0.09739, over 944034.00 frames.
+2024-08-26 15:09:22,763 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12882MB
+2024-08-26 15:09:31,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=53157.333333333336, ans=0.0
+2024-08-26 15:09:38,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=53157.333333333336, ans=0.125
+2024-08-26 15:09:40,786 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.17 vs. limit=15.0
+2024-08-26 15:09:54,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=53264.0, ans=0.0
+2024-08-26 15:10:10,888 INFO [train.py:1114] (1/4) Epoch 5, batch 50, loss[loss=0.2388, simple_loss=0.2885, pruned_loss=0.06925, ctc_loss=0.1264, over 19700.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.3238, pruned_loss=0.087, ctc_loss=0.1638, over 844585.17 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-26 15:10:13,341 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.22 vs. limit=10.0
+2024-08-26 15:10:22,332 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.804e+02 2.028e+02 2.297e+02 4.038e+02, threshold=4.056e+02, percent-clipped=0.0
+2024-08-26 15:10:49,266 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=53530.666666666664, ans=0.125
+2024-08-26 15:10:51,439 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.48 vs. limit=15.0
+2024-08-26 15:10:53,461 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.50 vs. limit=15.0
+2024-08-26 15:11:00,639 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=53637.333333333336, ans=0.0
+2024-08-26 15:11:01,263 INFO [train.py:1114] (1/4) Epoch 5, batch 100, loss[loss=0.2432, simple_loss=0.2962, pruned_loss=0.06876, ctc_loss=0.1313, over 19728.00 frames. ], tot_loss[loss=0.2814, simple_loss=0.3243, pruned_loss=0.08662, ctc_loss=0.1629, over 1499416.85 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-26 15:11:17,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_positive, batch_count=53690.666666666664, ans=0.05
+2024-08-26 15:11:29,290 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=10.51 vs. limit=15.0
+2024-08-26 15:11:59,575 INFO [train.py:1114] (1/4) Epoch 5, batch 150, loss[loss=0.2547, simple_loss=0.2939, pruned_loss=0.07962, ctc_loss=0.1408, over 19717.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3212, pruned_loss=0.08505, ctc_loss=0.1594, over 2027995.75 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-26 15:12:10,015 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.696e+02 1.862e+02 2.172e+02 3.492e+02, threshold=3.724e+02, percent-clipped=0.0
+2024-08-26 15:12:13,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=53957.333333333336, ans=0.2
+2024-08-26 15:12:30,140 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.04 vs. limit=15.0
+2024-08-26 15:12:44,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=54117.333333333336, ans=0.125
+2024-08-26 15:12:48,450 INFO [train.py:1114] (1/4) Epoch 5, batch 200, loss[loss=0.3021, simple_loss=0.3408, pruned_loss=0.09488, ctc_loss=0.1841, over 18206.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3188, pruned_loss=0.08391, ctc_loss=0.1572, over 2435865.93 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:12:53,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54170.666666666664, ans=0.1
+2024-08-26 15:12:58,431 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.61 vs. limit=22.5
+2024-08-26 15:13:32,862 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.87 vs. limit=22.5
+2024-08-26 15:13:41,970 INFO [train.py:1114] (1/4) Epoch 5, batch 250, loss[loss=0.287, simple_loss=0.3314, pruned_loss=0.08746, ctc_loss=0.1693, over 19452.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3182, pruned_loss=0.08348, ctc_loss=0.156, over 2756215.66 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:13:50,504 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.685e+02 1.803e+02 2.078e+02 3.456e+02, threshold=3.607e+02, percent-clipped=0.0
+2024-08-26 15:14:04,008 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:14:18,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54597.333333333336, ans=0.1
+2024-08-26 15:14:32,304 INFO [train.py:1114] (1/4) Epoch 5, batch 300, loss[loss=0.277, simple_loss=0.3292, pruned_loss=0.0819, ctc_loss=0.1525, over 19520.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3171, pruned_loss=0.08318, ctc_loss=0.1553, over 3000581.82 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:15:14,581 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=54917.333333333336, ans=0.025
+2024-08-26 15:15:22,169 INFO [train.py:1114] (1/4) Epoch 5, batch 350, loss[loss=0.2783, simple_loss=0.3109, pruned_loss=0.08899, ctc_loss=0.1693, over 19774.00 frames. ], tot_loss[loss=0.2731, simple_loss=0.3178, pruned_loss=0.0831, ctc_loss=0.1554, over 3190486.70 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 32.0
+2024-08-26 15:15:23,496 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:15:25,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=54970.666666666664, ans=0.125
+2024-08-26 15:15:25,576 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=54970.666666666664, ans=0.05
+2024-08-26 15:15:29,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=54970.666666666664, ans=0.0
+2024-08-26 15:15:31,137 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=55024.0, ans=0.025
+2024-08-26 15:15:31,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=55024.0, ans=0.04949747468305833
+2024-08-26 15:15:31,760 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.717e+02 1.933e+02 2.233e+02 3.797e+02, threshold=3.865e+02, percent-clipped=1.0
+2024-08-26 15:15:37,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=55024.0, ans=6.0
+2024-08-26 15:15:38,918 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.06 vs. limit=15.0
+2024-08-26 15:15:45,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=55077.333333333336, ans=0.125
+2024-08-26 15:16:15,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55237.333333333336, ans=0.1
+2024-08-26 15:16:15,652 INFO [train.py:1114] (1/4) Epoch 5, batch 400, loss[loss=0.261, simple_loss=0.3146, pruned_loss=0.07541, ctc_loss=0.1415, over 19505.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3171, pruned_loss=0.08238, ctc_loss=0.1543, over 3341696.15 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-26 15:16:26,782 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=55290.666666666664, ans=0.125
+2024-08-26 15:16:37,736 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.30 vs. limit=6.0
+2024-08-26 15:16:39,656 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.03 vs. limit=15.0
+2024-08-26 15:16:41,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=55344.0, ans=0.125
+2024-08-26 15:16:48,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=55397.333333333336, ans=0.0
+2024-08-26 15:16:55,789 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=55450.666666666664, ans=0.125
+2024-08-26 15:17:07,110 INFO [train.py:1114] (1/4) Epoch 5, batch 450, loss[loss=0.2699, simple_loss=0.3213, pruned_loss=0.07934, ctc_loss=0.1497, over 19626.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.317, pruned_loss=0.08243, ctc_loss=0.1542, over 3450532.18 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 16.0
+2024-08-26 15:17:08,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=55504.0, ans=0.025
+2024-08-26 15:17:17,440 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.642e+02 1.899e+02 2.179e+02 3.523e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-26 15:17:28,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55610.666666666664, ans=0.1
+2024-08-26 15:17:36,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=55664.0, ans=0.125
+2024-08-26 15:17:45,506 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:18:04,472 INFO [train.py:1114] (1/4) Epoch 5, batch 500, loss[loss=0.2877, simple_loss=0.3379, pruned_loss=0.08704, ctc_loss=0.1584, over 19671.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3148, pruned_loss=0.08118, ctc_loss=0.1516, over 3545615.28 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 16.0
+2024-08-26 15:18:12,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=55770.666666666664, ans=0.0
+2024-08-26 15:18:42,267 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.16 vs. limit=15.0
+2024-08-26 15:18:50,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55877.333333333336, ans=0.1
+2024-08-26 15:18:52,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=55930.666666666664, ans=0.0
+2024-08-26 15:19:03,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=55930.666666666664, ans=0.5
+2024-08-26 15:19:47,001 INFO [train.py:1114] (1/4) Epoch 5, batch 550, loss[loss=0.2988, simple_loss=0.3436, pruned_loss=0.09273, ctc_loss=0.171, over 19310.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3153, pruned_loss=0.08151, ctc_loss=0.1523, over 3607774.45 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 16.0
+2024-08-26 15:19:47,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=56037.333333333336, ans=0.035
+2024-08-26 15:20:01,260 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:20:02,416 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=56090.666666666664, ans=0.125
+2024-08-26 15:20:04,964 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 1.676e+02 1.860e+02 2.053e+02 4.118e+02, threshold=3.720e+02, percent-clipped=1.0
+2024-08-26 15:20:41,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=56197.333333333336, ans=0.07
+2024-08-26 15:20:47,049 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.10 vs. limit=15.0
+2024-08-26 15:20:54,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=56250.666666666664, ans=0.125
+2024-08-26 15:20:56,217 INFO [train.py:1114] (1/4) Epoch 5, batch 600, loss[loss=0.2743, simple_loss=0.3241, pruned_loss=0.08206, ctc_loss=0.1509, over 19452.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3156, pruned_loss=0.08134, ctc_loss=0.1521, over 3664908.07 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 16.0
+2024-08-26 15:21:01,731 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.32 vs. limit=15.0
+2024-08-26 15:21:14,055 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=56357.333333333336, ans=0.1
+2024-08-26 15:21:25,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=56410.666666666664, ans=0.125
+2024-08-26 15:21:27,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=56410.666666666664, ans=0.125
+2024-08-26 15:21:44,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=56517.333333333336, ans=0.125
+2024-08-26 15:21:49,364 INFO [train.py:1114] (1/4) Epoch 5, batch 650, loss[loss=0.2351, simple_loss=0.2949, pruned_loss=0.06268, ctc_loss=0.1251, over 19773.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3145, pruned_loss=0.08045, ctc_loss=0.1505, over 3716151.69 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:21:59,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=56624.0, ans=0.1
+2024-08-26 15:21:59,580 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.97 vs. limit=12.0
+2024-08-26 15:21:59,892 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.659e+02 1.803e+02 2.095e+02 3.596e+02, threshold=3.607e+02, percent-clipped=0.0
+2024-08-26 15:22:05,066 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.93 vs. limit=22.5
+2024-08-26 15:22:06,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=56624.0, ans=0.0
+2024-08-26 15:22:19,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=56730.666666666664, ans=0.2
+2024-08-26 15:22:30,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=56784.0, ans=0.125
+2024-08-26 15:22:33,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=56784.0, ans=0.0
+2024-08-26 15:22:39,299 INFO [train.py:1114] (1/4) Epoch 5, batch 700, loss[loss=0.2616, simple_loss=0.3071, pruned_loss=0.07848, ctc_loss=0.1482, over 19703.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3149, pruned_loss=0.08085, ctc_loss=0.1512, over 3748123.76 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:22:52,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=56890.666666666664, ans=0.025
+2024-08-26 15:23:03,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=56944.0, ans=0.125
+2024-08-26 15:23:09,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=56997.333333333336, ans=0.125
+2024-08-26 15:23:11,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.94 vs. limit=22.5
+2024-08-26 15:23:24,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=57050.666666666664, ans=0.0
+2024-08-26 15:23:29,297 INFO [train.py:1114] (1/4) Epoch 5, batch 750, loss[loss=0.2647, simple_loss=0.318, pruned_loss=0.07758, ctc_loss=0.1405, over 19491.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3145, pruned_loss=0.08073, ctc_loss=0.151, over 3774347.55 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:23:39,289 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.74 vs. limit=15.0
+2024-08-26 15:23:39,770 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.732e+02 1.957e+02 2.375e+02 6.184e+02, threshold=3.914e+02, percent-clipped=3.0
+2024-08-26 15:23:41,289 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=57157.333333333336, ans=22.5
+2024-08-26 15:23:55,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=57210.666666666664, ans=0.125
+2024-08-26 15:24:01,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=57264.0, ans=0.125
+2024-08-26 15:24:19,570 INFO [train.py:1114] (1/4) Epoch 5, batch 800, loss[loss=0.2561, simple_loss=0.2986, pruned_loss=0.07754, ctc_loss=0.146, over 19818.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3142, pruned_loss=0.08061, ctc_loss=0.1506, over 3795939.85 frames. ], batch size: 49, lr: 2.76e-02, grad_scale: 32.0
+2024-08-26 15:24:46,535 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=57477.333333333336, ans=0.0
+2024-08-26 15:25:10,623 INFO [train.py:1114] (1/4) Epoch 5, batch 850, loss[loss=0.2949, simple_loss=0.3394, pruned_loss=0.09125, ctc_loss=0.1699, over 19645.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3138, pruned_loss=0.08029, ctc_loss=0.15, over 3814806.93 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-26 15:25:16,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.24 vs. limit=10.0
+2024-08-26 15:25:24,565 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.744e+02 1.971e+02 2.331e+02 4.591e+02, threshold=3.942e+02, percent-clipped=1.0
+2024-08-26 15:25:29,109 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.42 vs. limit=15.0
+2024-08-26 15:25:45,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=57797.333333333336, ans=0.125
+2024-08-26 15:25:55,447 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.68 vs. limit=10.0
+2024-08-26 15:25:56,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=57850.666666666664, ans=0.0
+2024-08-26 15:26:01,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=57850.666666666664, ans=0.1
+2024-08-26 15:26:07,580 INFO [train.py:1114] (1/4) Epoch 5, batch 900, loss[loss=0.25, simple_loss=0.2967, pruned_loss=0.07383, ctc_loss=0.139, over 19813.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3143, pruned_loss=0.08076, ctc_loss=0.1507, over 3819785.67 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-26 15:26:12,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=57904.0, ans=0.125
+2024-08-26 15:26:18,476 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.95 vs. limit=12.0
+2024-08-26 15:26:58,239 INFO [train.py:1114] (1/4) Epoch 5, batch 950, loss[loss=0.2583, simple_loss=0.3068, pruned_loss=0.07725, ctc_loss=0.1381, over 19489.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3146, pruned_loss=0.08111, ctc_loss=0.1515, over 3821498.10 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-26 15:27:02,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58170.666666666664, ans=0.1
+2024-08-26 15:27:05,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=58170.666666666664, ans=0.05
+2024-08-26 15:27:05,567 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.44 vs. limit=22.5
+2024-08-26 15:27:11,436 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.648e+02 1.859e+02 2.135e+02 3.098e+02, threshold=3.718e+02, percent-clipped=0.0
+2024-08-26 15:27:13,354 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.00 vs. limit=15.0
+2024-08-26 15:27:24,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=58277.333333333336, ans=0.025
+2024-08-26 15:27:25,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=58277.333333333336, ans=0.125
+2024-08-26 15:27:36,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=58330.666666666664, ans=0.0
+2024-08-26 15:27:46,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=58384.0, ans=0.0
+2024-08-26 15:27:49,899 INFO [train.py:1114] (1/4) Epoch 5, batch 1000, loss[loss=0.2466, simple_loss=0.2918, pruned_loss=0.07306, ctc_loss=0.1381, over 19869.00 frames. ], tot_loss[loss=0.2694, simple_loss=0.3152, pruned_loss=0.08137, ctc_loss=0.1522, over 3818493.34 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-26 15:27:58,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=58437.333333333336, ans=0.125
+2024-08-26 15:28:05,147 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.33 vs. limit=6.0
+2024-08-26 15:28:40,055 INFO [train.py:1114] (1/4) Epoch 5, batch 1050, loss[loss=0.2594, simple_loss=0.3199, pruned_loss=0.07188, ctc_loss=0.1381, over 19830.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3146, pruned_loss=0.08105, ctc_loss=0.1516, over 3824036.28 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-26 15:28:42,269 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=58704.0, ans=0.0
+2024-08-26 15:28:46,170 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=58704.0, ans=0.125
+2024-08-26 15:28:48,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=58704.0, ans=0.125
+2024-08-26 15:28:49,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=58757.333333333336, ans=0.1
+2024-08-26 15:28:50,845 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.680e+02 1.893e+02 2.161e+02 3.731e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-26 15:29:22,485 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.08 vs. limit=15.0
+2024-08-26 15:29:29,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=58917.333333333336, ans=0.125
+2024-08-26 15:29:32,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=58917.333333333336, ans=0.125
+2024-08-26 15:29:33,655 INFO [train.py:1114] (1/4) Epoch 5, batch 1100, loss[loss=0.236, simple_loss=0.2906, pruned_loss=0.06478, ctc_loss=0.1293, over 19580.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3141, pruned_loss=0.08055, ctc_loss=0.1509, over 3831028.62 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 16.0
+2024-08-26 15:29:43,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=59024.0, ans=0.0
+2024-08-26 15:29:49,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=59024.0, ans=0.0
+2024-08-26 15:30:00,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=59077.333333333336, ans=0.125
+2024-08-26 15:30:11,112 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=59130.666666666664, ans=0.125
+2024-08-26 15:30:12,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=59130.666666666664, ans=0.125
+2024-08-26 15:30:20,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=59184.0, ans=0.025
+2024-08-26 15:30:24,312 INFO [train.py:1114] (1/4) Epoch 5, batch 1150, loss[loss=0.2502, simple_loss=0.3057, pruned_loss=0.07116, ctc_loss=0.1311, over 19596.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3138, pruned_loss=0.08038, ctc_loss=0.1505, over 3830538.20 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 16.0
+2024-08-26 15:30:26,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=59237.333333333336, ans=0.125
+2024-08-26 15:30:35,929 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.591e+02 1.744e+02 2.042e+02 4.394e+02, threshold=3.489e+02, percent-clipped=2.0
+2024-08-26 15:30:42,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=59290.666666666664, ans=0.0
+2024-08-26 15:30:46,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=59344.0, ans=0.0
+2024-08-26 15:30:58,451 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.50 vs. limit=15.0
+2024-08-26 15:31:15,272 INFO [train.py:1114] (1/4) Epoch 5, batch 1200, loss[loss=0.2737, simple_loss=0.322, pruned_loss=0.08236, ctc_loss=0.1519, over 19832.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3149, pruned_loss=0.08094, ctc_loss=0.1517, over 3824495.26 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-26 15:31:18,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=59504.0, ans=0.025
+2024-08-26 15:31:19,768 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.76 vs. limit=15.0
+2024-08-26 15:31:22,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=59504.0, ans=0.025
+2024-08-26 15:31:31,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=59557.333333333336, ans=0.125
+2024-08-26 15:31:33,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.48 vs. limit=15.0
+2024-08-26 15:31:36,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=59610.666666666664, ans=0.125
+2024-08-26 15:31:40,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=59610.666666666664, ans=0.0
+2024-08-26 15:31:46,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=59664.0, ans=0.125
+2024-08-26 15:32:06,263 INFO [train.py:1114] (1/4) Epoch 5, batch 1250, loss[loss=0.2694, simple_loss=0.3228, pruned_loss=0.07993, ctc_loss=0.1405, over 19512.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3151, pruned_loss=0.0806, ctc_loss=0.1506, over 3842699.00 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-26 15:32:13,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=59770.666666666664, ans=0.125
+2024-08-26 15:32:18,027 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.635e+02 1.798e+02 2.001e+02 4.301e+02, threshold=3.596e+02, percent-clipped=1.0
+2024-08-26 15:32:26,710 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.00 vs. limit=15.0
+2024-08-26 15:32:30,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=59877.333333333336, ans=0.0
+2024-08-26 15:32:42,037 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.25 vs. limit=15.0
+2024-08-26 15:32:56,451 INFO [train.py:1114] (1/4) Epoch 5, batch 1300, loss[loss=0.2799, simple_loss=0.3231, pruned_loss=0.08508, ctc_loss=0.1662, over 18800.00 frames. ], tot_loss[loss=0.2661, simple_loss=0.3135, pruned_loss=0.07957, ctc_loss=0.149, over 3846418.41 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-26 15:33:00,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=60037.333333333336, ans=0.0
+2024-08-26 15:33:02,211 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=60037.333333333336, ans=0.025
+2024-08-26 15:33:06,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=60090.666666666664, ans=0.0
+2024-08-26 15:33:07,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=60090.666666666664, ans=0.95
+2024-08-26 15:33:42,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=60250.666666666664, ans=0.1
+2024-08-26 15:33:43,520 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.12 vs. limit=15.0
+2024-08-26 15:33:43,914 INFO [train.py:1114] (1/4) Epoch 5, batch 1350, loss[loss=0.264, simple_loss=0.32, pruned_loss=0.07543, ctc_loss=0.1429, over 19761.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.3126, pruned_loss=0.07892, ctc_loss=0.1476, over 3857363.12 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:33:49,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=60304.0, ans=0.0
+2024-08-26 15:33:50,227 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.33 vs. limit=15.0
+2024-08-26 15:33:54,086 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.59 vs. limit=22.5
+2024-08-26 15:33:55,382 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.610e+02 1.752e+02 1.989e+02 4.527e+02, threshold=3.503e+02, percent-clipped=1.0
+2024-08-26 15:33:55,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=60357.333333333336, ans=0.125
+2024-08-26 15:34:07,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=60410.666666666664, ans=0.0
+2024-08-26 15:34:20,097 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.27 vs. limit=22.5
+2024-08-26 15:34:25,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=60517.333333333336, ans=0.0
+2024-08-26 15:34:28,578 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.59 vs. limit=22.5
+2024-08-26 15:34:32,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=60517.333333333336, ans=0.2
+2024-08-26 15:34:34,190 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.33 vs. limit=15.0
+2024-08-26 15:34:34,572 INFO [train.py:1114] (1/4) Epoch 5, batch 1400, loss[loss=0.2394, simple_loss=0.2777, pruned_loss=0.07386, ctc_loss=0.1335, over 19679.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3127, pruned_loss=0.07935, ctc_loss=0.1482, over 3863772.33 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:34:53,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=60677.333333333336, ans=0.0
+2024-08-26 15:34:54,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=60677.333333333336, ans=0.0
+2024-08-26 15:35:20,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=60784.0, ans=0.2
+2024-08-26 15:35:26,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=60784.0, ans=0.1
+2024-08-26 15:35:27,703 INFO [train.py:1114] (1/4) Epoch 5, batch 1450, loss[loss=0.2883, simple_loss=0.3342, pruned_loss=0.08802, ctc_loss=0.1657, over 19674.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3138, pruned_loss=0.07992, ctc_loss=0.1493, over 3862302.65 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:35:42,506 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.680e+02 1.820e+02 2.123e+02 3.172e+02, threshold=3.639e+02, percent-clipped=0.0
+2024-08-26 15:35:51,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=60944.0, ans=0.2
+2024-08-26 15:35:51,321 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.15 vs. limit=15.0
+2024-08-26 15:35:54,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=60944.0, ans=0.125
+2024-08-26 15:36:11,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=61050.666666666664, ans=0.0
+2024-08-26 15:36:15,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=61050.666666666664, ans=0.025
+2024-08-26 15:36:19,829 INFO [train.py:1114] (1/4) Epoch 5, batch 1500, loss[loss=0.286, simple_loss=0.3381, pruned_loss=0.0854, ctc_loss=0.1579, over 19569.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3142, pruned_loss=0.07998, ctc_loss=0.1497, over 3862689.19 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-26 15:36:41,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=61210.666666666664, ans=0.125
+2024-08-26 15:36:42,751 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.15 vs. limit=15.0
+2024-08-26 15:37:01,895 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.65 vs. limit=15.0
+2024-08-26 15:37:09,981 INFO [train.py:1114] (1/4) Epoch 5, batch 1550, loss[loss=0.2842, simple_loss=0.3311, pruned_loss=0.08785, ctc_loss=0.1539, over 19639.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3144, pruned_loss=0.08029, ctc_loss=0.1503, over 3847007.99 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
+2024-08-26 15:37:20,873 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=61424.0, ans=0.07
+2024-08-26 15:37:22,482 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.752e+02 1.975e+02 2.269e+02 3.644e+02, threshold=3.951e+02, percent-clipped=1.0
+2024-08-26 15:37:24,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=61424.0, ans=0.1
+2024-08-26 15:37:33,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=61477.333333333336, ans=0.125
+2024-08-26 15:37:53,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=61584.0, ans=0.09899494936611666
+2024-08-26 15:38:03,692 INFO [train.py:1114] (1/4) Epoch 5, batch 1600, loss[loss=0.2896, simple_loss=0.3394, pruned_loss=0.08814, ctc_loss=0.159, over 19845.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3145, pruned_loss=0.08058, ctc_loss=0.1506, over 3836328.59 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:38:07,149 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.74 vs. limit=10.0
+2024-08-26 15:38:35,794 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.41 vs. limit=15.0
+2024-08-26 15:39:04,445 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.49 vs. limit=15.0
+2024-08-26 15:39:19,228 INFO [train.py:1114] (1/4) Epoch 5, batch 1650, loss[loss=0.2817, simple_loss=0.324, pruned_loss=0.08806, ctc_loss=0.1581, over 19665.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3141, pruned_loss=0.08042, ctc_loss=0.1505, over 3833090.56 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:39:19,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=61904.0, ans=0.1
+2024-08-26 15:39:21,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=61904.0, ans=0.1
+2024-08-26 15:39:31,759 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.578e+02 1.738e+02 2.103e+02 3.628e+02, threshold=3.475e+02, percent-clipped=0.0
+2024-08-26 15:39:56,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=62117.333333333336, ans=0.125
+2024-08-26 15:39:58,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=62117.333333333336, ans=0.2
+2024-08-26 15:40:08,739 INFO [train.py:1114] (1/4) Epoch 5, batch 1700, loss[loss=0.2264, simple_loss=0.2649, pruned_loss=0.06818, ctc_loss=0.1288, over 19689.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3134, pruned_loss=0.07993, ctc_loss=0.1494, over 3847837.05 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:40:09,824 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=62170.666666666664, ans=0.0
+2024-08-26 15:40:21,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.77 vs. limit=22.5
+2024-08-26 15:40:37,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=62330.666666666664, ans=10.0
+2024-08-26 15:40:37,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=62330.666666666664, ans=0.05
+2024-08-26 15:40:40,892 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62330.666666666664, ans=0.1
+2024-08-26 15:40:46,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=62384.0, ans=0.125
+2024-08-26 15:40:54,055 INFO [train.py:1114] (1/4) Epoch 5, batch 1750, loss[loss=0.2406, simple_loss=0.281, pruned_loss=0.07332, ctc_loss=0.1338, over 19702.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3128, pruned_loss=0.07949, ctc_loss=0.1487, over 3852457.68 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-26 15:40:55,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=62437.333333333336, ans=0.09899494936611666
+2024-08-26 15:41:04,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=62490.666666666664, ans=0.95
+2024-08-26 15:41:05,739 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.599e+02 1.842e+02 2.097e+02 3.191e+02, threshold=3.683e+02, percent-clipped=0.0
+2024-08-26 15:41:08,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=62490.666666666664, ans=0.05
+2024-08-26 15:41:23,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=62597.333333333336, ans=0.125
+2024-08-26 15:41:23,722 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.85 vs. limit=22.5
+2024-08-26 15:41:24,454 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=62597.333333333336, ans=0.0
+2024-08-26 15:41:39,324 INFO [train.py:1114] (1/4) Epoch 5, batch 1800, loss[loss=0.2743, simple_loss=0.3276, pruned_loss=0.08046, ctc_loss=0.1501, over 19609.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.3131, pruned_loss=0.0798, ctc_loss=0.1493, over 3853524.62 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-26 15:41:42,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=62704.0, ans=0.07
+2024-08-26 15:41:46,224 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.28 vs. limit=12.0
+2024-08-26 15:41:48,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=62757.333333333336, ans=0.0
+2024-08-26 15:41:52,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=62757.333333333336, ans=0.125
+2024-08-26 15:42:18,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=62917.333333333336, ans=0.2
+2024-08-26 15:42:24,239 INFO [train.py:1114] (1/4) Epoch 5, batch 1850, loss[loss=0.286, simple_loss=0.3323, pruned_loss=0.08844, ctc_loss=0.157, over 19587.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3125, pruned_loss=0.07911, ctc_loss=0.1479, over 3856995.29 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-26 15:42:25,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=62970.666666666664, ans=0.2
+2024-08-26 15:42:29,783 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=62970.666666666664, ans=0.0
+2024-08-26 15:42:29,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=62970.666666666664, ans=0.125
+2024-08-26 15:42:30,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=62970.666666666664, ans=0.025
+2024-08-26 15:42:35,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=63024.0, ans=0.2
+2024-08-26 15:42:35,850 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.605e+02 1.818e+02 2.016e+02 3.945e+02, threshold=3.637e+02, percent-clipped=1.0
+2024-08-26 15:42:42,021 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:42:51,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=63077.333333333336, ans=0.0
+2024-08-26 15:42:51,225 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.18 vs. limit=6.0
+2024-08-26 15:42:52,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=63077.333333333336, ans=0.125
+2024-08-26 15:42:52,893 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.88 vs. limit=15.0
+2024-08-26 15:43:03,583 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.94 vs. limit=22.5
+2024-08-26 15:43:16,395 INFO [train.py:1114] (1/4) Epoch 5, batch 1900, loss[loss=0.2599, simple_loss=0.317, pruned_loss=0.07247, ctc_loss=0.1447, over 19659.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3126, pruned_loss=0.07898, ctc_loss=0.1478, over 3862274.50 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 16.0
+2024-08-26 15:43:16,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=63237.333333333336, ans=0.1
+2024-08-26 15:43:20,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63237.333333333336, ans=0.1
+2024-08-26 15:43:34,638 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=63290.666666666664, ans=0.125
+2024-08-26 15:43:37,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=63290.666666666664, ans=0.125
+2024-08-26 15:43:37,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=63290.666666666664, ans=0.125
+2024-08-26 15:43:37,216 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:43:46,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=63397.333333333336, ans=0.125
+2024-08-26 15:43:53,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=63397.333333333336, ans=0.125
+2024-08-26 15:44:05,666 INFO [train.py:1114] (1/4) Epoch 5, batch 1950, loss[loss=0.2485, simple_loss=0.3031, pruned_loss=0.06944, ctc_loss=0.1374, over 19601.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.3142, pruned_loss=0.07914, ctc_loss=0.1482, over 3870831.22 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 16.0
+2024-08-26 15:44:05,887 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63504.0, ans=0.1
+2024-08-26 15:44:05,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=63504.0, ans=0.125
+2024-08-26 15:44:06,244 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.87 vs. limit=12.0
+2024-08-26 15:44:08,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=63504.0, ans=0.125
+2024-08-26 15:44:11,233 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63504.0, ans=0.1
+2024-08-26 15:44:20,099 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.677e+02 1.824e+02 1.963e+02 3.212e+02, threshold=3.647e+02, percent-clipped=0.0
+2024-08-26 15:44:21,445 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.90 vs. limit=10.0
+2024-08-26 15:44:32,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=63610.666666666664, ans=0.125
+2024-08-26 15:44:32,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=63610.666666666664, ans=0.125
+2024-08-26 15:44:34,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=63664.0, ans=0.0
+2024-08-26 15:44:43,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63717.333333333336, ans=0.1
+2024-08-26 15:44:46,193 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=63717.333333333336, ans=0.125
+2024-08-26 15:44:51,922 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.54 vs. limit=15.0
+2024-08-26 15:44:52,291 INFO [train.py:1114] (1/4) Epoch 5, batch 2000, loss[loss=0.2454, simple_loss=0.2817, pruned_loss=0.07688, ctc_loss=0.1385, over 19666.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3152, pruned_loss=0.08, ctc_loss=0.1494, over 3854746.18 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-26 15:45:09,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=63877.333333333336, ans=0.125
+2024-08-26 15:45:13,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=63877.333333333336, ans=0.2
+2024-08-26 15:45:21,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=63930.666666666664, ans=0.125
+2024-08-26 15:45:27,013 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=63984.0, ans=0.0
+2024-08-26 15:45:39,912 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=63984.0, ans=0.0
+2024-08-26 15:45:40,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=63984.0, ans=0.0
+2024-08-26 15:45:42,246 INFO [train.py:1114] (1/4) Epoch 5, batch 2050, loss[loss=0.2193, simple_loss=0.2748, pruned_loss=0.05943, ctc_loss=0.1124, over 19741.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3141, pruned_loss=0.07979, ctc_loss=0.1489, over 3850981.22 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-26 15:45:54,616 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.624e+02 1.773e+02 2.077e+02 3.322e+02, threshold=3.546e+02, percent-clipped=0.0
+2024-08-26 15:45:56,202 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.74 vs. limit=15.0
+2024-08-26 15:45:58,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=64090.666666666664, ans=0.0
+2024-08-26 15:46:08,279 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.92 vs. limit=6.0
+2024-08-26 15:46:15,008 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=64197.333333333336, ans=0.95
+2024-08-26 15:46:18,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=64250.666666666664, ans=0.0
+2024-08-26 15:46:26,362 INFO [train.py:1114] (1/4) Epoch 5, batch 2100, loss[loss=0.2586, simple_loss=0.3092, pruned_loss=0.07582, ctc_loss=0.1411, over 19764.00 frames. ], tot_loss[loss=0.265, simple_loss=0.313, pruned_loss=0.07901, ctc_loss=0.1475, over 3858018.39 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:46:41,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64357.333333333336, ans=0.1
+2024-08-26 15:46:43,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=64410.666666666664, ans=0.125
+2024-08-26 15:46:58,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=64464.0, ans=0.2
+2024-08-26 15:47:13,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=64517.333333333336, ans=0.0
+2024-08-26 15:47:17,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=64517.333333333336, ans=0.2
+2024-08-26 15:47:23,494 INFO [train.py:1114] (1/4) Epoch 5, batch 2150, loss[loss=0.2387, simple_loss=0.2898, pruned_loss=0.06833, ctc_loss=0.1273, over 19602.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3125, pruned_loss=0.07902, ctc_loss=0.1475, over 3868961.66 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:47:35,823 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.599e+02 1.757e+02 2.074e+02 2.995e+02, threshold=3.513e+02, percent-clipped=0.0
+2024-08-26 15:47:44,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=64677.333333333336, ans=0.125
+2024-08-26 15:47:45,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=64677.333333333336, ans=0.0
+2024-08-26 15:47:51,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64730.666666666664, ans=0.1
+2024-08-26 15:48:04,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=64784.0, ans=0.125
+2024-08-26 15:48:07,237 INFO [train.py:1114] (1/4) Epoch 5, batch 2200, loss[loss=0.2734, simple_loss=0.3178, pruned_loss=0.08288, ctc_loss=0.158, over 19577.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3122, pruned_loss=0.07873, ctc_loss=0.1473, over 3867474.71 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:48:33,287 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.37 vs. limit=15.0
+2024-08-26 15:48:52,500 INFO [train.py:1114] (1/4) Epoch 5, batch 2250, loss[loss=0.2778, simple_loss=0.3361, pruned_loss=0.08002, ctc_loss=0.1484, over 19617.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3118, pruned_loss=0.07826, ctc_loss=0.1465, over 3868277.11 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 16.0
+2024-08-26 15:48:59,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=65104.0, ans=0.0
+2024-08-26 15:49:05,744 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.721e+02 2.056e+02 2.448e+02 6.138e+02, threshold=4.112e+02, percent-clipped=3.0
+2024-08-26 15:49:19,883 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=65264.0, ans=0.2
+2024-08-26 15:49:22,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=65264.0, ans=0.2
+2024-08-26 15:49:36,470 INFO [train.py:1114] (1/4) Epoch 5, batch 2300, loss[loss=0.2401, simple_loss=0.296, pruned_loss=0.06724, ctc_loss=0.1245, over 19508.00 frames. ], tot_loss[loss=0.263, simple_loss=0.311, pruned_loss=0.07823, ctc_loss=0.1465, over 3861687.49 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 16.0
+2024-08-26 15:49:39,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=65370.666666666664, ans=0.07
+2024-08-26 15:49:52,038 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.47 vs. limit=22.5
+2024-08-26 15:50:03,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=65477.333333333336, ans=0.0
+2024-08-26 15:50:22,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=65637.33333333333, ans=0.125
+2024-08-26 15:50:23,049 INFO [train.py:1114] (1/4) Epoch 5, batch 2350, loss[loss=0.2974, simple_loss=0.3414, pruned_loss=0.09279, ctc_loss=0.1697, over 19686.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3107, pruned_loss=0.07819, ctc_loss=0.1464, over 3864002.50 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 16.0
+2024-08-26 15:50:26,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=65637.33333333333, ans=0.025
+2024-08-26 15:50:26,832 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.02 vs. limit=15.0
+2024-08-26 15:50:36,067 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.568e+02 1.781e+02 2.033e+02 3.218e+02, threshold=3.561e+02, percent-clipped=0.0
+2024-08-26 15:50:39,017 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=65690.66666666667, ans=0.2
+2024-08-26 15:50:50,493 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-26 15:50:56,841 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.61 vs. limit=12.0
+2024-08-26 15:51:02,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=65850.66666666667, ans=0.0
+2024-08-26 15:51:04,994 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.69 vs. limit=22.5
+2024-08-26 15:51:07,085 INFO [train.py:1114] (1/4) Epoch 5, batch 2400, loss[loss=0.277, simple_loss=0.3242, pruned_loss=0.08332, ctc_loss=0.1577, over 19270.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3137, pruned_loss=0.07978, ctc_loss=0.1489, over 3858281.04 frames. ], batch size: 71, lr: 2.63e-02, grad_scale: 32.0
+2024-08-26 15:51:08,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=65904.0, ans=0.125
+2024-08-26 15:51:24,618 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.90 vs. limit=15.0
+2024-08-26 15:51:28,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=66010.66666666667, ans=0.125
+2024-08-26 15:51:52,370 INFO [train.py:1114] (1/4) Epoch 5, batch 2450, loss[loss=0.3543, simple_loss=0.3519, pruned_loss=0.1296, ctc_loss=0.2435, over 13139.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3188, pruned_loss=0.0843, ctc_loss=0.1572, over 3727883.52 frames. ], batch size: 140, lr: 2.63e-02, grad_scale: 16.0
+2024-08-26 15:52:03,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=66224.0, ans=0.125
+2024-08-26 15:52:07,310 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.716e+02 1.912e+02 2.213e+02 5.978e+02, threshold=3.825e+02, percent-clipped=3.0
+2024-08-26 15:52:09,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=66224.0, ans=0.1
+2024-08-26 15:52:14,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=66277.33333333333, ans=0.0
+2024-08-26 15:53:42,766 INFO [train.py:1114] (1/4) Epoch 6, batch 0, loss[loss=0.2699, simple_loss=0.3135, pruned_loss=0.08284, ctc_loss=0.1514, over 19399.00 frames. ], tot_loss[loss=0.2699, simple_loss=0.3135, pruned_loss=0.08284, ctc_loss=0.1514, over 19399.00 frames. ], batch size: 48, lr: 2.45e-02, grad_scale: 32.0
+2024-08-26 15:53:42,767 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 15:54:26,133 INFO [train.py:1146] (1/4) Epoch 6, validation: loss=0.2162, simple_loss=0.3022, pruned_loss=0.04785, ctc_loss=0.08613, over 944034.00 frames.
+2024-08-26 15:54:26,134 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12882MB
+2024-08-26 15:54:29,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=66378.66666666667, ans=0.125
+2024-08-26 15:54:42,289 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten.whitening_limit, batch_count=66432.0, ans=15.0
+2024-08-26 15:54:45,863 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.10 vs. limit=22.5
+2024-08-26 15:54:50,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66485.33333333333, ans=0.1
+2024-08-26 15:54:50,348 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=66485.33333333333, ans=0.125
+2024-08-26 15:54:56,677 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=66538.66666666667, ans=0.1
+2024-08-26 15:55:03,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=66592.0, ans=0.1
+2024-08-26 15:55:04,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=66592.0, ans=0.125
+2024-08-26 15:55:06,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=66592.0, ans=0.125
+2024-08-26 15:55:07,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=66592.0, ans=0.2
+2024-08-26 15:55:13,234 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.15 vs. limit=15.0
+2024-08-26 15:55:13,585 INFO [train.py:1114] (1/4) Epoch 6, batch 50, loss[loss=0.2243, simple_loss=0.2766, pruned_loss=0.06262, ctc_loss=0.1168, over 19715.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3164, pruned_loss=0.08069, ctc_loss=0.1521, over 844837.63 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:55:31,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66752.0, ans=0.1
+2024-08-26 15:55:31,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=66752.0, ans=0.04949747468305833
+2024-08-26 15:55:39,174 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.607e+02 1.759e+02 1.997e+02 3.496e+02, threshold=3.518e+02, percent-clipped=0.0
+2024-08-26 15:55:39,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=66752.0, ans=0.0
+2024-08-26 15:55:43,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=66805.33333333333, ans=0.125
+2024-08-26 15:55:49,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=66805.33333333333, ans=0.125
+2024-08-26 15:55:50,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.57 vs. limit=15.0
+2024-08-26 15:55:57,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.37 vs. limit=15.0
+2024-08-26 15:55:57,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66858.66666666667, ans=0.1
+2024-08-26 15:56:03,173 INFO [train.py:1114] (1/4) Epoch 6, batch 100, loss[loss=0.2585, simple_loss=0.3054, pruned_loss=0.07696, ctc_loss=0.1442, over 19702.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3152, pruned_loss=0.07942, ctc_loss=0.1491, over 1498587.70 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:56:04,594 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.49 vs. limit=22.5
+2024-08-26 15:56:30,971 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.78 vs. limit=6.0
+2024-08-26 15:56:53,188 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.67 vs. limit=6.0
+2024-08-26 15:56:55,738 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=67125.33333333333, ans=0.09899494936611666
+2024-08-26 15:56:56,540 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=67178.66666666667, ans=0.125
+2024-08-26 15:56:57,270 INFO [train.py:1114] (1/4) Epoch 6, batch 150, loss[loss=0.244, simple_loss=0.283, pruned_loss=0.07526, ctc_loss=0.1365, over 19679.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3116, pruned_loss=0.07736, ctc_loss=0.145, over 2027875.39 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:57:01,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=67178.66666666667, ans=0.1
+2024-08-26 15:57:20,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=15.0
+2024-08-26 15:57:22,726 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.333e+02 1.584e+02 1.709e+02 1.986e+02 2.973e+02, threshold=3.418e+02, percent-clipped=0.0
+2024-08-26 15:57:39,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=67392.0, ans=0.125
+2024-08-26 15:57:44,431 INFO [train.py:1114] (1/4) Epoch 6, batch 200, loss[loss=0.2494, simple_loss=0.302, pruned_loss=0.07056, ctc_loss=0.1392, over 18471.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3097, pruned_loss=0.07661, ctc_loss=0.1436, over 2436571.14 frames. ], batch size: 86, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:57:45,165 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.16 vs. limit=10.0
+2024-08-26 15:57:45,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=67445.33333333333, ans=0.1
+2024-08-26 15:57:51,765 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.14 vs. limit=15.0
+2024-08-26 15:58:07,562 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=67552.0, ans=0.0
+2024-08-26 15:58:08,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=67552.0, ans=0.0
+2024-08-26 15:58:09,739 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.15 vs. limit=22.5
+2024-08-26 15:58:24,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=67605.33333333333, ans=0.125
+2024-08-26 15:58:26,091 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.12 vs. limit=15.0
+2024-08-26 15:58:31,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=67658.66666666667, ans=0.025
+2024-08-26 15:58:33,380 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=67658.66666666667, ans=0.125
+2024-08-26 15:58:36,021 INFO [train.py:1114] (1/4) Epoch 6, batch 250, loss[loss=0.2665, simple_loss=0.3155, pruned_loss=0.07845, ctc_loss=0.1512, over 19431.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3088, pruned_loss=0.07617, ctc_loss=0.1426, over 2756537.75 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:58:41,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=67712.0, ans=0.0
+2024-08-26 15:59:05,230 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.04 vs. limit=15.0
+2024-08-26 15:59:09,881 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.42 vs. limit=6.0
+2024-08-26 15:59:10,415 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.559e+02 1.703e+02 1.915e+02 3.590e+02, threshold=3.407e+02, percent-clipped=1.0
+2024-08-26 15:59:10,858 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.49 vs. limit=15.0
+2024-08-26 15:59:16,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=67872.0, ans=0.125
+2024-08-26 15:59:18,457 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=67872.0, ans=0.5
+2024-08-26 15:59:20,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=67872.0, ans=0.1
+2024-08-26 15:59:35,248 INFO [train.py:1114] (1/4) Epoch 6, batch 300, loss[loss=0.2653, simple_loss=0.3153, pruned_loss=0.07827, ctc_loss=0.147, over 19497.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.3078, pruned_loss=0.07554, ctc_loss=0.1411, over 3001602.54 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:59:40,399 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=67978.66666666667, ans=0.125
+2024-08-26 15:59:48,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=68032.0, ans=0.125
+2024-08-26 16:00:08,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=68138.66666666667, ans=0.2
+2024-08-26 16:00:13,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=68138.66666666667, ans=0.5
+2024-08-26 16:00:21,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=68192.0, ans=0.0
+2024-08-26 16:00:24,075 INFO [train.py:1114] (1/4) Epoch 6, batch 350, loss[loss=0.229, simple_loss=0.2749, pruned_loss=0.06749, ctc_loss=0.1207, over 19761.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3087, pruned_loss=0.07577, ctc_loss=0.1418, over 3191085.11 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:00:25,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=68245.33333333333, ans=0.125
+2024-08-26 16:00:27,565 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.62 vs. limit=10.0
+2024-08-26 16:00:29,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=68245.33333333333, ans=0.125
+2024-08-26 16:00:42,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=68352.0, ans=0.125
+2024-08-26 16:00:49,640 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.625e+02 1.872e+02 2.224e+02 3.924e+02, threshold=3.744e+02, percent-clipped=2.0
+2024-08-26 16:00:53,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=68405.33333333333, ans=0.125
+2024-08-26 16:00:54,032 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.02 vs. limit=15.0
+2024-08-26 16:00:59,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=68405.33333333333, ans=0.125
+2024-08-26 16:01:00,268 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:01:11,323 INFO [train.py:1114] (1/4) Epoch 6, batch 400, loss[loss=0.2611, simple_loss=0.3171, pruned_loss=0.0739, ctc_loss=0.1432, over 19485.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.308, pruned_loss=0.07531, ctc_loss=0.141, over 3342801.21 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:01:14,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=68512.0, ans=0.125
+2024-08-26 16:01:24,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=68565.33333333333, ans=0.0
+2024-08-26 16:01:35,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=68618.66666666667, ans=0.125
+2024-08-26 16:01:54,298 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:01:59,881 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=68725.33333333333, ans=0.0
+2024-08-26 16:02:07,097 INFO [train.py:1114] (1/4) Epoch 6, batch 450, loss[loss=0.2436, simple_loss=0.2999, pruned_loss=0.06764, ctc_loss=0.1298, over 19624.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.308, pruned_loss=0.07526, ctc_loss=0.1408, over 3451744.00 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:02:15,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=68832.0, ans=0.2
+2024-08-26 16:02:34,034 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.611e+02 1.799e+02 2.140e+02 4.925e+02, threshold=3.597e+02, percent-clipped=1.0
+2024-08-26 16:02:48,519 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:02:49,756 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.18 vs. limit=15.0
+2024-08-26 16:02:55,665 INFO [train.py:1114] (1/4) Epoch 6, batch 500, loss[loss=0.2778, simple_loss=0.3224, pruned_loss=0.08476, ctc_loss=0.1592, over 19656.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3073, pruned_loss=0.07475, ctc_loss=0.14, over 3546592.17 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:02:57,230 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.99 vs. limit=15.0
+2024-08-26 16:02:58,695 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=69045.33333333333, ans=0.125
+2024-08-26 16:02:59,811 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=69045.33333333333, ans=0.5
+2024-08-26 16:03:09,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=69098.66666666667, ans=0.1
+2024-08-26 16:03:12,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=69098.66666666667, ans=0.125
+2024-08-26 16:03:25,670 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.31 vs. limit=15.0
+2024-08-26 16:03:43,064 INFO [train.py:1114] (1/4) Epoch 6, batch 550, loss[loss=0.2799, simple_loss=0.3275, pruned_loss=0.08357, ctc_loss=0.1632, over 19238.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3072, pruned_loss=0.07482, ctc_loss=0.1401, over 3609361.26 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:03:56,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=69365.33333333333, ans=0.95
+2024-08-26 16:04:06,731 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=4.93 vs. limit=15.0
+2024-08-26 16:04:08,881 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.633e+02 1.875e+02 2.080e+02 6.681e+02, threshold=3.749e+02, percent-clipped=3.0
+2024-08-26 16:04:21,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=69525.33333333333, ans=0.0
+2024-08-26 16:04:30,171 INFO [train.py:1114] (1/4) Epoch 6, batch 600, loss[loss=0.2531, simple_loss=0.3146, pruned_loss=0.06978, ctc_loss=0.13, over 19413.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3069, pruned_loss=0.07432, ctc_loss=0.1389, over 3666238.52 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:04:33,578 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.42 vs. limit=15.0
+2024-08-26 16:04:39,738 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=69578.66666666667, ans=0.2
+2024-08-26 16:04:55,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=69632.0, ans=0.2
+2024-08-26 16:04:56,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=69685.33333333333, ans=0.0
+2024-08-26 16:05:07,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=69738.66666666667, ans=0.125
+2024-08-26 16:05:09,762 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=69738.66666666667, ans=0.125
+2024-08-26 16:05:09,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=69738.66666666667, ans=0.5
+2024-08-26 16:05:24,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=69792.0, ans=0.125
+2024-08-26 16:05:25,780 INFO [train.py:1114] (1/4) Epoch 6, batch 650, loss[loss=0.2474, simple_loss=0.2992, pruned_loss=0.07133, ctc_loss=0.1323, over 19769.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3051, pruned_loss=0.0732, ctc_loss=0.1371, over 3717015.54 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-26 16:05:26,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=69845.33333333333, ans=0.125
+2024-08-26 16:05:49,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=69952.0, ans=0.125
+2024-08-26 16:05:50,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=69952.0, ans=0.0
+2024-08-26 16:05:53,416 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.596e+02 1.734e+02 1.974e+02 3.978e+02, threshold=3.467e+02, percent-clipped=1.0
+2024-08-26 16:06:02,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=70005.33333333333, ans=0.5
+2024-08-26 16:06:08,921 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=70058.66666666667, ans=0.0
+2024-08-26 16:06:15,338 INFO [train.py:1114] (1/4) Epoch 6, batch 700, loss[loss=0.2458, simple_loss=0.2959, pruned_loss=0.07, ctc_loss=0.139, over 19712.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3053, pruned_loss=0.0734, ctc_loss=0.1374, over 3749297.31 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 16.0
+2024-08-26 16:06:33,709 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.10 vs. limit=12.0
+2024-08-26 16:06:41,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_ff3.min_abs, batch_count=70218.66666666667, ans=0.2
+2024-08-26 16:06:54,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=70325.33333333333, ans=0.125
+2024-08-26 16:06:55,886 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=70325.33333333333, ans=0.125
+2024-08-26 16:07:02,325 INFO [train.py:1114] (1/4) Epoch 6, batch 750, loss[loss=0.2681, simple_loss=0.3183, pruned_loss=0.07958, ctc_loss=0.1468, over 19507.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3056, pruned_loss=0.07353, ctc_loss=0.1377, over 3775115.39 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 16.0
+2024-08-26 16:07:03,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=70378.66666666667, ans=0.1
+2024-08-26 16:07:24,210 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.08 vs. limit=15.0
+2024-08-26 16:07:33,074 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.662e+02 1.845e+02 2.236e+02 2.956e+02, threshold=3.689e+02, percent-clipped=0.0
+2024-08-26 16:07:36,150 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:08:08,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=70592.0, ans=0.0
+2024-08-26 16:08:25,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=70645.33333333333, ans=0.0
+2024-08-26 16:08:25,240 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=70645.33333333333, ans=0.0
+2024-08-26 16:08:25,914 INFO [train.py:1114] (1/4) Epoch 6, batch 800, loss[loss=0.2244, simple_loss=0.2773, pruned_loss=0.06275, ctc_loss=0.1152, over 19422.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3057, pruned_loss=0.07379, ctc_loss=0.1379, over 3795598.18 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:08:26,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=70645.33333333333, ans=0.025
+2024-08-26 16:08:39,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=70698.66666666667, ans=0.09899494936611666
+2024-08-26 16:09:25,214 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=10.65 vs. limit=15.0
+2024-08-26 16:09:32,230 INFO [train.py:1114] (1/4) Epoch 6, batch 850, loss[loss=0.2796, simple_loss=0.3389, pruned_loss=0.0789, ctc_loss=0.1561, over 19650.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3056, pruned_loss=0.07378, ctc_loss=0.1378, over 3815002.64 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:09:33,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=70912.0, ans=0.0
+2024-08-26 16:09:58,790 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 1.558e+02 1.696e+02 1.888e+02 5.151e+02, threshold=3.391e+02, percent-clipped=1.0
+2024-08-26 16:10:00,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=71072.0, ans=0.0
+2024-08-26 16:10:03,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=71072.0, ans=0.2
+2024-08-26 16:10:05,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=71072.0, ans=0.0
+2024-08-26 16:10:26,557 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.05 vs. limit=10.0
+2024-08-26 16:10:28,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=71125.33333333333, ans=0.05
+2024-08-26 16:10:35,827 INFO [train.py:1114] (1/4) Epoch 6, batch 900, loss[loss=0.2402, simple_loss=0.2956, pruned_loss=0.06816, ctc_loss=0.121, over 19409.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3064, pruned_loss=0.07441, ctc_loss=0.1389, over 3819354.15 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:10:45,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71232.0, ans=0.1
+2024-08-26 16:10:50,679 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.75 vs. limit=12.0
+2024-08-26 16:10:52,434 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.60 vs. limit=15.0
+2024-08-26 16:10:55,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=71285.33333333333, ans=15.0
+2024-08-26 16:11:01,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=71285.33333333333, ans=0.125
+2024-08-26 16:11:05,854 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.01 vs. limit=15.0
+2024-08-26 16:11:10,876 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.10 vs. limit=15.0
+2024-08-26 16:11:13,545 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71392.0, ans=0.1
+2024-08-26 16:11:23,803 INFO [train.py:1114] (1/4) Epoch 6, batch 950, loss[loss=0.2313, simple_loss=0.2822, pruned_loss=0.06614, ctc_loss=0.1203, over 19493.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3069, pruned_loss=0.07486, ctc_loss=0.1401, over 3821090.00 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-26 16:11:35,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=71445.33333333333, ans=0.05
+2024-08-26 16:11:42,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=71498.66666666667, ans=0.0
+2024-08-26 16:11:44,186 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:11:53,194 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=11.43 vs. limit=15.0
+2024-08-26 16:11:57,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=71552.0, ans=0.125
+2024-08-26 16:11:58,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=71552.0, ans=0.125
+2024-08-26 16:11:59,456 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.602e+02 1.780e+02 2.099e+02 5.215e+02, threshold=3.559e+02, percent-clipped=4.0
+2024-08-26 16:12:08,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=71605.33333333333, ans=0.025
+2024-08-26 16:12:18,718 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=71658.66666666667, ans=0.1
+2024-08-26 16:12:21,527 INFO [train.py:1114] (1/4) Epoch 6, batch 1000, loss[loss=0.2193, simple_loss=0.2816, pruned_loss=0.05674, ctc_loss=0.1087, over 19838.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3076, pruned_loss=0.07515, ctc_loss=0.1403, over 3815904.45 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-26 16:12:29,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=71712.0, ans=0.0
+2024-08-26 16:12:34,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=71765.33333333333, ans=0.1
+2024-08-26 16:12:52,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer_na.min_abs, batch_count=71765.33333333333, ans=0.02
+2024-08-26 16:12:59,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=71818.66666666667, ans=0.1
+2024-08-26 16:13:06,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=71872.0, ans=0.2
+2024-08-26 16:13:08,101 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.76 vs. limit=15.0
+2024-08-26 16:13:08,173 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.13 vs. limit=12.0
+2024-08-26 16:13:11,551 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:13:17,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-26 16:13:18,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=71925.33333333333, ans=0.0
+2024-08-26 16:13:22,622 INFO [train.py:1114] (1/4) Epoch 6, batch 1050, loss[loss=0.2625, simple_loss=0.3169, pruned_loss=0.07562, ctc_loss=0.1422, over 19835.00 frames. ], tot_loss[loss=0.255, simple_loss=0.306, pruned_loss=0.07425, ctc_loss=0.1389, over 3822457.66 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:13:24,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=71978.66666666667, ans=0.125
+2024-08-26 16:13:26,021 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.33 vs. limit=15.0
+2024-08-26 16:13:38,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=72032.0, ans=0.0
+2024-08-26 16:13:42,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.47 vs. limit=10.0
+2024-08-26 16:13:50,114 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.587e+02 1.763e+02 2.081e+02 5.001e+02, threshold=3.526e+02, percent-clipped=1.0
+2024-08-26 16:13:55,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=72138.66666666667, ans=0.2
+2024-08-26 16:14:10,561 INFO [train.py:1114] (1/4) Epoch 6, batch 1100, loss[loss=0.2478, simple_loss=0.2977, pruned_loss=0.07286, ctc_loss=0.1306, over 19591.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3052, pruned_loss=0.07343, ctc_loss=0.1372, over 3829725.50 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:14:35,807 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.76 vs. limit=15.0
+2024-08-26 16:14:38,504 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=72352.0, ans=0.125
+2024-08-26 16:14:41,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=72352.0, ans=0.125
+2024-08-26 16:14:43,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=72352.0, ans=0.125
+2024-08-26 16:14:45,442 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=13.09 vs. limit=15.0
+2024-08-26 16:15:15,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=72458.66666666667, ans=0.125
+2024-08-26 16:15:25,918 INFO [train.py:1114] (1/4) Epoch 6, batch 1150, loss[loss=0.2377, simple_loss=0.2981, pruned_loss=0.06463, ctc_loss=0.1198, over 19583.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3049, pruned_loss=0.07348, ctc_loss=0.1371, over 3827941.44 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:15:45,115 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.93 vs. limit=15.0
+2024-08-26 16:15:48,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=72512.0, ans=0.07
+2024-08-26 16:15:50,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=72512.0, ans=0.025
+2024-08-26 16:16:51,599 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.628e+02 1.822e+02 2.077e+02 5.117e+02, threshold=3.645e+02, percent-clipped=2.0
+2024-08-26 16:16:52,184 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.85 vs. limit=15.0
+2024-08-26 16:16:52,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=72672.0, ans=0.125
+2024-08-26 16:16:57,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=72672.0, ans=0.09899494936611666
+2024-08-26 16:17:15,533 INFO [train.py:1114] (1/4) Epoch 6, batch 1200, loss[loss=0.2724, simple_loss=0.3307, pruned_loss=0.07797, ctc_loss=0.1452, over 19837.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3063, pruned_loss=0.07413, ctc_loss=0.1385, over 3823993.67 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:17:23,484 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=72778.66666666667, ans=0.0
+2024-08-26 16:17:25,464 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=72832.0, ans=0.0
+2024-08-26 16:17:28,241 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:17:49,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=72938.66666666667, ans=0.125
+2024-08-26 16:18:02,431 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.63 vs. limit=15.0
+2024-08-26 16:18:04,581 INFO [train.py:1114] (1/4) Epoch 6, batch 1250, loss[loss=0.2822, simple_loss=0.3308, pruned_loss=0.08586, ctc_loss=0.1548, over 19520.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3062, pruned_loss=0.0737, ctc_loss=0.1374, over 3841866.82 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:18:13,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=73098.66666666667, ans=0.125
+2024-08-26 16:18:28,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=73152.0, ans=0.125
+2024-08-26 16:18:28,811 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.55 vs. limit=15.0
+2024-08-26 16:18:31,859 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.534e+02 1.709e+02 2.004e+02 3.682e+02, threshold=3.418e+02, percent-clipped=1.0
+2024-08-26 16:18:38,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=73205.33333333333, ans=0.0
+2024-08-26 16:18:59,493 INFO [train.py:1114] (1/4) Epoch 6, batch 1300, loss[loss=0.2516, simple_loss=0.3145, pruned_loss=0.06812, ctc_loss=0.1312, over 18887.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3054, pruned_loss=0.07339, ctc_loss=0.1367, over 3845848.26 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:19:16,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=73312.0, ans=0.2
+2024-08-26 16:19:52,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=73365.33333333333, ans=0.025
+2024-08-26 16:19:57,164 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=73365.33333333333, ans=0.0
+2024-08-26 16:19:59,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=73365.33333333333, ans=0.125
+2024-08-26 16:20:01,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73418.66666666667, ans=0.1
+2024-08-26 16:20:09,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=73418.66666666667, ans=0.125
+2024-08-26 16:20:19,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=73472.0, ans=0.125
+2024-08-26 16:20:26,025 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.79 vs. limit=22.5
+2024-08-26 16:20:32,211 INFO [train.py:1114] (1/4) Epoch 6, batch 1350, loss[loss=0.2236, simple_loss=0.292, pruned_loss=0.05509, ctc_loss=0.1125, over 19757.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3041, pruned_loss=0.07253, ctc_loss=0.1352, over 3856087.46 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:20:34,327 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=73578.66666666667, ans=0.2
+2024-08-26 16:20:35,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=73578.66666666667, ans=0.015
+2024-08-26 16:20:36,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=73578.66666666667, ans=0.1
+2024-08-26 16:20:47,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=73632.0, ans=0.0
+2024-08-26 16:20:49,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=73632.0, ans=0.0
+2024-08-26 16:20:51,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=73685.33333333333, ans=0.0
+2024-08-26 16:21:00,545 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.536e+02 1.657e+02 1.960e+02 3.055e+02, threshold=3.315e+02, percent-clipped=0.0
+2024-08-26 16:21:02,136 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.17 vs. limit=22.5
+2024-08-26 16:21:19,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=73845.33333333333, ans=0.125
+2024-08-26 16:21:20,641 INFO [train.py:1114] (1/4) Epoch 6, batch 1400, loss[loss=0.2287, simple_loss=0.2778, pruned_loss=0.06613, ctc_loss=0.1183, over 19654.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3038, pruned_loss=0.07239, ctc_loss=0.1351, over 3863972.67 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-26 16:21:26,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=73845.33333333333, ans=0.125
+2024-08-26 16:21:34,285 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:21:37,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73898.66666666667, ans=0.1
+2024-08-26 16:21:43,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=73952.0, ans=0.035
+2024-08-26 16:22:08,747 INFO [train.py:1114] (1/4) Epoch 6, batch 1450, loss[loss=0.2838, simple_loss=0.3287, pruned_loss=0.08756, ctc_loss=0.1597, over 19687.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3048, pruned_loss=0.07292, ctc_loss=0.1362, over 3862438.35 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 16.0
+2024-08-26 16:22:24,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=74165.33333333333, ans=0.05
+2024-08-26 16:23:09,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=74165.33333333333, ans=0.0
+2024-08-26 16:23:32,934 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.612e+02 1.863e+02 2.093e+02 4.374e+02, threshold=3.727e+02, percent-clipped=2.0
+2024-08-26 16:23:34,218 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=74272.0, ans=0.125
+2024-08-26 16:23:49,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=74325.33333333333, ans=0.025
+2024-08-26 16:23:57,146 INFO [train.py:1114] (1/4) Epoch 6, batch 1500, loss[loss=0.2402, simple_loss=0.298, pruned_loss=0.06692, ctc_loss=0.1213, over 19592.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3051, pruned_loss=0.07309, ctc_loss=0.1365, over 3862805.82 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 16.0
+2024-08-26 16:24:01,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=74378.66666666667, ans=0.0
+2024-08-26 16:24:04,684 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=14.50 vs. limit=15.0
+2024-08-26 16:24:29,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=74432.0, ans=0.0
+2024-08-26 16:24:29,856 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74432.0, ans=0.1
+2024-08-26 16:24:35,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=74432.0, ans=0.0
+2024-08-26 16:24:38,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=74432.0, ans=0.125
+2024-08-26 16:25:09,760 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=74645.33333333333, ans=0.125
+2024-08-26 16:25:10,564 INFO [train.py:1114] (1/4) Epoch 6, batch 1550, loss[loss=0.2654, simple_loss=0.3204, pruned_loss=0.07696, ctc_loss=0.1409, over 19611.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3055, pruned_loss=0.07363, ctc_loss=0.1375, over 3848623.69 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 16.0
+2024-08-26 16:25:13,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74645.33333333333, ans=0.1
+2024-08-26 16:26:02,846 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.93 vs. limit=15.0
+2024-08-26 16:26:19,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=74752.0, ans=0.125
+2024-08-26 16:26:19,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=74752.0, ans=0.125
+2024-08-26 16:26:20,821 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.577e+02 1.696e+02 1.957e+02 2.811e+02, threshold=3.391e+02, percent-clipped=0.0
+2024-08-26 16:26:30,287 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=6.0
+2024-08-26 16:26:40,315 INFO [train.py:1114] (1/4) Epoch 6, batch 1600, loss[loss=0.2584, simple_loss=0.3182, pruned_loss=0.07182, ctc_loss=0.1375, over 19862.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3047, pruned_loss=0.07333, ctc_loss=0.1371, over 3838342.69 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-26 16:26:53,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=74965.33333333333, ans=0.125
+2024-08-26 16:27:35,215 INFO [train.py:1114] (1/4) Epoch 6, batch 1650, loss[loss=0.2712, simple_loss=0.3241, pruned_loss=0.07985, ctc_loss=0.1464, over 19633.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3047, pruned_loss=0.0733, ctc_loss=0.137, over 3835386.23 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-26 16:27:39,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=75178.66666666667, ans=0.0
+2024-08-26 16:27:48,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=75232.0, ans=0.125
+2024-08-26 16:28:04,597 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.82 vs. limit=22.5
+2024-08-26 16:28:43,067 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.584e+02 1.799e+02 2.082e+02 3.549e+02, threshold=3.597e+02, percent-clipped=1.0
+2024-08-26 16:29:36,198 INFO [train.py:1114] (1/4) Epoch 6, batch 1700, loss[loss=0.2069, simple_loss=0.2609, pruned_loss=0.05582, ctc_loss=0.1032, over 19660.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3041, pruned_loss=0.07241, ctc_loss=0.1355, over 3848577.47 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:29:43,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=75445.33333333333, ans=0.125
+2024-08-26 16:29:44,133 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.57 vs. limit=15.0
+2024-08-26 16:29:45,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=75498.66666666667, ans=0.1
+2024-08-26 16:29:48,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=75498.66666666667, ans=0.125
+2024-08-26 16:29:49,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=75498.66666666667, ans=0.04949747468305833
+2024-08-26 16:29:49,662 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.53 vs. limit=6.0
+2024-08-26 16:29:51,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=75498.66666666667, ans=0.0
+2024-08-26 16:29:54,124 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.84 vs. limit=15.0
+2024-08-26 16:29:57,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=75552.0, ans=10.0
+2024-08-26 16:30:01,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=75552.0, ans=0.04949747468305833
+2024-08-26 16:30:24,056 INFO [train.py:1114] (1/4) Epoch 6, batch 1750, loss[loss=0.2154, simple_loss=0.2725, pruned_loss=0.057, ctc_loss=0.1109, over 19642.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3035, pruned_loss=0.07218, ctc_loss=0.135, over 3853401.56 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:30:28,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=75712.0, ans=0.0
+2024-08-26 16:31:04,523 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.524e+02 1.697e+02 1.959e+02 3.052e+02, threshold=3.394e+02, percent-clipped=0.0
+2024-08-26 16:31:05,689 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=75872.0, ans=0.0
+2024-08-26 16:31:25,741 INFO [train.py:1114] (1/4) Epoch 6, batch 1800, loss[loss=0.2476, simple_loss=0.3118, pruned_loss=0.06688, ctc_loss=0.1242, over 19621.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3042, pruned_loss=0.07251, ctc_loss=0.1355, over 3854754.97 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:31:30,770 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.15 vs. limit=10.0
+2024-08-26 16:32:01,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=75978.66666666667, ans=0.125
+2024-08-26 16:32:02,450 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.19 vs. limit=12.0
+2024-08-26 16:32:16,575 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.90 vs. limit=15.0
+2024-08-26 16:32:19,840 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=76085.33333333333, ans=0.0
+2024-08-26 16:32:20,849 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.62 vs. limit=15.0
+2024-08-26 16:32:26,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=76138.66666666667, ans=0.1
+2024-08-26 16:33:01,906 INFO [train.py:1114] (1/4) Epoch 6, batch 1850, loss[loss=0.2244, simple_loss=0.2893, pruned_loss=0.05776, ctc_loss=0.1103, over 19601.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3034, pruned_loss=0.07196, ctc_loss=0.1345, over 3857229.03 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:33:22,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=76245.33333333333, ans=0.125
+2024-08-26 16:33:27,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76245.33333333333, ans=0.1
+2024-08-26 16:33:52,069 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.13 vs. limit=10.0
+2024-08-26 16:33:54,776 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.31 vs. limit=15.0
+2024-08-26 16:33:56,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=76352.0, ans=0.125
+2024-08-26 16:33:57,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=76352.0, ans=0.2
+2024-08-26 16:33:58,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=76352.0, ans=0.07
+2024-08-26 16:34:05,742 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.545e+02 1.701e+02 1.893e+02 2.907e+02, threshold=3.402e+02, percent-clipped=0.0
+2024-08-26 16:34:06,881 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=76405.33333333333, ans=0.025
+2024-08-26 16:34:20,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=76458.66666666667, ans=0.0
+2024-08-26 16:34:23,393 INFO [train.py:1114] (1/4) Epoch 6, batch 1900, loss[loss=0.2551, simple_loss=0.3093, pruned_loss=0.07187, ctc_loss=0.1431, over 19651.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3042, pruned_loss=0.07226, ctc_loss=0.135, over 3861251.94 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:34:48,887 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=76672.0, ans=0.2
+2024-08-26 16:35:11,367 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.65 vs. limit=15.0
+2024-08-26 16:35:12,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=76672.0, ans=0.0
+2024-08-26 16:35:12,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=76672.0, ans=0.2
+2024-08-26 16:35:14,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=76672.0, ans=0.0
+2024-08-26 16:35:14,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=76672.0, ans=0.1
+2024-08-26 16:35:14,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=76672.0, ans=0.1
+2024-08-26 16:35:14,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=76672.0, ans=0.0
+2024-08-26 16:35:15,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=76672.0, ans=0.125
+2024-08-26 16:35:18,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=76725.33333333333, ans=0.2
+2024-08-26 16:35:27,766 INFO [train.py:1114] (1/4) Epoch 6, batch 1950, loss[loss=0.2262, simple_loss=0.2838, pruned_loss=0.06157, ctc_loss=0.114, over 19585.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3048, pruned_loss=0.0722, ctc_loss=0.1347, over 3870447.06 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:35:29,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=76778.66666666667, ans=0.125
+2024-08-26 16:35:32,216 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.16 vs. limit=15.0
+2024-08-26 16:36:20,178 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.19 vs. limit=22.5
+2024-08-26 16:36:27,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=76885.33333333333, ans=0.0
+2024-08-26 16:36:32,170 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.646e+02 1.808e+02 2.059e+02 4.885e+02, threshold=3.617e+02, percent-clipped=2.0
+2024-08-26 16:36:38,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=76938.66666666667, ans=0.2
+2024-08-26 16:36:46,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=76992.0, ans=0.0
+2024-08-26 16:36:53,607 INFO [train.py:1114] (1/4) Epoch 6, batch 2000, loss[loss=0.2302, simple_loss=0.2805, pruned_loss=0.06567, ctc_loss=0.1215, over 19686.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3057, pruned_loss=0.0729, ctc_loss=0.1362, over 3854966.03 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:36:56,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=77045.33333333333, ans=0.0
+2024-08-26 16:37:10,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=77152.0, ans=0.125
+2024-08-26 16:37:22,946 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=5.84 vs. limit=15.0
+2024-08-26 16:37:23,617 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.87 vs. limit=15.0
+2024-08-26 16:37:26,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=77205.33333333333, ans=0.125
+2024-08-26 16:37:38,187 INFO [train.py:1114] (1/4) Epoch 6, batch 2050, loss[loss=0.2079, simple_loss=0.2587, pruned_loss=0.0567, ctc_loss=0.109, over 19698.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3045, pruned_loss=0.07245, ctc_loss=0.1354, over 3851842.29 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:37:50,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=77365.33333333333, ans=0.125
+2024-08-26 16:37:57,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=77418.66666666667, ans=0.125
+2024-08-26 16:38:04,756 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.566e+02 1.748e+02 2.075e+02 4.290e+02, threshold=3.497e+02, percent-clipped=1.0
+2024-08-26 16:38:06,789 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=77472.0, ans=0.125
+2024-08-26 16:38:09,581 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.58 vs. limit=15.0
+2024-08-26 16:38:11,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=77472.0, ans=0.0
+2024-08-26 16:38:14,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=77525.33333333333, ans=0.125
+2024-08-26 16:38:34,154 INFO [train.py:1114] (1/4) Epoch 6, batch 2100, loss[loss=0.2554, simple_loss=0.3129, pruned_loss=0.0717, ctc_loss=0.1364, over 19786.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3037, pruned_loss=0.07198, ctc_loss=0.1345, over 3857919.47 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:38:35,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=77578.66666666667, ans=0.125
+2024-08-26 16:38:37,143 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.64 vs. limit=12.0
+2024-08-26 16:39:08,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=77578.66666666667, ans=0.125
+2024-08-26 16:39:15,872 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:39:19,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=77685.33333333333, ans=0.025
+2024-08-26 16:39:34,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=77738.66666666667, ans=0.125
+2024-08-26 16:39:36,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=77792.0, ans=0.1
+2024-08-26 16:39:38,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.63 vs. limit=15.0
+2024-08-26 16:39:40,399 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.98 vs. limit=15.0
+2024-08-26 16:39:46,024 INFO [train.py:1114] (1/4) Epoch 6, batch 2150, loss[loss=0.2356, simple_loss=0.2919, pruned_loss=0.06595, ctc_loss=0.1184, over 19583.00 frames. ], tot_loss[loss=0.25, simple_loss=0.303, pruned_loss=0.07168, ctc_loss=0.1339, over 3869040.43 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:39:50,873 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=77845.33333333333, ans=0.09899494936611666
+2024-08-26 16:39:56,259 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:39:58,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=77898.66666666667, ans=0.1
+2024-08-26 16:40:04,359 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.54 vs. limit=12.0
+2024-08-26 16:40:05,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=77952.0, ans=0.125
+2024-08-26 16:40:07,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=77952.0, ans=0.1
+2024-08-26 16:40:11,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=77952.0, ans=0.0
+2024-08-26 16:40:13,760 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 1.590e+02 1.744e+02 2.019e+02 3.989e+02, threshold=3.489e+02, percent-clipped=1.0
+2024-08-26 16:40:19,240 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=78005.33333333333, ans=0.125
+2024-08-26 16:40:24,806 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.11 vs. limit=15.0
+2024-08-26 16:40:31,345 INFO [train.py:1114] (1/4) Epoch 6, batch 2200, loss[loss=0.2579, simple_loss=0.3139, pruned_loss=0.07267, ctc_loss=0.1416, over 19604.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.303, pruned_loss=0.07177, ctc_loss=0.134, over 3868027.54 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:40:34,100 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=78112.0, ans=0.125
+2024-08-26 16:40:38,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=78112.0, ans=0.125
+2024-08-26 16:40:43,097 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=78165.33333333333, ans=0.125
+2024-08-26 16:40:43,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=78165.33333333333, ans=0.125
+2024-08-26 16:40:49,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=78218.66666666667, ans=0.0
+2024-08-26 16:40:53,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-26 16:40:58,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=78272.0, ans=0.2
+2024-08-26 16:41:58,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=78325.33333333333, ans=0.1
+2024-08-26 16:41:58,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=78325.33333333333, ans=0.0
+2024-08-26 16:42:03,100 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.99 vs. limit=10.0
+2024-08-26 16:42:04,353 INFO [train.py:1114] (1/4) Epoch 6, batch 2250, loss[loss=0.2485, simple_loss=0.3137, pruned_loss=0.06568, ctc_loss=0.13, over 19604.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3033, pruned_loss=0.07171, ctc_loss=0.1339, over 3868605.32 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:42:07,357 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.73 vs. limit=15.0
+2024-08-26 16:42:30,471 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.366e+02 1.631e+02 1.850e+02 2.118e+02 4.912e+02, threshold=3.701e+02, percent-clipped=4.0
+2024-08-26 16:42:37,139 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.22 vs. limit=15.0
+2024-08-26 16:42:41,223 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=78592.0, ans=0.1
+2024-08-26 16:42:57,019 INFO [train.py:1114] (1/4) Epoch 6, batch 2300, loss[loss=0.2393, simple_loss=0.2879, pruned_loss=0.07101, ctc_loss=0.122, over 19509.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3022, pruned_loss=0.07161, ctc_loss=0.1337, over 3862070.77 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:43:05,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=78698.66666666667, ans=0.0
+2024-08-26 16:43:10,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=78698.66666666667, ans=0.2
+2024-08-26 16:43:11,200 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:43:16,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=78752.0, ans=0.125
+2024-08-26 16:43:19,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=78752.0, ans=0.125
+2024-08-26 16:43:24,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=78805.33333333333, ans=0.035
+2024-08-26 16:43:25,200 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=78805.33333333333, ans=0.125
+2024-08-26 16:43:41,594 INFO [train.py:1114] (1/4) Epoch 6, batch 2350, loss[loss=0.2551, simple_loss=0.3107, pruned_loss=0.07235, ctc_loss=0.1368, over 19696.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3024, pruned_loss=0.07186, ctc_loss=0.1341, over 3864413.14 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:43:42,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=78912.0, ans=0.0
+2024-08-26 16:43:58,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=78965.33333333333, ans=0.0
+2024-08-26 16:44:06,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=79018.66666666667, ans=0.025
+2024-08-26 16:44:09,673 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.319e+02 1.571e+02 1.792e+02 2.053e+02 3.529e+02, threshold=3.585e+02, percent-clipped=0.0
+2024-08-26 16:44:12,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=79072.0, ans=0.1
+2024-08-26 16:44:21,400 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:44:27,085 INFO [train.py:1114] (1/4) Epoch 6, batch 2400, loss[loss=0.2553, simple_loss=0.3108, pruned_loss=0.07188, ctc_loss=0.1403, over 19418.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3048, pruned_loss=0.07281, ctc_loss=0.1359, over 3858376.52 frames. ], batch size: 67, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:44:30,840 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=79178.66666666667, ans=0.0
+2024-08-26 16:44:42,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=79232.0, ans=0.125
+2024-08-26 16:44:53,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=79338.66666666667, ans=0.125
+2024-08-26 16:45:12,832 INFO [train.py:1114] (1/4) Epoch 6, batch 2450, loss[loss=0.3704, simple_loss=0.3663, pruned_loss=0.1382, ctc_loss=0.2449, over 13027.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3097, pruned_loss=0.07694, ctc_loss=0.1434, over 3730840.69 frames. ], batch size: 140, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:45:13,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=79445.33333333333, ans=0.1
+2024-08-26 16:45:14,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=79445.33333333333, ans=0.125
+2024-08-26 16:45:20,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=79445.33333333333, ans=0.125
+2024-08-26 16:45:23,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=79498.66666666667, ans=0.0
+2024-08-26 16:45:28,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=79498.66666666667, ans=0.0
+2024-08-26 16:45:29,749 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.98 vs. limit=15.0
+2024-08-26 16:45:40,095 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.744e+02 1.902e+02 2.066e+02 3.652e+02, threshold=3.804e+02, percent-clipped=1.0
+2024-08-26 16:48:16,404 INFO [train.py:1114] (1/4) Epoch 7, batch 0, loss[loss=0.2319, simple_loss=0.2871, pruned_loss=0.06371, ctc_loss=0.1233, over 19404.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.2871, pruned_loss=0.06371, ctc_loss=0.1233, over 19404.00 frames. ], batch size: 48, lr: 2.14e-02, grad_scale: 32.0
+2024-08-26 16:48:16,405 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 16:48:29,565 INFO [train.py:1146] (1/4) Epoch 7, validation: loss=0.2068, simple_loss=0.2958, pruned_loss=0.04327, ctc_loss=0.07811, over 944034.00 frames.
+2024-08-26 16:48:29,566 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12882MB
+2024-08-26 16:48:41,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=79712.0, ans=0.125
+2024-08-26 16:49:14,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=79872.0, ans=0.125
+2024-08-26 16:49:19,280 INFO [train.py:1114] (1/4) Epoch 7, batch 50, loss[loss=0.2222, simple_loss=0.2772, pruned_loss=0.06038, ctc_loss=0.1163, over 19704.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3093, pruned_loss=0.07617, ctc_loss=0.1428, over 845399.71 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-26 16:49:19,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=79925.33333333333, ans=0.125
+2024-08-26 16:49:42,085 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.01 vs. limit=5.0
+2024-08-26 16:49:51,579 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.76 vs. limit=6.0
+2024-08-26 16:49:57,473 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.282e+02 1.584e+02 1.822e+02 2.089e+02 3.575e+02, threshold=3.645e+02, percent-clipped=0.0
+2024-08-26 16:50:07,015 INFO [train.py:1114] (1/4) Epoch 7, batch 100, loss[loss=0.2217, simple_loss=0.2852, pruned_loss=0.05732, ctc_loss=0.1091, over 19722.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3079, pruned_loss=0.07387, ctc_loss=0.1387, over 1499399.80 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:50:10,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=80192.0, ans=0.0
+2024-08-26 16:50:38,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80352.0, ans=0.1
+2024-08-26 16:50:52,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=80405.33333333333, ans=0.125
+2024-08-26 16:51:01,445 INFO [train.py:1114] (1/4) Epoch 7, batch 150, loss[loss=0.2158, simple_loss=0.2765, pruned_loss=0.0555, ctc_loss=0.1103, over 19722.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3038, pruned_loss=0.07157, ctc_loss=0.1345, over 2027591.77 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:51:07,973 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80458.66666666667, ans=0.1
+2024-08-26 16:51:08,332 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.40 vs. limit=15.0
+2024-08-26 16:51:14,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=80512.0, ans=0.2
+2024-08-26 16:51:25,748 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.93 vs. limit=10.0
+2024-08-26 16:51:33,084 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.68 vs. limit=15.0
+2024-08-26 16:51:38,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=80672.0, ans=0.125
+2024-08-26 16:51:39,013 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.222e+02 1.525e+02 1.667e+02 1.863e+02 2.878e+02, threshold=3.334e+02, percent-clipped=0.0
+2024-08-26 16:51:48,551 INFO [train.py:1114] (1/4) Epoch 7, batch 200, loss[loss=0.2692, simple_loss=0.325, pruned_loss=0.07792, ctc_loss=0.1439, over 18201.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3016, pruned_loss=0.07044, ctc_loss=0.1319, over 2435660.66 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:51:48,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=80725.33333333333, ans=0.025
+2024-08-26 16:52:17,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=80885.33333333333, ans=0.0
+2024-08-26 16:52:17,692 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.02 vs. limit=15.0
+2024-08-26 16:52:22,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=80885.33333333333, ans=0.1
+2024-08-26 16:52:35,140 INFO [train.py:1114] (1/4) Epoch 7, batch 250, loss[loss=0.2693, simple_loss=0.3193, pruned_loss=0.08021, ctc_loss=0.147, over 19374.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3003, pruned_loss=0.06949, ctc_loss=0.1304, over 2757003.56 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:52:35,653 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.07 vs. limit=15.0
+2024-08-26 16:52:39,056 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=80992.0, ans=0.125
+2024-08-26 16:53:01,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=81098.66666666667, ans=0.125
+2024-08-26 16:53:05,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=81152.0, ans=0.125
+2024-08-26 16:53:16,587 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.591e+02 1.729e+02 1.900e+02 5.825e+02, threshold=3.457e+02, percent-clipped=1.0
+2024-08-26 16:53:25,915 INFO [train.py:1114] (1/4) Epoch 7, batch 300, loss[loss=0.2584, simple_loss=0.31, pruned_loss=0.07499, ctc_loss=0.1422, over 19528.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.2998, pruned_loss=0.06936, ctc_loss=0.13, over 3002026.24 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-26 16:53:30,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=81258.66666666667, ans=0.125
+2024-08-26 16:54:05,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=81418.66666666667, ans=0.125
+2024-08-26 16:54:18,323 INFO [train.py:1114] (1/4) Epoch 7, batch 350, loss[loss=0.2016, simple_loss=0.2645, pruned_loss=0.05033, ctc_loss=0.09515, over 19747.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3003, pruned_loss=0.06949, ctc_loss=0.1301, over 3191705.25 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 16.0
+2024-08-26 16:54:56,441 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.574e+02 1.753e+02 2.022e+02 2.928e+02, threshold=3.506e+02, percent-clipped=0.0
+2024-08-26 16:55:04,699 INFO [train.py:1114] (1/4) Epoch 7, batch 400, loss[loss=0.257, simple_loss=0.3159, pruned_loss=0.07271, ctc_loss=0.1319, over 19508.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3001, pruned_loss=0.06942, ctc_loss=0.1297, over 3343929.88 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-26 16:55:05,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=81792.0, ans=0.125
+2024-08-26 16:55:16,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-26 16:55:21,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-26 16:55:33,510 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.70 vs. limit=6.0
+2024-08-26 16:55:39,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=81952.0, ans=0.95
+2024-08-26 16:55:43,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=82005.33333333333, ans=0.125
+2024-08-26 16:55:47,170 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=82005.33333333333, ans=0.0
+2024-08-26 16:55:51,735 INFO [train.py:1114] (1/4) Epoch 7, batch 450, loss[loss=0.2449, simple_loss=0.3047, pruned_loss=0.06851, ctc_loss=0.1202, over 19623.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3001, pruned_loss=0.06957, ctc_loss=0.13, over 3450438.20 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:56:05,164 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.39 vs. limit=6.0
+2024-08-26 16:56:10,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=82112.0, ans=0.125
+2024-08-26 16:56:11,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=82112.0, ans=0.0
+2024-08-26 16:56:21,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=82165.33333333333, ans=0.0
+2024-08-26 16:56:40,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=82272.0, ans=0.2
+2024-08-26 16:56:41,732 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 1.485e+02 1.753e+02 2.038e+02 3.855e+02, threshold=3.505e+02, percent-clipped=1.0
+2024-08-26 16:56:43,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=82272.0, ans=0.125
+2024-08-26 16:56:44,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=82272.0, ans=0.025
+2024-08-26 16:56:49,048 INFO [train.py:1114] (1/4) Epoch 7, batch 500, loss[loss=0.2523, simple_loss=0.3171, pruned_loss=0.06875, ctc_loss=0.1253, over 19678.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.2989, pruned_loss=0.06863, ctc_loss=0.1283, over 3547137.89 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:56:53,813 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=82325.33333333333, ans=0.125
+2024-08-26 16:57:23,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=82485.33333333333, ans=0.0
+2024-08-26 16:57:29,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=82538.66666666667, ans=0.125
+2024-08-26 16:57:31,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=82538.66666666667, ans=0.1
+2024-08-26 16:57:35,771 INFO [train.py:1114] (1/4) Epoch 7, batch 550, loss[loss=0.26, simple_loss=0.3187, pruned_loss=0.07244, ctc_loss=0.141, over 19309.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.2989, pruned_loss=0.06877, ctc_loss=0.1282, over 3609288.66 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:57:35,950 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=82592.0, ans=0.125
+2024-08-26 16:57:42,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=82592.0, ans=0.125
+2024-08-26 16:57:52,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=82645.33333333333, ans=0.125
+2024-08-26 16:57:59,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82698.66666666667, ans=0.1
+2024-08-26 16:58:13,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=82805.33333333333, ans=0.125
+2024-08-26 16:58:16,886 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.530e+02 1.701e+02 1.927e+02 4.407e+02, threshold=3.402e+02, percent-clipped=1.0
+2024-08-26 16:58:21,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=82805.33333333333, ans=0.0
+2024-08-26 16:58:30,194 INFO [train.py:1114] (1/4) Epoch 7, batch 600, loss[loss=0.2415, simple_loss=0.3046, pruned_loss=0.06423, ctc_loss=0.1248, over 19404.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2989, pruned_loss=0.06858, ctc_loss=0.1279, over 3666929.36 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:59:01,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=82858.66666666667, ans=0.0
+2024-08-26 17:00:43,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82912.0, ans=0.1
+2024-08-26 17:01:16,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=83018.66666666667, ans=0.025
+2024-08-26 17:04:50,232 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=83072.0, ans=0.07
+2024-08-26 17:04:51,935 INFO [train.py:1114] (1/4) Epoch 7, batch 650, loss[loss=0.2307, simple_loss=0.2929, pruned_loss=0.06108, ctc_loss=0.1158, over 19777.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2976, pruned_loss=0.06782, ctc_loss=0.1268, over 3716872.42 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:05:15,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=83178.66666666667, ans=0.125
+2024-08-26 17:05:41,844 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.294e+02 1.502e+02 1.666e+02 1.880e+02 3.682e+02, threshold=3.331e+02, percent-clipped=2.0
+2024-08-26 17:06:20,359 INFO [train.py:1114] (1/4) Epoch 7, batch 700, loss[loss=0.2264, simple_loss=0.2883, pruned_loss=0.05964, ctc_loss=0.113, over 19715.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2981, pruned_loss=0.068, ctc_loss=0.127, over 3748590.20 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:06:26,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=83392.0, ans=0.125
+2024-08-26 17:06:48,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=83552.0, ans=0.0
+2024-08-26 17:07:06,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=83605.33333333333, ans=0.125
+2024-08-26 17:07:08,431 INFO [train.py:1114] (1/4) Epoch 7, batch 750, loss[loss=0.2188, simple_loss=0.2889, pruned_loss=0.05435, ctc_loss=0.1003, over 19489.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.2979, pruned_loss=0.06789, ctc_loss=0.1268, over 3773990.72 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:07:08,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=83658.66666666667, ans=0.0
+2024-08-26 17:07:33,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=83765.33333333333, ans=0.0
+2024-08-26 17:07:37,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=83818.66666666667, ans=0.1
+2024-08-26 17:07:42,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=83818.66666666667, ans=0.125
+2024-08-26 17:07:45,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=83872.0, ans=0.125
+2024-08-26 17:07:48,230 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.281e+02 1.533e+02 1.678e+02 1.875e+02 3.166e+02, threshold=3.356e+02, percent-clipped=0.0
+2024-08-26 17:07:58,364 INFO [train.py:1114] (1/4) Epoch 7, batch 800, loss[loss=0.2192, simple_loss=0.2746, pruned_loss=0.05926, ctc_loss=0.1132, over 19412.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2979, pruned_loss=0.06808, ctc_loss=0.127, over 3794781.94 frames. ], batch size: 48, lr: 2.10e-02, grad_scale: 32.0
+2024-08-26 17:07:58,522 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=83925.33333333333, ans=0.2
+2024-08-26 17:08:06,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=83978.66666666667, ans=0.2
+2024-08-26 17:08:14,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=83978.66666666667, ans=0.0
+2024-08-26 17:08:17,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=83978.66666666667, ans=0.125
+2024-08-26 17:08:19,440 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.66 vs. limit=22.5
+2024-08-26 17:08:30,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=84032.0, ans=0.125
+2024-08-26 17:08:36,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=84085.33333333333, ans=0.2
+2024-08-26 17:08:37,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=84085.33333333333, ans=0.0
+2024-08-26 17:08:38,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=84085.33333333333, ans=0.04949747468305833
+2024-08-26 17:08:39,394 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.85 vs. limit=6.0
+2024-08-26 17:08:46,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=84138.66666666667, ans=0.2
+2024-08-26 17:08:53,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=84138.66666666667, ans=0.2
+2024-08-26 17:08:56,338 INFO [train.py:1114] (1/4) Epoch 7, batch 850, loss[loss=0.259, simple_loss=0.314, pruned_loss=0.07432, ctc_loss=0.1386, over 19653.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2973, pruned_loss=0.0677, ctc_loss=0.1264, over 3814384.82 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:09:32,793 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=84298.66666666667, ans=0.125
+2024-08-26 17:09:48,647 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=84298.66666666667, ans=0.0
+2024-08-26 17:09:49,741 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff3.min_abs, batch_count=84352.0, ans=0.2
+2024-08-26 17:09:59,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=84405.33333333333, ans=0.125
+2024-08-26 17:10:01,941 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.310e+02 1.545e+02 1.673e+02 1.909e+02 3.259e+02, threshold=3.346e+02, percent-clipped=0.0
+2024-08-26 17:10:09,590 INFO [train.py:1114] (1/4) Epoch 7, batch 900, loss[loss=0.2174, simple_loss=0.2757, pruned_loss=0.05797, ctc_loss=0.1078, over 19808.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.298, pruned_loss=0.06812, ctc_loss=0.1272, over 3818924.23 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:10:09,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=84458.66666666667, ans=0.0
+2024-08-26 17:10:10,310 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.02 vs. limit=6.0
+2024-08-26 17:10:14,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=84458.66666666667, ans=0.0
+2024-08-26 17:10:24,613 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.98 vs. limit=12.0
+2024-08-26 17:10:29,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=84565.33333333333, ans=0.0
+2024-08-26 17:10:29,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=84565.33333333333, ans=0.125
+2024-08-26 17:10:39,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=84618.66666666667, ans=0.0
+2024-08-26 17:10:58,452 INFO [train.py:1114] (1/4) Epoch 7, batch 950, loss[loss=0.2107, simple_loss=0.2676, pruned_loss=0.05638, ctc_loss=0.1025, over 19496.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.2986, pruned_loss=0.06859, ctc_loss=0.1281, over 3820132.42 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:10:58,697 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:11:21,787 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.51 vs. limit=15.0
+2024-08-26 17:11:22,318 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=84832.0, ans=0.0
+2024-08-26 17:11:43,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=84938.66666666667, ans=0.125
+2024-08-26 17:11:48,307 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.566e+02 1.708e+02 1.976e+02 3.572e+02, threshold=3.415e+02, percent-clipped=1.0
+2024-08-26 17:12:18,434 INFO [train.py:1114] (1/4) Epoch 7, batch 1000, loss[loss=0.225, simple_loss=0.2897, pruned_loss=0.0578, ctc_loss=0.1116, over 19875.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.2998, pruned_loss=0.06909, ctc_loss=0.129, over 3815457.84 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:12:18,824 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:12:22,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=84992.0, ans=0.1
+2024-08-26 17:12:26,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=84992.0, ans=0.125
+2024-08-26 17:12:28,346 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.24 vs. limit=15.0
+2024-08-26 17:13:44,249 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.20 vs. limit=15.0
+2024-08-26 17:13:45,048 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.91 vs. limit=15.0
+2024-08-26 17:13:59,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=85258.66666666667, ans=0.2
+2024-08-26 17:13:59,712 INFO [train.py:1114] (1/4) Epoch 7, batch 1050, loss[loss=0.2532, simple_loss=0.3122, pruned_loss=0.07017, ctc_loss=0.1346, over 19844.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.2987, pruned_loss=0.06847, ctc_loss=0.1278, over 3823090.38 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:13:59,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=85258.66666666667, ans=0.0
+2024-08-26 17:14:08,810 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.31 vs. limit=15.0
+2024-08-26 17:14:10,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=85312.0, ans=0.125
+2024-08-26 17:14:39,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=85312.0, ans=0.125
+2024-08-26 17:14:41,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=85312.0, ans=0.125
+2024-08-26 17:16:19,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=85365.33333333333, ans=0.0
+2024-08-26 17:16:26,854 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:16:29,540 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:16:34,233 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:16:35,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=85418.66666666667, ans=0.125
+2024-08-26 17:16:40,048 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=85472.0, ans=0.125
+2024-08-26 17:16:40,668 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.203e+02 1.449e+02 1.584e+02 1.768e+02 2.861e+02, threshold=3.169e+02, percent-clipped=0.0
+2024-08-26 17:16:42,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=85472.0, ans=0.125
+2024-08-26 17:16:46,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=85472.0, ans=0.125
+2024-08-26 17:16:47,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=85525.33333333333, ans=0.125
+2024-08-26 17:16:48,374 INFO [train.py:1114] (1/4) Epoch 7, batch 1100, loss[loss=0.2177, simple_loss=0.2859, pruned_loss=0.0543, ctc_loss=0.1024, over 19591.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2988, pruned_loss=0.06823, ctc_loss=0.1277, over 3830140.20 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:16:49,512 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85525.33333333333, ans=0.1
+2024-08-26 17:16:50,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=85525.33333333333, ans=0.2
+2024-08-26 17:17:14,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=85632.0, ans=0.025
+2024-08-26 17:17:32,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=85738.66666666667, ans=0.0
+2024-08-26 17:17:45,020 INFO [train.py:1114] (1/4) Epoch 7, batch 1150, loss[loss=0.2214, simple_loss=0.286, pruned_loss=0.05678, ctc_loss=0.1081, over 19582.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2986, pruned_loss=0.06834, ctc_loss=0.1281, over 3828421.18 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-26 17:17:54,664 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.56 vs. limit=6.0
+2024-08-26 17:18:02,545 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=85845.33333333333, ans=0.125
+2024-08-26 17:18:11,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=85898.66666666667, ans=0.0
+2024-08-26 17:18:16,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=85898.66666666667, ans=0.125
+2024-08-26 17:18:38,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=86005.33333333333, ans=0.0
+2024-08-26 17:18:41,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=86005.33333333333, ans=0.0
+2024-08-26 17:18:41,960 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.243e+02 1.522e+02 1.667e+02 1.891e+02 3.736e+02, threshold=3.335e+02, percent-clipped=2.0
+2024-08-26 17:18:48,629 INFO [train.py:1114] (1/4) Epoch 7, batch 1200, loss[loss=0.2387, simple_loss=0.3054, pruned_loss=0.06213, ctc_loss=0.1193, over 19830.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.299, pruned_loss=0.06853, ctc_loss=0.1284, over 3823710.99 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:19:00,318 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.89 vs. limit=15.0
+2024-08-26 17:19:00,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=86112.0, ans=0.125
+2024-08-26 17:19:11,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=86165.33333333333, ans=0.125
+2024-08-26 17:19:21,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=86165.33333333333, ans=0.1
+2024-08-26 17:19:28,806 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.58 vs. limit=15.0
+2024-08-26 17:19:43,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=86218.66666666667, ans=0.125
+2024-08-26 17:19:54,875 INFO [train.py:1114] (1/4) Epoch 7, batch 1250, loss[loss=0.2731, simple_loss=0.3292, pruned_loss=0.07806, ctc_loss=0.1522, over 19523.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2991, pruned_loss=0.06817, ctc_loss=0.1277, over 3842663.98 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:20:08,076 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.56 vs. limit=15.0
+2024-08-26 17:20:16,286 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.01 vs. limit=15.0
+2024-08-26 17:20:22,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=86485.33333333333, ans=0.125
+2024-08-26 17:20:25,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=86485.33333333333, ans=0.2
+2024-08-26 17:20:26,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=86485.33333333333, ans=0.125
+2024-08-26 17:20:35,652 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.476e+02 1.609e+02 1.857e+02 3.245e+02, threshold=3.218e+02, percent-clipped=0.0
+2024-08-26 17:20:35,894 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:20:42,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=86538.66666666667, ans=0.125
+2024-08-26 17:20:44,747 INFO [train.py:1114] (1/4) Epoch 7, batch 1300, loss[loss=0.2526, simple_loss=0.3103, pruned_loss=0.06965, ctc_loss=0.1389, over 18921.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2977, pruned_loss=0.06728, ctc_loss=0.126, over 3846659.20 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:20:47,349 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.87 vs. limit=12.0
+2024-08-26 17:20:55,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-26 17:21:08,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=86698.66666666667, ans=0.0
+2024-08-26 17:21:30,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=86805.33333333333, ans=0.0
+2024-08-26 17:21:31,095 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.12 vs. limit=12.0
+2024-08-26 17:21:31,869 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.17 vs. limit=15.0
+2024-08-26 17:21:38,922 INFO [train.py:1114] (1/4) Epoch 7, batch 1350, loss[loss=0.2209, simple_loss=0.2869, pruned_loss=0.05547, ctc_loss=0.1098, over 19786.00 frames. ], tot_loss[loss=0.241, simple_loss=0.2974, pruned_loss=0.06719, ctc_loss=0.1255, over 3858723.75 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:21:41,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=86858.66666666667, ans=0.015
+2024-08-26 17:21:58,072 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.73 vs. limit=15.0
+2024-08-26 17:22:03,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=86965.33333333333, ans=0.02
+2024-08-26 17:22:05,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=86965.33333333333, ans=0.125
+2024-08-26 17:22:08,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.00 vs. limit=6.0
+2024-08-26 17:22:09,438 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.66 vs. limit=22.5
+2024-08-26 17:22:19,561 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.495e+02 1.726e+02 1.992e+02 3.104e+02, threshold=3.452e+02, percent-clipped=0.0
+2024-08-26 17:22:26,108 INFO [train.py:1114] (1/4) Epoch 7, batch 1400, loss[loss=0.2054, simple_loss=0.2559, pruned_loss=0.05638, ctc_loss=0.1052, over 19649.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2965, pruned_loss=0.06684, ctc_loss=0.1249, over 3865468.57 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:22:26,347 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=87125.33333333333, ans=0.025
+2024-08-26 17:22:49,340 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.57 vs. limit=10.0
+2024-08-26 17:23:06,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=87285.33333333333, ans=0.125
+2024-08-26 17:23:23,769 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:23:28,443 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=87338.66666666667, ans=0.0
+2024-08-26 17:23:35,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=87392.0, ans=0.2
+2024-08-26 17:23:35,693 INFO [train.py:1114] (1/4) Epoch 7, batch 1450, loss[loss=0.2566, simple_loss=0.3152, pruned_loss=0.07232, ctc_loss=0.1334, over 19663.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2973, pruned_loss=0.06736, ctc_loss=0.1257, over 3863342.27 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:24:09,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=87552.0, ans=0.1
+2024-08-26 17:24:20,638 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.540e+02 1.669e+02 1.894e+02 3.453e+02, threshold=3.338e+02, percent-clipped=1.0
+2024-08-26 17:24:20,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=87605.33333333333, ans=0.0
+2024-08-26 17:24:29,672 INFO [train.py:1114] (1/4) Epoch 7, batch 1500, loss[loss=0.2441, simple_loss=0.2993, pruned_loss=0.06884, ctc_loss=0.1279, over 19582.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2979, pruned_loss=0.06768, ctc_loss=0.1263, over 3862746.88 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:24:40,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=87658.66666666667, ans=0.0
+2024-08-26 17:24:44,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=87712.0, ans=0.125
+2024-08-26 17:24:51,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=87765.33333333333, ans=0.125
+2024-08-26 17:24:52,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=87765.33333333333, ans=0.05
+2024-08-26 17:24:54,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=87765.33333333333, ans=0.125
+2024-08-26 17:24:58,541 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.80 vs. limit=10.0
+2024-08-26 17:25:04,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=87818.66666666667, ans=10.0
+2024-08-26 17:25:19,511 INFO [train.py:1114] (1/4) Epoch 7, batch 1550, loss[loss=0.2592, simple_loss=0.3145, pruned_loss=0.07457, ctc_loss=0.1368, over 19607.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2983, pruned_loss=0.06812, ctc_loss=0.1274, over 3848878.45 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:25:22,646 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=87925.33333333333, ans=0.0
+2024-08-26 17:25:27,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=87925.33333333333, ans=0.125
+2024-08-26 17:25:44,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=88032.0, ans=0.125
+2024-08-26 17:25:46,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.60 vs. limit=15.0
+2024-08-26 17:26:04,270 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.559e+02 1.788e+02 2.182e+02 5.116e+02, threshold=3.576e+02, percent-clipped=3.0
+2024-08-26 17:26:05,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=88138.66666666667, ans=0.0
+2024-08-26 17:26:10,200 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=88192.0, ans=0.1
+2024-08-26 17:26:10,453 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.07 vs. limit=10.0
+2024-08-26 17:26:10,939 INFO [train.py:1114] (1/4) Epoch 7, batch 1600, loss[loss=0.2461, simple_loss=0.3067, pruned_loss=0.06796, ctc_loss=0.1241, over 19853.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.2982, pruned_loss=0.06813, ctc_loss=0.1272, over 3837900.68 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:26:12,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=88192.0, ans=0.0
+2024-08-26 17:26:15,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=88192.0, ans=0.125
+2024-08-26 17:26:53,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=88405.33333333333, ans=0.125
+2024-08-26 17:26:59,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=88405.33333333333, ans=0.125
+2024-08-26 17:27:01,933 INFO [train.py:1114] (1/4) Epoch 7, batch 1650, loss[loss=0.2596, simple_loss=0.3072, pruned_loss=0.07809, ctc_loss=0.1393, over 19670.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.2979, pruned_loss=0.06819, ctc_loss=0.1274, over 3834353.21 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:28:12,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=88565.33333333333, ans=0.125
+2024-08-26 17:28:16,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=88565.33333333333, ans=0.2
+2024-08-26 17:28:23,678 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.42 vs. limit=12.0
+2024-08-26 17:28:59,405 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=88618.66666666667, ans=0.125
+2024-08-26 17:29:25,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=88672.0, ans=0.125
+2024-08-26 17:29:25,562 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.503e+02 1.653e+02 1.809e+02 2.992e+02, threshold=3.307e+02, percent-clipped=0.0
+2024-08-26 17:29:40,039 INFO [train.py:1114] (1/4) Epoch 7, batch 1700, loss[loss=0.2164, simple_loss=0.2693, pruned_loss=0.05867, ctc_loss=0.1155, over 19698.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2973, pruned_loss=0.06753, ctc_loss=0.1262, over 3848276.44 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:29:58,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=88725.33333333333, ans=0.0
+2024-08-26 17:30:14,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=88778.66666666667, ans=0.125
+2024-08-26 17:30:17,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=88832.0, ans=0.0
+2024-08-26 17:30:21,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=88832.0, ans=0.1
+2024-08-26 17:30:33,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=88885.33333333333, ans=0.0
+2024-08-26 17:30:34,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=88938.66666666667, ans=0.125
+2024-08-26 17:30:39,476 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.30 vs. limit=22.5
+2024-08-26 17:30:44,520 INFO [train.py:1114] (1/4) Epoch 7, batch 1750, loss[loss=0.1973, simple_loss=0.2572, pruned_loss=0.05041, ctc_loss=0.09139, over 19625.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.2966, pruned_loss=0.06719, ctc_loss=0.1256, over 3852421.63 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:31:08,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=89098.66666666667, ans=0.1
+2024-08-26 17:31:18,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=89152.0, ans=0.0
+2024-08-26 17:31:18,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=89152.0, ans=0.125
+2024-08-26 17:31:22,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=89205.33333333333, ans=0.0
+2024-08-26 17:31:23,267 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.487e+02 1.622e+02 1.808e+02 3.869e+02, threshold=3.245e+02, percent-clipped=1.0
+2024-08-26 17:31:24,366 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=89205.33333333333, ans=0.125
+2024-08-26 17:31:29,441 INFO [train.py:1114] (1/4) Epoch 7, batch 1800, loss[loss=0.2506, simple_loss=0.3115, pruned_loss=0.06988, ctc_loss=0.1251, over 19609.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.2969, pruned_loss=0.06723, ctc_loss=0.1256, over 3853282.55 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:31:30,053 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.26 vs. limit=15.0
+2024-08-26 17:31:34,381 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.11 vs. limit=22.5
+2024-08-26 17:31:47,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=89365.33333333333, ans=0.125
+2024-08-26 17:31:57,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=89418.66666666667, ans=0.025
+2024-08-26 17:31:58,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=89418.66666666667, ans=0.125
+2024-08-26 17:32:14,096 INFO [train.py:1114] (1/4) Epoch 7, batch 1850, loss[loss=0.24, simple_loss=0.3069, pruned_loss=0.06274, ctc_loss=0.1192, over 19570.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.2968, pruned_loss=0.06717, ctc_loss=0.1254, over 3857561.30 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:32:19,596 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:32:22,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89578.66666666667, ans=0.1
+2024-08-26 17:32:23,538 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.27 vs. limit=10.0
+2024-08-26 17:32:52,570 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.39 vs. limit=10.0
+2024-08-26 17:32:55,556 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.590e+02 1.759e+02 2.003e+02 3.443e+02, threshold=3.517e+02, percent-clipped=1.0
+2024-08-26 17:33:01,829 INFO [train.py:1114] (1/4) Epoch 7, batch 1900, loss[loss=0.2343, simple_loss=0.2985, pruned_loss=0.06239, ctc_loss=0.1135, over 19656.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2977, pruned_loss=0.06736, ctc_loss=0.1256, over 3862690.98 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:33:02,489 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.21 vs. limit=15.0
+2024-08-26 17:33:03,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=89792.0, ans=0.2
+2024-08-26 17:35:03,776 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=89898.66666666667, ans=0.0
+2024-08-26 17:35:15,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=90005.33333333333, ans=0.0
+2024-08-26 17:35:19,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=90005.33333333333, ans=0.125
+2024-08-26 17:35:21,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=90005.33333333333, ans=0.035
+2024-08-26 17:35:23,459 INFO [train.py:1114] (1/4) Epoch 7, batch 1950, loss[loss=0.2099, simple_loss=0.2762, pruned_loss=0.0524, ctc_loss=0.09679, over 19577.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.2979, pruned_loss=0.0668, ctc_loss=0.1248, over 3871580.56 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:35:39,542 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=90112.0, ans=0.07
+2024-08-26 17:35:53,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=90218.66666666667, ans=0.125
+2024-08-26 17:35:57,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=90218.66666666667, ans=0.125
+2024-08-26 17:36:03,244 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.531e+02 1.657e+02 1.854e+02 3.915e+02, threshold=3.314e+02, percent-clipped=1.0
+2024-08-26 17:36:09,473 INFO [train.py:1114] (1/4) Epoch 7, batch 2000, loss[loss=0.2114, simple_loss=0.2717, pruned_loss=0.05604, ctc_loss=0.09772, over 19656.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2989, pruned_loss=0.0676, ctc_loss=0.1263, over 3855939.06 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:36:13,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=90325.33333333333, ans=0.125
+2024-08-26 17:36:19,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=90378.66666666667, ans=0.07
+2024-08-26 17:36:21,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=90378.66666666667, ans=0.125
+2024-08-26 17:36:22,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=90378.66666666667, ans=0.0
+2024-08-26 17:36:36,778 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=90485.33333333333, ans=0.125
+2024-08-26 17:36:43,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=90485.33333333333, ans=0.2
+2024-08-26 17:36:51,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=90538.66666666667, ans=0.0
+2024-08-26 17:36:53,972 INFO [train.py:1114] (1/4) Epoch 7, batch 2050, loss[loss=0.226, simple_loss=0.2808, pruned_loss=0.06147, ctc_loss=0.1207, over 19726.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.2973, pruned_loss=0.06703, ctc_loss=0.1254, over 3852162.00 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:36:54,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=90592.0, ans=0.05
+2024-08-26 17:36:58,576 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90592.0, ans=0.1
+2024-08-26 17:37:04,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=90645.33333333333, ans=0.1
+2024-08-26 17:37:18,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=90698.66666666667, ans=0.0
+2024-08-26 17:37:19,347 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=90752.0, ans=0.0
+2024-08-26 17:37:22,796 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=90752.0, ans=0.0
+2024-08-26 17:37:25,744 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.61 vs. limit=15.0
+2024-08-26 17:37:29,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=90805.33333333333, ans=0.1
+2024-08-26 17:37:29,443 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.05 vs. limit=15.0
+2024-08-26 17:37:30,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=90805.33333333333, ans=0.025
+2024-08-26 17:37:32,300 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.477e+02 1.642e+02 1.962e+02 4.346e+02, threshold=3.284e+02, percent-clipped=3.0
+2024-08-26 17:37:38,476 INFO [train.py:1114] (1/4) Epoch 7, batch 2100, loss[loss=0.212, simple_loss=0.2777, pruned_loss=0.05222, ctc_loss=0.1049, over 19754.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2963, pruned_loss=0.06605, ctc_loss=0.1236, over 3858882.17 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:37:41,562 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=90858.66666666667, ans=0.0
+2024-08-26 17:37:55,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=90912.0, ans=0.0
+2024-08-26 17:38:08,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=91018.66666666667, ans=0.09899494936611666
+2024-08-26 17:38:10,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91018.66666666667, ans=0.1
+2024-08-26 17:38:13,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-26 17:38:13,421 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=91018.66666666667, ans=0.2
+2024-08-26 17:38:13,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=91018.66666666667, ans=0.125
+2024-08-26 17:38:16,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=91018.66666666667, ans=0.1
+2024-08-26 17:38:24,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=91072.0, ans=0.2
+2024-08-26 17:38:26,427 INFO [train.py:1114] (1/4) Epoch 7, batch 2150, loss[loss=0.214, simple_loss=0.2788, pruned_loss=0.05428, ctc_loss=0.1015, over 19584.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.2956, pruned_loss=0.06594, ctc_loss=0.1233, over 3869609.55 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:38:28,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=91125.33333333333, ans=0.125
+2024-08-26 17:38:34,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-26 17:38:40,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-26 17:38:42,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-26 17:38:42,497 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-26 17:38:50,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=91232.0, ans=0.125
+2024-08-26 17:38:50,767 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.87 vs. limit=6.0
+2024-08-26 17:39:04,139 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.485e+02 1.702e+02 1.931e+02 2.999e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-26 17:39:07,383 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.06 vs. limit=15.0
+2024-08-26 17:39:08,213 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.68 vs. limit=15.0
+2024-08-26 17:39:10,355 INFO [train.py:1114] (1/4) Epoch 7, batch 2200, loss[loss=0.2706, simple_loss=0.3207, pruned_loss=0.07984, ctc_loss=0.1521, over 19580.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.2958, pruned_loss=0.06601, ctc_loss=0.1232, over 3868263.55 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:39:19,804 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.65 vs. limit=12.0
+2024-08-26 17:39:27,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=91498.66666666667, ans=0.125
+2024-08-26 17:39:27,285 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=91498.66666666667, ans=0.0
+2024-08-26 17:39:32,937 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.94 vs. limit=15.0
+2024-08-26 17:39:51,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91605.33333333333, ans=0.1
+2024-08-26 17:39:54,462 INFO [train.py:1114] (1/4) Epoch 7, batch 2250, loss[loss=0.2437, simple_loss=0.3024, pruned_loss=0.06727, ctc_loss=0.126, over 19603.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.2959, pruned_loss=0.06601, ctc_loss=0.1234, over 3867162.72 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:40:05,413 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.15 vs. limit=15.0
+2024-08-26 17:40:21,444 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.17 vs. limit=15.0
+2024-08-26 17:40:27,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=91818.66666666667, ans=0.125
+2024-08-26 17:40:30,947 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=91872.0, ans=0.025
+2024-08-26 17:40:32,441 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.538e+02 1.708e+02 1.997e+02 3.315e+02, threshold=3.416e+02, percent-clipped=0.0
+2024-08-26 17:40:33,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=91872.0, ans=0.125
+2024-08-26 17:40:36,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=91872.0, ans=0.125
+2024-08-26 17:40:37,856 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=91925.33333333333, ans=10.0
+2024-08-26 17:40:38,577 INFO [train.py:1114] (1/4) Epoch 7, batch 2300, loss[loss=0.2236, simple_loss=0.2803, pruned_loss=0.06041, ctc_loss=0.1152, over 19505.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.2955, pruned_loss=0.06604, ctc_loss=0.1234, over 3861012.41 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:40:40,446 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=91925.33333333333, ans=0.0
+2024-08-26 17:40:40,553 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=91925.33333333333, ans=0.2
+2024-08-26 17:40:46,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=91978.66666666667, ans=0.125
+2024-08-26 17:40:55,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.87 vs. limit=12.0
+2024-08-26 17:41:04,113 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.11 vs. limit=15.0
+2024-08-26 17:41:15,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=92138.66666666667, ans=0.125
+2024-08-26 17:41:16,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=92138.66666666667, ans=0.0
+2024-08-26 17:41:22,373 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.36 vs. limit=15.0
+2024-08-26 17:41:22,874 INFO [train.py:1114] (1/4) Epoch 7, batch 2350, loss[loss=0.2596, simple_loss=0.3183, pruned_loss=0.07255, ctc_loss=0.1396, over 19666.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.2956, pruned_loss=0.06626, ctc_loss=0.1237, over 3863615.91 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:41:29,988 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=92192.0, ans=0.0
+2024-08-26 17:41:32,875 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.73 vs. limit=15.0
+2024-08-26 17:41:43,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-26 17:41:47,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=92298.66666666667, ans=0.0
+2024-08-26 17:41:58,435 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:42:01,683 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.248e+02 1.515e+02 1.664e+02 1.862e+02 3.479e+02, threshold=3.327e+02, percent-clipped=1.0
+2024-08-26 17:42:06,888 INFO [train.py:1114] (1/4) Epoch 7, batch 2400, loss[loss=0.2728, simple_loss=0.3281, pruned_loss=0.07864, ctc_loss=0.1505, over 19295.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.2973, pruned_loss=0.06707, ctc_loss=0.125, over 3857314.72 frames. ], batch size: 71, lr: 2.01e-02, grad_scale: 32.0
+2024-08-26 17:42:13,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=92458.66666666667, ans=0.125
+2024-08-26 17:42:13,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=92458.66666666667, ans=0.1
+2024-08-26 17:42:22,738 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=92512.0, ans=0.025
+2024-08-26 17:42:53,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=92672.0, ans=0.1
+2024-08-26 17:42:53,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=92672.0, ans=0.125
+2024-08-26 17:42:55,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=92725.33333333333, ans=0.125
+2024-08-26 17:42:56,042 INFO [train.py:1114] (1/4) Epoch 7, batch 2450, loss[loss=0.3526, simple_loss=0.359, pruned_loss=0.1259, ctc_loss=0.2362, over 13294.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3022, pruned_loss=0.07103, ctc_loss=0.1327, over 3729139.51 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 16.0
+2024-08-26 17:43:07,203 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=92778.66666666667, ans=0.1
+2024-08-26 17:43:22,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=92885.33333333333, ans=0.0
+2024-08-26 17:43:27,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=92885.33333333333, ans=0.5
+2024-08-26 17:44:23,155 INFO [train.py:1114] (1/4) Epoch 8, batch 0, loss[loss=0.2237, simple_loss=0.2826, pruned_loss=0.05971, ctc_loss=0.1135, over 19413.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2826, pruned_loss=0.05971, ctc_loss=0.1135, over 19413.00 frames. ], batch size: 48, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:44:23,155 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 17:44:49,260 INFO [train.py:1146] (1/4) Epoch 8, validation: loss=0.2003, simple_loss=0.2903, pruned_loss=0.04062, ctc_loss=0.07268, over 944034.00 frames.
+2024-08-26 17:44:49,261 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12882MB
+2024-08-26 17:44:55,029 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.675e+02 1.918e+02 2.084e+02 4.365e+02, threshold=3.836e+02, percent-clipped=1.0
+2024-08-26 17:45:17,329 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=7.38 vs. limit=12.0
+2024-08-26 17:45:28,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93040.0, ans=0.1
+2024-08-26 17:45:38,742 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=93146.66666666667, ans=0.125
+2024-08-26 17:45:51,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=93146.66666666667, ans=0.025
+2024-08-26 17:45:54,269 INFO [train.py:1114] (1/4) Epoch 8, batch 50, loss[loss=0.2205, simple_loss=0.2716, pruned_loss=0.06251, ctc_loss=0.1112, over 19725.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3016, pruned_loss=0.06955, ctc_loss=0.131, over 845284.01 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:46:23,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=93360.0, ans=0.0
+2024-08-26 17:46:23,674 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.94 vs. limit=15.0
+2024-08-26 17:46:25,843 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.75 vs. limit=22.5
+2024-08-26 17:46:42,909 INFO [train.py:1114] (1/4) Epoch 8, batch 100, loss[loss=0.2296, simple_loss=0.287, pruned_loss=0.06211, ctc_loss=0.1198, over 19716.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3013, pruned_loss=0.06867, ctc_loss=0.1293, over 1500494.30 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:46:47,941 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=93466.66666666667, ans=0.1
+2024-08-26 17:46:48,505 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.574e+02 1.749e+02 2.053e+02 3.512e+02, threshold=3.498e+02, percent-clipped=0.0
+2024-08-26 17:47:02,881 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=93520.0, ans=0.0
+2024-08-26 17:47:04,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=93573.33333333333, ans=0.2
+2024-08-26 17:47:20,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=93626.66666666667, ans=0.125
+2024-08-26 17:47:21,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=93626.66666666667, ans=0.125
+2024-08-26 17:47:26,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=93680.0, ans=0.0
+2024-08-26 17:47:32,166 INFO [train.py:1114] (1/4) Epoch 8, batch 150, loss[loss=0.2076, simple_loss=0.2653, pruned_loss=0.05499, ctc_loss=0.09963, over 19719.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2968, pruned_loss=0.06667, ctc_loss=0.125, over 2028940.94 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:47:41,612 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=93786.66666666667, ans=0.125
+2024-08-26 17:47:50,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=93840.0, ans=0.125
+2024-08-26 17:47:51,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=93840.0, ans=0.0
+2024-08-26 17:47:51,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=93840.0, ans=0.125
+2024-08-26 17:48:02,878 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.80 vs. limit=15.0
+2024-08-26 17:48:04,730 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.31 vs. limit=22.5
+2024-08-26 17:48:20,173 INFO [train.py:1114] (1/4) Epoch 8, batch 200, loss[loss=0.2403, simple_loss=0.298, pruned_loss=0.0662, ctc_loss=0.1257, over 18206.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.2944, pruned_loss=0.06531, ctc_loss=0.1221, over 2436792.66 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:48:25,556 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.434e+02 1.574e+02 1.787e+02 2.973e+02, threshold=3.148e+02, percent-clipped=0.0
+2024-08-26 17:48:32,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=94053.33333333333, ans=0.125
+2024-08-26 17:48:49,760 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=94106.66666666667, ans=0.025
+2024-08-26 17:49:12,241 INFO [train.py:1114] (1/4) Epoch 8, batch 250, loss[loss=0.2489, simple_loss=0.3051, pruned_loss=0.06943, ctc_loss=0.1344, over 19411.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2934, pruned_loss=0.06451, ctc_loss=0.1205, over 2756344.82 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:49:17,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=94266.66666666667, ans=0.125
+2024-08-26 17:49:24,837 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:49:27,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=94320.0, ans=0.0
+2024-08-26 17:49:36,079 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.72 vs. limit=15.0
+2024-08-26 17:49:38,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=94373.33333333333, ans=0.2
+2024-08-26 17:49:38,971 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.52 vs. limit=15.0
+2024-08-26 17:50:02,615 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:50:03,371 INFO [train.py:1114] (1/4) Epoch 8, batch 300, loss[loss=0.2337, simple_loss=0.2984, pruned_loss=0.06138, ctc_loss=0.1158, over 19536.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.2918, pruned_loss=0.06351, ctc_loss=0.1187, over 3000549.85 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:50:09,198 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.482e+02 1.652e+02 1.879e+02 4.693e+02, threshold=3.305e+02, percent-clipped=1.0
+2024-08-26 17:50:17,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=94586.66666666667, ans=0.0
+2024-08-26 17:50:28,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=94640.0, ans=0.125
+2024-08-26 17:50:30,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=94640.0, ans=0.025
+2024-08-26 17:50:31,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=94693.33333333333, ans=0.0
+2024-08-26 17:50:50,288 INFO [train.py:1114] (1/4) Epoch 8, batch 350, loss[loss=0.2153, simple_loss=0.2696, pruned_loss=0.05933, ctc_loss=0.1059, over 19761.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.293, pruned_loss=0.06379, ctc_loss=0.1191, over 3191442.03 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:51:04,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-26 17:51:04,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=94853.33333333333, ans=0.5
+2024-08-26 17:51:10,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=94906.66666666667, ans=0.1
+2024-08-26 17:51:16,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=94906.66666666667, ans=0.0
+2024-08-26 17:51:59,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=95013.33333333333, ans=0.0
+2024-08-26 17:52:17,664 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=95013.33333333333, ans=0.05
+2024-08-26 17:52:19,168 INFO [train.py:1114] (1/4) Epoch 8, batch 400, loss[loss=0.2349, simple_loss=0.2929, pruned_loss=0.06506, ctc_loss=0.117, over 19491.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2924, pruned_loss=0.06342, ctc_loss=0.1187, over 3343449.44 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:52:24,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=95066.66666666667, ans=0.2
+2024-08-26 17:52:24,625 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 1.574e+02 1.829e+02 2.059e+02 4.627e+02, threshold=3.659e+02, percent-clipped=2.0
+2024-08-26 17:52:29,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.86 vs. limit=6.0
+2024-08-26 17:52:38,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=95173.33333333333, ans=0.2
+2024-08-26 17:52:39,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=95173.33333333333, ans=0.125
+2024-08-26 17:52:44,886 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=95173.33333333333, ans=0.0
+2024-08-26 17:52:56,744 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.93 vs. limit=15.0
+2024-08-26 17:53:05,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=95280.0, ans=0.0
+2024-08-26 17:53:08,493 INFO [train.py:1114] (1/4) Epoch 8, batch 450, loss[loss=0.241, simple_loss=0.3128, pruned_loss=0.06092, ctc_loss=0.1183, over 19623.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2923, pruned_loss=0.06351, ctc_loss=0.1188, over 3451718.36 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:53:12,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=95333.33333333333, ans=0.125
+2024-08-26 17:53:13,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=95333.33333333333, ans=0.1
+2024-08-26 17:53:13,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=95333.33333333333, ans=0.0
+2024-08-26 17:53:14,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=95333.33333333333, ans=0.1
+2024-08-26 17:53:38,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=95493.33333333333, ans=0.125
+2024-08-26 17:53:44,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten.whitening_limit, batch_count=95493.33333333333, ans=22.5
+2024-08-26 17:53:45,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=95493.33333333333, ans=0.0
+2024-08-26 17:53:53,888 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.00 vs. limit=15.0
+2024-08-26 17:53:58,072 INFO [train.py:1114] (1/4) Epoch 8, batch 500, loss[loss=0.2504, simple_loss=0.3091, pruned_loss=0.06897, ctc_loss=0.1343, over 19638.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.291, pruned_loss=0.06309, ctc_loss=0.1183, over 3547664.10 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:54:03,653 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.468e+02 1.609e+02 1.778e+02 4.606e+02, threshold=3.218e+02, percent-clipped=1.0
+2024-08-26 17:54:07,242 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.49 vs. limit=15.0
+2024-08-26 17:54:07,891 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=95653.33333333333, ans=0.125
+2024-08-26 17:54:08,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=95653.33333333333, ans=0.0
+2024-08-26 17:54:26,160 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.73 vs. limit=15.0
+2024-08-26 17:54:42,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=95760.0, ans=0.2
+2024-08-26 17:56:20,603 INFO [train.py:1114] (1/4) Epoch 8, batch 550, loss[loss=0.2529, simple_loss=0.3077, pruned_loss=0.07144, ctc_loss=0.1378, over 19289.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.292, pruned_loss=0.06367, ctc_loss=0.1192, over 3609336.19 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:56:21,126 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.32 vs. limit=15.0
+2024-08-26 17:57:15,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=95866.66666666667, ans=0.125
+2024-08-26 17:57:15,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=95866.66666666667, ans=0.125
+2024-08-26 17:57:30,900 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.22 vs. limit=15.0
+2024-08-26 17:57:35,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=95973.33333333333, ans=0.125
+2024-08-26 17:57:58,502 INFO [train.py:1114] (1/4) Epoch 8, batch 600, loss[loss=0.2479, simple_loss=0.3126, pruned_loss=0.06656, ctc_loss=0.1252, over 19378.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2923, pruned_loss=0.06378, ctc_loss=0.1192, over 3666215.50 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:57:59,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=96133.33333333333, ans=0.125
+2024-08-26 17:58:05,965 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.508e+02 1.654e+02 1.896e+02 3.415e+02, threshold=3.309e+02, percent-clipped=1.0
+2024-08-26 17:58:24,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=96240.0, ans=0.0
+2024-08-26 17:58:49,400 INFO [train.py:1114] (1/4) Epoch 8, batch 650, loss[loss=0.2298, simple_loss=0.2902, pruned_loss=0.05949, ctc_loss=0.126, over 19759.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2915, pruned_loss=0.06335, ctc_loss=0.1186, over 3716209.91 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 17:58:55,293 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=96400.0, ans=0.1
+2024-08-26 17:59:01,668 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96453.33333333333, ans=0.1
+2024-08-26 17:59:07,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=96506.66666666667, ans=0.0
+2024-08-26 17:59:13,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=96506.66666666667, ans=0.0
+2024-08-26 17:59:16,481 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=96560.0, ans=0.025
+2024-08-26 17:59:28,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=96613.33333333333, ans=0.0
+2024-08-26 17:59:33,493 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=96613.33333333333, ans=0.125
+2024-08-26 17:59:34,688 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.63 vs. limit=15.0
+2024-08-26 17:59:36,090 INFO [train.py:1114] (1/4) Epoch 8, batch 700, loss[loss=0.2159, simple_loss=0.2761, pruned_loss=0.05636, ctc_loss=0.1074, over 19719.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2921, pruned_loss=0.06358, ctc_loss=0.1191, over 3749112.39 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 17:59:39,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=96666.66666666667, ans=0.5
+2024-08-26 17:59:41,809 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.481e+02 1.644e+02 1.817e+02 3.294e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-26 17:59:47,999 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.27 vs. limit=12.0
+2024-08-26 18:00:27,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=96933.33333333333, ans=0.04949747468305833
+2024-08-26 18:00:27,685 INFO [train.py:1114] (1/4) Epoch 8, batch 750, loss[loss=0.2438, simple_loss=0.3061, pruned_loss=0.06629, ctc_loss=0.122, over 19493.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2919, pruned_loss=0.06364, ctc_loss=0.1191, over 3774857.95 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 18:00:29,912 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.37 vs. limit=15.0
+2024-08-26 18:00:31,922 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.70 vs. limit=10.0
+2024-08-26 18:00:41,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=96986.66666666667, ans=0.125
+2024-08-26 18:00:44,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.whiten.whitening_limit, batch_count=96986.66666666667, ans=15.0
+2024-08-26 18:00:45,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.77 vs. limit=15.0
+2024-08-26 18:00:55,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.88 vs. limit=6.0
+2024-08-26 18:01:19,063 INFO [train.py:1114] (1/4) Epoch 8, batch 800, loss[loss=0.205, simple_loss=0.2641, pruned_loss=0.0537, ctc_loss=0.09616, over 19804.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.292, pruned_loss=0.06365, ctc_loss=0.119, over 3796328.98 frames. ], batch size: 49, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 18:01:24,569 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.172e+02 1.524e+02 1.729e+02 2.039e+02 3.596e+02, threshold=3.457e+02, percent-clipped=1.0
+2024-08-26 18:01:24,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=97200.0, ans=0.0
+2024-08-26 18:01:26,024 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.66 vs. limit=6.0
+2024-08-26 18:01:37,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=97306.66666666667, ans=0.2
+2024-08-26 18:01:42,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97306.66666666667, ans=0.1
+2024-08-26 18:01:45,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=97306.66666666667, ans=0.125
+2024-08-26 18:01:53,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=97360.0, ans=0.025
+2024-08-26 18:02:06,303 INFO [train.py:1114] (1/4) Epoch 8, batch 850, loss[loss=0.2635, simple_loss=0.3145, pruned_loss=0.07613, ctc_loss=0.1507, over 19646.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.2918, pruned_loss=0.06347, ctc_loss=0.1185, over 3815639.42 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-26 18:02:06,478 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=97466.66666666667, ans=0.2
+2024-08-26 18:02:10,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=97466.66666666667, ans=0.125
+2024-08-26 18:02:11,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.68 vs. limit=6.0
+2024-08-26 18:02:11,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=97466.66666666667, ans=0.125
+2024-08-26 18:02:21,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.84 vs. limit=15.0
+2024-08-26 18:02:22,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=97520.0, ans=0.125
+2024-08-26 18:02:23,105 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
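+
+The WithLoss lines track auxiliary penalties attached to intermediate activations (here the self-attention weights); loss-sum=0.000e+00 just means no penalty had accrued at that point. The usual trick for attaching such a penalty without changing the forward value is an identity autograd function that injects the extra gradient on the backward pass; a hypothetical minimal version (names and the entropy_penalty helper in the usage line are illustrative):
+
+    import torch
+
+    class WithAuxLoss(torch.autograd.Function):
+        @staticmethod
+        def forward(ctx, x: torch.Tensor, aux_loss: torch.Tensor) -> torch.Tensor:
+            ctx.save_for_backward(aux_loss)
+            return x.view_as(x)       # identity: forward value is untouched
+
+        @staticmethod
+        def backward(ctx, grad_out):
+            (aux_loss,) = ctx.saved_tensors
+            # returning 1 as the grad w.r.t. aux_loss effectively adds
+            # aux_loss to the objective whenever backward passes through here
+            return grad_out, torch.ones_like(aux_loss)
+
+    # usage sketch: attn = WithAuxLoss.apply(attn, entropy_penalty(attn))
+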
+2024-08-26 18:02:33,373 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=97626.66666666667, ans=0.0
+2024-08-26 18:02:33,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=97626.66666666667, ans=0.5
+2024-08-26 18:02:39,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=97626.66666666667, ans=0.125
+2024-08-26 18:02:53,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=97680.0, ans=0.125
+2024-08-26 18:02:57,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=97733.33333333333, ans=0.0
+2024-08-26 18:02:58,337 INFO [train.py:1114] (1/4) Epoch 8, batch 900, loss[loss=0.2188, simple_loss=0.2759, pruned_loss=0.05863, ctc_loss=0.1108, over 19428.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2923, pruned_loss=0.06379, ctc_loss=0.119, over 3820034.18 frames. ], batch size: 48, lr: 1.85e-02, grad_scale: 32.0
+2024-08-26 18:03:02,389 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=97733.33333333333, ans=0.125
+2024-08-26 18:03:03,993 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.578e+02 1.704e+02 2.106e+02 3.434e+02, threshold=3.409e+02, percent-clipped=0.0
+2024-08-26 18:03:06,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=97733.33333333333, ans=0.125
+2024-08-26 18:03:42,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=97946.66666666667, ans=0.125
+2024-08-26 18:03:45,507 INFO [train.py:1114] (1/4) Epoch 8, batch 950, loss[loss=0.2052, simple_loss=0.2638, pruned_loss=0.05438, ctc_loss=0.09453, over 19506.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.2927, pruned_loss=0.06407, ctc_loss=0.1197, over 3821119.37 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 16.0
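+
+Note the grad_scale column stepping from 32.0 down to 16.0 at this batch: with dynamic loss scaling for mixed-precision training, the scale is halved whenever non-finite gradients are detected and grown back after a run of clean steps (PyTorch's GradScaler implements the production version of this rule). A minimal sketch of the update:
+
+    def update_scale(scale: float, found_inf: bool, clean_steps: int,
+                     growth_interval: int = 2000):
+        if found_inf:
+            return scale * 0.5, 0     # back off and reset the clean-step run
+        clean_steps += 1
+        if clean_steps >= growth_interval:
+            return scale * 2.0, 0     # cautiously grow the scale again
+        return scale, clean_steps
+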
+2024-08-26 18:03:51,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=98000.0, ans=0.1
+2024-08-26 18:04:07,400 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.05 vs. limit=15.0
+2024-08-26 18:04:26,964 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.97 vs. limit=15.0
+2024-08-26 18:04:37,644 INFO [train.py:1114] (1/4) Epoch 8, batch 1000, loss[loss=0.2121, simple_loss=0.2737, pruned_loss=0.05451, ctc_loss=0.1036, over 19838.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.2936, pruned_loss=0.06428, ctc_loss=0.1203, over 3817448.78 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:04:44,376 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.497e+02 1.652e+02 1.874e+02 4.992e+02, threshold=3.305e+02, percent-clipped=2.0
+2024-08-26 18:04:45,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=98266.66666666667, ans=0.125
+2024-08-26 18:04:49,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=98320.0, ans=0.0
+2024-08-26 18:04:50,431 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:04:54,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=98320.0, ans=0.1
+2024-08-26 18:04:58,375 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.17 vs. limit=22.5
+2024-08-26 18:05:01,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=98373.33333333333, ans=0.125
+2024-08-26 18:05:09,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=98426.66666666667, ans=0.125
+2024-08-26 18:05:10,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=23.21 vs. limit=22.5
+2024-08-26 18:05:15,986 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.62 vs. limit=22.5
+2024-08-26 18:05:20,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=98480.0, ans=0.0
+2024-08-26 18:05:22,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=98480.0, ans=0.0
+2024-08-26 18:05:24,678 INFO [train.py:1114] (1/4) Epoch 8, batch 1050, loss[loss=0.2367, simple_loss=0.299, pruned_loss=0.06408, ctc_loss=0.1155, over 19833.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.2928, pruned_loss=0.06406, ctc_loss=0.1198, over 3823813.01 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:05:36,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=98586.66666666667, ans=0.5
+2024-08-26 18:05:48,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=98586.66666666667, ans=0.1
+2024-08-26 18:06:02,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=98693.33333333333, ans=0.1
+2024-08-26 18:06:03,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=98693.33333333333, ans=0.0
+2024-08-26 18:06:05,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=98693.33333333333, ans=0.025
+2024-08-26 18:06:07,675 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:06:18,145 INFO [train.py:1114] (1/4) Epoch 8, batch 1100, loss[loss=0.2166, simple_loss=0.2776, pruned_loss=0.05648, ctc_loss=0.1067, over 19576.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2922, pruned_loss=0.0635, ctc_loss=0.1188, over 3831686.92 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 16.0
+2024-08-26 18:06:24,661 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.469e+02 1.560e+02 1.744e+02 3.443e+02, threshold=3.121e+02, percent-clipped=2.0
+2024-08-26 18:06:28,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=98853.33333333333, ans=0.125
+2024-08-26 18:06:36,477 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.21 vs. limit=10.0
+2024-08-26 18:06:37,198 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=98906.66666666667, ans=0.1
+2024-08-26 18:06:44,053 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.05 vs. limit=15.0
+2024-08-26 18:06:46,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=98906.66666666667, ans=0.125
+2024-08-26 18:06:49,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=98960.0, ans=0.125
+2024-08-26 18:07:00,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99013.33333333333, ans=0.1
+2024-08-26 18:07:10,068 INFO [train.py:1114] (1/4) Epoch 8, batch 1150, loss[loss=0.2302, simple_loss=0.2865, pruned_loss=0.06339, ctc_loss=0.1178, over 19597.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2926, pruned_loss=0.06368, ctc_loss=0.1192, over 3830909.21 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 16.0
+2024-08-26 18:07:12,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=99066.66666666667, ans=0.125
+2024-08-26 18:07:17,406 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=8.36 vs. limit=15.0
+2024-08-26 18:07:32,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=99173.33333333333, ans=0.125
+2024-08-26 18:07:35,406 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.75 vs. limit=15.0
+2024-08-26 18:07:39,267 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.23 vs. limit=22.5
+2024-08-26 18:07:49,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99280.0, ans=0.1
+2024-08-26 18:07:49,423 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.92 vs. limit=12.0
+2024-08-26 18:07:51,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=99280.0, ans=0.025
+2024-08-26 18:07:51,359 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.84 vs. limit=15.0
+2024-08-26 18:07:57,682 INFO [train.py:1114] (1/4) Epoch 8, batch 1200, loss[loss=0.2306, simple_loss=0.2989, pruned_loss=0.05912, ctc_loss=0.1098, over 19842.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2933, pruned_loss=0.06395, ctc_loss=0.1197, over 3824781.15 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-26 18:08:04,249 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.256e+02 1.491e+02 1.608e+02 2.003e+02 2.840e+02, threshold=3.216e+02, percent-clipped=0.0
+2024-08-26 18:08:08,332 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=99386.66666666667, ans=0.025
+2024-08-26 18:08:39,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=99546.66666666667, ans=0.0
+2024-08-26 18:08:41,203 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=99546.66666666667, ans=0.05
+2024-08-26 18:08:47,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=99546.66666666667, ans=0.0
+2024-08-26 18:08:49,186 INFO [train.py:1114] (1/4) Epoch 8, batch 1250, loss[loss=0.2432, simple_loss=0.3071, pruned_loss=0.06589, ctc_loss=0.1187, over 19526.00 frames. ], tot_loss[loss=0.234, simple_loss=0.2935, pruned_loss=0.06351, ctc_loss=0.1189, over 3843129.06 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-26 18:08:49,762 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.81 vs. limit=22.5
+2024-08-26 18:08:56,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=99600.0, ans=0.0
+2024-08-26 18:09:01,980 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=99653.33333333333, ans=0.025
+2024-08-26 18:09:12,378 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=6.70 vs. limit=15.0
+2024-08-26 18:09:14,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=99706.66666666667, ans=0.0
+2024-08-26 18:09:40,598 INFO [train.py:1114] (1/4) Epoch 8, batch 1300, loss[loss=0.2537, simple_loss=0.3031, pruned_loss=0.07361, ctc_loss=0.1428, over 18890.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.2917, pruned_loss=0.06259, ctc_loss=0.1174, over 3846658.49 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:09:42,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=99866.66666666667, ans=0.125
+2024-08-26 18:09:47,133 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.481e+02 1.661e+02 1.866e+02 3.142e+02, threshold=3.323e+02, percent-clipped=0.0
+2024-08-26 18:10:03,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=99973.33333333333, ans=0.0
+2024-08-26 18:10:26,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=100133.33333333333, ans=0.2
+2024-08-26 18:10:27,282 INFO [train.py:1114] (1/4) Epoch 8, batch 1350, loss[loss=0.2239, simple_loss=0.2939, pruned_loss=0.05581, ctc_loss=0.106, over 19792.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2916, pruned_loss=0.06269, ctc_loss=0.1173, over 3858031.17 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:10:28,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=100133.33333333333, ans=0.04949747468305833
+2024-08-26 18:10:47,430 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.34 vs. limit=22.5
+2024-08-26 18:10:53,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=100240.0, ans=0.125
+2024-08-26 18:10:54,770 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=100293.33333333333, ans=0.125
+2024-08-26 18:11:02,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100293.33333333333, ans=0.1
+2024-08-26 18:11:03,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=100293.33333333333, ans=0.95
+2024-08-26 18:11:07,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=100346.66666666667, ans=0.2
+2024-08-26 18:11:14,665 INFO [train.py:1114] (1/4) Epoch 8, batch 1400, loss[loss=0.2046, simple_loss=0.2624, pruned_loss=0.05417, ctc_loss=0.09603, over 19658.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.2918, pruned_loss=0.06292, ctc_loss=0.1175, over 3865207.46 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:11:23,741 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.577e+02 1.859e+02 2.331e+02 3.237e+02, threshold=3.718e+02, percent-clipped=0.0
+2024-08-26 18:11:32,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=100453.33333333333, ans=0.5
+2024-08-26 18:11:36,668 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=100453.33333333333, ans=0.2
+2024-08-26 18:11:51,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=100560.0, ans=0.125
+2024-08-26 18:11:55,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100560.0, ans=0.125
+2024-08-26 18:12:09,362 INFO [train.py:1114] (1/4) Epoch 8, batch 1450, loss[loss=0.2619, simple_loss=0.3127, pruned_loss=0.07759, ctc_loss=0.1398, over 19693.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2927, pruned_loss=0.0634, ctc_loss=0.1184, over 3863108.04 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:12:35,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=100773.33333333333, ans=0.0
+2024-08-26 18:12:39,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100773.33333333333, ans=0.125
+2024-08-26 18:13:00,711 INFO [train.py:1114] (1/4) Epoch 8, batch 1500, loss[loss=0.2478, simple_loss=0.305, pruned_loss=0.06925, ctc_loss=0.1304, over 19590.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2927, pruned_loss=0.06327, ctc_loss=0.1183, over 3862434.42 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:13:07,545 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.450e+02 1.594e+02 1.806e+02 5.150e+02, threshold=3.189e+02, percent-clipped=1.0
+2024-08-26 18:13:08,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=100933.33333333333, ans=0.125
+2024-08-26 18:13:23,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=101040.0, ans=0.025
+2024-08-26 18:13:48,297 INFO [train.py:1114] (1/4) Epoch 8, batch 1550, loss[loss=0.251, simple_loss=0.3083, pruned_loss=0.07087, ctc_loss=0.1297, over 19627.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2931, pruned_loss=0.06379, ctc_loss=0.119, over 3846907.76 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:13:55,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=101200.0, ans=0.09899494936611666
+2024-08-26 18:14:03,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=101253.33333333333, ans=0.2
+2024-08-26 18:14:40,869 INFO [train.py:1114] (1/4) Epoch 8, batch 1600, loss[loss=0.2466, simple_loss=0.3076, pruned_loss=0.06721, ctc_loss=0.1277, over 19828.00 frames. ], tot_loss[loss=0.234, simple_loss=0.2929, pruned_loss=0.06376, ctc_loss=0.1192, over 3836701.43 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:14:41,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=101466.66666666667, ans=0.0
+2024-08-26 18:14:42,236 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.73 vs. limit=15.0
+2024-08-26 18:14:47,302 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.562e+02 1.716e+02 2.059e+02 3.797e+02, threshold=3.431e+02, percent-clipped=2.0
+2024-08-26 18:14:53,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=101520.0, ans=0.2
+2024-08-26 18:15:05,765 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=101573.33333333333, ans=0.0
+2024-08-26 18:15:19,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=101626.66666666667, ans=0.125
+2024-08-26 18:15:32,087 INFO [train.py:1114] (1/4) Epoch 8, batch 1650, loss[loss=0.2296, simple_loss=0.2988, pruned_loss=0.05835, ctc_loss=0.1094, over 19665.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.2925, pruned_loss=0.06365, ctc_loss=0.119, over 3832380.28 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:15:36,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=101733.33333333333, ans=0.0
+2024-08-26 18:15:47,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=101786.66666666667, ans=0.0
+2024-08-26 18:16:01,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=101893.33333333333, ans=0.125
+2024-08-26 18:16:17,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=101946.66666666667, ans=0.0
+2024-08-26 18:16:18,705 INFO [train.py:1114] (1/4) Epoch 8, batch 1700, loss[loss=0.1964, simple_loss=0.2496, pruned_loss=0.05175, ctc_loss=0.09923, over 19678.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2918, pruned_loss=0.06298, ctc_loss=0.1176, over 3846502.58 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:16:21,080 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=102000.0, ans=0.125
+2024-08-26 18:16:24,732 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:16:25,301 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.495e+02 1.737e+02 2.089e+02 3.401e+02, threshold=3.475e+02, percent-clipped=0.0
+2024-08-26 18:16:31,364 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.50 vs. limit=22.5
+2024-08-26 18:16:34,217 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=102053.33333333333, ans=0.125
+2024-08-26 18:16:45,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=102160.0, ans=0.0
+2024-08-26 18:17:03,797 INFO [train.py:1114] (1/4) Epoch 8, batch 1750, loss[loss=0.1834, simple_loss=0.2442, pruned_loss=0.04535, ctc_loss=0.07969, over 19665.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.2912, pruned_loss=0.06258, ctc_loss=0.117, over 3850806.49 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:17:25,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=102373.33333333333, ans=0.125
+2024-08-26 18:17:39,907 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=102480.0, ans=0.125
+2024-08-26 18:17:46,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=102480.0, ans=0.125
+2024-08-26 18:17:48,504 INFO [train.py:1114] (1/4) Epoch 8, batch 1800, loss[loss=0.2334, simple_loss=0.2995, pruned_loss=0.06049, ctc_loss=0.1158, over 19613.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2913, pruned_loss=0.06245, ctc_loss=0.1167, over 3852697.43 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 32.0
+2024-08-26 18:17:56,850 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.517e+02 1.665e+02 1.949e+02 3.105e+02, threshold=3.330e+02, percent-clipped=0.0
+2024-08-26 18:18:16,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102640.0, ans=0.1
+2024-08-26 18:18:25,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=102693.33333333333, ans=0.125
+2024-08-26 18:18:36,732 INFO [train.py:1114] (1/4) Epoch 8, batch 1850, loss[loss=0.2392, simple_loss=0.3057, pruned_loss=0.0628, ctc_loss=0.118, over 19586.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.2911, pruned_loss=0.06237, ctc_loss=0.1165, over 3856138.23 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 32.0
+2024-08-26 18:18:36,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=102800.0, ans=0.125
+2024-08-26 18:18:39,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=102800.0, ans=0.1
+2024-08-26 18:18:57,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102906.66666666667, ans=0.125
+2024-08-26 18:18:59,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=102906.66666666667, ans=0.0
+2024-08-26 18:19:00,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=102906.66666666667, ans=0.2
+2024-08-26 18:19:01,204 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.58 vs. limit=15.0
+2024-08-26 18:19:14,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=103013.33333333333, ans=0.025
+2024-08-26 18:19:21,232 INFO [train.py:1114] (1/4) Epoch 8, batch 1900, loss[loss=0.2349, simple_loss=0.3052, pruned_loss=0.06033, ctc_loss=0.1101, over 19658.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2914, pruned_loss=0.0624, ctc_loss=0.1166, over 3860755.12 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:19:22,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=103066.66666666667, ans=0.0
+2024-08-26 18:19:28,168 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.533e+02 1.714e+02 2.014e+02 3.062e+02, threshold=3.427e+02, percent-clipped=0.0
+2024-08-26 18:19:36,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103120.0, ans=0.125
+2024-08-26 18:19:40,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=103173.33333333333, ans=0.125
+2024-08-26 18:19:42,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=103173.33333333333, ans=0.125
+2024-08-26 18:19:48,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=103226.66666666667, ans=0.0
+2024-08-26 18:19:49,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=103226.66666666667, ans=0.125
+2024-08-26 18:19:52,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103226.66666666667, ans=0.125
+2024-08-26 18:20:01,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=103280.0, ans=0.125
+2024-08-26 18:20:04,890 INFO [train.py:1114] (1/4) Epoch 8, batch 1950, loss[loss=0.2295, simple_loss=0.2859, pruned_loss=0.06356, ctc_loss=0.1149, over 19599.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.2929, pruned_loss=0.0629, ctc_loss=0.1175, over 3870260.74 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:20:10,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=103333.33333333333, ans=0.125
+2024-08-26 18:20:15,902 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=103386.66666666667, ans=0.0
+2024-08-26 18:20:16,287 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.44 vs. limit=15.0
+2024-08-26 18:20:29,330 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=103440.0, ans=0.125
+2024-08-26 18:20:32,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=103493.33333333333, ans=0.2
+2024-08-26 18:20:33,736 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=103493.33333333333, ans=0.025
+2024-08-26 18:20:44,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=103546.66666666667, ans=0.125
+2024-08-26 18:20:51,118 INFO [train.py:1114] (1/4) Epoch 8, batch 2000, loss[loss=0.207, simple_loss=0.2587, pruned_loss=0.05605, ctc_loss=0.1077, over 19638.00 frames. ], tot_loss[loss=0.234, simple_loss=0.2935, pruned_loss=0.06349, ctc_loss=0.1185, over 3855069.17 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:20:54,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=103600.0, ans=0.125
+2024-08-26 18:21:00,305 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.619e+02 1.835e+02 2.136e+02 5.632e+02, threshold=3.670e+02, percent-clipped=2.0
+2024-08-26 18:21:05,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=103653.33333333333, ans=0.0
+2024-08-26 18:21:13,081 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.93 vs. limit=22.5
+2024-08-26 18:21:24,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103760.0, ans=0.1
+2024-08-26 18:21:28,535 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.36 vs. limit=22.5
+2024-08-26 18:21:32,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=103813.33333333333, ans=0.0
+2024-08-26 18:21:36,066 INFO [train.py:1114] (1/4) Epoch 8, batch 2050, loss[loss=0.2161, simple_loss=0.2769, pruned_loss=0.05736, ctc_loss=0.1015, over 19708.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2923, pruned_loss=0.06308, ctc_loss=0.1176, over 3850588.02 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:21:37,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=103866.66666666667, ans=0.0
+2024-08-26 18:21:38,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103866.66666666667, ans=0.1
+2024-08-26 18:21:46,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=103920.0, ans=0.2
+2024-08-26 18:21:57,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=103973.33333333333, ans=10.0
+2024-08-26 18:22:04,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=104026.66666666667, ans=0.125
+2024-08-26 18:22:06,185 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.14 vs. limit=15.0
+2024-08-26 18:22:07,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=104026.66666666667, ans=0.0
+2024-08-26 18:22:09,339 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=104026.66666666667, ans=0.0
+2024-08-26 18:22:10,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=104080.0, ans=0.2
+2024-08-26 18:22:13,960 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.99 vs. limit=15.0
+2024-08-26 18:22:19,591 INFO [train.py:1114] (1/4) Epoch 8, batch 2100, loss[loss=0.2204, simple_loss=0.2832, pruned_loss=0.057, ctc_loss=0.1089, over 19768.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.2912, pruned_loss=0.06238, ctc_loss=0.1163, over 3856941.52 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:22:24,179 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=104133.33333333333, ans=0.0
+2024-08-26 18:22:27,466 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.502e+02 1.673e+02 2.007e+02 2.886e+02, threshold=3.346e+02, percent-clipped=0.0
+2024-08-26 18:22:28,508 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=104186.66666666667, ans=0.07
+2024-08-26 18:22:31,563 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.36 vs. limit=15.0
+2024-08-26 18:22:35,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=104186.66666666667, ans=0.07
+2024-08-26 18:22:42,525 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.19 vs. limit=10.0
+2024-08-26 18:22:45,204 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.41 vs. limit=10.0
+2024-08-26 18:22:47,940 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.99 vs. limit=22.5
+2024-08-26 18:22:49,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=104293.33333333333, ans=0.0
+2024-08-26 18:22:55,561 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.44 vs. limit=10.0
+2024-08-26 18:22:57,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=104346.66666666667, ans=0.1
+2024-08-26 18:22:58,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=104346.66666666667, ans=0.125
+2024-08-26 18:23:02,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104400.0, ans=0.0
+2024-08-26 18:23:03,052 INFO [train.py:1114] (1/4) Epoch 8, batch 2150, loss[loss=0.2203, simple_loss=0.2826, pruned_loss=0.05685, ctc_loss=0.1106, over 19573.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2903, pruned_loss=0.06188, ctc_loss=0.1155, over 3867606.77 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:23:11,022 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=104453.33333333333, ans=0.125
+2024-08-26 18:23:30,767 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.23 vs. limit=6.0
+2024-08-26 18:23:37,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_na.min_abs, batch_count=104613.33333333333, ans=0.02
+2024-08-26 18:23:43,720 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.94 vs. limit=15.0
+2024-08-26 18:23:46,683 INFO [train.py:1114] (1/4) Epoch 8, batch 2200, loss[loss=0.2395, simple_loss=0.2999, pruned_loss=0.06553, ctc_loss=0.12, over 19584.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.29, pruned_loss=0.06171, ctc_loss=0.1154, over 3866375.89 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:23:54,542 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.596e+02 1.839e+02 2.214e+02 3.376e+02, threshold=3.678e+02, percent-clipped=1.0
+2024-08-26 18:24:10,092 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.30 vs. limit=12.0
+2024-08-26 18:24:12,505 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=14.48 vs. limit=15.0
+2024-08-26 18:24:30,567 INFO [train.py:1114] (1/4) Epoch 8, batch 2250, loss[loss=0.2268, simple_loss=0.287, pruned_loss=0.06069, ctc_loss=0.1129, over 19622.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.2905, pruned_loss=0.06199, ctc_loss=0.1157, over 3866484.83 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:24:40,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=104986.66666666667, ans=0.125
+2024-08-26 18:25:02,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=105093.33333333333, ans=0.025
+2024-08-26 18:25:16,099 INFO [train.py:1114] (1/4) Epoch 8, batch 2300, loss[loss=0.2176, simple_loss=0.2751, pruned_loss=0.05904, ctc_loss=0.1053, over 19506.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.2894, pruned_loss=0.06174, ctc_loss=0.1153, over 3861007.67 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:25:21,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=105200.0, ans=0.125
+2024-08-26 18:25:23,759 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.553e+02 1.767e+02 2.002e+02 4.280e+02, threshold=3.534e+02, percent-clipped=3.0
+2024-08-26 18:25:24,032 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=105253.33333333333, ans=0.125
+2024-08-26 18:25:27,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=105253.33333333333, ans=0.0
+2024-08-26 18:25:32,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=105306.66666666667, ans=0.125
+2024-08-26 18:25:38,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=105306.66666666667, ans=0.125
+2024-08-26 18:25:41,032 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=105360.0, ans=0.2
+2024-08-26 18:25:49,455 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=105413.33333333333, ans=0.2
+2024-08-26 18:25:49,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=105413.33333333333, ans=0.125
+2024-08-26 18:25:58,622 INFO [train.py:1114] (1/4) Epoch 8, batch 2350, loss[loss=0.2517, simple_loss=0.3086, pruned_loss=0.07014, ctc_loss=0.1364, over 19670.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2895, pruned_loss=0.06188, ctc_loss=0.1153, over 3864515.36 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:25:59,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=105466.66666666667, ans=15.0
+2024-08-26 18:26:15,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-26 18:26:39,308 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=13.69 vs. limit=15.0
+2024-08-26 18:26:42,917 INFO [train.py:1114] (1/4) Epoch 8, batch 2400, loss[loss=0.2418, simple_loss=0.2941, pruned_loss=0.06899, ctc_loss=0.1287, over 19309.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.2917, pruned_loss=0.06273, ctc_loss=0.1166, over 3859355.61 frames. ], batch size: 71, lr: 1.79e-02, grad_scale: 32.0
+2024-08-26 18:26:43,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=105733.33333333333, ans=0.09899494936611666
+2024-08-26 18:26:49,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=105733.33333333333, ans=0.125
+2024-08-26 18:26:50,597 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.526e+02 1.733e+02 1.998e+02 3.354e+02, threshold=3.467e+02, percent-clipped=0.0
+2024-08-26 18:26:56,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=105786.66666666667, ans=0.2
+2024-08-26 18:26:56,332 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.28 vs. limit=15.0
+2024-08-26 18:27:01,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105840.0, ans=0.125
+2024-08-26 18:27:09,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105893.33333333333, ans=0.1
+2024-08-26 18:27:27,051 INFO [train.py:1114] (1/4) Epoch 8, batch 2450, loss[loss=0.312, simple_loss=0.3348, pruned_loss=0.105, ctc_loss=0.1977, over 13681.00 frames. ], tot_loss[loss=0.239, simple_loss=0.2963, pruned_loss=0.06619, ctc_loss=0.1234, over 3729889.19 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:27:31,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=106000.0, ans=0.0
+2024-08-26 18:27:34,587 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106000.0, ans=0.1
+2024-08-26 18:27:52,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=106106.66666666667, ans=0.0
+2024-08-26 18:27:56,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=106160.0, ans=0.125
+2024-08-26 18:28:00,298 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106160.0, ans=0.1
+2024-08-26 18:28:47,198 INFO [train.py:1114] (1/4) Epoch 9, batch 0, loss[loss=0.2431, simple_loss=0.2925, pruned_loss=0.06991, ctc_loss=0.1346, over 19801.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.2925, pruned_loss=0.06991, ctc_loss=0.1346, over 19801.00 frames. ], batch size: 49, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:28:47,198 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 18:28:54,385 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([4.3972, 4.0880, 3.9306, 3.8133], device='cuda:1')
+2024-08-26 18:28:56,818 INFO [train.py:1146] (1/4) Epoch 9, validation: loss=0.1927, simple_loss=0.2844, pruned_loss=0.03737, ctc_loss=0.06585, over 944034.00 frames.
+2024-08-26 18:28:56,819 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12882MB
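+
+The lines above show the periodic validation pass at the epoch boundary: training pauses, a frames-weighted loss is computed over the full validation set (944034 frames) along with a diagnostic entropy of one layer's attention weights, and the peak CUDA memory so far is reported. A sketch of that flow; compute_loss is a hypothetical stand-in for the model's loss API:
+
+    import torch
+
+    @torch.no_grad()
+    def validate(model, loader, device) -> float:
+        model.eval()
+        loss_sum, frames = 0.0, 0
+        for batch in loader:
+            loss, num_frames = model.compute_loss(batch)   # hypothetical API
+            loss_sum += float(loss) * num_frames
+            frames += num_frames
+        model.train()
+        peak_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
+        print(f"validation: loss={loss_sum / frames:.4f}, max memory={peak_mb}MB")
+        return loss_sum / frames
+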
+2024-08-26 18:28:59,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=106208.0, ans=0.2
+2024-08-26 18:29:11,356 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=106261.33333333333, ans=0.0
+2024-08-26 18:29:14,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=106261.33333333333, ans=0.0
+2024-08-26 18:29:16,433 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 1.688e+02 1.849e+02 2.025e+02 3.204e+02, threshold=3.698e+02, percent-clipped=0.0
+2024-08-26 18:29:22,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=106314.66666666667, ans=0.0
+2024-08-26 18:29:22,220 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=106314.66666666667, ans=0.0
+2024-08-26 18:29:23,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106368.0, ans=0.125
+2024-08-26 18:29:26,185 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.92 vs. limit=15.0
+2024-08-26 18:29:33,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=106421.33333333333, ans=0.2
+2024-08-26 18:29:40,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=106421.33333333333, ans=0.125
+2024-08-26 18:29:43,039 INFO [train.py:1114] (1/4) Epoch 9, batch 50, loss[loss=0.1972, simple_loss=0.2603, pruned_loss=0.04966, ctc_loss=0.08676, over 19686.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.2946, pruned_loss=0.06416, ctc_loss=0.1193, over 845285.85 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
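+
+The lr column decays smoothly within an epoch and steps down at each epoch boundary (1.79e-02 late in epoch 8, 1.69e-02 here at the start of epoch 9), which is consistent with an Eden-style schedule from the zipformer recipes, combining a batch-count factor and an epoch factor. A sketch of that shape; the base_lr, lr_batches, and lr_epochs constants are assumptions, not values read from this run:
+
+    def eden_lr(base_lr: float, batch: int, epoch: int,
+                lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
+        batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+        epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+        return base_lr * batch_factor * epoch_factor
+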
+2024-08-26 18:30:06,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=106581.33333333333, ans=0.025
+2024-08-26 18:30:10,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=106581.33333333333, ans=0.5
+2024-08-26 18:30:12,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=106634.66666666667, ans=0.125
+2024-08-26 18:30:23,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=106634.66666666667, ans=0.125
+2024-08-26 18:30:23,428 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=106634.66666666667, ans=0.07
+2024-08-26 18:30:39,523 INFO [train.py:1114] (1/4) Epoch 9, batch 100, loss[loss=0.2027, simple_loss=0.2678, pruned_loss=0.0497, ctc_loss=0.09549, over 19705.00 frames. ], tot_loss[loss=0.234, simple_loss=0.294, pruned_loss=0.06336, ctc_loss=0.1179, over 1499557.75 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:30:47,078 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=106741.33333333333, ans=0.0
+2024-08-26 18:30:58,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=106794.66666666667, ans=0.125
+2024-08-26 18:31:02,332 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.554e+02 1.735e+02 2.126e+02 3.416e+02, threshold=3.470e+02, percent-clipped=0.0
+2024-08-26 18:31:06,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=106848.0, ans=0.125
+2024-08-26 18:31:07,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=106848.0, ans=0.125
+2024-08-26 18:31:28,286 INFO [train.py:1114] (1/4) Epoch 9, batch 150, loss[loss=0.2078, simple_loss=0.2683, pruned_loss=0.05295, ctc_loss=0.1032, over 19740.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2908, pruned_loss=0.06171, ctc_loss=0.115, over 2028994.73 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 16.0
+2024-08-26 18:31:30,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=107008.0, ans=0.025
+2024-08-26 18:31:35,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=107008.0, ans=0.5
+2024-08-26 18:31:45,991 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=107114.66666666667, ans=0.95
+2024-08-26 18:32:07,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107221.33333333333, ans=0.0
+2024-08-26 18:32:14,109 INFO [train.py:1114] (1/4) Epoch 9, batch 200, loss[loss=0.2568, simple_loss=0.3086, pruned_loss=0.07495, ctc_loss=0.1376, over 18126.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.2896, pruned_loss=0.06117, ctc_loss=0.1141, over 2436455.84 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:32:20,608 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=107274.66666666667, ans=0.125
+2024-08-26 18:32:24,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=107328.0, ans=0.025
+2024-08-26 18:32:25,088 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:32:30,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=107328.0, ans=0.125
+2024-08-26 18:32:36,043 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 1.442e+02 1.571e+02 1.787e+02 2.800e+02, threshold=3.143e+02, percent-clipped=0.0
+2024-08-26 18:32:37,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=107381.33333333333, ans=0.125
+2024-08-26 18:32:42,071 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.55 vs. limit=12.0
+2024-08-26 18:32:42,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=107434.66666666667, ans=0.0
+2024-08-26 18:32:46,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=107434.66666666667, ans=0.125
+2024-08-26 18:33:01,989 INFO [train.py:1114] (1/4) Epoch 9, batch 250, loss[loss=0.2434, simple_loss=0.2959, pruned_loss=0.07039, ctc_loss=0.1253, over 19416.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.2889, pruned_loss=0.06047, ctc_loss=0.1129, over 2756445.89 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:33:03,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=107541.33333333333, ans=0.0
+2024-08-26 18:33:07,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107541.33333333333, ans=0.1
+2024-08-26 18:33:09,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=107541.33333333333, ans=0.125
+2024-08-26 18:33:14,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107594.66666666667, ans=0.0
+2024-08-26 18:33:33,124 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.92 vs. limit=6.0
+2024-08-26 18:33:44,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=107701.33333333333, ans=0.0
+2024-08-26 18:33:57,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-26 18:33:59,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=107754.66666666667, ans=0.07
+2024-08-26 18:34:01,006 INFO [train.py:1114] (1/4) Epoch 9, batch 300, loss[loss=0.2489, simple_loss=0.3111, pruned_loss=0.06816, ctc_loss=0.1258, over 19512.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.289, pruned_loss=0.06041, ctc_loss=0.1129, over 3000901.66 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:34:05,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=107808.0, ans=0.0
+2024-08-26 18:34:23,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=107914.66666666667, ans=0.125
+2024-08-26 18:34:24,464 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.264e+02 1.498e+02 1.681e+02 1.999e+02 2.633e+02, threshold=3.363e+02, percent-clipped=0.0
+2024-08-26 18:34:25,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=107914.66666666667, ans=0.125
+2024-08-26 18:34:33,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-26 18:34:38,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-26 18:34:38,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-26 18:34:39,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107968.0, ans=0.1
+2024-08-26 18:34:40,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=108021.33333333333, ans=0.025
+2024-08-26 18:34:40,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108021.33333333333, ans=0.1
+2024-08-26 18:34:50,540 INFO [train.py:1114] (1/4) Epoch 9, batch 350, loss[loss=0.2138, simple_loss=0.2647, pruned_loss=0.0581, ctc_loss=0.117, over 19739.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2892, pruned_loss=0.06045, ctc_loss=0.1131, over 3191307.58 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:34:52,683 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:35:01,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=108128.0, ans=0.0
+2024-08-26 18:35:18,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=108181.33333333333, ans=0.125
+2024-08-26 18:35:40,767 INFO [train.py:1114] (1/4) Epoch 9, batch 400, loss[loss=0.2146, simple_loss=0.2869, pruned_loss=0.05096, ctc_loss=0.1007, over 19488.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.2888, pruned_loss=0.06032, ctc_loss=0.1129, over 3343094.27 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:35:41,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=108341.33333333333, ans=6.0
+2024-08-26 18:35:53,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=108394.66666666667, ans=0.025
+2024-08-26 18:36:02,023 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 1.489e+02 1.712e+02 1.995e+02 4.778e+02, threshold=3.424e+02, percent-clipped=1.0
+2024-08-26 18:36:24,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=108554.66666666667, ans=0.0
+2024-08-26 18:36:30,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=108554.66666666667, ans=0.05
+2024-08-26 18:36:32,713 INFO [train.py:1114] (1/4) Epoch 9, batch 450, loss[loss=0.2103, simple_loss=0.2834, pruned_loss=0.0496, ctc_loss=0.09519, over 19607.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.2889, pruned_loss=0.06039, ctc_loss=0.113, over 3449872.69 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:37:01,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108714.66666666667, ans=0.1
+2024-08-26 18:37:21,531 INFO [train.py:1114] (1/4) Epoch 9, batch 500, loss[loss=0.2367, simple_loss=0.3031, pruned_loss=0.06229, ctc_loss=0.1141, over 19651.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2877, pruned_loss=0.05973, ctc_loss=0.1116, over 3546527.31 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:37:22,665 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=108874.66666666667, ans=0.125
+2024-08-26 18:37:23,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=108874.66666666667, ans=0.125
+2024-08-26 18:37:24,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108874.66666666667, ans=0.0
+2024-08-26 18:37:42,868 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.480e+02 1.660e+02 1.957e+02 3.087e+02, threshold=3.320e+02, percent-clipped=0.0
+2024-08-26 18:38:02,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=109088.0, ans=0.125
+2024-08-26 18:38:07,948 INFO [train.py:1114] (1/4) Epoch 9, batch 550, loss[loss=0.2584, simple_loss=0.3182, pruned_loss=0.07214, ctc_loss=0.1356, over 19302.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2877, pruned_loss=0.05958, ctc_loss=0.1114, over 3609589.52 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:38:09,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=109141.33333333333, ans=0.0
+2024-08-26 18:38:11,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=109141.33333333333, ans=0.125
+2024-08-26 18:38:12,839 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=109141.33333333333, ans=0.2
+2024-08-26 18:38:44,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=109301.33333333333, ans=0.125
+2024-08-26 18:38:54,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109354.66666666667, ans=0.1
+2024-08-26 18:38:55,937 INFO [train.py:1114] (1/4) Epoch 9, batch 600, loss[loss=0.2215, simple_loss=0.2887, pruned_loss=0.05606, ctc_loss=0.1055, over 19431.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2876, pruned_loss=0.05955, ctc_loss=0.1113, over 3666994.55 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:39:06,861 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=109408.0, ans=0.025
+2024-08-26 18:39:18,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=109514.66666666667, ans=0.2
+2024-08-26 18:39:21,964 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.496e+02 1.658e+02 1.980e+02 4.382e+02, threshold=3.316e+02, percent-clipped=1.0
+2024-08-26 18:39:31,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=109568.0, ans=0.0
+2024-08-26 18:39:32,352 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=6.24 vs. limit=12.0
+2024-08-26 18:39:36,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=109568.0, ans=0.0
+2024-08-26 18:39:38,533 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.61 vs. limit=15.0
+2024-08-26 18:39:40,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=109621.33333333333, ans=0.0
+2024-08-26 18:39:45,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=109621.33333333333, ans=0.125
+2024-08-26 18:39:46,060 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.00 vs. limit=15.0
+2024-08-26 18:39:49,350 INFO [train.py:1114] (1/4) Epoch 9, batch 650, loss[loss=0.215, simple_loss=0.2826, pruned_loss=0.05408, ctc_loss=0.09806, over 19762.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2873, pruned_loss=0.05953, ctc_loss=0.1112, over 3717055.50 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:39:56,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=109674.66666666667, ans=0.125
+2024-08-26 18:39:57,207 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.96 vs. limit=22.5
+2024-08-26 18:40:32,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109888.0, ans=0.1
+2024-08-26 18:40:40,260 INFO [train.py:1114] (1/4) Epoch 9, batch 700, loss[loss=0.2203, simple_loss=0.2844, pruned_loss=0.05627, ctc_loss=0.1088, over 19718.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.288, pruned_loss=0.05989, ctc_loss=0.1118, over 3748739.61 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:41:01,808 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.271e+02 1.503e+02 1.748e+02 2.321e+02 3.813e+02, threshold=3.497e+02, percent-clipped=1.0
+2024-08-26 18:41:04,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=110048.0, ans=0.125
+2024-08-26 18:41:11,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=110101.33333333333, ans=0.125
+2024-08-26 18:41:25,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110154.66666666667, ans=0.0
+2024-08-26 18:41:28,644 INFO [train.py:1114] (1/4) Epoch 9, batch 750, loss[loss=0.2182, simple_loss=0.2876, pruned_loss=0.05394, ctc_loss=0.1022, over 19491.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2871, pruned_loss=0.0596, ctc_loss=0.1111, over 3774634.43 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 16.0
+2024-08-26 18:41:32,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=110208.0, ans=0.0
+2024-08-26 18:41:33,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=110208.0, ans=0.125
+2024-08-26 18:41:41,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110261.33333333333, ans=0.1
+2024-08-26 18:41:43,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=110261.33333333333, ans=0.07
+2024-08-26 18:41:46,467 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.62 vs. limit=15.0
+2024-08-26 18:41:48,953 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=110314.66666666667, ans=0.0
+2024-08-26 18:41:54,978 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.41 vs. limit=15.0
+2024-08-26 18:42:12,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110421.33333333333, ans=0.125
+2024-08-26 18:42:21,860 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=10.30 vs. limit=15.0
+2024-08-26 18:42:22,143 INFO [train.py:1114] (1/4) Epoch 9, batch 800, loss[loss=0.2052, simple_loss=0.2634, pruned_loss=0.05426, ctc_loss=0.09596, over 19420.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2866, pruned_loss=0.05937, ctc_loss=0.1107, over 3795830.93 frames. ], batch size: 48, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:42:35,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110528.0, ans=0.125
+2024-08-26 18:42:41,783 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.00 vs. limit=15.0
+2024-08-26 18:42:43,918 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 1.427e+02 1.539e+02 1.792e+02 3.382e+02, threshold=3.078e+02, percent-clipped=0.0
+2024-08-26 18:42:45,602 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.72 vs. limit=22.5
+2024-08-26 18:43:09,193 INFO [train.py:1114] (1/4) Epoch 9, batch 850, loss[loss=0.2267, simple_loss=0.2925, pruned_loss=0.0586, ctc_loss=0.1093, over 19651.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2868, pruned_loss=0.05951, ctc_loss=0.1109, over 3815143.33 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:43:11,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=110741.33333333333, ans=0.025
+2024-08-26 18:43:32,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=110848.0, ans=0.1
+2024-08-26 18:43:48,536 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=110954.66666666667, ans=0.05
+2024-08-26 18:43:55,669 INFO [train.py:1114] (1/4) Epoch 9, batch 900, loss[loss=0.189, simple_loss=0.2548, pruned_loss=0.04512, ctc_loss=0.08256, over 19821.00 frames. ], tot_loss[loss=0.225, simple_loss=0.2867, pruned_loss=0.05951, ctc_loss=0.1109, over 3820006.16 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:45:38,147 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.519e+02 1.752e+02 2.077e+02 5.433e+02, threshold=3.505e+02, percent-clipped=5.0
+2024-08-26 18:45:39,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=111114.66666666667, ans=0.0
+2024-08-26 18:45:40,193 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=111114.66666666667, ans=0.2
+2024-08-26 18:45:59,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=111221.33333333333, ans=0.0
+2024-08-26 18:46:05,595 INFO [train.py:1114] (1/4) Epoch 9, batch 950, loss[loss=0.2165, simple_loss=0.2799, pruned_loss=0.05573, ctc_loss=0.1041, over 19494.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2864, pruned_loss=0.05948, ctc_loss=0.1109, over 3822352.50 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:46:19,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=111328.0, ans=0.125
+2024-08-26 18:46:21,547 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=111328.0, ans=0.2
+2024-08-26 18:46:27,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=111328.0, ans=0.1
+2024-08-26 18:46:41,378 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.32 vs. limit=15.0
+2024-08-26 18:46:48,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=111488.0, ans=0.125
+2024-08-26 18:46:57,418 INFO [train.py:1114] (1/4) Epoch 9, batch 1000, loss[loss=0.2205, simple_loss=0.2845, pruned_loss=0.05695, ctc_loss=0.1066, over 19843.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2876, pruned_loss=0.06004, ctc_loss=0.1118, over 3816887.10 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:47:08,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=111594.66666666667, ans=0.0
+2024-08-26 18:47:19,848 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.461e+02 1.756e+02 2.077e+02 6.803e+02, threshold=3.513e+02, percent-clipped=1.0
+2024-08-26 18:47:29,642 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.26 vs. limit=15.0
+2024-08-26 18:47:43,914 INFO [train.py:1114] (1/4) Epoch 9, batch 1050, loss[loss=0.2207, simple_loss=0.2918, pruned_loss=0.05402, ctc_loss=0.1042, over 19826.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2868, pruned_loss=0.05975, ctc_loss=0.1114, over 3823253.42 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:47:49,905 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.34 vs. limit=10.0
+2024-08-26 18:47:58,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111861.33333333333, ans=0.1
+2024-08-26 18:48:00,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=111861.33333333333, ans=0.2
+2024-08-26 18:48:04,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=111914.66666666667, ans=0.125
+2024-08-26 18:48:07,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=111914.66666666667, ans=0.2
+2024-08-26 18:48:32,530 INFO [train.py:1114] (1/4) Epoch 9, batch 1100, loss[loss=0.208, simple_loss=0.273, pruned_loss=0.05195, ctc_loss=0.09766, over 19600.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2861, pruned_loss=0.05921, ctc_loss=0.1105, over 3830916.43 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:48:34,486 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:48:39,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=112074.66666666667, ans=0.125
+2024-08-26 18:48:39,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=112074.66666666667, ans=0.125
+2024-08-26 18:48:59,870 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.444e+02 1.690e+02 2.009e+02 4.396e+02, threshold=3.380e+02, percent-clipped=1.0
+2024-08-26 18:49:01,045 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=112181.33333333333, ans=0.125
+2024-08-26 18:49:47,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112288.0, ans=0.1
+2024-08-26 18:49:51,556 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=112288.0, ans=0.0
+2024-08-26 18:49:51,785 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.45 vs. limit=15.0
+2024-08-26 18:49:53,120 INFO [train.py:1114] (1/4) Epoch 9, batch 1150, loss[loss=0.202, simple_loss=0.2732, pruned_loss=0.04833, ctc_loss=0.08503, over 19561.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2861, pruned_loss=0.05931, ctc_loss=0.1106, over 3829625.20 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:50:34,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=112501.33333333333, ans=0.0
+2024-08-26 18:50:54,143 INFO [train.py:1114] (1/4) Epoch 9, batch 1200, loss[loss=0.2441, simple_loss=0.3059, pruned_loss=0.06588, ctc_loss=0.1265, over 19844.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2868, pruned_loss=0.05957, ctc_loss=0.1112, over 3825238.18 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-26 18:50:58,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=112608.0, ans=0.125
+2024-08-26 18:51:04,832 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=112661.33333333333, ans=0.1
+2024-08-26 18:51:10,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=112661.33333333333, ans=0.0
+2024-08-26 18:51:15,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=112714.66666666667, ans=0.0
+2024-08-26 18:51:15,428 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.83 vs. limit=15.0
+2024-08-26 18:51:16,814 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.431e+02 1.600e+02 1.807e+02 3.201e+02, threshold=3.201e+02, percent-clipped=0.0
+2024-08-26 18:51:36,038 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.57 vs. limit=15.0
+2024-08-26 18:51:37,570 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:51:38,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=112821.33333333333, ans=0.125
+2024-08-26 18:51:42,796 INFO [train.py:1114] (1/4) Epoch 9, batch 1250, loss[loss=0.2455, simple_loss=0.3041, pruned_loss=0.06798, ctc_loss=0.1271, over 19502.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2871, pruned_loss=0.05941, ctc_loss=0.1109, over 3844066.73 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-26 18:51:46,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112874.66666666667, ans=0.1
+2024-08-26 18:51:54,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=112928.0, ans=0.0
+2024-08-26 18:52:10,835 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.41 vs. limit=22.5
+2024-08-26 18:52:21,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=113034.66666666667, ans=0.125
+2024-08-26 18:52:23,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=113034.66666666667, ans=0.2
+2024-08-26 18:52:24,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=113088.0, ans=0.125
+2024-08-26 18:52:26,965 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.11 vs. limit=15.0
+2024-08-26 18:52:29,350 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=113088.0, ans=0.125
+2024-08-26 18:52:36,293 INFO [train.py:1114] (1/4) Epoch 9, batch 1300, loss[loss=0.2493, simple_loss=0.31, pruned_loss=0.06921, ctc_loss=0.1254, over 18755.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2862, pruned_loss=0.05898, ctc_loss=0.1103, over 3847285.77 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:52:48,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113194.66666666667, ans=0.125
+2024-08-26 18:52:58,751 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.498e+02 1.743e+02 2.034e+02 3.430e+02, threshold=3.487e+02, percent-clipped=2.0
+2024-08-26 18:52:59,823 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=113248.0, ans=0.125
+2024-08-26 18:53:10,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=113301.33333333333, ans=0.1
+2024-08-26 18:53:23,259 INFO [train.py:1114] (1/4) Epoch 9, batch 1350, loss[loss=0.2303, simple_loss=0.2968, pruned_loss=0.05927, ctc_loss=0.1129, over 19776.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.286, pruned_loss=0.05877, ctc_loss=0.1097, over 3858500.78 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:53:26,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113408.0, ans=0.1
+2024-08-26 18:53:33,742 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:53:38,298 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=113461.33333333333, ans=0.025
+2024-08-26 18:53:43,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=113514.66666666667, ans=0.0
+2024-08-26 18:53:58,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=113568.0, ans=0.125
+2024-08-26 18:53:59,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=113621.33333333333, ans=0.125
+2024-08-26 18:54:09,874 INFO [train.py:1114] (1/4) Epoch 9, batch 1400, loss[loss=0.2039, simple_loss=0.2571, pruned_loss=0.05586, ctc_loss=0.09738, over 19683.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2857, pruned_loss=0.05885, ctc_loss=0.1098, over 3865413.39 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:54:11,398 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.54 vs. limit=15.0
+2024-08-26 18:54:20,445 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.65 vs. limit=15.0
+2024-08-26 18:54:28,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=113781.33333333333, ans=0.1
+2024-08-26 18:54:33,074 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.492e+02 1.644e+02 1.948e+02 2.802e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-26 18:54:33,552 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.19 vs. limit=10.0
+2024-08-26 18:54:46,620 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=6.19 vs. limit=12.0
+2024-08-26 18:54:59,233 INFO [train.py:1114] (1/4) Epoch 9, batch 1450, loss[loss=0.2484, simple_loss=0.3131, pruned_loss=0.06751, ctc_loss=0.1216, over 19683.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.2865, pruned_loss=0.05916, ctc_loss=0.1101, over 3862231.39 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:55:14,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=113994.66666666667, ans=0.2
+2024-08-26 18:55:17,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=113994.66666666667, ans=0.0
+2024-08-26 18:55:20,493 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn2.whiten.whitening_limit, batch_count=113994.66666666667, ans=22.5
+2024-08-26 18:55:21,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=113994.66666666667, ans=0.0
+2024-08-26 18:55:32,306 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.38 vs. limit=15.0
+2024-08-26 18:55:35,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=114101.33333333333, ans=0.125
+2024-08-26 18:55:42,879 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.40 vs. limit=15.0
+2024-08-26 18:55:48,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=114154.66666666667, ans=0.125
+2024-08-26 18:55:54,507 INFO [train.py:1114] (1/4) Epoch 9, batch 1500, loss[loss=0.2297, simple_loss=0.2968, pruned_loss=0.06034, ctc_loss=0.1047, over 19577.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2876, pruned_loss=0.0595, ctc_loss=0.1107, over 3862086.48 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:56:00,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=114208.0, ans=10.0
+2024-08-26 18:56:03,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=114261.33333333333, ans=0.0
+2024-08-26 18:56:07,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=114261.33333333333, ans=0.125
+2024-08-26 18:56:12,032 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.34 vs. limit=15.0
+2024-08-26 18:56:18,311 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.542e+02 1.688e+02 1.884e+02 2.711e+02, threshold=3.377e+02, percent-clipped=0.0
+2024-08-26 18:56:37,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=114421.33333333333, ans=0.125
+2024-08-26 18:56:41,348 INFO [train.py:1114] (1/4) Epoch 9, batch 1550, loss[loss=0.2318, simple_loss=0.298, pruned_loss=0.06037, ctc_loss=0.1123, over 19601.00 frames. ], tot_loss[loss=0.226, simple_loss=0.288, pruned_loss=0.05975, ctc_loss=0.1114, over 3845598.20 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:57:02,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=114581.33333333333, ans=0.125
+2024-08-26 18:57:03,719 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=114581.33333333333, ans=0.125
+2024-08-26 18:57:08,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=114634.66666666667, ans=0.125
+2024-08-26 18:57:10,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=114634.66666666667, ans=0.2
+2024-08-26 18:57:26,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=114688.0, ans=0.1
+2024-08-26 18:57:29,651 INFO [train.py:1114] (1/4) Epoch 9, batch 1600, loss[loss=0.2061, simple_loss=0.2816, pruned_loss=0.0473, ctc_loss=0.09005, over 19845.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.288, pruned_loss=0.05983, ctc_loss=0.1114, over 3835446.47 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-26 18:57:35,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=114741.33333333333, ans=0.125
+2024-08-26 18:57:57,609 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.291e+02 1.549e+02 1.720e+02 1.979e+02 3.573e+02, threshold=3.441e+02, percent-clipped=1.0
+2024-08-26 18:57:58,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.62 vs. limit=15.0
+2024-08-26 18:58:10,493 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=114901.33333333333, ans=0.1
+2024-08-26 18:58:36,375 INFO [train.py:1114] (1/4) Epoch 9, batch 1650, loss[loss=0.2184, simple_loss=0.285, pruned_loss=0.05485, ctc_loss=0.1052, over 19634.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2881, pruned_loss=0.05989, ctc_loss=0.1116, over 3831847.39 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 32.0
+2024-08-26 18:59:41,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.85 vs. limit=15.0
+2024-08-26 18:59:54,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=115168.0, ans=0.025
+2024-08-26 18:59:59,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=115168.0, ans=0.125
+2024-08-26 19:00:11,415 INFO [train.py:1114] (1/4) Epoch 9, batch 1700, loss[loss=0.2002, simple_loss=0.2619, pruned_loss=0.05086, ctc_loss=0.09189, over 19663.00 frames. ], tot_loss[loss=0.225, simple_loss=0.2873, pruned_loss=0.05924, ctc_loss=0.1103, over 3846475.32 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:00:12,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=115274.66666666667, ans=0.0
+2024-08-26 19:00:20,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=115328.0, ans=0.2
+2024-08-26 19:00:26,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=115328.0, ans=0.0
+2024-08-26 19:00:27,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115328.0, ans=0.1
+2024-08-26 19:00:32,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=115381.33333333333, ans=0.125
+2024-08-26 19:00:32,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=115381.33333333333, ans=0.125
+2024-08-26 19:00:34,661 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.433e+02 1.619e+02 1.844e+02 2.581e+02, threshold=3.239e+02, percent-clipped=0.0
+2024-08-26 19:00:37,590 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=115434.66666666667, ans=0.2
+2024-08-26 19:00:37,836 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.39 vs. limit=22.5
+2024-08-26 19:00:56,872 INFO [train.py:1114] (1/4) Epoch 9, batch 1750, loss[loss=0.1802, simple_loss=0.2415, pruned_loss=0.04388, ctc_loss=0.07784, over 19655.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2866, pruned_loss=0.05918, ctc_loss=0.1103, over 3851275.88 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:00:57,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-26 19:01:02,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-26 19:01:03,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-26 19:01:18,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=115648.0, ans=0.125
+2024-08-26 19:01:20,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=115648.0, ans=0.2
+2024-08-26 19:01:24,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=115701.33333333333, ans=10.0
+2024-08-26 19:01:31,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=115701.33333333333, ans=0.125
+2024-08-26 19:01:43,102 INFO [train.py:1114] (1/4) Epoch 9, batch 1800, loss[loss=0.2207, simple_loss=0.2938, pruned_loss=0.05389, ctc_loss=0.09964, over 19616.00 frames. ], tot_loss[loss=0.224, simple_loss=0.2864, pruned_loss=0.05887, ctc_loss=0.1098, over 3855165.41 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:01:54,810 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=115861.33333333333, ans=0.125
+2024-08-26 19:01:57,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=115861.33333333333, ans=0.0
+2024-08-26 19:02:06,016 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.500e+02 1.645e+02 1.953e+02 3.789e+02, threshold=3.290e+02, percent-clipped=1.0
+2024-08-26 19:02:09,883 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115968.0, ans=0.1
+2024-08-26 19:02:11,168 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.38 vs. limit=15.0
+2024-08-26 19:02:19,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=116021.33333333333, ans=0.0
+2024-08-26 19:02:27,281 INFO [train.py:1114] (1/4) Epoch 9, batch 1850, loss[loss=0.2429, simple_loss=0.3023, pruned_loss=0.06663, ctc_loss=0.1258, over 19587.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2858, pruned_loss=0.05857, ctc_loss=0.1093, over 3857559.76 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:02:36,358 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:02:40,035 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=116128.0, ans=0.125
+2024-08-26 19:02:45,113 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.11 vs. limit=22.5
+2024-08-26 19:02:50,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=116181.33333333333, ans=0.125
+2024-08-26 19:02:51,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=116181.33333333333, ans=0.025
+2024-08-26 19:03:13,218 INFO [train.py:1114] (1/4) Epoch 9, batch 1900, loss[loss=0.2251, simple_loss=0.2921, pruned_loss=0.05766, ctc_loss=0.1069, over 19648.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2867, pruned_loss=0.05886, ctc_loss=0.1099, over 3862589.14 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:03:19,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=116341.33333333333, ans=0.1
+2024-08-26 19:03:34,684 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.93 vs. limit=15.0
+2024-08-26 19:03:35,875 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.270e+02 1.509e+02 1.695e+02 1.935e+02 3.320e+02, threshold=3.390e+02, percent-clipped=1.0
+2024-08-26 19:03:49,106 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=116554.66666666667, ans=0.125
+2024-08-26 19:03:50,758 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=116554.66666666667, ans=0.125
+2024-08-26 19:03:56,678 INFO [train.py:1114] (1/4) Epoch 9, batch 1950, loss[loss=0.1934, simple_loss=0.2649, pruned_loss=0.04359, ctc_loss=0.08676, over 19604.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2876, pruned_loss=0.05896, ctc_loss=0.1101, over 3871366.78 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:04:08,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=116661.33333333333, ans=0.125
+2024-08-26 19:04:19,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=116714.66666666667, ans=0.2
+2024-08-26 19:04:20,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=116714.66666666667, ans=0.0
+2024-08-26 19:04:30,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=116768.0, ans=0.125
+2024-08-26 19:04:39,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=116821.33333333333, ans=0.125
+2024-08-26 19:04:45,338 INFO [train.py:1114] (1/4) Epoch 9, batch 2000, loss[loss=0.1996, simple_loss=0.2603, pruned_loss=0.05108, ctc_loss=0.09158, over 19628.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.288, pruned_loss=0.05913, ctc_loss=0.1107, over 3855299.30 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-26 19:04:47,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=116874.66666666667, ans=0.125
+2024-08-26 19:04:53,160 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.37 vs. limit=6.0
+2024-08-26 19:04:54,907 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.17 vs. limit=15.0
+2024-08-26 19:04:56,347 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.28 vs. limit=15.0
+2024-08-26 19:05:03,173 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=116981.33333333333, ans=0.0
+2024-08-26 19:05:05,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=116981.33333333333, ans=0.05
+2024-08-26 19:05:09,036 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.518e+02 1.711e+02 1.998e+02 4.316e+02, threshold=3.422e+02, percent-clipped=2.0
+2024-08-26 19:05:11,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=117034.66666666667, ans=0.125
+2024-08-26 19:05:17,959 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:05:26,266 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.18 vs. limit=22.5
+2024-08-26 19:05:27,631 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:05:29,278 INFO [train.py:1114] (1/4) Epoch 9, batch 2050, loss[loss=0.1858, simple_loss=0.2481, pruned_loss=0.04445, ctc_loss=0.08661, over 19729.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.2872, pruned_loss=0.0589, ctc_loss=0.1103, over 3851579.32 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:05:34,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=117141.33333333333, ans=0.0
+2024-08-26 19:05:41,719 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=117194.66666666667, ans=0.1
+2024-08-26 19:06:06,550 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.99 vs. limit=22.5
+2024-08-26 19:06:12,993 INFO [train.py:1114] (1/4) Epoch 9, batch 2100, loss[loss=0.2154, simple_loss=0.2901, pruned_loss=0.05179, ctc_loss=0.0928, over 19762.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2866, pruned_loss=0.05844, ctc_loss=0.1094, over 3857799.30 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:06:30,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=117514.66666666667, ans=0.125
+2024-08-26 19:06:36,666 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.488e+02 1.695e+02 1.945e+02 3.088e+02, threshold=3.391e+02, percent-clipped=0.0
+2024-08-26 19:06:44,738 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.44 vs. limit=15.0
+2024-08-26 19:06:51,416 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:06:52,495 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten.whitening_limit, batch_count=117621.33333333333, ans=15.0
+2024-08-26 19:06:53,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=117621.33333333333, ans=0.025
+2024-08-26 19:06:53,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=117621.33333333333, ans=0.0
+2024-08-26 19:06:55,548 INFO [train.py:1114] (1/4) Epoch 9, batch 2150, loss[loss=0.1996, simple_loss=0.2662, pruned_loss=0.04814, ctc_loss=0.09211, over 19567.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.2854, pruned_loss=0.05785, ctc_loss=0.1083, over 3867762.36 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 8.0
+2024-08-26 19:06:55,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=117674.66666666667, ans=0.1
+2024-08-26 19:06:57,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=117674.66666666667, ans=0.125
+2024-08-26 19:07:02,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=117674.66666666667, ans=0.0
+2024-08-26 19:07:11,322 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:07:13,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=117781.33333333333, ans=0.1
+2024-08-26 19:07:13,342 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.56 vs. limit=15.0
+2024-08-26 19:07:18,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=117781.33333333333, ans=0.04949747468305833
+2024-08-26 19:07:18,678 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.29 vs. limit=15.0
+2024-08-26 19:07:29,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=117888.0, ans=0.1
+2024-08-26 19:07:36,981 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.67 vs. limit=22.5
+2024-08-26 19:07:38,963 INFO [train.py:1114] (1/4) Epoch 9, batch 2200, loss[loss=0.2376, simple_loss=0.3064, pruned_loss=0.06202, ctc_loss=0.1118, over 19601.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.2853, pruned_loss=0.05785, ctc_loss=0.1083, over 3866602.78 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:07:42,232 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.46 vs. limit=6.0
+2024-08-26 19:07:45,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=117941.33333333333, ans=0.125
+2024-08-26 19:07:45,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=117941.33333333333, ans=0.125
+2024-08-26 19:08:03,124 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.528e+02 1.792e+02 2.132e+02 3.306e+02, threshold=3.583e+02, percent-clipped=0.0
+2024-08-26 19:08:14,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=118154.66666666667, ans=0.125
+2024-08-26 19:08:20,816 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=118154.66666666667, ans=0.0
+2024-08-26 19:08:21,085 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.77 vs. limit=15.0
+2024-08-26 19:08:34,327 INFO [train.py:1114] (1/4) Epoch 9, batch 2250, loss[loss=0.216, simple_loss=0.2886, pruned_loss=0.05131, ctc_loss=0.1018, over 19614.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2852, pruned_loss=0.05781, ctc_loss=0.1083, over 3867023.90 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:09:09,685 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.68 vs. limit=15.0
+2024-08-26 19:09:17,821 INFO [train.py:1114] (1/4) Epoch 9, batch 2300, loss[loss=0.2093, simple_loss=0.2695, pruned_loss=0.05483, ctc_loss=0.0986, over 19496.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2841, pruned_loss=0.0578, ctc_loss=0.1081, over 3860668.95 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:09:20,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=118474.66666666667, ans=0.125
+2024-08-26 19:09:42,032 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 1.479e+02 1.669e+02 2.317e+02 3.988e+02, threshold=3.338e+02, percent-clipped=3.0
+2024-08-26 19:09:43,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=118634.66666666667, ans=0.1
+2024-08-26 19:09:48,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=118634.66666666667, ans=0.125
+2024-08-26 19:10:01,375 INFO [train.py:1114] (1/4) Epoch 9, batch 2350, loss[loss=0.2345, simple_loss=0.2989, pruned_loss=0.06194, ctc_loss=0.1153, over 19669.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2844, pruned_loss=0.05814, ctc_loss=0.1087, over 3863748.78 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:10:06,646 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=118741.33333333333, ans=0.125
+2024-08-26 19:10:13,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=118794.66666666667, ans=0.125
+2024-08-26 19:11:06,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=118848.0, ans=0.125
+2024-08-26 19:11:06,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=118848.0, ans=0.1
+2024-08-26 19:11:06,999 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=118848.0, ans=0.0
+2024-08-26 19:11:11,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=118901.33333333333, ans=0.125
+2024-08-26 19:11:14,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=118901.33333333333, ans=0.125
+2024-08-26 19:11:19,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=118954.66666666667, ans=0.125
+2024-08-26 19:11:32,744 INFO [train.py:1114] (1/4) Epoch 9, batch 2400, loss[loss=0.2652, simple_loss=0.3194, pruned_loss=0.07627, ctc_loss=0.1465, over 19240.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2868, pruned_loss=0.05907, ctc_loss=0.1103, over 3857864.72 frames. ], batch size: 71, lr: 1.61e-02, grad_scale: 16.0
+2024-08-26 19:11:50,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=119061.33333333333, ans=0.125
+2024-08-26 19:12:04,703 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.526e+02 1.714e+02 1.892e+02 3.175e+02, threshold=3.427e+02, percent-clipped=0.0
+2024-08-26 19:12:13,727 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=11.66 vs. limit=15.0
+2024-08-26 19:12:22,384 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=119221.33333333333, ans=0.125
+2024-08-26 19:12:22,721 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=10.14 vs. limit=12.0
+2024-08-26 19:12:24,772 INFO [train.py:1114] (1/4) Epoch 9, batch 2450, loss[loss=0.2879, simple_loss=0.3178, pruned_loss=0.09466, ctc_loss=0.172, over 13701.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.2912, pruned_loss=0.0624, ctc_loss=0.1169, over 3731728.38 frames. ], batch size: 141, lr: 1.61e-02, grad_scale: 16.0
+2024-08-26 19:14:15,880 INFO [train.py:1114] (1/4) Epoch 10, batch 0, loss[loss=0.2042, simple_loss=0.2648, pruned_loss=0.05236, ctc_loss=0.09713, over 19818.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2648, pruned_loss=0.05236, ctc_loss=0.09713, over 19818.00 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 16.0
+2024-08-26 19:14:15,881 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 19:14:48,065 INFO [train.py:1146] (1/4) Epoch 10, validation: loss=0.1896, simple_loss=0.2813, pruned_loss=0.03622, ctc_loss=0.0637, over 944034.00 frames.
+2024-08-26 19:14:48,066 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12882MB
+2024-08-26 19:15:04,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=119536.0, ans=0.125
+2024-08-26 19:15:14,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=119589.33333333333, ans=0.125
+2024-08-26 19:15:25,080 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.696e+02 1.867e+02 2.057e+02 3.331e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-26 19:15:25,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=119696.0, ans=0.125
+2024-08-26 19:15:26,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=119696.0, ans=0.125
+2024-08-26 19:15:34,236 INFO [train.py:1114] (1/4) Epoch 10, batch 50, loss[loss=0.1967, simple_loss=0.2659, pruned_loss=0.04623, ctc_loss=0.08732, over 19716.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2874, pruned_loss=0.05884, ctc_loss=0.1113, over 843671.78 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 16.0
+2024-08-26 19:15:42,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=119802.66666666667, ans=0.125
+2024-08-26 19:15:51,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=119802.66666666667, ans=0.0
+2024-08-26 19:15:53,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=119856.0, ans=0.0
+2024-08-26 19:16:02,323 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=119909.33333333333, ans=0.0
+2024-08-26 19:16:10,104 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.08 vs. limit=15.0
+2024-08-26 19:16:14,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=119962.66666666667, ans=0.0
+2024-08-26 19:16:20,471 INFO [train.py:1114] (1/4) Epoch 10, batch 100, loss[loss=0.2057, simple_loss=0.2706, pruned_loss=0.05089, ctc_loss=0.09777, over 19715.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2886, pruned_loss=0.05879, ctc_loss=0.1104, over 1499199.10 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:16:27,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=120016.0, ans=0.125
+2024-08-26 19:16:28,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=120069.33333333333, ans=0.125
+2024-08-26 19:16:37,475 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.10 vs. limit=15.0
+2024-08-26 19:16:38,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=120069.33333333333, ans=0.125
+2024-08-26 19:16:53,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=120176.0, ans=0.0
+2024-08-26 19:16:53,950 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.87 vs. limit=15.0
+2024-08-26 19:17:03,443 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.471e+02 1.633e+02 1.792e+02 2.780e+02, threshold=3.265e+02, percent-clipped=0.0
+2024-08-26 19:17:09,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=120229.33333333333, ans=0.05
+2024-08-26 19:17:11,616 INFO [train.py:1114] (1/4) Epoch 10, batch 150, loss[loss=0.1951, simple_loss=0.2534, pruned_loss=0.05005, ctc_loss=0.09158, over 19685.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.2858, pruned_loss=0.05801, ctc_loss=0.1083, over 2028038.53 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:17:16,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-26 19:17:19,139 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=120282.66666666667, ans=0.0
+2024-08-26 19:17:19,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=120282.66666666667, ans=0.0
+2024-08-26 19:17:20,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-26 19:17:38,931 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.83 vs. limit=15.0
+2024-08-26 19:17:52,753 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.98 vs. limit=22.5
+2024-08-26 19:18:02,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=120496.0, ans=0.2
+2024-08-26 19:18:07,131 INFO [train.py:1114] (1/4) Epoch 10, batch 200, loss[loss=0.241, simple_loss=0.3066, pruned_loss=0.06353, ctc_loss=0.1209, over 18236.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2849, pruned_loss=0.05768, ctc_loss=0.1075, over 2435602.70 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:18:29,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=120549.33333333333, ans=0.125
+2024-08-26 19:18:30,189 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.95 vs. limit=22.5
+2024-08-26 19:18:30,639 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=120549.33333333333, ans=0.125
+2024-08-26 19:18:33,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=120549.33333333333, ans=0.0
+2024-08-26 19:18:34,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=120602.66666666667, ans=0.0
+2024-08-26 19:18:40,946 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.48 vs. limit=22.5
+2024-08-26 19:18:51,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=120656.0, ans=0.0
+2024-08-26 19:19:10,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=120762.66666666667, ans=0.0
+2024-08-26 19:19:12,214 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.459e+02 1.596e+02 1.815e+02 3.041e+02, threshold=3.193e+02, percent-clipped=0.0
+2024-08-26 19:19:43,856 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=120762.66666666667, ans=0.1
+2024-08-26 19:19:47,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=120816.0, ans=0.125
+2024-08-26 19:19:48,330 INFO [train.py:1114] (1/4) Epoch 10, batch 250, loss[loss=0.2149, simple_loss=0.2863, pruned_loss=0.05233, ctc_loss=0.09711, over 19382.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2845, pruned_loss=0.0572, ctc_loss=0.1065, over 2755365.69 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:19:50,786 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.63 vs. limit=10.0
+2024-08-26 19:20:03,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=120869.33333333333, ans=0.07
+2024-08-26 19:20:06,485 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=120869.33333333333, ans=0.0
+2024-08-26 19:20:11,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-26 19:20:11,420 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.05 vs. limit=22.5
+2024-08-26 19:20:14,815 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=120922.66666666667, ans=0.025
+2024-08-26 19:20:25,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=120976.0, ans=0.0
+2024-08-26 19:20:37,553 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=121029.33333333333, ans=0.125
+2024-08-26 19:20:45,507 INFO [train.py:1114] (1/4) Epoch 10, batch 300, loss[loss=0.2265, simple_loss=0.2941, pruned_loss=0.05848, ctc_loss=0.1045, over 19524.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2835, pruned_loss=0.05671, ctc_loss=0.1056, over 3000066.51 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:20:53,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=121082.66666666667, ans=0.025
+2024-08-26 19:21:01,497 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=121136.0, ans=0.125
+2024-08-26 19:21:25,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=121242.66666666667, ans=0.0
+2024-08-26 19:21:28,840 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.56 vs. limit=6.0
+2024-08-26 19:21:29,982 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.480e+02 1.641e+02 1.981e+02 3.456e+02, threshold=3.281e+02, percent-clipped=2.0
+2024-08-26 19:21:30,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=121296.0, ans=0.125
+2024-08-26 19:21:30,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.69 vs. limit=12.0
+2024-08-26 19:21:31,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=121296.0, ans=0.09899494936611666
+2024-08-26 19:21:37,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=121349.33333333333, ans=0.125
+2024-08-26 19:21:38,275 INFO [train.py:1114] (1/4) Epoch 10, batch 350, loss[loss=0.1943, simple_loss=0.2546, pruned_loss=0.04931, ctc_loss=0.0882, over 19801.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2835, pruned_loss=0.05664, ctc_loss=0.1056, over 3189437.63 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:21:40,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=121349.33333333333, ans=0.125
+2024-08-26 19:21:53,634 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.07 vs. limit=15.0
+2024-08-26 19:21:56,157 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=121456.0, ans=0.125
+2024-08-26 19:21:56,441 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.02 vs. limit=15.0
+2024-08-26 19:21:57,244 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.00 vs. limit=12.0
+2024-08-26 19:22:09,949 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=121509.33333333333, ans=0.1
+2024-08-26 19:22:18,475 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.05 vs. limit=6.0
+2024-08-26 19:22:24,882 INFO [train.py:1114] (1/4) Epoch 10, batch 400, loss[loss=0.2127, simple_loss=0.283, pruned_loss=0.05112, ctc_loss=0.1005, over 19497.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2829, pruned_loss=0.05654, ctc_loss=0.1054, over 3341910.85 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:22:26,980 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=121616.0, ans=0.0
+2024-08-26 19:22:37,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=121669.33333333333, ans=0.1
+2024-08-26 19:23:08,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=121776.0, ans=0.125
+2024-08-26 19:23:16,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=121829.33333333333, ans=0.125
+2024-08-26 19:23:18,027 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.471e+02 1.735e+02 2.020e+02 3.245e+02, threshold=3.470e+02, percent-clipped=0.0
+2024-08-26 19:23:26,370 INFO [train.py:1114] (1/4) Epoch 10, batch 450, loss[loss=0.2056, simple_loss=0.2805, pruned_loss=0.04778, ctc_loss=0.0877, over 19622.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.283, pruned_loss=0.05637, ctc_loss=0.1053, over 3450128.20 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:23:41,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=121936.0, ans=0.0
+2024-08-26 19:23:46,563 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=121989.33333333333, ans=0.125
+2024-08-26 19:23:53,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=121989.33333333333, ans=0.125
+2024-08-26 19:24:05,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=122042.66666666667, ans=0.125
+2024-08-26 19:24:09,328 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=122096.0, ans=0.125
+2024-08-26 19:24:16,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=122149.33333333333, ans=0.125
+2024-08-26 19:24:19,329 INFO [train.py:1114] (1/4) Epoch 10, batch 500, loss[loss=0.2391, simple_loss=0.3065, pruned_loss=0.06325, ctc_loss=0.113, over 19674.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2824, pruned_loss=0.05586, ctc_loss=0.1044, over 3545386.11 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:24:25,486 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=122149.33333333333, ans=0.125
+2024-08-26 19:24:28,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=122149.33333333333, ans=0.025
+2024-08-26 19:24:28,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=122149.33333333333, ans=0.2
+2024-08-26 19:24:42,556 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=122202.66666666667, ans=0.125
+2024-08-26 19:24:58,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=122256.0, ans=0.0
+2024-08-26 19:25:05,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=122309.33333333333, ans=0.125
+2024-08-26 19:25:11,344 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.449e+02 1.637e+02 1.959e+02 3.375e+02, threshold=3.275e+02, percent-clipped=0.0
+2024-08-26 19:25:11,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=122362.66666666667, ans=0.0
+2024-08-26 19:25:14,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-26 19:25:14,376 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-26 19:25:19,721 INFO [train.py:1114] (1/4) Epoch 10, batch 550, loss[loss=0.2297, simple_loss=0.2883, pruned_loss=0.06212, ctc_loss=0.1171, over 19232.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2826, pruned_loss=0.05604, ctc_loss=0.1047, over 3607483.00 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:25:21,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=122416.0, ans=0.125
+2024-08-26 19:25:22,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=122416.0, ans=0.125
+2024-08-26 19:25:30,373 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:25:31,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=122469.33333333333, ans=0.0
+2024-08-26 19:26:01,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=122629.33333333333, ans=0.07
+2024-08-26 19:26:03,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=122629.33333333333, ans=0.0
+2024-08-26 19:26:10,276 INFO [train.py:1114] (1/4) Epoch 10, batch 600, loss[loss=0.2162, simple_loss=0.2867, pruned_loss=0.05261, ctc_loss=0.1013, over 19386.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2831, pruned_loss=0.05647, ctc_loss=0.1053, over 3665325.29 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:26:24,099 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=122736.0, ans=0.0
+2024-08-26 19:26:44,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=122842.66666666667, ans=0.0
+2024-08-26 19:26:50,243 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.480e+02 1.661e+02 1.846e+02 3.271e+02, threshold=3.322e+02, percent-clipped=0.0
+2024-08-26 19:26:56,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=122896.0, ans=0.125
+2024-08-26 19:26:58,395 INFO [train.py:1114] (1/4) Epoch 10, batch 650, loss[loss=0.2134, simple_loss=0.2805, pruned_loss=0.05251, ctc_loss=0.1034, over 19773.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2827, pruned_loss=0.05619, ctc_loss=0.105, over 3716018.99 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:27:07,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=122949.33333333333, ans=0.0
+2024-08-26 19:27:12,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=123002.66666666667, ans=0.0
+2024-08-26 19:27:36,015 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=123109.33333333333, ans=0.125
+2024-08-26 19:27:37,869 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=123109.33333333333, ans=0.125
+2024-08-26 19:27:48,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=123162.66666666667, ans=0.0
+2024-08-26 19:27:51,539 INFO [train.py:1114] (1/4) Epoch 10, batch 700, loss[loss=0.1988, simple_loss=0.2688, pruned_loss=0.04764, ctc_loss=0.08378, over 19726.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2831, pruned_loss=0.05623, ctc_loss=0.105, over 3747424.30 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:27:58,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=123216.0, ans=0.125
+2024-08-26 19:28:01,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=123269.33333333333, ans=0.0
+2024-08-26 19:28:10,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=123322.66666666667, ans=0.125
+2024-08-26 19:28:11,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=123322.66666666667, ans=0.0
+2024-08-26 19:28:13,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=123322.66666666667, ans=0.125
+2024-08-26 19:28:21,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.43 vs. limit=15.0
+2024-08-26 19:28:29,135 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.526e+02 1.912e+02 2.394e+02 4.336e+02, threshold=3.825e+02, percent-clipped=8.0
+2024-08-26 19:28:35,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=123429.33333333333, ans=0.025
+2024-08-26 19:28:38,774 INFO [train.py:1114] (1/4) Epoch 10, batch 750, loss[loss=0.2041, simple_loss=0.2718, pruned_loss=0.04932, ctc_loss=0.09441, over 19519.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2824, pruned_loss=0.05589, ctc_loss=0.1042, over 3773650.40 frames. ], batch size: 54, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:29:19,650 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=11.15 vs. limit=15.0
+2024-08-26 19:29:27,332 INFO [train.py:1114] (1/4) Epoch 10, batch 800, loss[loss=0.1912, simple_loss=0.2497, pruned_loss=0.04775, ctc_loss=0.0933, over 19818.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2816, pruned_loss=0.05563, ctc_loss=0.1037, over 3795006.20 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-26 19:29:35,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=123802.66666666667, ans=0.05
+2024-08-26 19:29:36,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=123802.66666666667, ans=0.125
+2024-08-26 19:29:44,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=123802.66666666667, ans=0.05
+2024-08-26 19:29:46,360 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.96 vs. limit=22.5
+2024-08-26 19:29:47,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=123856.0, ans=0.125
+2024-08-26 19:29:59,847 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.81 vs. limit=22.5
+2024-08-26 19:30:07,513 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.505e+02 1.745e+02 2.038e+02 4.368e+02, threshold=3.490e+02, percent-clipped=1.0
+2024-08-26 19:30:17,629 INFO [train.py:1114] (1/4) Epoch 10, batch 850, loss[loss=0.2195, simple_loss=0.2881, pruned_loss=0.05537, ctc_loss=0.1002, over 19645.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2817, pruned_loss=0.05554, ctc_loss=0.1036, over 3814424.14 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-26 19:30:25,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=124016.0, ans=0.0
+2024-08-26 19:30:36,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=124069.33333333333, ans=0.0
+2024-08-26 19:30:42,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=124122.66666666667, ans=0.125
+2024-08-26 19:30:56,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=124229.33333333333, ans=0.125
+2024-08-26 19:31:00,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=124229.33333333333, ans=0.125
+2024-08-26 19:31:04,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=124229.33333333333, ans=0.125
+2024-08-26 19:31:08,378 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=124229.33333333333, ans=0.125
+2024-08-26 19:31:13,061 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=124229.33333333333, ans=0.125
+2024-08-26 19:31:14,603 INFO [train.py:1114] (1/4) Epoch 10, batch 900, loss[loss=0.1877, simple_loss=0.2563, pruned_loss=0.04365, ctc_loss=0.07952, over 19420.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2819, pruned_loss=0.056, ctc_loss=0.1043, over 3819291.77 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:32:19,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=124389.33333333333, ans=0.0
+2024-08-26 19:32:22,232 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=124389.33333333333, ans=0.0
+2024-08-26 19:32:35,080 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.525e+02 1.733e+02 2.036e+02 4.140e+02, threshold=3.466e+02, percent-clipped=3.0
+2024-08-26 19:32:42,441 INFO [train.py:1114] (1/4) Epoch 10, batch 950, loss[loss=0.2108, simple_loss=0.2715, pruned_loss=0.05513, ctc_loss=0.09944, over 19481.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2825, pruned_loss=0.05649, ctc_loss=0.1053, over 3822031.47 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:33:07,794 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=124656.0, ans=0.0
+2024-08-26 19:33:20,618 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=124709.33333333333, ans=0.125
+2024-08-26 19:33:20,886 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.37 vs. limit=10.0
+2024-08-26 19:33:22,406 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=124709.33333333333, ans=0.1
+2024-08-26 19:33:36,585 INFO [train.py:1114] (1/4) Epoch 10, batch 1000, loss[loss=0.1934, simple_loss=0.2621, pruned_loss=0.0452, ctc_loss=0.08566, over 19854.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2828, pruned_loss=0.05659, ctc_loss=0.1057, over 3817081.22 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:33:39,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.whiten.whitening_limit, batch_count=124816.0, ans=12.0
+2024-08-26 19:34:05,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=124922.66666666667, ans=0.125
+2024-08-26 19:34:07,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=124922.66666666667, ans=15.0
+2024-08-26 19:34:11,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=124976.0, ans=0.125
+2024-08-26 19:34:19,953 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.433e+02 1.580e+02 1.832e+02 3.141e+02, threshold=3.159e+02, percent-clipped=0.0
+2024-08-26 19:34:22,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=125029.33333333333, ans=0.125
+2024-08-26 19:34:27,366 INFO [train.py:1114] (1/4) Epoch 10, batch 1050, loss[loss=0.2293, simple_loss=0.3078, pruned_loss=0.05535, ctc_loss=0.1003, over 19837.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2821, pruned_loss=0.0562, ctc_loss=0.105, over 3823053.40 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:35:09,714 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.74 vs. limit=15.0
+2024-08-26 19:35:15,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=125189.33333333333, ans=0.2
+2024-08-26 19:35:23,549 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=125242.66666666667, ans=0.1
+2024-08-26 19:35:29,460 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.93 vs. limit=22.5
+2024-08-26 19:35:31,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=125296.0, ans=0.125
+2024-08-26 19:35:36,344 INFO [train.py:1114] (1/4) Epoch 10, batch 1100, loss[loss=0.2098, simple_loss=0.2781, pruned_loss=0.05214, ctc_loss=0.09303, over 19580.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2813, pruned_loss=0.0557, ctc_loss=0.1041, over 3829407.05 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 8.0
+2024-08-26 19:35:57,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=125456.0, ans=0.125
+2024-08-26 19:36:15,876 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.85 vs. limit=15.0
+2024-08-26 19:36:18,873 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.433e+02 1.605e+02 1.841e+02 2.779e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-26 19:36:25,416 INFO [train.py:1114] (1/4) Epoch 10, batch 1150, loss[loss=0.2024, simple_loss=0.2711, pruned_loss=0.04898, ctc_loss=0.08939, over 19601.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2818, pruned_loss=0.05594, ctc_loss=0.1045, over 3829021.94 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 8.0
+2024-08-26 19:36:32,761 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.46 vs. limit=15.0
+2024-08-26 19:36:54,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=125722.66666666667, ans=0.125
+2024-08-26 19:36:55,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=125776.0, ans=0.025
+2024-08-26 19:37:16,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=125882.66666666667, ans=0.2
+2024-08-26 19:37:17,649 INFO [train.py:1114] (1/4) Epoch 10, batch 1200, loss[loss=0.2208, simple_loss=0.2908, pruned_loss=0.05514, ctc_loss=0.1015, over 19845.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2826, pruned_loss=0.05631, ctc_loss=0.1052, over 3823979.26 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:37:26,713 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.67 vs. limit=10.0
+2024-08-26 19:37:30,855 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=125936.0, ans=0.0
+2024-08-26 19:37:33,068 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.50 vs. limit=15.0
+2024-08-26 19:37:43,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=125989.33333333333, ans=0.1
+2024-08-26 19:37:44,121 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.65 vs. limit=22.5
+2024-08-26 19:37:57,386 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.466e+02 1.608e+02 1.824e+02 2.979e+02, threshold=3.216e+02, percent-clipped=0.0
+2024-08-26 19:38:04,046 INFO [train.py:1114] (1/4) Epoch 10, batch 1250, loss[loss=0.2385, simple_loss=0.2998, pruned_loss=0.06595, ctc_loss=0.1131, over 19506.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.283, pruned_loss=0.05631, ctc_loss=0.1049, over 3841312.74 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:38:04,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.29 vs. limit=15.0
+2024-08-26 19:38:05,400 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.71 vs. limit=10.0
+2024-08-26 19:38:08,234 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.49 vs. limit=22.5
+2024-08-26 19:38:19,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=126202.66666666667, ans=0.125
+2024-08-26 19:38:46,986 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=13.37 vs. limit=15.0
+2024-08-26 19:39:57,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=126362.66666666667, ans=0.04949747468305833
+2024-08-26 19:40:04,476 INFO [train.py:1114] (1/4) Epoch 10, batch 1300, loss[loss=0.239, simple_loss=0.2985, pruned_loss=0.06457, ctc_loss=0.1258, over 18932.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.282, pruned_loss=0.0558, ctc_loss=0.104, over 3844627.38 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:40:04,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=126416.0, ans=0.0
+2024-08-26 19:40:34,761 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.16 vs. limit=6.0
+2024-08-26 19:40:54,259 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.480e+02 1.716e+02 1.981e+02 3.061e+02, threshold=3.432e+02, percent-clipped=0.0
+2024-08-26 19:41:00,867 INFO [train.py:1114] (1/4) Epoch 10, batch 1350, loss[loss=0.2363, simple_loss=0.3012, pruned_loss=0.06209, ctc_loss=0.1178, over 19782.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2817, pruned_loss=0.05565, ctc_loss=0.1036, over 3855221.60 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:41:01,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=126682.66666666667, ans=0.125
+2024-08-26 19:41:06,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=126682.66666666667, ans=0.1
+2024-08-26 19:41:06,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=126682.66666666667, ans=0.1
+2024-08-26 19:41:16,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=126736.0, ans=0.1
+2024-08-26 19:41:25,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=126789.33333333333, ans=0.2
+2024-08-26 19:41:29,888 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=126789.33333333333, ans=0.0
+2024-08-26 19:41:42,569 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.94 vs. limit=15.0
+2024-08-26 19:41:45,268 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.10 vs. limit=22.5
+2024-08-26 19:41:45,826 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=126896.0, ans=0.125
+2024-08-26 19:41:48,664 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=126896.0, ans=0.125
+2024-08-26 19:41:52,399 INFO [train.py:1114] (1/4) Epoch 10, batch 1400, loss[loss=0.181, simple_loss=0.2413, pruned_loss=0.04419, ctc_loss=0.08095, over 19658.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2812, pruned_loss=0.05536, ctc_loss=0.1032, over 3861749.60 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:42:17,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=127002.66666666667, ans=0.125
+2024-08-26 19:42:20,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127002.66666666667, ans=0.1
+2024-08-26 19:42:27,657 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=127056.0, ans=0.0
+2024-08-26 19:42:43,187 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.452e+02 1.585e+02 1.952e+02 4.788e+02, threshold=3.170e+02, percent-clipped=2.0
+2024-08-26 19:42:49,759 INFO [train.py:1114] (1/4) Epoch 10, batch 1450, loss[loss=0.2318, simple_loss=0.3025, pruned_loss=0.05907, ctc_loss=0.1073, over 19707.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2822, pruned_loss=0.05576, ctc_loss=0.1041, over 3860715.75 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:42:57,408 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=127216.0, ans=0.125
+2024-08-26 19:43:06,563 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127269.33333333333, ans=0.1
+2024-08-26 19:43:11,177 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.62 vs. limit=12.0
+2024-08-26 19:43:15,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=127322.66666666667, ans=0.0
+2024-08-26 19:43:26,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=127376.0, ans=0.04949747468305833
+2024-08-26 19:43:37,444 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=127429.33333333333, ans=0.025
+2024-08-26 19:43:40,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=127429.33333333333, ans=0.025
+2024-08-26 19:43:44,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=127429.33333333333, ans=0.125
+2024-08-26 19:43:48,227 INFO [train.py:1114] (1/4) Epoch 10, batch 1500, loss[loss=0.2338, simple_loss=0.3014, pruned_loss=0.06066, ctc_loss=0.1123, over 19599.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2827, pruned_loss=0.05589, ctc_loss=0.1043, over 3861030.36 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:43:49,086 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.46 vs. limit=15.0
+2024-08-26 19:43:49,824 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.46 vs. limit=12.0
+2024-08-26 19:43:54,878 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=127482.66666666667, ans=0.0
+2024-08-26 19:43:59,796 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.74 vs. limit=15.0
+2024-08-26 19:44:01,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=127536.0, ans=0.0
+2024-08-26 19:44:08,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127589.33333333333, ans=0.1
+2024-08-26 19:44:34,631 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.37 vs. limit=15.0
+2024-08-26 19:44:36,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=127696.0, ans=0.125
+2024-08-26 19:44:37,665 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.427e+02 1.587e+02 1.794e+02 3.285e+02, threshold=3.174e+02, percent-clipped=1.0
+2024-08-26 19:44:52,461 INFO [train.py:1114] (1/4) Epoch 10, batch 1550, loss[loss=0.228, simple_loss=0.2965, pruned_loss=0.05812, ctc_loss=0.1084, over 19591.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2828, pruned_loss=0.05611, ctc_loss=0.1048, over 3846715.28 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:45:06,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127802.66666666667, ans=0.1
+2024-08-26 19:45:12,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127856.0, ans=0.1
+2024-08-26 19:45:30,270 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=127962.66666666667, ans=0.0
+2024-08-26 19:45:43,648 INFO [train.py:1114] (1/4) Epoch 10, batch 1600, loss[loss=0.2161, simple_loss=0.2872, pruned_loss=0.05203, ctc_loss=0.1025, over 19843.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2832, pruned_loss=0.05647, ctc_loss=0.1054, over 3837124.92 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:45:43,924 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=128016.0, ans=0.125
+2024-08-26 19:45:46,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128016.0, ans=0.1
+2024-08-26 19:46:13,994 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=128176.0, ans=0.125
+2024-08-26 19:46:18,706 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=128176.0, ans=0.0
+2024-08-26 19:46:26,524 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.210e+02 1.460e+02 1.671e+02 2.068e+02 2.984e+02, threshold=3.342e+02, percent-clipped=0.0
+2024-08-26 19:46:26,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=128229.33333333333, ans=0.125
+2024-08-26 19:46:33,074 INFO [train.py:1114] (1/4) Epoch 10, batch 1650, loss[loss=0.2129, simple_loss=0.2896, pruned_loss=0.0498, ctc_loss=0.09144, over 19672.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2827, pruned_loss=0.05605, ctc_loss=0.1047, over 3833661.31 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:46:46,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=128336.0, ans=0.0
+2024-08-26 19:47:00,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=128389.33333333333, ans=0.125
+2024-08-26 19:47:14,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=128496.0, ans=0.125
+2024-08-26 19:47:23,381 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=128496.0, ans=0.125
+2024-08-26 19:47:24,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=128496.0, ans=0.125
+2024-08-26 19:47:28,672 INFO [train.py:1114] (1/4) Epoch 10, batch 1700, loss[loss=0.202, simple_loss=0.2581, pruned_loss=0.05377, ctc_loss=0.09605, over 19655.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2821, pruned_loss=0.05564, ctc_loss=0.1039, over 3847897.17 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:47:32,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128549.33333333333, ans=0.1
+2024-08-26 19:47:40,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=128602.66666666667, ans=0.125
+2024-08-26 19:47:47,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=128656.0, ans=0.125
+2024-08-26 19:47:50,137 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128656.0, ans=0.1
+2024-08-26 19:48:15,897 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.44 vs. limit=22.5
+2024-08-26 19:48:18,882 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.440e+02 1.568e+02 1.897e+02 2.765e+02, threshold=3.136e+02, percent-clipped=0.0
+2024-08-26 19:48:20,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=128762.66666666667, ans=0.125
+2024-08-26 19:48:25,123 INFO [train.py:1114] (1/4) Epoch 10, batch 1750, loss[loss=0.1821, simple_loss=0.2423, pruned_loss=0.04453, ctc_loss=0.08224, over 19670.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2813, pruned_loss=0.05518, ctc_loss=0.1029, over 3851825.52 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:48:26,482 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.98 vs. limit=15.0
+2024-08-26 19:48:34,264 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.58 vs. limit=15.0
+2024-08-26 19:48:42,116 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.88 vs. limit=15.0
+2024-08-26 19:49:08,980 INFO [train.py:1114] (1/4) Epoch 10, batch 1800, loss[loss=0.2218, simple_loss=0.2893, pruned_loss=0.05574, ctc_loss=0.1073, over 19616.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2815, pruned_loss=0.05522, ctc_loss=0.1029, over 3852281.16 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:49:33,179 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.47 vs. limit=12.0
+2024-08-26 19:49:38,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=129242.66666666667, ans=0.125
+2024-08-26 19:49:43,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129242.66666666667, ans=0.1
+2024-08-26 19:49:49,354 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.466e+02 1.715e+02 2.130e+02 3.505e+02, threshold=3.430e+02, percent-clipped=4.0
+2024-08-26 19:49:55,059 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.86 vs. limit=15.0
+2024-08-26 19:49:55,615 INFO [train.py:1114] (1/4) Epoch 10, batch 1850, loss[loss=0.2136, simple_loss=0.2817, pruned_loss=0.05196, ctc_loss=0.104, over 19591.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2808, pruned_loss=0.05496, ctc_loss=0.1024, over 3854580.27 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:49:55,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=129349.33333333333, ans=0.2
+2024-08-26 19:50:05,680 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.73 vs. limit=22.5
+2024-08-26 19:50:14,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=129456.0, ans=0.125
+2024-08-26 19:50:26,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=129456.0, ans=0.125
+2024-08-26 19:50:35,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=129509.33333333333, ans=0.1
+2024-08-26 19:50:39,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=129509.33333333333, ans=0.125
+2024-08-26 19:50:50,324 INFO [train.py:1114] (1/4) Epoch 10, batch 1900, loss[loss=0.2231, simple_loss=0.2969, pruned_loss=0.05406, ctc_loss=0.1031, over 19661.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2813, pruned_loss=0.05495, ctc_loss=0.1025, over 3860649.34 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:51:13,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=129722.66666666667, ans=0.125
+2024-08-26 19:51:20,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=129776.0, ans=0.0
+2024-08-26 19:51:27,690 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.498e+02 1.655e+02 1.944e+02 4.101e+02, threshold=3.311e+02, percent-clipped=1.0
+2024-08-26 19:51:30,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=129829.33333333333, ans=0.025
+2024-08-26 19:51:33,752 INFO [train.py:1114] (1/4) Epoch 10, batch 1950, loss[loss=0.1979, simple_loss=0.272, pruned_loss=0.04514, ctc_loss=0.08379, over 19601.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2825, pruned_loss=0.05522, ctc_loss=0.103, over 3869545.62 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:51:35,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=129882.66666666667, ans=0.1
+2024-08-26 19:52:42,174 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=130096.0, ans=0.1
+2024-08-26 19:52:47,508 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.14 vs. limit=15.0
+2024-08-26 19:52:51,600 INFO [train.py:1114] (1/4) Epoch 10, batch 2000, loss[loss=0.2061, simple_loss=0.263, pruned_loss=0.05448, ctc_loss=0.1004, over 19652.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2836, pruned_loss=0.05582, ctc_loss=0.1042, over 3854942.46 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:52:52,830 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.10 vs. limit=6.0
+2024-08-26 19:53:13,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=130256.0, ans=0.125
+2024-08-26 19:53:23,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=130309.33333333333, ans=0.125
+2024-08-26 19:53:28,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=130362.66666666667, ans=0.0
+2024-08-26 19:53:29,077 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.467e+02 1.617e+02 1.850e+02 3.299e+02, threshold=3.233e+02, percent-clipped=0.0
+2024-08-26 19:53:31,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=130362.66666666667, ans=0.1
+2024-08-26 19:53:35,207 INFO [train.py:1114] (1/4) Epoch 10, batch 2050, loss[loss=0.186, simple_loss=0.2568, pruned_loss=0.04191, ctc_loss=0.07831, over 19689.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2824, pruned_loss=0.05552, ctc_loss=0.1036, over 3851297.82 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:53:38,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=130416.0, ans=0.1
+2024-08-26 19:53:56,516 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.04 vs. limit=6.0
+2024-08-26 19:54:09,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=130629.33333333333, ans=0.025
+2024-08-26 19:54:09,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=130629.33333333333, ans=0.04949747468305833
+2024-08-26 19:54:14,751 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.94 vs. limit=22.5
+2024-08-26 19:54:18,657 INFO [train.py:1114] (1/4) Epoch 10, batch 2100, loss[loss=0.2174, simple_loss=0.2833, pruned_loss=0.05549, ctc_loss=0.1013, over 19773.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2815, pruned_loss=0.05483, ctc_loss=0.1024, over 3857717.85 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:54:24,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=130682.66666666667, ans=0.2
+2024-08-26 19:54:26,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=130736.0, ans=0.1
+2024-08-26 19:54:37,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=130789.33333333333, ans=0.125
+2024-08-26 19:54:41,910 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.41 vs. limit=22.5
+2024-08-26 19:54:42,020 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.53 vs. limit=6.0
+2024-08-26 19:54:48,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=130842.66666666667, ans=0.125
+2024-08-26 19:54:51,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=130842.66666666667, ans=0.125
+2024-08-26 19:54:56,946 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.237e+02 1.404e+02 1.614e+02 1.979e+02 3.349e+02, threshold=3.228e+02, percent-clipped=1.0
+2024-08-26 19:54:59,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=130896.0, ans=0.1
+2024-08-26 19:55:03,178 INFO [train.py:1114] (1/4) Epoch 10, batch 2150, loss[loss=0.1959, simple_loss=0.2633, pruned_loss=0.04634, ctc_loss=0.08956, over 19584.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.281, pruned_loss=0.05471, ctc_loss=0.102, over 3868234.63 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:55:11,186 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=131002.66666666667, ans=0.025
+2024-08-26 19:55:12,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=131002.66666666667, ans=0.025
+2024-08-26 19:55:13,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=131002.66666666667, ans=0.1
+2024-08-26 19:55:15,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=131002.66666666667, ans=0.125
+2024-08-26 19:55:33,963 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.71 vs. limit=10.0
+2024-08-26 19:55:43,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=131162.66666666666, ans=0.05
+2024-08-26 19:55:49,739 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.00 vs. limit=15.0
+2024-08-26 19:55:50,183 INFO [train.py:1114] (1/4) Epoch 10, batch 2200, loss[loss=0.2291, simple_loss=0.2918, pruned_loss=0.06228, ctc_loss=0.1047, over 19586.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2805, pruned_loss=0.05434, ctc_loss=0.1012, over 3867086.24 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:55:50,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=131216.0, ans=0.0
+2024-08-26 19:55:57,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=131216.0, ans=0.035
+2024-08-26 19:55:57,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=131216.0, ans=0.125
+2024-08-26 19:56:00,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=131269.33333333334, ans=0.1
+2024-08-26 19:56:14,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=131269.33333333334, ans=0.0
+2024-08-26 19:56:20,313 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=131322.66666666666, ans=0.125
+2024-08-26 19:56:38,538 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.505e+02 1.694e+02 1.989e+02 3.015e+02, threshold=3.388e+02, percent-clipped=0.0
+2024-08-26 19:56:39,766 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.10 vs. limit=22.5
+2024-08-26 19:56:44,629 INFO [train.py:1114] (1/4) Epoch 10, batch 2250, loss[loss=0.2182, simple_loss=0.2908, pruned_loss=0.05305, ctc_loss=0.09894, over 19612.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2811, pruned_loss=0.05464, ctc_loss=0.1017, over 3866927.65 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:56:51,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=131482.66666666666, ans=0.125
+2024-08-26 19:56:55,386 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:56:58,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=131536.0, ans=0.0
+2024-08-26 19:57:05,858 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.07 vs. limit=10.0
+2024-08-26 19:57:27,777 INFO [train.py:1114] (1/4) Epoch 10, batch 2300, loss[loss=0.1966, simple_loss=0.2612, pruned_loss=0.04826, ctc_loss=0.0888, over 19491.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2805, pruned_loss=0.05451, ctc_loss=0.1016, over 3861842.14 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:57:27,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=131749.33333333334, ans=0.1
+2024-08-26 19:57:30,100 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.81 vs. limit=6.0
+2024-08-26 19:57:34,163 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=131749.33333333334, ans=0.125
+2024-08-26 19:57:50,547 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=131856.0, ans=0.125
+2024-08-26 19:57:55,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.30 vs. limit=22.5
+2024-08-26 19:58:02,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=131962.66666666666, ans=0.2
+2024-08-26 19:58:05,764 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.213e+02 1.499e+02 1.709e+02 2.092e+02 3.241e+02, threshold=3.418e+02, percent-clipped=0.0
+2024-08-26 19:58:43,743 INFO [train.py:1114] (1/4) Epoch 10, batch 2350, loss[loss=0.2294, simple_loss=0.2875, pruned_loss=0.06291, ctc_loss=0.1136, over 19659.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2805, pruned_loss=0.05475, ctc_loss=0.1021, over 3863871.65 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-26 19:59:06,038 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=132122.66666666666, ans=0.125
+2024-08-26 19:59:14,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132176.0, ans=0.1
+2024-08-26 19:59:15,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=132176.0, ans=0.2
+2024-08-26 19:59:24,420 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.91 vs. limit=10.0
+2024-08-26 19:59:26,134 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=8.83 vs. limit=15.0
+2024-08-26 19:59:32,682 INFO [train.py:1114] (1/4) Epoch 10, batch 2400, loss[loss=0.2214, simple_loss=0.2866, pruned_loss=0.05714, ctc_loss=0.1044, over 19163.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2824, pruned_loss=0.05553, ctc_loss=0.1036, over 3857574.56 frames. ], batch size: 71, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:59:36,304 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=132282.66666666666, ans=0.125
+2024-08-26 19:59:37,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=132282.66666666666, ans=0.125
+2024-08-26 19:59:37,936 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=132282.66666666666, ans=0.125
+2024-08-26 20:00:10,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=132389.33333333334, ans=0.025
+2024-08-26 20:00:18,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=132442.66666666666, ans=0.125
+2024-08-26 20:00:19,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=132442.66666666666, ans=0.125
+2024-08-26 20:00:20,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132442.66666666666, ans=0.1
+2024-08-26 20:00:25,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=132496.0, ans=0.125
+2024-08-26 20:00:36,862 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.213e+02 1.532e+02 1.694e+02 1.900e+02 3.260e+02, threshold=3.387e+02, percent-clipped=0.0
+2024-08-26 20:00:42,847 INFO [train.py:1114] (1/4) Epoch 10, batch 2450, loss[loss=0.2914, simple_loss=0.3192, pruned_loss=0.0966, ctc_loss=0.1763, over 13381.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2865, pruned_loss=0.05885, ctc_loss=0.1101, over 3729699.44 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 20:00:59,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=132602.66666666666, ans=0.0
+2024-08-26 20:03:28,140 INFO [train.py:1114] (1/4) Epoch 11, batch 0, loss[loss=0.2016, simple_loss=0.2673, pruned_loss=0.04905, ctc_loss=0.09449, over 19408.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2673, pruned_loss=0.04905, ctc_loss=0.09449, over 19408.00 frames. ], batch size: 48, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:03:28,141 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 20:03:42,230 INFO [train.py:1146] (1/4) Epoch 11, validation: loss=0.1858, simple_loss=0.2776, pruned_loss=0.03491, ctc_loss=0.06042, over 944034.00 frames.
+2024-08-26 20:03:42,231 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12907MB
+2024-08-26 20:03:46,155 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=132757.33333333334, ans=10.0
+2024-08-26 20:03:54,659 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:04:01,502 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.27 vs. limit=10.0
+2024-08-26 20:04:11,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=132864.0, ans=10.0
+2024-08-26 20:04:15,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=132917.33333333334, ans=0.1
+2024-08-26 20:04:17,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=132917.33333333334, ans=0.0
+2024-08-26 20:04:21,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=132970.66666666666, ans=0.05
+2024-08-26 20:04:32,361 INFO [train.py:1114] (1/4) Epoch 11, batch 50, loss[loss=0.1783, simple_loss=0.2471, pruned_loss=0.04014, ctc_loss=0.07329, over 19716.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2809, pruned_loss=0.05488, ctc_loss=0.1029, over 845449.42 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:04:37,378 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=133024.0, ans=0.09899494936611666
+2024-08-26 20:04:37,948 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.624e+02 1.801e+02 2.017e+02 3.320e+02, threshold=3.603e+02, percent-clipped=0.0
+2024-08-26 20:04:40,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=133024.0, ans=0.125
+2024-08-26 20:04:59,456 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=133130.66666666666, ans=0.1
+2024-08-26 20:05:07,201 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.44 vs. limit=10.0
+2024-08-26 20:05:09,453 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=133184.0, ans=0.035
+2024-08-26 20:05:18,991 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.90 vs. limit=15.0
+2024-08-26 20:05:20,626 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=133290.66666666666, ans=0.125
+2024-08-26 20:05:21,253 INFO [train.py:1114] (1/4) Epoch 11, batch 100, loss[loss=0.2106, simple_loss=0.2776, pruned_loss=0.05257, ctc_loss=0.09598, over 19715.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2832, pruned_loss=0.05573, ctc_loss=0.1041, over 1498424.97 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:05:32,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=133344.0, ans=0.125
+2024-08-26 20:06:00,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=133450.66666666666, ans=0.0
+2024-08-26 20:06:10,882 INFO [train.py:1114] (1/4) Epoch 11, batch 150, loss[loss=0.1932, simple_loss=0.2569, pruned_loss=0.0479, ctc_loss=0.0842, over 19708.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2808, pruned_loss=0.05454, ctc_loss=0.1019, over 2026854.05 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:06:15,821 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:06:16,416 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.457e+02 1.584e+02 1.841e+02 2.561e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-08-26 20:06:29,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=133664.0, ans=0.0
+2024-08-26 20:06:33,805 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=133664.0, ans=0.125
+2024-08-26 20:06:37,614 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=133664.0, ans=0.2
+2024-08-26 20:06:39,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=133717.33333333334, ans=0.1
+2024-08-26 20:06:40,688 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.28 vs. limit=15.0
+2024-08-26 20:06:43,962 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=133717.33333333334, ans=0.125
+2024-08-26 20:06:53,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=133717.33333333334, ans=0.0
+2024-08-26 20:06:56,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=133770.66666666666, ans=0.1
+2024-08-26 20:08:08,067 INFO [train.py:1114] (1/4) Epoch 11, batch 200, loss[loss=0.2192, simple_loss=0.2869, pruned_loss=0.0554, ctc_loss=0.1016, over 18510.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2793, pruned_loss=0.05414, ctc_loss=0.101, over 2434621.63 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:08:15,118 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.24 vs. limit=10.0
+2024-08-26 20:08:30,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=133930.66666666666, ans=0.0
+2024-08-26 20:08:31,495 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.24 vs. limit=15.0
+2024-08-26 20:08:37,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=133984.0, ans=0.025
+2024-08-26 20:09:00,080 INFO [train.py:1114] (1/4) Epoch 11, batch 250, loss[loss=0.2327, simple_loss=0.3004, pruned_loss=0.06038, ctc_loss=0.1107, over 19382.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2794, pruned_loss=0.05392, ctc_loss=0.1004, over 2754253.18 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:09:01,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=134090.66666666666, ans=0.0
+2024-08-26 20:09:05,639 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.397e+02 1.518e+02 1.749e+02 2.921e+02, threshold=3.037e+02, percent-clipped=0.0
+2024-08-26 20:09:10,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=134090.66666666666, ans=0.0
+2024-08-26 20:09:12,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=134144.0, ans=0.125
+2024-08-26 20:09:15,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=134144.0, ans=0.2
+2024-08-26 20:09:18,551 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=134144.0, ans=0.0
+2024-08-26 20:09:22,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=134197.33333333334, ans=0.0
+2024-08-26 20:09:22,448 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=134197.33333333334, ans=0.0
+2024-08-26 20:09:34,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=134250.66666666666, ans=0.1
+2024-08-26 20:09:41,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=134304.0, ans=0.025
+2024-08-26 20:09:42,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=134304.0, ans=0.0
+2024-08-26 20:09:44,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=134304.0, ans=0.0
+2024-08-26 20:09:51,421 INFO [train.py:1114] (1/4) Epoch 11, batch 300, loss[loss=0.2103, simple_loss=0.2802, pruned_loss=0.05136, ctc_loss=0.09434, over 19510.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.279, pruned_loss=0.05366, ctc_loss=0.09982, over 3000172.55 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:10:11,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=134410.66666666666, ans=0.125
+2024-08-26 20:10:25,945 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.70 vs. limit=15.0
+2024-08-26 20:10:41,575 INFO [train.py:1114] (1/4) Epoch 11, batch 350, loss[loss=0.1983, simple_loss=0.2562, pruned_loss=0.05032, ctc_loss=0.09964, over 19724.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2795, pruned_loss=0.05375, ctc_loss=0.1001, over 3190539.03 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:10:45,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=134624.0, ans=0.125
+2024-08-26 20:10:47,199 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.479e+02 1.637e+02 2.052e+02 3.441e+02, threshold=3.275e+02, percent-clipped=1.0
+2024-08-26 20:10:56,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=134677.33333333334, ans=0.0
+2024-08-26 20:11:08,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=134730.66666666666, ans=0.1
+2024-08-26 20:11:26,045 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=134837.33333333334, ans=0.125
+2024-08-26 20:11:29,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.22 vs. limit=15.0
+2024-08-26 20:11:30,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=134890.66666666666, ans=0.0
+2024-08-26 20:11:31,275 INFO [train.py:1114] (1/4) Epoch 11, batch 400, loss[loss=0.2113, simple_loss=0.2789, pruned_loss=0.05235, ctc_loss=0.0975, over 19513.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.279, pruned_loss=0.05331, ctc_loss=0.09946, over 3342563.84 frames. ], batch size: 54, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:11:40,820 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer_ff2.min_abs, batch_count=134890.66666666666, ans=0.1
+2024-08-26 20:11:55,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=134997.33333333334, ans=0.2
+2024-08-26 20:11:57,161 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=24.22 vs. limit=22.5
+2024-08-26 20:11:58,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=134997.33333333334, ans=0.0
+2024-08-26 20:12:04,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=135050.66666666666, ans=0.015
+2024-08-26 20:12:19,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=135104.0, ans=0.125
+2024-08-26 20:12:20,807 INFO [train.py:1114] (1/4) Epoch 11, batch 450, loss[loss=0.2009, simple_loss=0.2765, pruned_loss=0.04535, ctc_loss=0.08625, over 19618.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2792, pruned_loss=0.05351, ctc_loss=0.0998, over 3451323.42 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:12:22,199 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.03 vs. limit=22.5
+2024-08-26 20:12:29,025 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.489e+02 1.652e+02 2.008e+02 3.634e+02, threshold=3.305e+02, percent-clipped=1.0
+2024-08-26 20:12:32,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=135210.66666666666, ans=0.2
+2024-08-26 20:12:33,116 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-26 20:12:42,380 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.39 vs. limit=15.0
+2024-08-26 20:12:42,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=135264.0, ans=0.125
+2024-08-26 20:13:11,612 INFO [train.py:1114] (1/4) Epoch 11, batch 500, loss[loss=0.2245, simple_loss=0.2895, pruned_loss=0.05867, ctc_loss=0.1054, over 19714.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2782, pruned_loss=0.05304, ctc_loss=0.09888, over 3547378.58 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:13:16,666 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.36 vs. limit=6.0
+2024-08-26 20:13:20,665 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=135477.33333333334, ans=0.0
+2024-08-26 20:13:50,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=135637.33333333334, ans=10.0
+2024-08-26 20:13:51,492 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=135637.33333333334, ans=0.1
+2024-08-26 20:13:58,579 INFO [train.py:1114] (1/4) Epoch 11, batch 550, loss[loss=0.2433, simple_loss=0.3049, pruned_loss=0.06564, ctc_loss=0.126, over 19374.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2783, pruned_loss=0.05306, ctc_loss=0.09905, over 3609558.62 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:13:59,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-26 20:14:06,853 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.449e+02 1.695e+02 2.078e+02 4.377e+02, threshold=3.390e+02, percent-clipped=1.0
+2024-08-26 20:14:07,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=135690.66666666666, ans=0.0
+2024-08-26 20:14:10,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=135744.0, ans=0.125
+2024-08-26 20:14:21,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=135797.33333333334, ans=0.025
+2024-08-26 20:14:23,571 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.31 vs. limit=22.5
+2024-08-26 20:14:28,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=135797.33333333334, ans=0.0
+2024-08-26 20:14:37,796 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=135850.66666666666, ans=0.1
+2024-08-26 20:14:50,535 INFO [train.py:1114] (1/4) Epoch 11, batch 600, loss[loss=0.2387, simple_loss=0.306, pruned_loss=0.06309, ctc_loss=0.1128, over 19334.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2789, pruned_loss=0.0534, ctc_loss=0.09978, over 3665906.05 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:14:53,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=135957.33333333334, ans=0.125
+2024-08-26 20:15:05,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=136010.66666666666, ans=0.1
+2024-08-26 20:15:08,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=136010.66666666666, ans=0.0
+2024-08-26 20:15:14,559 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.81 vs. limit=15.0
+2024-08-26 20:15:24,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=136117.33333333334, ans=0.125
+2024-08-26 20:15:41,567 INFO [train.py:1114] (1/4) Epoch 11, batch 650, loss[loss=0.1967, simple_loss=0.28, pruned_loss=0.03988, ctc_loss=0.08425, over 19775.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2782, pruned_loss=0.05299, ctc_loss=0.09892, over 3715925.17 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:15:44,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=136224.0, ans=0.0
+2024-08-26 20:15:47,093 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.457e+02 1.627e+02 2.058e+02 3.143e+02, threshold=3.253e+02, percent-clipped=0.0
+2024-08-26 20:16:27,820 INFO [train.py:1114] (1/4) Epoch 11, batch 700, loss[loss=0.2026, simple_loss=0.2658, pruned_loss=0.05047, ctc_loss=0.09612, over 19718.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2785, pruned_loss=0.05316, ctc_loss=0.09928, over 3747623.78 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:16:30,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=136490.66666666666, ans=0.0
+2024-08-26 20:17:05,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=136650.66666666666, ans=0.125
+2024-08-26 20:17:16,586 INFO [train.py:1114] (1/4) Epoch 11, batch 750, loss[loss=0.2021, simple_loss=0.2807, pruned_loss=0.0451, ctc_loss=0.08297, over 19503.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.278, pruned_loss=0.05301, ctc_loss=0.09885, over 3774889.90 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:17:24,265 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.63 vs. limit=15.0
+2024-08-26 20:17:24,639 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.496e+02 1.727e+02 2.151e+02 3.286e+02, threshold=3.455e+02, percent-clipped=1.0
+2024-08-26 20:17:28,096 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.34 vs. limit=15.0
+2024-08-26 20:17:32,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=136810.66666666666, ans=0.125
+2024-08-26 20:17:38,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=136864.0, ans=0.2
+2024-08-26 20:17:40,831 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.25 vs. limit=15.0
+2024-08-26 20:17:53,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=136917.33333333334, ans=0.0
+2024-08-26 20:17:57,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=136970.66666666666, ans=0.0
+2024-08-26 20:18:02,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=136970.66666666666, ans=0.07
+2024-08-26 20:18:07,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137024.0, ans=0.1
+2024-08-26 20:18:08,124 INFO [train.py:1114] (1/4) Epoch 11, batch 800, loss[loss=0.1835, simple_loss=0.2562, pruned_loss=0.04088, ctc_loss=0.07279, over 19782.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2784, pruned_loss=0.05338, ctc_loss=0.0996, over 3797096.71 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:18:15,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=137024.0, ans=0.025
+2024-08-26 20:18:26,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=137130.66666666666, ans=0.125
+2024-08-26 20:18:29,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=137130.66666666666, ans=0.125
+2024-08-26 20:19:13,269 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=137237.33333333334, ans=0.0
+2024-08-26 20:19:27,349 INFO [train.py:1114] (1/4) Epoch 11, batch 850, loss[loss=0.2218, simple_loss=0.2904, pruned_loss=0.05568, ctc_loss=0.1046, over 19685.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2781, pruned_loss=0.05324, ctc_loss=0.09944, over 3816208.68 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:19:39,843 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.453e+02 1.601e+02 1.920e+02 5.497e+02, threshold=3.202e+02, percent-clipped=1.0
+2024-08-26 20:19:40,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=137290.66666666666, ans=0.125
+2024-08-26 20:19:49,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=137344.0, ans=0.125
+2024-08-26 20:19:54,460 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=137344.0, ans=0.0
+2024-08-26 20:20:17,632 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=137450.66666666666, ans=0.5
+2024-08-26 20:20:29,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=137450.66666666666, ans=0.0
+2024-08-26 20:20:44,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=137504.0, ans=0.2
+2024-08-26 20:20:53,074 INFO [train.py:1114] (1/4) Epoch 11, batch 900, loss[loss=0.1843, simple_loss=0.2511, pruned_loss=0.04336, ctc_loss=0.07701, over 19413.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2781, pruned_loss=0.05339, ctc_loss=0.09966, over 3819692.85 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 16.0
+2024-08-26 20:21:10,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=137610.66666666666, ans=10.0
+2024-08-26 20:21:18,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=137664.0, ans=0.0
+2024-08-26 20:21:29,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=137717.33333333334, ans=0.1
+2024-08-26 20:21:34,556 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=137717.33333333334, ans=0.2
+2024-08-26 20:21:48,794 INFO [train.py:1114] (1/4) Epoch 11, batch 950, loss[loss=0.1897, simple_loss=0.2609, pruned_loss=0.04328, ctc_loss=0.07963, over 19495.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2789, pruned_loss=0.05378, ctc_loss=0.1004, over 3821715.22 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 16.0
+2024-08-26 20:21:55,400 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.468e+02 1.744e+02 2.017e+02 3.816e+02, threshold=3.488e+02, percent-clipped=2.0
+2024-08-26 20:21:55,785 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=137824.0, ans=0.125
+2024-08-26 20:21:56,583 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=137824.0, ans=0.2
+2024-08-26 20:22:33,515 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=138037.33333333334, ans=0.0
+2024-08-26 20:22:40,794 INFO [train.py:1114] (1/4) Epoch 11, batch 1000, loss[loss=0.2127, simple_loss=0.2809, pruned_loss=0.05262, ctc_loss=0.09792, over 19861.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2795, pruned_loss=0.05394, ctc_loss=0.1007, over 3818232.30 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:22:46,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=138090.66666666666, ans=0.0
+2024-08-26 20:22:49,426 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=138144.0, ans=0.0
+2024-08-26 20:22:56,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=138144.0, ans=0.0
+2024-08-26 20:23:04,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=138197.33333333334, ans=0.125
+2024-08-26 20:23:10,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=138250.66666666666, ans=0.125
+2024-08-26 20:23:11,779 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.31 vs. limit=15.0
+2024-08-26 20:23:25,728 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:23:28,368 INFO [train.py:1114] (1/4) Epoch 11, batch 1050, loss[loss=0.1912, simple_loss=0.2699, pruned_loss=0.04045, ctc_loss=0.07886, over 19836.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2784, pruned_loss=0.05352, ctc_loss=0.0999, over 3825246.23 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:23:34,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=138357.33333333334, ans=0.0
+2024-08-26 20:23:34,927 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.363e+02 1.534e+02 1.839e+02 4.578e+02, threshold=3.069e+02, percent-clipped=1.0
+2024-08-26 20:24:08,200 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.56 vs. limit=15.0
+2024-08-26 20:24:10,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=138410.66666666666, ans=0.2
+2024-08-26 20:24:10,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=138410.66666666666, ans=0.5
+2024-08-26 20:24:12,850 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=138410.66666666666, ans=0.0
+2024-08-26 20:25:07,603 INFO [train.py:1114] (1/4) Epoch 11, batch 1100, loss[loss=0.1899, simple_loss=0.2643, pruned_loss=0.04319, ctc_loss=0.073, over 19595.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2781, pruned_loss=0.05295, ctc_loss=0.09896, over 3832824.15 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:25:08,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=138624.0, ans=0.025
+2024-08-26 20:25:19,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=138677.33333333334, ans=0.0
+2024-08-26 20:25:30,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=138730.66666666666, ans=0.05
+2024-08-26 20:25:32,510 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff3.min_abs, batch_count=138730.66666666666, ans=0.2
+2024-08-26 20:25:36,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=138784.0, ans=0.125
+2024-08-26 20:25:36,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=138784.0, ans=0.125
+2024-08-26 20:25:37,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=138784.0, ans=0.125
+2024-08-26 20:25:43,085 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=138784.0, ans=0.0
+2024-08-26 20:25:56,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=138890.66666666666, ans=0.125
+2024-08-26 20:25:56,914 INFO [train.py:1114] (1/4) Epoch 11, batch 1150, loss[loss=0.1931, simple_loss=0.2656, pruned_loss=0.04438, ctc_loss=0.07998, over 19607.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2779, pruned_loss=0.05304, ctc_loss=0.09909, over 3832353.99 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:26:03,585 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.470e+02 1.661e+02 1.952e+02 3.516e+02, threshold=3.323e+02, percent-clipped=2.0
+2024-08-26 20:26:12,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=138944.0, ans=0.125
+2024-08-26 20:26:15,319 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.46 vs. limit=15.0
+2024-08-26 20:26:15,377 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.77 vs. limit=15.0
+2024-08-26 20:26:40,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=139104.0, ans=0.125
+2024-08-26 20:26:45,579 INFO [train.py:1114] (1/4) Epoch 11, batch 1200, loss[loss=0.2154, simple_loss=0.2832, pruned_loss=0.05346, ctc_loss=0.1014, over 19837.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2788, pruned_loss=0.05321, ctc_loss=0.09966, over 3827484.35 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:26:46,150 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.66 vs. limit=12.0
+2024-08-26 20:26:52,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=139157.33333333334, ans=0.0
+2024-08-26 20:26:54,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=139210.66666666666, ans=0.0
+2024-08-26 20:26:58,830 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=139210.66666666666, ans=0.2
+2024-08-26 20:27:06,729 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.75 vs. limit=22.5
+2024-08-26 20:27:10,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=139264.0, ans=0.0
+2024-08-26 20:27:13,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=139317.33333333334, ans=0.2
+2024-08-26 20:27:30,475 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=139317.33333333334, ans=10.0
+2024-08-26 20:27:30,867 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.88 vs. limit=22.5
+2024-08-26 20:28:18,701 INFO [train.py:1114] (1/4) Epoch 11, batch 1250, loss[loss=0.2153, simple_loss=0.2872, pruned_loss=0.0526, ctc_loss=0.0958, over 19537.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2789, pruned_loss=0.05297, ctc_loss=0.09922, over 3845387.56 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:28:27,604 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.224e+02 1.425e+02 1.545e+02 1.729e+02 3.064e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-26 20:28:34,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=139477.33333333334, ans=0.0
+2024-08-26 20:28:58,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=139584.0, ans=0.125
+2024-08-26 20:29:06,404 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.91 vs. limit=6.0
+2024-08-26 20:29:12,944 INFO [train.py:1114] (1/4) Epoch 11, batch 1300, loss[loss=0.2491, simple_loss=0.3111, pruned_loss=0.06868, ctc_loss=0.1247, over 18837.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2783, pruned_loss=0.05259, ctc_loss=0.09859, over 3847789.11 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:35:20,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=139797.33333333334, ans=0.125
+2024-08-26 20:35:28,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=139797.33333333334, ans=0.125
+2024-08-26 20:35:31,639 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=139850.66666666666, ans=0.1
+2024-08-26 20:35:49,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=139904.0, ans=0.0
+2024-08-26 20:35:49,743 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.22 vs. limit=22.5
+2024-08-26 20:35:52,052 INFO [train.py:1114] (1/4) Epoch 11, batch 1350, loss[loss=0.1969, simple_loss=0.275, pruned_loss=0.04333, ctc_loss=0.08032, over 19754.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2781, pruned_loss=0.0525, ctc_loss=0.09831, over 3858244.04 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:35:58,549 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.441e+02 1.644e+02 1.919e+02 3.174e+02, threshold=3.287e+02, percent-clipped=1.0
+2024-08-26 20:36:00,604 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=140010.66666666666, ans=0.1
+2024-08-26 20:36:37,739 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=140170.66666666666, ans=0.125
+2024-08-26 20:36:37,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=140170.66666666666, ans=0.0
+2024-08-26 20:36:38,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=140170.66666666666, ans=0.025
+2024-08-26 20:36:39,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=140170.66666666666, ans=0.125
+2024-08-26 20:36:40,738 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.79 vs. limit=15.0
+2024-08-26 20:36:41,170 INFO [train.py:1114] (1/4) Epoch 11, batch 1400, loss[loss=0.1895, simple_loss=0.249, pruned_loss=0.04689, ctc_loss=0.09069, over 19659.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.278, pruned_loss=0.05251, ctc_loss=0.09822, over 3865075.90 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:36:53,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=140277.33333333334, ans=0.125
+2024-08-26 20:37:20,654 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.07 vs. limit=15.0
+2024-08-26 20:37:40,128 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.47 vs. limit=15.0
+2024-08-26 20:37:42,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=140384.0, ans=0.0
+2024-08-26 20:37:50,407 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.22 vs. limit=15.0
+2024-08-26 20:38:01,750 INFO [train.py:1114] (1/4) Epoch 11, batch 1450, loss[loss=0.2275, simple_loss=0.29, pruned_loss=0.06021, ctc_loss=0.1116, over 19704.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.279, pruned_loss=0.05292, ctc_loss=0.09888, over 3862923.73 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:38:08,100 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.486e+02 1.636e+02 1.926e+02 3.321e+02, threshold=3.272e+02, percent-clipped=1.0
+2024-08-26 20:38:14,614 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=140544.0, ans=0.0
+2024-08-26 20:38:33,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=140650.66666666666, ans=0.125
+2024-08-26 20:38:42,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=140704.0, ans=0.0
+2024-08-26 20:38:50,489 INFO [train.py:1114] (1/4) Epoch 11, batch 1500, loss[loss=0.2016, simple_loss=0.2776, pruned_loss=0.04584, ctc_loss=0.08467, over 19592.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2789, pruned_loss=0.05263, ctc_loss=0.09854, over 3862884.47 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:39:00,383 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=140757.33333333334, ans=0.0
+2024-08-26 20:39:03,363 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.13 vs. limit=6.0
+2024-08-26 20:39:12,593 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.65 vs. limit=6.0
+2024-08-26 20:39:19,091 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=140864.0, ans=0.1
+2024-08-26 20:39:37,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=140970.66666666666, ans=0.0
+2024-08-26 20:39:38,999 INFO [train.py:1114] (1/4) Epoch 11, batch 1550, loss[loss=0.2158, simple_loss=0.2809, pruned_loss=0.05564, ctc_loss=0.0986, over 19614.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2785, pruned_loss=0.05276, ctc_loss=0.09869, over 3847926.47 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:39:39,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=141024.0, ans=0.125
+2024-08-26 20:39:43,681 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=141024.0, ans=0.0
+2024-08-26 20:39:45,241 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.401e+02 1.612e+02 1.919e+02 3.103e+02, threshold=3.225e+02, percent-clipped=0.0
+2024-08-26 20:39:59,209 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=141130.66666666666, ans=0.125
+2024-08-26 20:40:28,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=141237.33333333334, ans=0.2
+2024-08-26 20:40:29,942 INFO [train.py:1114] (1/4) Epoch 11, batch 1600, loss[loss=0.2426, simple_loss=0.3065, pruned_loss=0.06495, ctc_loss=0.1222, over 19840.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2784, pruned_loss=0.05284, ctc_loss=0.09879, over 3837004.12 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:40:35,780 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=141290.66666666666, ans=0.125
+2024-08-26 20:40:38,178 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=6.29 vs. limit=15.0
+2024-08-26 20:40:53,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=141397.33333333334, ans=0.125
+2024-08-26 20:41:07,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=141450.66666666666, ans=0.0
+2024-08-26 20:41:16,277 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=141504.0, ans=0.125
+2024-08-26 20:41:18,741 INFO [train.py:1114] (1/4) Epoch 11, batch 1650, loss[loss=0.2067, simple_loss=0.2801, pruned_loss=0.04884, ctc_loss=0.0889, over 19656.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2779, pruned_loss=0.05278, ctc_loss=0.09863, over 3833068.39 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:41:22,162 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.59 vs. limit=15.0
+2024-08-26 20:41:25,303 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.523e+02 1.726e+02 1.964e+02 3.202e+02, threshold=3.451e+02, percent-clipped=0.0
+2024-08-26 20:41:25,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=141557.33333333334, ans=0.025
+2024-08-26 20:41:33,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.39 vs. limit=10.0
+2024-08-26 20:41:51,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=141717.33333333334, ans=0.025
+2024-08-26 20:42:07,203 INFO [train.py:1114] (1/4) Epoch 11, batch 1700, loss[loss=0.1868, simple_loss=0.2505, pruned_loss=0.04546, ctc_loss=0.08022, over 19652.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2781, pruned_loss=0.05271, ctc_loss=0.09842, over 3847348.79 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:42:20,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=141824.0, ans=0.0
+2024-08-26 20:42:33,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=141930.66666666666, ans=0.2
+2024-08-26 20:42:52,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=142037.33333333334, ans=0.125
+2024-08-26 20:42:59,707 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.88 vs. limit=6.0
+2024-08-26 20:43:00,056 INFO [train.py:1114] (1/4) Epoch 11, batch 1750, loss[loss=0.1785, simple_loss=0.2394, pruned_loss=0.04314, ctc_loss=0.07822, over 19616.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2776, pruned_loss=0.05243, ctc_loss=0.09789, over 3851715.47 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:43:06,155 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.441e+02 1.591e+02 1.781e+02 2.526e+02, threshold=3.183e+02, percent-clipped=0.0
+2024-08-26 20:43:09,001 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=142144.0, ans=0.0
+2024-08-26 20:43:09,139 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=142144.0, ans=0.025
+2024-08-26 20:43:18,827 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=142197.33333333334, ans=0.125
+2024-08-26 20:43:26,935 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=142197.33333333334, ans=0.0
+2024-08-26 20:43:40,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=142304.0, ans=0.1
+2024-08-26 20:43:50,376 INFO [train.py:1114] (1/4) Epoch 11, batch 1800, loss[loss=0.2147, simple_loss=0.2869, pruned_loss=0.05188, ctc_loss=0.09683, over 19611.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2773, pruned_loss=0.0522, ctc_loss=0.09747, over 3854080.64 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:43:56,081 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.30 vs. limit=15.0
+2024-08-26 20:44:05,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=142410.66666666666, ans=0.0
+2024-08-26 20:44:17,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=142464.0, ans=0.1
+2024-08-26 20:44:18,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=142464.0, ans=0.04949747468305833
+2024-08-26 20:44:26,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=142517.33333333334, ans=0.125
+2024-08-26 20:44:28,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=142517.33333333334, ans=0.0
+2024-08-26 20:44:32,378 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=142570.66666666666, ans=0.125
+2024-08-26 20:44:36,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.38 vs. limit=12.0
+2024-08-26 20:44:40,937 INFO [train.py:1114] (1/4) Epoch 11, batch 1850, loss[loss=0.2129, simple_loss=0.287, pruned_loss=0.0504, ctc_loss=0.09498, over 19581.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2773, pruned_loss=0.05232, ctc_loss=0.09748, over 3856468.98 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:44:45,965 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.36 vs. limit=12.0
+2024-08-26 20:44:47,991 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.442e+02 1.639e+02 2.043e+02 4.343e+02, threshold=3.277e+02, percent-clipped=6.0
+2024-08-26 20:45:21,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=142837.33333333334, ans=0.05
+2024-08-26 20:45:29,164 INFO [train.py:1114] (1/4) Epoch 11, batch 1900, loss[loss=0.2071, simple_loss=0.2839, pruned_loss=0.04795, ctc_loss=0.08621, over 19649.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2777, pruned_loss=0.05234, ctc_loss=0.09758, over 3861115.76 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:45:49,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=142997.33333333334, ans=0.2
+2024-08-26 20:46:00,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.73 vs. limit=6.0
+2024-08-26 20:46:21,954 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.01 vs. limit=15.0
+2024-08-26 20:46:23,313 INFO [train.py:1114] (1/4) Epoch 11, batch 1950, loss[loss=0.1939, simple_loss=0.2601, pruned_loss=0.04612, ctc_loss=0.08856, over 19586.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.279, pruned_loss=0.05253, ctc_loss=0.09779, over 3870338.24 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:46:26,297 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=143157.33333333334, ans=0.025
+2024-08-26 20:46:32,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=143157.33333333334, ans=0.0
+2024-08-26 20:47:24,652 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.500e+02 1.631e+02 1.894e+02 3.317e+02, threshold=3.262e+02, percent-clipped=1.0
+2024-08-26 20:48:00,719 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.14 vs. limit=10.0
+2024-08-26 20:48:33,068 INFO [train.py:1114] (1/4) Epoch 11, batch 2000, loss[loss=0.1869, simple_loss=0.2457, pruned_loss=0.04639, ctc_loss=0.0885, over 19632.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2799, pruned_loss=0.05306, ctc_loss=0.09895, over 3855225.69 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-26 20:48:38,840 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.74 vs. limit=15.0
+2024-08-26 20:48:40,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=143424.0, ans=0.1
+2024-08-26 20:48:40,847 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.26 vs. limit=15.0
+2024-08-26 20:48:46,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=143477.33333333334, ans=0.0
+2024-08-26 20:48:48,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=143477.33333333334, ans=0.0
+2024-08-26 20:49:02,039 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.77 vs. limit=15.0
+2024-08-26 20:49:37,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=143637.33333333334, ans=0.0
+2024-08-26 20:49:39,121 INFO [train.py:1114] (1/4) Epoch 11, batch 2050, loss[loss=0.1949, simple_loss=0.2562, pruned_loss=0.04778, ctc_loss=0.09502, over 19721.00 frames. ], tot_loss[loss=0.212, simple_loss=0.279, pruned_loss=0.05287, ctc_loss=0.09843, over 3850917.53 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-26 20:49:47,236 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.448e+02 1.585e+02 1.933e+02 3.153e+02, threshold=3.170e+02, percent-clipped=0.0
+2024-08-26 20:49:47,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=143690.66666666666, ans=0.0
+2024-08-26 20:49:49,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=143744.0, ans=0.0
+2024-08-26 20:49:50,236 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=143744.0, ans=0.1
+2024-08-26 20:49:50,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=143744.0, ans=0.035
+2024-08-26 20:49:51,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=143744.0, ans=0.125
+2024-08-26 20:50:24,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=143850.66666666666, ans=0.1
+2024-08-26 20:50:37,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=143957.33333333334, ans=0.2
+2024-08-26 20:50:37,966 INFO [train.py:1114] (1/4) Epoch 11, batch 2100, loss[loss=0.2066, simple_loss=0.2779, pruned_loss=0.0493, ctc_loss=0.09163, over 19785.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2779, pruned_loss=0.05231, ctc_loss=0.09753, over 3857044.90 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:50:59,514 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=6.52 vs. limit=15.0
+2024-08-26 20:51:06,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=144117.33333333334, ans=0.07
+2024-08-26 20:51:11,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=144117.33333333334, ans=0.0
+2024-08-26 20:51:13,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=144170.66666666666, ans=0.0
+2024-08-26 20:51:13,439 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=144170.66666666666, ans=0.125
+2024-08-26 20:51:14,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=144170.66666666666, ans=0.025
+2024-08-26 20:51:23,031 INFO [train.py:1114] (1/4) Epoch 11, batch 2150, loss[loss=0.1934, simple_loss=0.2586, pruned_loss=0.04722, ctc_loss=0.0847, over 19581.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2768, pruned_loss=0.05199, ctc_loss=0.0968, over 3867527.11 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:51:23,218 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=144224.0, ans=0.125
+2024-08-26 20:51:30,827 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.485e+02 1.672e+02 2.037e+02 4.338e+02, threshold=3.345e+02, percent-clipped=7.0
+2024-08-26 20:51:39,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=144277.33333333334, ans=0.125
+2024-08-26 20:51:43,796 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.17 vs. limit=12.0
+2024-08-26 20:52:06,906 INFO [train.py:1114] (1/4) Epoch 11, batch 2200, loss[loss=0.2287, simple_loss=0.2991, pruned_loss=0.05796, ctc_loss=0.106, over 19593.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2764, pruned_loss=0.05186, ctc_loss=0.09667, over 3867221.50 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:52:14,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=144490.66666666666, ans=0.125
+2024-08-26 20:52:50,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=144757.33333333334, ans=0.125
+2024-08-26 20:52:50,880 INFO [train.py:1114] (1/4) Epoch 11, batch 2250, loss[loss=0.2354, simple_loss=0.3041, pruned_loss=0.06106, ctc_loss=0.1114, over 19604.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2767, pruned_loss=0.05202, ctc_loss=0.09704, over 3866737.60 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:52:58,753 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.461e+02 1.628e+02 1.934e+02 8.673e+02, threshold=3.256e+02, percent-clipped=2.0
+2024-08-26 20:53:06,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=144810.66666666666, ans=0.04949747468305833
+2024-08-26 20:53:10,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=144864.0, ans=0.125
+2024-08-26 20:53:16,397 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=144917.33333333334, ans=0.0
+2024-08-26 20:53:35,353 INFO [train.py:1114] (1/4) Epoch 11, batch 2300, loss[loss=0.2032, simple_loss=0.2691, pruned_loss=0.04947, ctc_loss=0.09591, over 19489.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2757, pruned_loss=0.05192, ctc_loss=0.0966, over 3861277.75 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:53:37,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=145024.0, ans=0.1
+2024-08-26 20:53:41,102 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.31 vs. limit=22.5
+2024-08-26 20:53:43,687 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=145077.33333333334, ans=0.125
+2024-08-26 20:54:03,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=145184.0, ans=0.0
+2024-08-26 20:54:20,116 INFO [train.py:1114] (1/4) Epoch 11, batch 2350, loss[loss=0.2473, simple_loss=0.3042, pruned_loss=0.07, ctc_loss=0.1258, over 19640.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2761, pruned_loss=0.05213, ctc_loss=0.09685, over 3863502.55 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:54:22,926 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=145290.66666666666, ans=0.1
+2024-08-26 20:54:27,277 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=145290.66666666666, ans=0.125
+2024-08-26 20:54:28,779 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.482e+02 1.673e+02 1.901e+02 2.829e+02, threshold=3.345e+02, percent-clipped=0.0
+2024-08-26 20:54:35,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=145344.0, ans=0.0
+2024-08-26 20:54:48,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=145450.66666666666, ans=0.2
+2024-08-26 20:54:50,843 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=145450.66666666666, ans=0.125
+2024-08-26 20:54:53,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=145450.66666666666, ans=0.025
+2024-08-26 20:55:01,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145504.0, ans=0.1
+2024-08-26 20:55:04,263 INFO [train.py:1114] (1/4) Epoch 11, batch 2400, loss[loss=0.2176, simple_loss=0.2863, pruned_loss=0.05455, ctc_loss=0.09977, over 19286.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2783, pruned_loss=0.05294, ctc_loss=0.09824, over 3858062.37 frames. ], batch size: 71, lr: 1.33e-02, grad_scale: 32.0
+2024-08-26 20:55:13,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=145610.66666666666, ans=0.09899494936611666
+2024-08-26 20:55:17,958 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.80 vs. limit=22.5
+2024-08-26 20:55:20,650 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=7.61 vs. limit=15.0
+2024-08-26 20:55:23,641 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=145664.0, ans=0.125
+2024-08-26 20:55:29,654 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=145717.33333333334, ans=0.125
+2024-08-26 20:55:29,659 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=145717.33333333334, ans=0.07
+2024-08-26 20:55:33,594 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.46 vs. limit=15.0
+2024-08-26 20:55:35,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=145717.33333333334, ans=0.125
+2024-08-26 20:55:45,027 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=145770.66666666666, ans=0.1
+2024-08-26 20:55:49,241 INFO [train.py:1114] (1/4) Epoch 11, batch 2450, loss[loss=0.2889, simple_loss=0.3185, pruned_loss=0.09499, ctc_loss=0.1733, over 13684.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2829, pruned_loss=0.0562, ctc_loss=0.1046, over 3727836.25 frames. ], batch size: 141, lr: 1.33e-02, grad_scale: 32.0
+2024-08-26 20:55:51,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=145824.0, ans=0.1
+2024-08-26 20:55:56,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=145824.0, ans=0.0
+2024-08-26 20:55:57,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=145877.33333333334, ans=6.0
+2024-08-26 20:55:58,187 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.577e+02 1.748e+02 1.957e+02 3.323e+02, threshold=3.496e+02, percent-clipped=0.0
+2024-08-26 20:56:21,029 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=145984.0, ans=0.025
+2024-08-26 21:01:32,170 INFO [train.py:1114] (1/4) Epoch 12, batch 0, loss[loss=0.2164, simple_loss=0.2726, pruned_loss=0.05853, ctc_loss=0.1081, over 19403.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2726, pruned_loss=0.05853, ctc_loss=0.1081, over 19403.00 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:01:32,170 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 21:01:47,788 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.8304, 5.0517, 5.6239, 5.2800], device='cuda:1')
+2024-08-26 21:01:52,249 INFO [train.py:1146] (1/4) Epoch 12, validation: loss=0.1812, simple_loss=0.274, pruned_loss=0.03284, ctc_loss=0.05683, over 944034.00 frames.
+2024-08-26 21:01:52,249 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-26 21:01:57,684 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.73 vs. limit=22.5
+2024-08-26 21:02:14,033 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.11 vs. limit=10.0
+2024-08-26 21:02:25,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=146138.66666666666, ans=10.0
+2024-08-26 21:02:26,550 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=146138.66666666666, ans=0.1
+2024-08-26 21:02:43,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=146245.33333333334, ans=0.1
+2024-08-26 21:02:50,485 INFO [train.py:1114] (1/4) Epoch 12, batch 50, loss[loss=0.1771, simple_loss=0.2491, pruned_loss=0.03725, ctc_loss=0.07631, over 19725.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2788, pruned_loss=0.0529, ctc_loss=0.09983, over 845355.73 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:02:55,466 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=146298.66666666666, ans=0.125
+2024-08-26 21:02:58,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=146298.66666666666, ans=0.0
+2024-08-26 21:03:00,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=146352.0, ans=0.125
+2024-08-26 21:03:02,148 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=146352.0, ans=0.0
+2024-08-26 21:03:06,881 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=146352.0, ans=0.125
+2024-08-26 21:03:11,152 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.556e+02 1.742e+02 1.990e+02 3.045e+02, threshold=3.484e+02, percent-clipped=0.0
+2024-08-26 21:03:19,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=146458.66666666666, ans=0.125
+2024-08-26 21:03:24,872 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.31 vs. limit=15.0
+2024-08-26 21:04:10,874 INFO [train.py:1114] (1/4) Epoch 12, batch 100, loss[loss=0.1835, simple_loss=0.2587, pruned_loss=0.03845, ctc_loss=0.07841, over 19713.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2805, pruned_loss=0.05299, ctc_loss=0.09991, over 1498997.21 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:04:21,638 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.58 vs. limit=12.0
+2024-08-26 21:04:25,206 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.13 vs. limit=6.0
+2024-08-26 21:04:46,893 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=146725.33333333334, ans=0.1
+2024-08-26 21:04:47,273 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.29 vs. limit=10.0
+2024-08-26 21:04:57,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=146778.66666666666, ans=0.125
+2024-08-26 21:04:57,155 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=146778.66666666666, ans=0.125
+2024-08-26 21:05:01,182 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.75 vs. limit=15.0
+2024-08-26 21:05:05,128 INFO [train.py:1114] (1/4) Epoch 12, batch 150, loss[loss=0.1798, simple_loss=0.2435, pruned_loss=0.042, ctc_loss=0.08019, over 19728.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2775, pruned_loss=0.05196, ctc_loss=0.09749, over 2027965.43 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:05:12,332 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.24 vs. limit=22.5
+2024-08-26 21:05:25,626 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.421e+02 1.535e+02 1.745e+02 2.429e+02, threshold=3.070e+02, percent-clipped=0.0
+2024-08-26 21:05:33,473 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=146992.0, ans=0.125
+2024-08-26 21:05:38,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=146992.0, ans=0.0
+2024-08-26 21:05:39,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=146992.0, ans=0.0
+2024-08-26 21:05:42,974 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.91 vs. limit=15.0
+2024-08-26 21:05:44,134 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.66 vs. limit=10.0
+2024-08-26 21:05:50,581 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.42 vs. limit=15.0
+2024-08-26 21:05:52,017 INFO [train.py:1114] (1/4) Epoch 12, batch 200, loss[loss=0.2412, simple_loss=0.3027, pruned_loss=0.06474, ctc_loss=0.1257, over 18078.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2764, pruned_loss=0.05159, ctc_loss=0.09672, over 2435354.71 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:05:58,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=147098.66666666666, ans=0.125
+2024-08-26 21:06:00,067 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.71 vs. limit=15.0
+2024-08-26 21:06:02,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=147152.0, ans=0.0
+2024-08-26 21:06:26,869 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.88 vs. limit=6.0
+2024-08-26 21:06:38,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=147365.33333333334, ans=0.05
+2024-08-26 21:06:38,658 INFO [train.py:1114] (1/4) Epoch 12, batch 250, loss[loss=0.2167, simple_loss=0.2859, pruned_loss=0.05392, ctc_loss=0.09903, over 19469.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2757, pruned_loss=0.05128, ctc_loss=0.09602, over 2757226.13 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:06:40,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=147365.33333333334, ans=0.125
+2024-08-26 21:06:59,405 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.414e+02 1.495e+02 1.680e+02 4.024e+02, threshold=2.991e+02, percent-clipped=1.0
+2024-08-26 21:07:05,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=147472.0, ans=0.125
+2024-08-26 21:07:32,534 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=147578.66666666666, ans=0.0
+2024-08-26 21:07:35,052 INFO [train.py:1114] (1/4) Epoch 12, batch 300, loss[loss=0.2337, simple_loss=0.3013, pruned_loss=0.0618, ctc_loss=0.1062, over 19513.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.275, pruned_loss=0.05099, ctc_loss=0.09538, over 3000839.16 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:07:36,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=147632.0, ans=0.125
+2024-08-26 21:07:43,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=147685.33333333334, ans=0.125
+2024-08-26 21:08:05,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=147792.0, ans=0.5
+2024-08-26 21:08:11,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff2.min_abs, batch_count=147845.33333333334, ans=0.1
+2024-08-26 21:08:13,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=147845.33333333334, ans=0.125
+2024-08-26 21:08:15,109 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=147845.33333333334, ans=0.125
+2024-08-26 21:08:30,180 INFO [train.py:1114] (1/4) Epoch 12, batch 350, loss[loss=0.2006, simple_loss=0.2645, pruned_loss=0.0506, ctc_loss=0.08894, over 19757.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2753, pruned_loss=0.05102, ctc_loss=0.0954, over 3190720.75 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:11:59,402 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=147952.0, ans=0.125
+2024-08-26 21:12:05,910 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.16 vs. limit=15.0
+2024-08-26 21:12:10,851 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 1.537e+02 1.863e+02 2.287e+02 4.040e+02, threshold=3.725e+02, percent-clipped=5.0
+2024-08-26 21:12:15,297 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.21 vs. limit=8.0
+2024-08-26 21:12:23,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=148058.66666666666, ans=0.125
+2024-08-26 21:12:25,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=148058.66666666666, ans=0.125
+2024-08-26 21:12:31,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=148112.0, ans=0.1
+2024-08-26 21:13:43,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=148112.0, ans=0.09899494936611666
+2024-08-26 21:13:43,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=148112.0, ans=0.0
+2024-08-26 21:13:47,512 INFO [train.py:1114] (1/4) Epoch 12, batch 400, loss[loss=0.1903, simple_loss=0.2675, pruned_loss=0.04122, ctc_loss=0.07677, over 19497.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.275, pruned_loss=0.05078, ctc_loss=0.09488, over 3342696.82 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-26 21:14:01,033 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.15 vs. limit=10.0
+2024-08-26 21:14:22,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.41 vs. limit=15.0
+2024-08-26 21:14:24,522 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=148378.66666666666, ans=0.125
+2024-08-26 21:14:30,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=148378.66666666666, ans=0.125
+2024-08-26 21:14:34,559 INFO [train.py:1114] (1/4) Epoch 12, batch 450, loss[loss=0.2083, simple_loss=0.2868, pruned_loss=0.04769, ctc_loss=0.08622, over 19609.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.275, pruned_loss=0.05084, ctc_loss=0.09499, over 3451399.95 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:14:44,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=148432.0, ans=0.125
+2024-08-26 21:14:52,504 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.03 vs. limit=15.0
+2024-08-26 21:15:03,155 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=148538.66666666666, ans=0.125
+2024-08-26 21:15:05,752 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.502e+02 1.695e+02 2.071e+02 2.894e+02, threshold=3.390e+02, percent-clipped=0.0
+2024-08-26 21:15:14,477 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.72 vs. limit=15.0
+2024-08-26 21:15:31,495 INFO [train.py:1114] (1/4) Epoch 12, batch 500, loss[loss=0.2025, simple_loss=0.2778, pruned_loss=0.047, ctc_loss=0.08317, over 19695.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2738, pruned_loss=0.05022, ctc_loss=0.09365, over 3547732.00 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:15:37,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=148698.66666666666, ans=0.0
+2024-08-26 21:15:37,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=148698.66666666666, ans=0.1
+2024-08-26 21:15:47,786 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:16:02,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=148858.66666666666, ans=0.04949747468305833
+2024-08-26 21:16:03,248 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.01 vs. limit=6.0
+2024-08-26 21:16:12,199 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=148912.0, ans=0.125
+2024-08-26 21:16:19,338 INFO [train.py:1114] (1/4) Epoch 12, batch 550, loss[loss=0.2502, simple_loss=0.3088, pruned_loss=0.07145, ctc_loss=0.122, over 19247.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2743, pruned_loss=0.05044, ctc_loss=0.09394, over 3608680.84 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:16:19,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=148965.33333333334, ans=0.1
+2024-08-26 21:16:29,924 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.67 vs. limit=15.0
+2024-08-26 21:16:38,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=149072.0, ans=0.125
+2024-08-26 21:16:39,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=149072.0, ans=0.125
+2024-08-26 21:16:39,722 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.448e+02 1.617e+02 1.906e+02 3.977e+02, threshold=3.234e+02, percent-clipped=1.0
+2024-08-26 21:16:48,483 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=149125.33333333334, ans=0.0
+2024-08-26 21:17:47,333 INFO [train.py:1114] (1/4) Epoch 12, batch 600, loss[loss=0.2227, simple_loss=0.2969, pruned_loss=0.05416, ctc_loss=0.1002, over 19360.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2747, pruned_loss=0.05044, ctc_loss=0.09392, over 3665956.24 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:17:49,703 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.25 vs. limit=22.5
+2024-08-26 21:17:53,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=149232.0, ans=0.2
+2024-08-26 21:18:12,592 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=149338.66666666666, ans=0.1
+2024-08-26 21:18:24,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.15 vs. limit=15.0
+2024-08-26 21:18:24,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.40 vs. limit=15.0
+2024-08-26 21:18:37,776 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.91 vs. limit=15.0
+2024-08-26 21:18:44,315 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.60 vs. limit=12.0
+2024-08-26 21:18:46,486 INFO [train.py:1114] (1/4) Epoch 12, batch 650, loss[loss=0.2091, simple_loss=0.2852, pruned_loss=0.04832, ctc_loss=0.09105, over 19754.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2742, pruned_loss=0.05026, ctc_loss=0.09368, over 3716283.73 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:19:01,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=149498.66666666666, ans=0.0
+2024-08-26 21:19:09,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=149552.0, ans=0.025
+2024-08-26 21:19:11,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=149552.0, ans=0.125
+2024-08-26 21:19:12,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=149552.0, ans=0.125
+2024-08-26 21:19:16,432 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.436e+02 1.583e+02 1.844e+02 2.674e+02, threshold=3.165e+02, percent-clipped=0.0
+2024-08-26 21:19:28,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=149658.66666666666, ans=0.125
+2024-08-26 21:19:33,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=149658.66666666666, ans=0.2
+2024-08-26 21:19:35,700 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=149712.0, ans=0.125
+2024-08-26 21:19:41,550 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=149712.0, ans=0.025
+2024-08-26 21:19:45,046 INFO [train.py:1114] (1/4) Epoch 12, batch 700, loss[loss=0.1898, simple_loss=0.2566, pruned_loss=0.045, ctc_loss=0.08243, over 19713.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2749, pruned_loss=0.05067, ctc_loss=0.09446, over 3746477.32 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:19:48,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=149765.33333333334, ans=0.0
+2024-08-26 21:19:49,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=149765.33333333334, ans=0.07
+2024-08-26 21:19:50,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=149765.33333333334, ans=0.015
+2024-08-26 21:20:31,168 INFO [train.py:1114] (1/4) Epoch 12, batch 750, loss[loss=0.2195, simple_loss=0.2902, pruned_loss=0.05472, ctc_loss=0.09848, over 19512.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2746, pruned_loss=0.05061, ctc_loss=0.09438, over 3773749.03 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:20:33,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=150032.0, ans=0.125
+2024-08-26 21:20:34,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=150032.0, ans=0.1
+2024-08-26 21:20:38,290 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.27 vs. limit=12.0
+2024-08-26 21:20:46,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=150085.33333333334, ans=0.125
+2024-08-26 21:20:49,452 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=150138.66666666666, ans=0.0
+2024-08-26 21:20:51,920 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.592e+02 1.843e+02 2.247e+02 3.979e+02, threshold=3.686e+02, percent-clipped=6.0
+2024-08-26 21:20:55,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.63 vs. limit=12.0
+2024-08-26 21:20:56,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=150138.66666666666, ans=0.125
+2024-08-26 21:21:04,752 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.34 vs. limit=22.5
+2024-08-26 21:21:08,258 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=150245.33333333334, ans=0.025
+2024-08-26 21:21:22,389 INFO [train.py:1114] (1/4) Epoch 12, batch 800, loss[loss=0.1866, simple_loss=0.248, pruned_loss=0.04528, ctc_loss=0.08657, over 19417.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2747, pruned_loss=0.05092, ctc_loss=0.09488, over 3795520.06 frames. ], batch size: 48, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:21:52,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=150405.33333333334, ans=0.125
+2024-08-26 21:21:53,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=150458.66666666666, ans=0.125
+2024-08-26 21:22:12,987 INFO [train.py:1114] (1/4) Epoch 12, batch 850, loss[loss=0.2224, simple_loss=0.2896, pruned_loss=0.05643, ctc_loss=0.1057, over 19644.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2749, pruned_loss=0.05089, ctc_loss=0.09491, over 3815292.38 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 16.0
+2024-08-26 21:22:19,262 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.71 vs. limit=6.0
+2024-08-26 21:22:25,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=150618.66666666666, ans=0.0
+2024-08-26 21:22:34,295 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.451e+02 1.599e+02 1.811e+02 2.698e+02, threshold=3.198e+02, percent-clipped=0.0
+2024-08-26 21:22:43,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=150725.33333333334, ans=0.0
+2024-08-26 21:22:50,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=150778.66666666666, ans=0.0
+2024-08-26 21:23:00,257 INFO [train.py:1114] (1/4) Epoch 12, batch 900, loss[loss=0.1754, simple_loss=0.2431, pruned_loss=0.03895, ctc_loss=0.07463, over 19398.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2752, pruned_loss=0.05119, ctc_loss=0.09539, over 3818318.72 frames. ], batch size: 48, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:23:19,191 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=150938.66666666666, ans=0.05
+2024-08-26 21:23:47,028 INFO [train.py:1114] (1/4) Epoch 12, batch 950, loss[loss=0.182, simple_loss=0.2568, pruned_loss=0.03829, ctc_loss=0.07671, over 19481.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2758, pruned_loss=0.05138, ctc_loss=0.09583, over 3818384.29 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:24:06,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=151098.66666666666, ans=0.0
+2024-08-26 21:24:17,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=151152.0, ans=0.05
+2024-08-26 21:24:20,256 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.36 vs. limit=6.0
+2024-08-26 21:24:29,934 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.446e+02 1.609e+02 1.941e+02 6.709e+02, threshold=3.217e+02, percent-clipped=2.0
+2024-08-26 21:24:35,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=151205.33333333334, ans=0.0
+2024-08-26 21:24:50,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=151312.0, ans=0.2
+2024-08-26 21:24:57,622 INFO [train.py:1114] (1/4) Epoch 12, batch 1000, loss[loss=0.1877, simple_loss=0.2605, pruned_loss=0.04264, ctc_loss=0.074, over 19855.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2761, pruned_loss=0.05134, ctc_loss=0.09582, over 3815332.89 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:25:00,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=151365.33333333334, ans=0.125
+2024-08-26 21:25:22,120 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.31 vs. limit=15.0
+2024-08-26 21:25:27,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=151418.66666666666, ans=0.125
+2024-08-26 21:25:39,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=151472.0, ans=0.025
+2024-08-26 21:25:59,413 INFO [train.py:1114] (1/4) Epoch 12, batch 1050, loss[loss=0.1971, simple_loss=0.2757, pruned_loss=0.04258, ctc_loss=0.08345, over 19847.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2753, pruned_loss=0.05093, ctc_loss=0.09522, over 3821509.76 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:26:06,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=151632.0, ans=0.125
+2024-08-26 21:26:20,577 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.117e+02 1.384e+02 1.517e+02 1.769e+02 3.938e+02, threshold=3.034e+02, percent-clipped=1.0
+2024-08-26 21:26:26,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=151792.0, ans=0.125
+2024-08-26 21:26:43,704 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.09 vs. limit=6.0
+2024-08-26 21:26:45,801 INFO [train.py:1114] (1/4) Epoch 12, batch 1100, loss[loss=0.2025, simple_loss=0.2703, pruned_loss=0.04921, ctc_loss=0.09053, over 19578.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2749, pruned_loss=0.05089, ctc_loss=0.09501, over 3828704.68 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:26:48,772 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=151898.66666666666, ans=0.07
+2024-08-26 21:26:52,096 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.43 vs. limit=15.0
+2024-08-26 21:26:52,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=151898.66666666666, ans=0.1
+2024-08-26 21:26:59,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=151952.0, ans=0.125
+2024-08-26 21:27:02,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=151952.0, ans=0.0
+2024-08-26 21:27:08,325 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=152005.33333333334, ans=0.0
+2024-08-26 21:27:08,633 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.83 vs. limit=22.5
+2024-08-26 21:27:20,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=152058.66666666666, ans=0.0
+2024-08-26 21:27:31,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=152112.0, ans=0.125
+2024-08-26 21:27:32,336 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.57 vs. limit=15.0
+2024-08-26 21:27:36,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=152112.0, ans=0.125
+2024-08-26 21:27:37,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=152112.0, ans=0.2
+2024-08-26 21:27:41,053 INFO [train.py:1114] (1/4) Epoch 12, batch 1150, loss[loss=0.1874, simple_loss=0.2543, pruned_loss=0.04385, ctc_loss=0.08206, over 19595.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2745, pruned_loss=0.05055, ctc_loss=0.09461, over 3828532.61 frames. ], batch size: 52, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:27:41,327 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=152165.33333333334, ans=0.07
+2024-08-26 21:27:44,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=152165.33333333334, ans=0.125
+2024-08-26 21:27:47,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=152165.33333333334, ans=0.2
+2024-08-26 21:27:48,792 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:27:57,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=152218.66666666666, ans=0.1
+2024-08-26 21:28:02,690 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.513e+02 1.822e+02 2.260e+02 3.131e+02, threshold=3.643e+02, percent-clipped=1.0
+2024-08-26 21:28:06,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=152272.0, ans=0.1
+2024-08-26 21:28:06,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=152272.0, ans=0.125
+2024-08-26 21:28:08,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=152325.33333333334, ans=0.125
+2024-08-26 21:28:10,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=152325.33333333334, ans=0.125
+2024-08-26 21:28:10,748 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.51 vs. limit=22.5
+2024-08-26 21:28:12,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=152325.33333333334, ans=0.0
+2024-08-26 21:28:13,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=152325.33333333334, ans=0.125
+2024-08-26 21:28:28,136 INFO [train.py:1114] (1/4) Epoch 12, batch 1200, loss[loss=0.2135, simple_loss=0.282, pruned_loss=0.05244, ctc_loss=0.1003, over 19828.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2751, pruned_loss=0.05063, ctc_loss=0.09483, over 3824097.13 frames. ], batch size: 57, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:28:44,551 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=152485.33333333334, ans=0.025
+2024-08-26 21:29:15,284 INFO [train.py:1114] (1/4) Epoch 12, batch 1250, loss[loss=0.2187, simple_loss=0.2849, pruned_loss=0.05646, ctc_loss=0.09877, over 19525.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2759, pruned_loss=0.0509, ctc_loss=0.09514, over 3842652.98 frames. ], batch size: 61, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:29:30,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=152752.0, ans=0.1
+2024-08-26 21:29:36,829 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.211e+02 1.442e+02 1.596e+02 2.011e+02 3.434e+02, threshold=3.192e+02, percent-clipped=0.0
+2024-08-26 21:30:17,254 INFO [train.py:1114] (1/4) Epoch 12, batch 1300, loss[loss=0.2171, simple_loss=0.2874, pruned_loss=0.05311, ctc_loss=0.1012, over 18795.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.275, pruned_loss=0.05043, ctc_loss=0.09435, over 3846441.29 frames. ], batch size: 76, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:31:05,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=153178.66666666666, ans=0.125
+2024-08-26 21:31:06,803 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.29 vs. limit=15.0
+2024-08-26 21:31:08,966 INFO [train.py:1114] (1/4) Epoch 12, batch 1350, loss[loss=0.2044, simple_loss=0.267, pruned_loss=0.05144, ctc_loss=0.09719, over 19780.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2744, pruned_loss=0.05008, ctc_loss=0.09365, over 3856928.48 frames. ], batch size: 54, lr: 1.25e-02, grad_scale: 32.0
+2024-08-26 21:31:29,983 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.104e+02 1.467e+02 1.650e+02 2.044e+02 3.234e+02, threshold=3.299e+02, percent-clipped=1.0
+2024-08-26 21:31:32,055 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.min_positive, batch_count=153338.66666666666, ans=0.025
+2024-08-26 21:31:34,564 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.29 vs. limit=22.5
+2024-08-26 21:31:35,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=153338.66666666666, ans=0.1
+2024-08-26 21:31:39,707 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=153392.0, ans=0.125
+2024-08-26 21:31:42,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=153392.0, ans=0.0
+2024-08-26 21:31:54,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=153498.66666666666, ans=0.0
+2024-08-26 21:31:55,342 INFO [train.py:1114] (1/4) Epoch 12, batch 1400, loss[loss=0.201, simple_loss=0.2564, pruned_loss=0.05347, ctc_loss=0.09676, over 19678.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2743, pruned_loss=0.05015, ctc_loss=0.0938, over 3864355.01 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:31:59,502 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.97 vs. limit=15.0
+2024-08-26 21:32:03,304 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.60 vs. limit=6.0
+2024-08-26 21:32:42,802 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.91 vs. limit=15.0
+2024-08-26 21:32:47,925 INFO [train.py:1114] (1/4) Epoch 12, batch 1450, loss[loss=0.2162, simple_loss=0.2854, pruned_loss=0.05299, ctc_loss=0.1024, over 19673.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2751, pruned_loss=0.0506, ctc_loss=0.09454, over 3862587.11 frames. ], batch size: 63, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:33:12,065 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.245e+02 1.443e+02 1.618e+02 1.909e+02 2.759e+02, threshold=3.236e+02, percent-clipped=0.0
+2024-08-26 21:33:33,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=153978.66666666666, ans=0.0
+2024-08-26 21:33:37,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=153978.66666666666, ans=0.025
+2024-08-26 21:33:38,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=153978.66666666666, ans=0.0
+2024-08-26 21:33:42,303 INFO [train.py:1114] (1/4) Epoch 12, batch 1500, loss[loss=0.1992, simple_loss=0.2708, pruned_loss=0.04684, ctc_loss=0.085, over 19598.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.275, pruned_loss=0.05015, ctc_loss=0.09389, over 3861806.77 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:34:03,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=154138.66666666666, ans=0.1
+2024-08-26 21:34:06,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=154138.66666666666, ans=0.025
+2024-08-26 21:34:07,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=154138.66666666666, ans=0.1
+2024-08-26 21:34:26,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=154245.33333333334, ans=0.0
+2024-08-26 21:34:29,573 INFO [train.py:1114] (1/4) Epoch 12, batch 1550, loss[loss=0.2289, simple_loss=0.2957, pruned_loss=0.05848, ctc_loss=0.1132, over 19594.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2751, pruned_loss=0.05055, ctc_loss=0.09459, over 3845995.35 frames. ], batch size: 60, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:34:35,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=154298.66666666666, ans=0.025
+2024-08-26 21:34:35,827 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.38 vs. limit=15.0
+2024-08-26 21:34:51,389 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.237e+02 1.431e+02 1.666e+02 1.890e+02 5.087e+02, threshold=3.332e+02, percent-clipped=2.0
+2024-08-26 21:35:17,028 INFO [train.py:1114] (1/4) Epoch 12, batch 1600, loss[loss=0.2017, simple_loss=0.2778, pruned_loss=0.04533, ctc_loss=0.0871, over 19849.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2746, pruned_loss=0.05051, ctc_loss=0.09446, over 3835436.76 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:35:34,029 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=154618.66666666666, ans=0.0
+2024-08-26 21:35:48,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=154725.33333333334, ans=0.0
+2024-08-26 21:35:59,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=154778.66666666666, ans=0.125
+2024-08-26 21:36:01,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=154778.66666666666, ans=0.07
+2024-08-26 21:36:05,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=154778.66666666666, ans=0.125
+2024-08-26 21:36:11,264 INFO [train.py:1114] (1/4) Epoch 12, batch 1650, loss[loss=0.2146, simple_loss=0.2834, pruned_loss=0.05286, ctc_loss=0.1002, over 19656.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2745, pruned_loss=0.05067, ctc_loss=0.09466, over 3831667.61 frames. ], batch size: 59, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:36:16,608 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.12 vs. limit=22.5
+2024-08-26 21:36:22,733 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=154885.33333333334, ans=0.0
+2024-08-26 21:36:34,779 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.420e+02 1.592e+02 1.938e+02 3.625e+02, threshold=3.184e+02, percent-clipped=1.0
+2024-08-26 21:36:35,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=154938.66666666666, ans=0.2
+2024-08-26 21:36:37,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=154938.66666666666, ans=0.09899494936611666
+2024-08-26 21:36:43,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=154992.0, ans=0.125
+2024-08-26 21:36:50,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=155045.33333333334, ans=0.0
+2024-08-26 21:36:50,102 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=155045.33333333334, ans=0.125
+2024-08-26 21:36:50,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=155045.33333333334, ans=0.0
+2024-08-26 21:36:56,912 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=155045.33333333334, ans=0.04949747468305833
+2024-08-26 21:37:00,210 INFO [train.py:1114] (1/4) Epoch 12, batch 1700, loss[loss=0.1865, simple_loss=0.2524, pruned_loss=0.04431, ctc_loss=0.08021, over 19671.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2742, pruned_loss=0.05022, ctc_loss=0.09389, over 3846379.08 frames. ], batch size: 46, lr: 1.24e-02, grad_scale: 32.0
+2024-08-26 21:37:07,022 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=155098.66666666666, ans=0.1
+2024-08-26 21:37:07,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=155098.66666666666, ans=0.0
+2024-08-26 21:37:34,625 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=155258.66666666666, ans=0.0
+2024-08-26 21:37:42,763 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.09 vs. limit=15.0
+2024-08-26 21:37:44,946 INFO [train.py:1114] (1/4) Epoch 12, batch 1750, loss[loss=0.1899, simple_loss=0.2527, pruned_loss=0.04588, ctc_loss=0.08858, over 19637.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2742, pruned_loss=0.0501, ctc_loss=0.09369, over 3851033.26 frames. ], batch size: 45, lr: 1.24e-02, grad_scale: 16.0
+2024-08-26 21:37:45,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=155365.33333333334, ans=0.0
+2024-08-26 21:37:53,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=155418.66666666666, ans=0.125
+2024-08-26 21:38:05,532 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:38:06,162 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.154e+02 1.409e+02 1.600e+02 1.878e+02 3.182e+02, threshold=3.201e+02, percent-clipped=0.0
+2024-08-26 21:38:06,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=155472.0, ans=0.125
+2024-08-26 21:38:07,245 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:38:17,308 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.42 vs. limit=12.0
+2024-08-26 21:38:20,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=155578.66666666666, ans=0.0
+2024-08-26 21:38:26,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=155578.66666666666, ans=15.0
+2024-08-26 21:38:28,996 INFO [train.py:1114] (1/4) Epoch 12, batch 1800, loss[loss=0.1961, simple_loss=0.275, pruned_loss=0.04244, ctc_loss=0.08085, over 19599.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2744, pruned_loss=0.05005, ctc_loss=0.09357, over 3853096.14 frames. ], batch size: 55, lr: 1.24e-02, grad_scale: 16.0
+2024-08-26 21:38:30,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=155632.0, ans=0.025
+2024-08-26 21:38:41,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=155685.33333333334, ans=0.125
+2024-08-26 21:38:46,160 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=6.00 vs. limit=6.0
+2024-08-26 21:38:55,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=155792.0, ans=0.125
+2024-08-26 21:39:03,555 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.52 vs. limit=12.0
+2024-08-26 21:39:12,710 INFO [train.py:1114] (1/4) Epoch 12, batch 1850, loss[loss=0.2479, simple_loss=0.3121, pruned_loss=0.06713, ctc_loss=0.1236, over 19597.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2739, pruned_loss=0.0497, ctc_loss=0.09286, over 3855362.83 frames. ], batch size: 57, lr: 1.24e-02, grad_scale: 16.0
+2024-08-26 21:39:12,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=155898.66666666666, ans=0.125
+2024-08-26 21:39:23,506 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=155952.0, ans=0.125
+2024-08-26 21:39:34,666 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.542e+02 1.764e+02 2.176e+02 3.980e+02, threshold=3.528e+02, percent-clipped=3.0
+2024-08-26 21:39:43,939 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.23 vs. limit=15.0
+2024-08-26 21:39:56,683 INFO [train.py:1114] (1/4) Epoch 12, batch 1900, loss[loss=0.2118, simple_loss=0.2853, pruned_loss=0.05066, ctc_loss=0.09258, over 19655.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2749, pruned_loss=0.05015, ctc_loss=0.0937, over 3861072.89 frames. ], batch size: 59, lr: 1.23e-02, grad_scale: 8.0
+2024-08-26 21:39:59,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=156165.33333333334, ans=0.1
+2024-08-26 21:40:09,002 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:40:16,723 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=156272.0, ans=0.025
+2024-08-26 21:40:20,240 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=156272.0, ans=0.1
+2024-08-26 21:40:22,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=156325.33333333334, ans=0.09899494936611666
+2024-08-26 21:40:29,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=156325.33333333334, ans=10.0
+2024-08-26 21:40:40,047 INFO [train.py:1114] (1/4) Epoch 12, batch 1950, loss[loss=0.1829, simple_loss=0.255, pruned_loss=0.04043, ctc_loss=0.07509, over 19593.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2757, pruned_loss=0.05027, ctc_loss=0.09381, over 3869858.39 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 8.0
+2024-08-26 21:40:47,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=156432.0, ans=0.0
+2024-08-26 21:40:54,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=156485.33333333334, ans=0.0
+2024-08-26 21:40:55,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=156485.33333333334, ans=0.1
+2024-08-26 21:41:01,722 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 1.443e+02 1.619e+02 1.881e+02 3.638e+02, threshold=3.238e+02, percent-clipped=1.0
+2024-08-26 21:41:25,589 INFO [train.py:1114] (1/4) Epoch 12, batch 2000, loss[loss=0.187, simple_loss=0.251, pruned_loss=0.04444, ctc_loss=0.08532, over 19656.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2765, pruned_loss=0.05085, ctc_loss=0.09474, over 3853730.37 frames. ], batch size: 45, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:41:38,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=156752.0, ans=0.1
+2024-08-26 21:42:10,404 INFO [train.py:1114] (1/4) Epoch 12, batch 2050, loss[loss=0.1878, simple_loss=0.2514, pruned_loss=0.0448, ctc_loss=0.08647, over 19725.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2748, pruned_loss=0.05042, ctc_loss=0.09399, over 3850393.15 frames. ], batch size: 47, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:42:13,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=156965.33333333334, ans=0.0
+2024-08-26 21:42:17,324 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.87 vs. limit=15.0
+2024-08-26 21:42:24,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=157018.66666666666, ans=0.125
+2024-08-26 21:42:33,052 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.456e+02 1.628e+02 1.934e+02 3.317e+02, threshold=3.256e+02, percent-clipped=1.0
+2024-08-26 21:42:43,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=157125.33333333334, ans=0.125
+2024-08-26 21:42:48,156 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 21:42:48,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=157178.66666666666, ans=0.0
+2024-08-26 21:42:50,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=157178.66666666666, ans=0.125
+2024-08-26 21:42:55,875 INFO [train.py:1114] (1/4) Epoch 12, batch 2100, loss[loss=0.1974, simple_loss=0.2677, pruned_loss=0.04625, ctc_loss=0.08659, over 19764.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2741, pruned_loss=0.04965, ctc_loss=0.09293, over 3858278.38 frames. ], batch size: 54, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:43:01,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=157232.0, ans=0.0
+2024-08-26 21:43:08,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=157285.33333333334, ans=0.125
+2024-08-26 21:43:19,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=157338.66666666666, ans=0.0
+2024-08-26 21:43:56,951 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.40 vs. limit=22.5
+2024-08-26 21:44:00,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=157445.33333333334, ans=0.125
+2024-08-26 21:44:06,211 INFO [train.py:1114] (1/4) Epoch 12, batch 2150, loss[loss=0.1913, simple_loss=0.265, pruned_loss=0.04242, ctc_loss=0.08179, over 19563.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2737, pruned_loss=0.04968, ctc_loss=0.09294, over 3869358.89 frames. ], batch size: 52, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:44:18,458 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=157552.0, ans=0.0
+2024-08-26 21:44:27,514 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.35 vs. limit=10.0
+2024-08-26 21:44:27,884 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.226e+02 1.483e+02 1.683e+02 2.213e+02 4.687e+02, threshold=3.365e+02, percent-clipped=1.0
+2024-08-26 21:44:48,376 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.68 vs. limit=15.0
+2024-08-26 21:45:26,973 INFO [train.py:1114] (1/4) Epoch 12, batch 2200, loss[loss=0.2318, simple_loss=0.2984, pruned_loss=0.05973, ctc_loss=0.1142, over 19595.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2734, pruned_loss=0.04964, ctc_loss=0.09272, over 3867203.55 frames. ], batch size: 57, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:45:40,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=157818.66666666666, ans=0.2
+2024-08-26 21:45:45,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=157872.0, ans=0.0
+2024-08-26 21:45:55,745 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=157925.33333333334, ans=0.0
+2024-08-26 21:45:55,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=157925.33333333334, ans=0.0
+2024-08-26 21:46:04,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=157978.66666666666, ans=0.1
+2024-08-26 21:46:10,383 INFO [train.py:1114] (1/4) Epoch 12, batch 2250, loss[loss=0.2124, simple_loss=0.2822, pruned_loss=0.05226, ctc_loss=0.09495, over 19608.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2738, pruned_loss=0.04983, ctc_loss=0.09295, over 3866797.67 frames. ], batch size: 55, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:46:12,199 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=158032.0, ans=0.125
+2024-08-26 21:46:13,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158032.0, ans=0.1
+2024-08-26 21:46:14,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.51 vs. limit=6.0
+2024-08-26 21:46:18,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=158085.33333333334, ans=0.2
+2024-08-26 21:46:31,824 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.542e+02 1.805e+02 2.126e+02 6.638e+02, threshold=3.611e+02, percent-clipped=1.0
+2024-08-26 21:46:38,179 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.08 vs. limit=22.5
+2024-08-26 21:46:39,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=158192.0, ans=0.125
+2024-08-26 21:46:53,570 INFO [train.py:1114] (1/4) Epoch 12, batch 2300, loss[loss=0.1851, simple_loss=0.2567, pruned_loss=0.04138, ctc_loss=0.07694, over 19502.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2732, pruned_loss=0.05002, ctc_loss=0.09323, over 3860138.16 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:47:03,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=158352.0, ans=0.1
+2024-08-26 21:47:20,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=158458.66666666666, ans=0.0
+2024-08-26 21:47:21,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=158458.66666666666, ans=0.0
+2024-08-26 21:47:36,481 INFO [train.py:1114] (1/4) Epoch 12, batch 2350, loss[loss=0.2156, simple_loss=0.2853, pruned_loss=0.05369, ctc_loss=0.09612, over 19669.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2728, pruned_loss=0.04995, ctc_loss=0.09296, over 3863839.87 frames. ], batch size: 63, lr: 1.23e-02, grad_scale: 16.0
+2024-08-26 21:47:39,299 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=158565.33333333334, ans=0.04949747468305833
+2024-08-26 21:47:39,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=158565.33333333334, ans=0.125
+2024-08-26 21:47:41,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.44 vs. limit=15.0
+2024-08-26 21:47:51,689 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.10 vs. limit=15.0
+2024-08-26 21:47:58,863 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.457e+02 1.679e+02 1.938e+02 3.188e+02, threshold=3.358e+02, percent-clipped=0.0
+2024-08-26 21:48:02,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=158725.33333333334, ans=0.125
+2024-08-26 21:48:05,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=158725.33333333334, ans=0.0
+2024-08-26 21:48:12,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=158778.66666666666, ans=0.125
+2024-08-26 21:48:14,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=158778.66666666666, ans=0.125
+2024-08-26 21:48:21,589 INFO [train.py:1114] (1/4) Epoch 12, batch 2400, loss[loss=0.2286, simple_loss=0.2996, pruned_loss=0.05743, ctc_loss=0.1069, over 19313.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.275, pruned_loss=0.05057, ctc_loss=0.09398, over 3858473.22 frames. ], batch size: 71, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 21:48:21,736 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:48:24,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=158832.0, ans=0.025
+2024-08-26 21:49:07,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=158885.33333333334, ans=0.1
+2024-08-26 21:49:24,869 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=158992.0, ans=0.5
+2024-08-26 21:49:33,530 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.73 vs. limit=15.0
+2024-08-26 21:49:42,229 INFO [train.py:1114] (1/4) Epoch 12, batch 2450, loss[loss=0.2792, simple_loss=0.3202, pruned_loss=0.08681, ctc_loss=0.1614, over 13343.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2788, pruned_loss=0.05339, ctc_loss=0.09947, over 3731368.21 frames. ], batch size: 140, lr: 1.22e-02, grad_scale: 32.0
+2024-08-26 21:49:43,374 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=159098.66666666666, ans=0.125
+2024-08-26 21:49:45,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=159098.66666666666, ans=0.125
+2024-08-26 21:49:49,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=159098.66666666666, ans=0.125
+2024-08-26 21:49:53,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=159152.0, ans=0.025
+2024-08-26 21:49:57,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=159152.0, ans=0.125
+2024-08-26 21:50:05,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=159205.33333333334, ans=0.2
+2024-08-26 21:50:05,692 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.611e+02 1.857e+02 2.069e+02 3.042e+02, threshold=3.714e+02, percent-clipped=0.0
+2024-08-26 21:50:12,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=159258.66666666666, ans=0.0
+2024-08-26 21:51:14,831 INFO [train.py:1114] (1/4) Epoch 13, batch 0, loss[loss=0.1816, simple_loss=0.2506, pruned_loss=0.04097, ctc_loss=0.07703, over 19821.00 frames. ], tot_loss[loss=0.1816, simple_loss=0.2506, pruned_loss=0.04097, ctc_loss=0.07703, over 19821.00 frames. ], batch size: 49, lr: 1.18e-02, grad_scale: 16.0
+2024-08-26 21:51:14,832 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 21:51:26,163 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.6628, 4.8349, 5.4402, 5.1924], device='cuda:1')
+2024-08-26 21:51:28,903 INFO [train.py:1146] (1/4) Epoch 13, validation: loss=0.1795, simple_loss=0.2723, pruned_loss=0.03226, ctc_loss=0.05568, over 944034.00 frames.
+2024-08-26 21:51:28,903 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-26 21:51:30,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=159306.66666666666, ans=0.0
+2024-08-26 21:51:32,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=159306.66666666666, ans=0.0
+2024-08-26 21:51:41,106 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=159360.0, ans=0.125
+2024-08-26 21:51:48,499 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=159360.0, ans=0.0
+2024-08-26 21:51:55,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=159413.33333333334, ans=0.125
+2024-08-26 21:52:00,966 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.48 vs. limit=22.5
+2024-08-26 21:52:02,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=159466.66666666666, ans=0.025
+2024-08-26 21:52:04,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=159466.66666666666, ans=0.125
+2024-08-26 21:52:08,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=159520.0, ans=0.025
+2024-08-26 21:52:10,958 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.45 vs. limit=12.0
+2024-08-26 21:52:18,079 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=159573.33333333334, ans=0.125
+2024-08-26 21:52:18,716 INFO [train.py:1114] (1/4) Epoch 13, batch 50, loss[loss=0.1651, simple_loss=0.2345, pruned_loss=0.03486, ctc_loss=0.06467, over 19716.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2745, pruned_loss=0.04971, ctc_loss=0.09304, over 844748.83 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:52:32,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=159626.66666666666, ans=0.1
+2024-08-26 21:52:36,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=159626.66666666666, ans=0.0
+2024-08-26 21:52:39,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=159680.0, ans=0.07
+2024-08-26 21:52:41,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=159680.0, ans=0.2
+2024-08-26 21:52:48,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=159733.33333333334, ans=0.2
+2024-08-26 21:52:56,397 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.509e+02 1.748e+02 2.087e+02 2.763e+02, threshold=3.495e+02, percent-clipped=0.0
+2024-08-26 21:52:56,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=159733.33333333334, ans=0.04949747468305833
+2024-08-26 21:53:01,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=159786.66666666666, ans=0.1
+2024-08-26 21:53:07,073 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=159840.0, ans=0.125
+2024-08-26 21:53:07,768 INFO [train.py:1114] (1/4) Epoch 13, batch 100, loss[loss=0.2003, simple_loss=0.2667, pruned_loss=0.04902, ctc_loss=0.08947, over 19708.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2762, pruned_loss=0.05, ctc_loss=0.09349, over 1499477.02 frames. ], batch size: 51, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:53:10,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=159840.0, ans=0.2
+2024-08-26 21:53:15,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=159840.0, ans=0.125
+2024-08-26 21:53:15,690 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.67 vs. limit=12.0
+2024-08-26 21:53:19,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=159893.33333333334, ans=0.0
+2024-08-26 21:53:30,381 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.32 vs. limit=10.0
+2024-08-26 21:53:36,709 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=159946.66666666666, ans=0.2
+2024-08-26 21:53:39,090 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.22 vs. limit=22.5
+2024-08-26 21:53:42,529 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=160000.0, ans=0.125
+2024-08-26 21:53:43,411 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160000.0, ans=0.1
+2024-08-26 21:53:45,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=160000.0, ans=0.0
+2024-08-26 21:54:16,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=160053.33333333334, ans=0.125
+2024-08-26 21:54:21,193 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.42 vs. limit=22.5
+2024-08-26 21:54:21,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=160053.33333333334, ans=0.0
+2024-08-26 21:54:22,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=160106.66666666666, ans=0.09899494936611666
+2024-08-26 21:54:23,451 INFO [train.py:1114] (1/4) Epoch 13, batch 150, loss[loss=0.1982, simple_loss=0.2565, pruned_loss=0.05106, ctc_loss=0.09449, over 19698.00 frames. ], tot_loss[loss=0.205, simple_loss=0.274, pruned_loss=0.04956, ctc_loss=0.09231, over 2027175.31 frames. ], batch size: 47, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:54:24,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=160106.66666666666, ans=0.1
+2024-08-26 21:54:27,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=160106.66666666666, ans=0.09899494936611666
+2024-08-26 21:54:47,200 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.10 vs. limit=15.0
+2024-08-26 21:54:56,286 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=160266.66666666666, ans=0.0
+2024-08-26 21:55:02,363 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.397e+02 1.535e+02 1.726e+02 2.735e+02, threshold=3.069e+02, percent-clipped=0.0
+2024-08-26 21:55:13,398 INFO [train.py:1114] (1/4) Epoch 13, batch 200, loss[loss=0.2255, simple_loss=0.292, pruned_loss=0.05733, ctc_loss=0.1109, over 18394.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2729, pruned_loss=0.04937, ctc_loss=0.09192, over 2434816.66 frames. ], batch size: 85, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:55:22,774 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.71 vs. limit=15.0
+2024-08-26 21:55:29,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=160426.66666666666, ans=0.125
+2024-08-26 21:55:31,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=160480.0, ans=0.0
+2024-08-26 21:55:41,133 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:56:19,220 INFO [train.py:1114] (1/4) Epoch 13, batch 250, loss[loss=0.2119, simple_loss=0.28, pruned_loss=0.05325, ctc_loss=0.09327, over 19336.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2725, pruned_loss=0.04885, ctc_loss=0.09125, over 2754771.31 frames. ], batch size: 67, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:56:24,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=160640.0, ans=0.1
+2024-08-26 21:56:25,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=160640.0, ans=0.1
+2024-08-26 21:56:45,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=160746.66666666666, ans=0.125
+2024-08-26 21:56:50,598 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=160800.0, ans=0.125
+2024-08-26 21:56:56,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=160800.0, ans=0.0
+2024-08-26 21:56:57,710 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.442e+02 1.721e+02 2.190e+02 3.294e+02, threshold=3.441e+02, percent-clipped=2.0
+2024-08-26 21:57:01,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.73 vs. limit=6.0
+2024-08-26 21:57:04,229 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=160853.33333333334, ans=0.125
+2024-08-26 21:57:07,833 INFO [train.py:1114] (1/4) Epoch 13, batch 300, loss[loss=0.2084, simple_loss=0.2749, pruned_loss=0.05156, ctc_loss=0.09716, over 19518.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2718, pruned_loss=0.04869, ctc_loss=0.09119, over 3000163.21 frames. ], batch size: 61, lr: 1.17e-02, grad_scale: 8.0
+2024-08-26 21:57:22,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=160960.0, ans=0.2
+2024-08-26 21:57:46,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=161120.0, ans=0.125
+2024-08-26 21:57:49,283 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=161120.0, ans=0.125
+2024-08-26 21:57:52,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=161120.0, ans=0.125
+2024-08-26 21:57:55,518 INFO [train.py:1114] (1/4) Epoch 13, batch 350, loss[loss=0.1699, simple_loss=0.2404, pruned_loss=0.03611, ctc_loss=0.06769, over 19765.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2724, pruned_loss=0.0488, ctc_loss=0.09119, over 3189239.62 frames. ], batch size: 48, lr: 1.17e-02, grad_scale: 8.0
+2024-08-26 21:58:26,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=161333.33333333334, ans=0.125
+2024-08-26 21:58:29,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=161333.33333333334, ans=0.0
+2024-08-26 21:58:32,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=161333.33333333334, ans=0.2
+2024-08-26 21:58:33,157 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.400e+02 1.583e+02 1.867e+02 2.908e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-08-26 21:58:43,184 INFO [train.py:1114] (1/4) Epoch 13, batch 400, loss[loss=0.2051, simple_loss=0.2766, pruned_loss=0.04908, ctc_loss=0.08872, over 19481.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2718, pruned_loss=0.04846, ctc_loss=0.09071, over 3341325.68 frames. ], batch size: 54, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:58:46,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=161440.0, ans=0.125
+2024-08-26 21:58:51,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=161493.33333333334, ans=0.0
+2024-08-26 21:58:55,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=161493.33333333334, ans=0.1
+2024-08-26 21:58:56,798 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.64 vs. limit=15.0
+2024-08-26 21:58:59,371 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=161493.33333333334, ans=0.0
+2024-08-26 21:59:15,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=161600.0, ans=0.0
+2024-08-26 21:59:16,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=161600.0, ans=0.1
+2024-08-26 21:59:30,869 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.97 vs. limit=10.0
+2024-08-26 21:59:32,239 INFO [train.py:1114] (1/4) Epoch 13, batch 450, loss[loss=0.191, simple_loss=0.2732, pruned_loss=0.03875, ctc_loss=0.07834, over 19624.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2722, pruned_loss=0.04873, ctc_loss=0.09146, over 3450443.00 frames. ], batch size: 55, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 21:59:48,680 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=161760.0, ans=0.125
+2024-08-26 21:59:51,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=161760.0, ans=0.0
+2024-08-26 22:00:10,470 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.228e+02 1.449e+02 1.659e+02 1.894e+02 3.083e+02, threshold=3.319e+02, percent-clipped=0.0
+2024-08-26 22:00:10,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=161920.0, ans=0.2
+2024-08-26 22:00:20,541 INFO [train.py:1114] (1/4) Epoch 13, batch 500, loss[loss=0.2132, simple_loss=0.2815, pruned_loss=0.05267, ctc_loss=0.0992, over 19660.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2711, pruned_loss=0.04829, ctc_loss=0.0905, over 3546503.37 frames. ], batch size: 63, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 22:00:46,502 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=162080.0, ans=0.1
+2024-08-26 22:00:52,036 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:00:56,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.78 vs. limit=6.0
+2024-08-26 22:01:02,480 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=162186.66666666666, ans=0.125
+2024-08-26 22:01:10,522 INFO [train.py:1114] (1/4) Epoch 13, batch 550, loss[loss=0.2136, simple_loss=0.2845, pruned_loss=0.05151, ctc_loss=0.0992, over 19314.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2708, pruned_loss=0.04817, ctc_loss=0.0903, over 3608218.83 frames. ], batch size: 71, lr: 1.17e-02, grad_scale: 16.0
+2024-08-26 22:01:36,768 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=162346.66666666666, ans=0.0
+2024-08-26 22:01:40,617 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=162400.0, ans=0.025
+2024-08-26 22:01:43,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=162400.0, ans=0.0
+2024-08-26 22:01:59,759 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 1.555e+02 1.782e+02 2.360e+02 4.088e+02, threshold=3.564e+02, percent-clipped=3.0
+2024-08-26 22:02:10,210 INFO [train.py:1114] (1/4) Epoch 13, batch 600, loss[loss=0.1985, simple_loss=0.2769, pruned_loss=0.04334, ctc_loss=0.08358, over 19384.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2715, pruned_loss=0.04828, ctc_loss=0.09044, over 3667030.45 frames. ], batch size: 67, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:02:11,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=162506.66666666666, ans=0.125
+2024-08-26 22:02:12,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=162506.66666666666, ans=0.07
+2024-08-26 22:02:20,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=162560.0, ans=0.0
+2024-08-26 22:02:30,919 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.18 vs. limit=15.0
+2024-08-26 22:02:38,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=162613.33333333334, ans=0.0
+2024-08-26 22:02:43,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.73 vs. limit=15.0
+2024-08-26 22:02:47,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=162666.66666666666, ans=0.125
+2024-08-26 22:02:48,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=162720.0, ans=0.125
+2024-08-26 22:02:58,290 INFO [train.py:1114] (1/4) Epoch 13, batch 650, loss[loss=0.1898, simple_loss=0.2671, pruned_loss=0.04083, ctc_loss=0.07727, over 19763.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.271, pruned_loss=0.04801, ctc_loss=0.08979, over 3717515.22 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:02:59,708 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.76 vs. limit=6.0
+2024-08-26 22:03:01,592 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.98 vs. limit=15.0
+2024-08-26 22:03:02,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten.whitening_limit, batch_count=162773.33333333334, ans=15.0
+2024-08-26 22:03:43,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=162880.0, ans=0.05
+2024-08-26 22:03:44,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=162880.0, ans=0.0
+2024-08-26 22:03:51,158 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.79 vs. limit=10.0
+2024-08-26 22:03:56,183 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.22 vs. limit=10.0
+2024-08-26 22:03:57,401 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.372e+02 1.512e+02 1.802e+02 3.637e+02, threshold=3.024e+02, percent-clipped=1.0
+2024-08-26 22:03:58,862 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.04 vs. limit=22.5
+2024-08-26 22:04:09,863 INFO [train.py:1114] (1/4) Epoch 13, batch 700, loss[loss=0.1796, simple_loss=0.2511, pruned_loss=0.03926, ctc_loss=0.07382, over 19725.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2716, pruned_loss=0.0483, ctc_loss=0.09013, over 3749066.62 frames. ], batch size: 51, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:04:19,770 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.97 vs. limit=15.0
+2024-08-26 22:04:23,325 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.90 vs. limit=15.0
+2024-08-26 22:04:28,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=163093.33333333334, ans=0.025
+2024-08-26 22:04:36,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=163146.66666666666, ans=0.1
+2024-08-26 22:04:38,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=163146.66666666666, ans=0.125
+2024-08-26 22:04:38,333 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.22 vs. limit=15.0
+2024-08-26 22:04:42,019 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.25 vs. limit=6.0
+2024-08-26 22:04:43,335 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=163146.66666666666, ans=0.125
+2024-08-26 22:04:50,540 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.69 vs. limit=15.0
+2024-08-26 22:04:53,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=163200.0, ans=0.0
+2024-08-26 22:05:04,326 INFO [train.py:1114] (1/4) Epoch 13, batch 750, loss[loss=0.1925, simple_loss=0.2683, pruned_loss=0.04242, ctc_loss=0.07955, over 19474.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2713, pruned_loss=0.04818, ctc_loss=0.09002, over 3774963.78 frames. ], batch size: 54, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:05:04,922 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.73 vs. limit=10.0
+2024-08-26 22:05:29,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=163413.33333333334, ans=0.125
+2024-08-26 22:05:31,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.whiten.whitening_limit, batch_count=163413.33333333334, ans=12.0
+2024-08-26 22:05:38,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=163466.66666666666, ans=0.0
+2024-08-26 22:05:41,253 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=163466.66666666666, ans=0.125
+2024-08-26 22:05:41,879 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.233e+02 1.560e+02 1.959e+02 2.402e+02 3.823e+02, threshold=3.919e+02, percent-clipped=10.0
+2024-08-26 22:05:56,853 INFO [train.py:1114] (1/4) Epoch 13, batch 800, loss[loss=0.1854, simple_loss=0.253, pruned_loss=0.04246, ctc_loss=0.0824, over 19820.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2714, pruned_loss=0.04818, ctc_loss=0.08998, over 3796423.05 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 32.0
+2024-08-26 22:05:58,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=163573.33333333334, ans=0.1
+2024-08-26 22:06:02,423 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=163573.33333333334, ans=0.0
+2024-08-26 22:06:12,973 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.86 vs. limit=15.0
+2024-08-26 22:06:18,165 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=163680.0, ans=0.025
+2024-08-26 22:06:23,703 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=163733.33333333334, ans=0.2
+2024-08-26 22:06:54,853 INFO [train.py:1114] (1/4) Epoch 13, batch 850, loss[loss=0.1988, simple_loss=0.2785, pruned_loss=0.04333, ctc_loss=0.0812, over 19658.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2708, pruned_loss=0.048, ctc_loss=0.08956, over 3814761.56 frames. ], batch size: 59, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:06:58,124 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=163840.0, ans=0.125
+2024-08-26 22:06:59,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=163840.0, ans=0.125
+2024-08-26 22:07:12,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=163893.33333333334, ans=0.125
+2024-08-26 22:07:25,751 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=163946.66666666666, ans=0.0
+2024-08-26 22:07:29,526 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=164000.0, ans=0.125
+2024-08-26 22:07:37,696 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.442e+02 1.756e+02 2.038e+02 3.459e+02, threshold=3.512e+02, percent-clipped=0.0
+2024-08-26 22:07:44,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=164053.33333333334, ans=0.125
+2024-08-26 22:07:44,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=164053.33333333334, ans=0.2
+2024-08-26 22:07:50,321 INFO [train.py:1114] (1/4) Epoch 13, batch 900, loss[loss=0.1801, simple_loss=0.2464, pruned_loss=0.04199, ctc_loss=0.07443, over 19831.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2713, pruned_loss=0.04839, ctc_loss=0.09031, over 3819190.32 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:07:57,019 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=164106.66666666666, ans=0.0
+2024-08-26 22:08:12,212 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=164213.33333333334, ans=0.125
+2024-08-26 22:08:19,016 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.45 vs. limit=8.0
+2024-08-26 22:08:24,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=164266.66666666666, ans=0.125
+2024-08-26 22:08:40,772 INFO [train.py:1114] (1/4) Epoch 13, batch 950, loss[loss=0.1833, simple_loss=0.2561, pruned_loss=0.03981, ctc_loss=0.07725, over 19500.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2719, pruned_loss=0.04881, ctc_loss=0.09112, over 3820526.74 frames. ], batch size: 49, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:08:42,007 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=164373.33333333334, ans=0.0
+2024-08-26 22:08:45,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=164373.33333333334, ans=0.2
+2024-08-26 22:08:57,634 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=164426.66666666666, ans=0.025
+2024-08-26 22:09:00,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=164426.66666666666, ans=0.1
+2024-08-26 22:09:03,170 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=164480.0, ans=0.0
+2024-08-26 22:09:04,937 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=164480.0, ans=0.125
+2024-08-26 22:09:18,263 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.90 vs. limit=22.5
+2024-08-26 22:09:18,893 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=164533.33333333334, ans=0.2
+2024-08-26 22:09:20,508 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.559e+02 1.935e+02 2.172e+02 5.830e+02, threshold=3.869e+02, percent-clipped=1.0
+2024-08-26 22:09:21,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=164586.66666666666, ans=0.125
+2024-08-26 22:09:24,341 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=164586.66666666666, ans=0.125
+2024-08-26 22:09:25,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=164586.66666666666, ans=0.125
+2024-08-26 22:09:29,555 INFO [train.py:1114] (1/4) Epoch 13, batch 1000, loss[loss=0.1968, simple_loss=0.2698, pruned_loss=0.04564, ctc_loss=0.08121, over 19861.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2727, pruned_loss=0.04925, ctc_loss=0.09186, over 3816217.51 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:09:31,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=164640.0, ans=0.125
+2024-08-26 22:09:34,385 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=164640.0, ans=0.125
+2024-08-26 22:09:46,325 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:09:49,271 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=164746.66666666666, ans=0.125
+2024-08-26 22:10:05,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=164800.0, ans=0.07
+2024-08-26 22:10:06,428 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=164800.0, ans=0.0
+2024-08-26 22:10:11,818 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=164853.33333333334, ans=0.125
+2024-08-26 22:10:19,116 INFO [train.py:1114] (1/4) Epoch 13, batch 1050, loss[loss=0.1941, simple_loss=0.2686, pruned_loss=0.04311, ctc_loss=0.08372, over 19840.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2719, pruned_loss=0.049, ctc_loss=0.09138, over 3821903.62 frames. ], batch size: 57, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:10:20,441 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.85 vs. limit=6.0
+2024-08-26 22:10:29,821 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.78 vs. limit=15.0
+2024-08-26 22:10:30,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=164960.0, ans=0.1
+2024-08-26 22:10:33,286 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.50 vs. limit=15.0
+2024-08-26 22:10:36,793 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.40 vs. limit=15.0
+2024-08-26 22:10:38,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=165013.33333333334, ans=0.025
+2024-08-26 22:10:55,514 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.353e+02 1.566e+02 1.889e+02 2.686e+02, threshold=3.131e+02, percent-clipped=0.0
+2024-08-26 22:11:06,693 INFO [train.py:1114] (1/4) Epoch 13, batch 1100, loss[loss=0.1865, simple_loss=0.2588, pruned_loss=0.04177, ctc_loss=0.07683, over 19579.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2722, pruned_loss=0.04893, ctc_loss=0.09128, over 3829803.11 frames. ], batch size: 52, lr: 1.16e-02, grad_scale: 16.0
+2024-08-26 22:11:29,119 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.65 vs. limit=12.0
+2024-08-26 22:11:34,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=165333.33333333334, ans=0.0
+2024-08-26 22:11:39,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=165333.33333333334, ans=0.5
+2024-08-26 22:11:44,850 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.38 vs. limit=15.0
+2024-08-26 22:11:44,892 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.70 vs. limit=22.5
+2024-08-26 22:11:47,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=165386.66666666666, ans=0.125
+2024-08-26 22:11:55,423 INFO [train.py:1114] (1/4) Epoch 13, batch 1150, loss[loss=0.1978, simple_loss=0.2691, pruned_loss=0.04608, ctc_loss=0.08571, over 19582.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2726, pruned_loss=0.04915, ctc_loss=0.09157, over 3829936.96 frames. ], batch size: 52, lr: 1.15e-02, grad_scale: 16.0
+2024-08-26 22:12:01,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=165440.0, ans=0.0
+2024-08-26 22:12:16,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=165546.66666666666, ans=0.1
+2024-08-26 22:12:24,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=165600.0, ans=0.125
+2024-08-26 22:12:34,672 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.454e+02 1.639e+02 1.902e+02 3.180e+02, threshold=3.277e+02, percent-clipped=1.0
+2024-08-26 22:12:34,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=165653.33333333334, ans=0.1
+2024-08-26 22:12:36,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=165653.33333333334, ans=10.0
+2024-08-26 22:12:39,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=165653.33333333334, ans=0.2
+2024-08-26 22:12:43,816 INFO [train.py:1114] (1/4) Epoch 13, batch 1200, loss[loss=0.214, simple_loss=0.2874, pruned_loss=0.05037, ctc_loss=0.0995, over 19832.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2736, pruned_loss=0.04949, ctc_loss=0.09234, over 3824766.47 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:13:10,665 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.51 vs. limit=22.5
+2024-08-26 22:13:23,436 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=165920.0, ans=0.125
+2024-08-26 22:13:32,356 INFO [train.py:1114] (1/4) Epoch 13, batch 1250, loss[loss=0.2275, simple_loss=0.2958, pruned_loss=0.05884, ctc_loss=0.1039, over 19512.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2738, pruned_loss=0.04932, ctc_loss=0.09198, over 3842801.59 frames. ], batch size: 61, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:13:35,623 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.33 vs. limit=10.0
+2024-08-26 22:13:49,002 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=166026.66666666666, ans=0.025
+2024-08-26 22:13:59,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=166080.0, ans=0.1
+2024-08-26 22:13:59,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.93 vs. limit=12.0
+2024-08-26 22:14:04,557 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_na.min_abs, batch_count=166133.33333333334, ans=0.02
+2024-08-26 22:14:11,818 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.418e+02 1.637e+02 2.002e+02 4.206e+02, threshold=3.274e+02, percent-clipped=1.0
+2024-08-26 22:14:23,455 INFO [train.py:1114] (1/4) Epoch 13, batch 1300, loss[loss=0.2113, simple_loss=0.2786, pruned_loss=0.05183, ctc_loss=0.1008, over 18906.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2725, pruned_loss=0.0486, ctc_loss=0.09103, over 3847225.03 frames. ], batch size: 76, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:14:27,291 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=166240.0, ans=0.0
+2024-08-26 22:14:27,369 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=166240.0, ans=0.125
+2024-08-26 22:14:29,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=166240.0, ans=0.0
+2024-08-26 22:14:30,567 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.84 vs. limit=8.0
+2024-08-26 22:14:34,013 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.01 vs. limit=15.0
+2024-08-26 22:14:38,621 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.84 vs. limit=6.0
+2024-08-26 22:14:44,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=166346.66666666666, ans=0.125
+2024-08-26 22:14:49,627 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.86 vs. limit=15.0
+2024-08-26 22:14:59,762 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=166453.33333333334, ans=0.0
+2024-08-26 22:15:09,563 INFO [train.py:1114] (1/4) Epoch 13, batch 1350, loss[loss=0.1934, simple_loss=0.2689, pruned_loss=0.04302, ctc_loss=0.07989, over 19751.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2722, pruned_loss=0.04847, ctc_loss=0.0907, over 3858651.13 frames. ], batch size: 54, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:15:12,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=166506.66666666666, ans=0.125
+2024-08-26 22:15:19,343 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.39 vs. limit=15.0
+2024-08-26 22:15:28,351 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:15:41,585 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.56 vs. limit=22.5
+2024-08-26 22:15:49,433 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=166720.0, ans=0.0
+2024-08-26 22:15:50,041 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.412e+02 1.605e+02 1.958e+02 2.747e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-26 22:15:51,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=166720.0, ans=0.1
+2024-08-26 22:15:59,140 INFO [train.py:1114] (1/4) Epoch 13, batch 1400, loss[loss=0.1692, simple_loss=0.2363, pruned_loss=0.0367, ctc_loss=0.0714, over 19677.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.272, pruned_loss=0.04849, ctc_loss=0.09051, over 3864928.92 frames. ], batch size: 46, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:16:01,592 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.68 vs. limit=10.0
+2024-08-26 22:16:06,763 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=166773.33333333334, ans=0.125
+2024-08-26 22:16:08,014 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.98 vs. limit=15.0
+2024-08-26 22:16:21,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=166880.0, ans=10.0
+2024-08-26 22:16:22,550 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=166880.0, ans=0.0
+2024-08-26 22:16:31,361 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.07 vs. limit=15.0
+2024-08-26 22:16:32,103 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=166933.33333333334, ans=0.2
+2024-08-26 22:16:33,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=166933.33333333334, ans=0.125
+2024-08-26 22:16:37,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=166986.66666666666, ans=0.125
+2024-08-26 22:16:47,845 INFO [train.py:1114] (1/4) Epoch 13, batch 1450, loss[loss=0.2167, simple_loss=0.28, pruned_loss=0.05665, ctc_loss=0.1004, over 19651.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2725, pruned_loss=0.04877, ctc_loss=0.09104, over 3863219.48 frames. ], batch size: 63, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:16:49,216 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.81 vs. limit=22.5
+2024-08-26 22:17:10,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=167146.66666666666, ans=0.0
+2024-08-26 22:17:14,613 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=167146.66666666666, ans=0.125
+2024-08-26 22:17:27,086 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.434e+02 1.640e+02 1.966e+02 4.010e+02, threshold=3.281e+02, percent-clipped=1.0
+2024-08-26 22:17:31,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=167253.33333333334, ans=0.025
+2024-08-26 22:17:36,408 INFO [train.py:1114] (1/4) Epoch 13, batch 1500, loss[loss=0.2013, simple_loss=0.2693, pruned_loss=0.04809, ctc_loss=0.09277, over 19591.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2725, pruned_loss=0.04849, ctc_loss=0.09062, over 3862550.12 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:18:02,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=167413.33333333334, ans=0.2
+2024-08-26 22:18:12,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=167466.66666666666, ans=0.0
+2024-08-26 22:18:14,484 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.70 vs. limit=15.0
+2024-08-26 22:18:16,132 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.91 vs. limit=15.0
+2024-08-26 22:18:26,618 INFO [train.py:1114] (1/4) Epoch 13, batch 1550, loss[loss=0.2192, simple_loss=0.2919, pruned_loss=0.05408, ctc_loss=0.09588, over 19627.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2729, pruned_loss=0.04895, ctc_loss=0.09133, over 3846724.73 frames. ], batch size: 60, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:18:28,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=167573.33333333334, ans=0.0
+2024-08-26 22:18:33,255 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.66 vs. limit=15.0
+2024-08-26 22:18:43,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=167626.66666666666, ans=0.09899494936611666
+2024-08-26 22:18:57,351 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=167733.33333333334, ans=0.0
+2024-08-26 22:19:04,513 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.500e+02 1.731e+02 2.118e+02 3.338e+02, threshold=3.463e+02, percent-clipped=1.0
+2024-08-26 22:19:12,941 INFO [train.py:1114] (1/4) Epoch 13, batch 1600, loss[loss=0.1816, simple_loss=0.26, pruned_loss=0.03752, ctc_loss=0.07053, over 19840.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2722, pruned_loss=0.04868, ctc_loss=0.09098, over 3834490.98 frames. ], batch size: 57, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:19:16,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=167840.0, ans=0.125
+2024-08-26 22:19:17,175 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.09 vs. limit=15.0
+2024-08-26 22:19:27,115 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=167893.33333333334, ans=0.2
+2024-08-26 22:19:31,204 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.12 vs. limit=22.5
+2024-08-26 22:19:59,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=167946.66666666666, ans=0.125
+2024-08-26 22:20:06,666 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.47 vs. limit=10.0
+2024-08-26 22:20:19,756 INFO [train.py:1114] (1/4) Epoch 13, batch 1650, loss[loss=0.2041, simple_loss=0.281, pruned_loss=0.04603, ctc_loss=0.08781, over 19655.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2727, pruned_loss=0.04898, ctc_loss=0.0913, over 3832591.27 frames. ], batch size: 59, lr: 1.15e-02, grad_scale: 32.0
+2024-08-26 22:20:20,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=168106.66666666666, ans=0.95
+2024-08-26 22:20:29,303 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=168160.0, ans=0.0
+2024-08-26 22:20:51,606 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.91 vs. limit=12.0
+2024-08-26 22:20:57,548 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.381e+02 1.542e+02 1.780e+02 2.683e+02, threshold=3.084e+02, percent-clipped=0.0
+2024-08-26 22:21:07,581 INFO [train.py:1114] (1/4) Epoch 13, batch 1700, loss[loss=0.1773, simple_loss=0.2425, pruned_loss=0.04023, ctc_loss=0.07907, over 19702.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2718, pruned_loss=0.04838, ctc_loss=0.09033, over 3847218.47 frames. ], batch size: 46, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:21:28,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=168480.0, ans=0.125
+2024-08-26 22:21:53,907 INFO [train.py:1114] (1/4) Epoch 13, batch 1750, loss[loss=0.178, simple_loss=0.2398, pruned_loss=0.04179, ctc_loss=0.08129, over 19684.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2714, pruned_loss=0.04816, ctc_loss=0.09003, over 3851805.56 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:22:00,112 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:22:07,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=168693.33333333334, ans=22.5
+2024-08-26 22:22:08,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=168693.33333333334, ans=0.0
+2024-08-26 22:22:09,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=168693.33333333334, ans=0.0
+2024-08-26 22:23:51,730 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.438e+02 1.563e+02 1.924e+02 3.851e+02, threshold=3.126e+02, percent-clipped=3.0
+2024-08-26 22:23:55,858 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=168853.33333333334, ans=0.1
+2024-08-26 22:24:01,036 INFO [train.py:1114] (1/4) Epoch 13, batch 1800, loss[loss=0.2025, simple_loss=0.2796, pruned_loss=0.04459, ctc_loss=0.09059, over 19620.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2714, pruned_loss=0.04802, ctc_loss=0.08977, over 3853381.72 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:24:02,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=168906.66666666666, ans=0.125
+2024-08-26 22:24:11,049 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.41 vs. limit=22.5
+2024-08-26 22:24:31,028 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.56 vs. limit=6.0
+2024-08-26 22:24:36,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=169120.0, ans=0.2
+2024-08-26 22:24:44,741 INFO [train.py:1114] (1/4) Epoch 13, batch 1850, loss[loss=0.2018, simple_loss=0.2737, pruned_loss=0.04687, ctc_loss=0.09052, over 19571.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.271, pruned_loss=0.04795, ctc_loss=0.08946, over 3856130.43 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:24:51,304 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.04 vs. limit=15.0
+2024-08-26 22:25:05,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.09 vs. limit=22.5
+2024-08-26 22:25:08,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=169280.0, ans=0.125
+2024-08-26 22:25:21,911 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.546e+02 1.793e+02 2.323e+02 4.609e+02, threshold=3.586e+02, percent-clipped=7.0
+2024-08-26 22:25:25,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=169386.66666666666, ans=0.125
+2024-08-26 22:25:29,827 INFO [train.py:1114] (1/4) Epoch 13, batch 1900, loss[loss=0.1857, simple_loss=0.2673, pruned_loss=0.03853, ctc_loss=0.06734, over 19628.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2711, pruned_loss=0.04775, ctc_loss=0.08904, over 3861621.93 frames. ], batch size: 59, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:25:45,900 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=169493.33333333334, ans=0.125
+2024-08-26 22:25:56,390 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:25:58,049 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=169600.0, ans=0.1
+2024-08-26 22:26:01,485 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=169600.0, ans=0.2
+2024-08-26 22:26:05,022 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=169653.33333333334, ans=0.125
+2024-08-26 22:26:09,350 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=169653.33333333334, ans=0.0
+2024-08-26 22:26:13,641 INFO [train.py:1114] (1/4) Epoch 13, batch 1950, loss[loss=0.1993, simple_loss=0.2706, pruned_loss=0.04661, ctc_loss=0.08692, over 19591.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2723, pruned_loss=0.04793, ctc_loss=0.08947, over 3870432.45 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 16.0
+2024-08-26 22:26:16,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=169706.66666666666, ans=0.125
+2024-08-26 22:26:17,290 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=169706.66666666666, ans=0.0
+2024-08-26 22:26:28,533 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=169760.0, ans=0.125
+2024-08-26 22:26:40,809 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=169866.66666666666, ans=0.2
+2024-08-26 22:26:42,537 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=169866.66666666666, ans=0.0
+2024-08-26 22:26:45,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=169866.66666666666, ans=0.025
+2024-08-26 22:26:53,144 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.42 vs. limit=6.0
+2024-08-26 22:26:53,523 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.527e+02 1.786e+02 2.093e+02 2.857e+02, threshold=3.573e+02, percent-clipped=0.0
+2024-08-26 22:27:00,505 INFO [train.py:1114] (1/4) Epoch 13, batch 2000, loss[loss=0.1805, simple_loss=0.2381, pruned_loss=0.04519, ctc_loss=0.08111, over 19627.00 frames. ], tot_loss[loss=0.203, simple_loss=0.273, pruned_loss=0.04837, ctc_loss=0.09039, over 3856227.76 frames. ], batch size: 45, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:27:01,943 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.18 vs. limit=15.0
+2024-08-26 22:27:02,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=169973.33333333334, ans=0.125
+2024-08-26 22:27:35,017 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.85 vs. limit=10.0
+2024-08-26 22:27:35,536 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=170186.66666666666, ans=0.025
+2024-08-26 22:27:38,423 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.19 vs. limit=15.0
+2024-08-26 22:27:44,116 INFO [train.py:1114] (1/4) Epoch 13, batch 2050, loss[loss=0.1765, simple_loss=0.2427, pruned_loss=0.04025, ctc_loss=0.07457, over 19719.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.272, pruned_loss=0.04817, ctc_loss=0.08999, over 3851755.38 frames. ], batch size: 47, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:27:58,315 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=170293.33333333334, ans=0.2
+2024-08-26 22:28:20,406 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.436e+02 1.652e+02 1.928e+02 2.658e+02, threshold=3.303e+02, percent-clipped=0.0
+2024-08-26 22:28:24,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=170453.33333333334, ans=0.035
+2024-08-26 22:28:27,560 INFO [train.py:1114] (1/4) Epoch 13, batch 2100, loss[loss=0.1916, simple_loss=0.2673, pruned_loss=0.04247, ctc_loss=0.07709, over 19769.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2715, pruned_loss=0.04778, ctc_loss=0.08917, over 3858448.85 frames. ], batch size: 54, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:28:40,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=170560.0, ans=0.0
+2024-08-26 22:28:44,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=170613.33333333334, ans=0.95
+2024-08-26 22:28:45,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.06 vs. limit=22.5
+2024-08-26 22:28:52,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=170613.33333333334, ans=0.2
+2024-08-26 22:28:52,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=170613.33333333334, ans=0.125
+2024-08-26 22:28:52,566 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.28 vs. limit=15.0
+2024-08-26 22:29:18,564 INFO [train.py:1114] (1/4) Epoch 13, batch 2150, loss[loss=0.1872, simple_loss=0.2616, pruned_loss=0.04094, ctc_loss=0.07732, over 19608.00 frames. ], tot_loss[loss=0.201, simple_loss=0.271, pruned_loss=0.0477, ctc_loss=0.08887, over 3869651.90 frames. ], batch size: 52, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:29:18,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=170773.33333333334, ans=0.125
+2024-08-26 22:29:22,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=170773.33333333334, ans=0.1
+2024-08-26 22:29:24,301 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.66 vs. limit=15.0
+2024-08-26 22:29:38,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=170880.0, ans=0.125
+2024-08-26 22:29:48,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=170933.33333333334, ans=0.2
+2024-08-26 22:29:55,024 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.462e+02 1.698e+02 2.269e+02 4.218e+02, threshold=3.397e+02, percent-clipped=7.0
+2024-08-26 22:29:55,598 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.54 vs. limit=6.0
+2024-08-26 22:30:02,059 INFO [train.py:1114] (1/4) Epoch 13, batch 2200, loss[loss=0.2147, simple_loss=0.2835, pruned_loss=0.05264, ctc_loss=0.1014, over 19596.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2711, pruned_loss=0.04761, ctc_loss=0.08881, over 3868024.11 frames. ], batch size: 57, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:30:09,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=171040.0, ans=0.025
+2024-08-26 22:30:11,660 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=171093.33333333334, ans=0.125
+2024-08-26 22:30:15,023 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=171093.33333333334, ans=0.125
+2024-08-26 22:30:18,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=171093.33333333334, ans=10.0
+2024-08-26 22:30:46,564 INFO [train.py:1114] (1/4) Epoch 13, batch 2250, loss[loss=0.1976, simple_loss=0.2698, pruned_loss=0.04541, ctc_loss=0.0862, over 19613.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2709, pruned_loss=0.04762, ctc_loss=0.08912, over 3867521.25 frames. ], batch size: 55, lr: 1.14e-02, grad_scale: 32.0
+2024-08-26 22:30:50,066 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=171306.66666666666, ans=0.125
+2024-08-26 22:30:50,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=171306.66666666666, ans=0.09899494936611666
+2024-08-26 22:31:13,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=171466.66666666666, ans=0.125
+2024-08-26 22:31:20,978 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=171520.0, ans=0.04949747468305833
+2024-08-26 22:31:22,562 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.444e+02 1.610e+02 1.869e+02 3.635e+02, threshold=3.220e+02, percent-clipped=1.0
+2024-08-26 22:31:24,906 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.05 vs. limit=6.0
+2024-08-26 22:31:29,438 INFO [train.py:1114] (1/4) Epoch 13, batch 2300, loss[loss=0.1859, simple_loss=0.2565, pruned_loss=0.04205, ctc_loss=0.07814, over 19513.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.27, pruned_loss=0.04756, ctc_loss=0.08893, over 3860632.66 frames. ], batch size: 49, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 22:31:35,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=171573.33333333334, ans=0.125
+2024-08-26 22:31:53,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=171680.0, ans=0.125
+2024-08-26 22:32:10,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=171786.66666666666, ans=0.0
+2024-08-26 22:32:13,426 INFO [train.py:1114] (1/4) Epoch 13, batch 2350, loss[loss=0.2123, simple_loss=0.2886, pruned_loss=0.049, ctc_loss=0.0947, over 19661.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2705, pruned_loss=0.04788, ctc_loss=0.08946, over 3863505.81 frames. ], batch size: 63, lr: 1.13e-02, grad_scale: 16.0
+2024-08-26 22:32:15,218 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:32:19,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=171840.0, ans=0.0
+2024-08-26 22:32:20,353 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=171840.0, ans=0.0
+2024-08-26 22:32:53,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=172000.0, ans=0.125
+2024-08-26 22:32:56,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172000.0, ans=0.1
+2024-08-26 22:33:04,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=172053.33333333334, ans=0.0
+2024-08-26 22:33:04,856 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.652e+02 1.956e+02 2.487e+02 4.828e+02, threshold=3.913e+02, percent-clipped=4.0
+2024-08-26 22:33:07,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=172053.33333333334, ans=0.0
+2024-08-26 22:33:08,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=172053.33333333334, ans=0.125
+2024-08-26 22:33:10,713 INFO [train.py:1114] (1/4) Epoch 13, batch 2400, loss[loss=0.2012, simple_loss=0.2765, pruned_loss=0.04484, ctc_loss=0.09061, over 19235.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2726, pruned_loss=0.04871, ctc_loss=0.09081, over 3858875.86 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 22:33:12,615 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=172106.66666666666, ans=0.125
+2024-08-26 22:33:18,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=172160.0, ans=0.125
+2024-08-26 22:33:22,967 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:33:54,746 INFO [train.py:1114] (1/4) Epoch 13, batch 2450, loss[loss=0.3028, simple_loss=0.3266, pruned_loss=0.1021, ctc_loss=0.187, over 13687.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2767, pruned_loss=0.05162, ctc_loss=0.09655, over 3730503.06 frames. ], batch size: 142, lr: 1.13e-02, grad_scale: 32.0
+2024-08-26 22:33:55,919 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=172373.33333333334, ans=0.1
+2024-08-26 22:33:56,134 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=12.59 vs. limit=15.0
+2024-08-26 22:33:56,279 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=14.68 vs. limit=15.0
+2024-08-26 22:33:57,129 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=8.59 vs. limit=10.0
+2024-08-26 22:34:03,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=172426.66666666666, ans=0.5
+2024-08-26 22:34:15,845 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=9.73 vs. limit=15.0
+2024-08-26 22:34:23,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=172533.33333333334, ans=0.1
+2024-08-26 22:34:25,410 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=172533.33333333334, ans=0.125
+2024-08-26 22:38:18,480 INFO [train.py:1114] (1/4) Epoch 14, batch 0, loss[loss=0.1833, simple_loss=0.2562, pruned_loss=0.03987, ctc_loss=0.07686, over 19403.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2562, pruned_loss=0.03987, ctc_loss=0.07686, over 19403.00 frames. ], batch size: 48, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:38:18,481 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 22:38:36,025 INFO [train.py:1146] (1/4) Epoch 14, validation: loss=0.1777, simple_loss=0.2705, pruned_loss=0.03149, ctc_loss=0.05468, over 944034.00 frames.
+2024-08-26 22:38:36,168 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-26 22:38:39,676 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.631e+02 1.782e+02 1.968e+02 3.125e+02, threshold=3.565e+02, percent-clipped=0.0
+2024-08-26 22:38:43,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=172581.33333333334, ans=0.1
+2024-08-26 22:38:44,705 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=172634.66666666666, ans=0.1
+2024-08-26 22:39:11,107 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.18 vs. limit=10.0
+2024-08-26 22:40:01,745 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.68 vs. limit=6.0
+2024-08-26 22:40:03,201 INFO [train.py:1114] (1/4) Epoch 14, batch 50, loss[loss=0.1982, simple_loss=0.2565, pruned_loss=0.05142, ctc_loss=0.09256, over 19699.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2732, pruned_loss=0.04853, ctc_loss=0.09122, over 844835.93 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:40:16,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=172848.0, ans=0.125
+2024-08-26 22:41:55,150 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:42:41,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=172901.33333333334, ans=0.0
+2024-08-26 22:42:48,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=172954.66666666666, ans=0.2
+2024-08-26 22:42:48,464 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=172954.66666666666, ans=0.125
+2024-08-26 22:43:09,660 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.04 vs. limit=12.0
+2024-08-26 22:43:15,385 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.81 vs. limit=22.5
+2024-08-26 22:43:20,293 INFO [train.py:1114] (1/4) Epoch 14, batch 100, loss[loss=0.1818, simple_loss=0.2565, pruned_loss=0.03903, ctc_loss=0.07268, over 19712.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2739, pruned_loss=0.04848, ctc_loss=0.09067, over 1499995.38 frames. ], batch size: 51, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:43:23,803 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.427e+02 1.577e+02 1.836e+02 2.542e+02, threshold=3.153e+02, percent-clipped=0.0
+2024-08-26 22:43:31,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=173168.0, ans=0.025
+2024-08-26 22:43:54,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=173274.66666666666, ans=0.0
+2024-08-26 22:44:08,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=173328.0, ans=0.125
+2024-08-26 22:44:08,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_na.min_abs, batch_count=173328.0, ans=0.02
+2024-08-26 22:44:09,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=173381.33333333334, ans=0.0
+2024-08-26 22:44:10,532 INFO [train.py:1114] (1/4) Epoch 14, batch 150, loss[loss=0.1826, simple_loss=0.2501, pruned_loss=0.0424, ctc_loss=0.07556, over 19717.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2718, pruned_loss=0.04793, ctc_loss=0.08942, over 2027349.01 frames. ], batch size: 47, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:44:15,590 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.41 vs. limit=22.5
+2024-08-26 22:44:21,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=173434.66666666666, ans=0.07
+2024-08-26 22:44:22,619 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=173434.66666666666, ans=0.125
+2024-08-26 22:44:29,500 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.68 vs. limit=22.5
+2024-08-26 22:44:42,184 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=173541.33333333334, ans=0.125
+2024-08-26 22:44:43,256 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.19 vs. limit=12.0
+2024-08-26 22:44:43,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=173541.33333333334, ans=0.125
+2024-08-26 22:44:47,578 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=173541.33333333334, ans=0.125
+2024-08-26 22:45:08,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.88 vs. limit=15.0
+2024-08-26 22:45:10,943 INFO [train.py:1114] (1/4) Epoch 14, batch 200, loss[loss=0.2253, simple_loss=0.2846, pruned_loss=0.06056, ctc_loss=0.1122, over 18213.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2701, pruned_loss=0.04735, ctc_loss=0.08838, over 2434363.68 frames. ], batch size: 85, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:45:12,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=173648.0, ans=0.0
+2024-08-26 22:45:14,585 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.393e+02 1.624e+02 1.885e+02 3.247e+02, threshold=3.247e+02, percent-clipped=1.0
+2024-08-26 22:45:18,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=173648.0, ans=0.0
+2024-08-26 22:45:19,050 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=173648.0, ans=0.1
+2024-08-26 22:45:40,409 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.06 vs. limit=15.0
+2024-08-26 22:45:44,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=173808.0, ans=0.0
+2024-08-26 22:46:04,239 INFO [train.py:1114] (1/4) Epoch 14, batch 250, loss[loss=0.1935, simple_loss=0.2741, pruned_loss=0.04202, ctc_loss=0.07227, over 19420.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.27, pruned_loss=0.04696, ctc_loss=0.08786, over 2753707.57 frames. ], batch size: 67, lr: 1.09e-02, grad_scale: 32.0
+2024-08-26 22:46:16,636 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.82 vs. limit=22.5
+2024-08-26 22:46:25,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=174021.33333333334, ans=22.5
+2024-08-26 22:46:25,694 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.15 vs. limit=22.5
+2024-08-26 22:46:32,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=174074.66666666666, ans=0.025
+2024-08-26 22:46:37,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=174074.66666666666, ans=0.125
+2024-08-26 22:46:47,958 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.96 vs. limit=15.0
+2024-08-26 22:46:49,910 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.71 vs. limit=6.0
+2024-08-26 22:46:50,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=174128.0, ans=0.125
+2024-08-26 22:46:51,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=174128.0, ans=0.2
+2024-08-26 22:46:54,993 INFO [train.py:1114] (1/4) Epoch 14, batch 300, loss[loss=0.2164, simple_loss=0.2843, pruned_loss=0.05406, ctc_loss=0.1009, over 19548.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2697, pruned_loss=0.04676, ctc_loss=0.08768, over 2999566.01 frames. ], batch size: 61, lr: 1.09e-02, grad_scale: 16.0
+2024-08-26 22:46:59,571 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.470e+02 1.728e+02 2.225e+02 3.956e+02, threshold=3.457e+02, percent-clipped=2.0
+2024-08-26 22:47:23,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=174288.0, ans=0.0
+2024-08-26 22:47:29,318 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.19 vs. limit=15.0
+2024-08-26 22:47:32,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=174341.33333333334, ans=0.0
+2024-08-26 22:47:32,889 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.49 vs. limit=15.0
+2024-08-26 22:47:37,438 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.05 vs. limit=15.0
+2024-08-26 22:47:43,371 INFO [train.py:1114] (1/4) Epoch 14, batch 350, loss[loss=0.1741, simple_loss=0.2396, pruned_loss=0.03879, ctc_loss=0.0774, over 19763.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2704, pruned_loss=0.04688, ctc_loss=0.088, over 3190345.80 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 16.0
+2024-08-26 22:48:14,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=174501.33333333334, ans=0.125
+2024-08-26 22:48:14,860 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.61 vs. limit=15.0
+2024-08-26 22:48:20,354 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.69 vs. limit=15.0
+2024-08-26 22:48:20,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-26 22:48:23,797 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=174554.66666666666, ans=0.07
+2024-08-26 22:48:24,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-26 22:48:26,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=174554.66666666666, ans=0.125
+2024-08-26 22:48:32,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=174608.0, ans=0.0
+2024-08-26 22:48:35,875 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.38 vs. limit=15.0
+2024-08-26 22:48:45,057 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.43 vs. limit=12.0
+2024-08-26 22:48:47,579 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=174714.66666666666, ans=0.125
+2024-08-26 22:48:48,273 INFO [train.py:1114] (1/4) Epoch 14, batch 400, loss[loss=0.1962, simple_loss=0.2731, pruned_loss=0.04306, ctc_loss=0.08297, over 19500.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2699, pruned_loss=0.04662, ctc_loss=0.08752, over 3341930.78 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:48:52,781 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.491e+02 1.630e+02 1.842e+02 3.705e+02, threshold=3.261e+02, percent-clipped=1.0
+2024-08-26 22:48:57,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=174714.66666666666, ans=0.0
+2024-08-26 22:49:08,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=174821.33333333334, ans=0.125
+2024-08-26 22:49:11,253 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.57 vs. limit=10.0
+2024-08-26 22:49:22,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=174874.66666666666, ans=0.2
+2024-08-26 22:49:29,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=174928.0, ans=0.125
+2024-08-26 22:49:29,756 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=174928.0, ans=0.2
+2024-08-26 22:49:33,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=174928.0, ans=0.025
+2024-08-26 22:49:39,144 INFO [train.py:1114] (1/4) Epoch 14, batch 450, loss[loss=0.1956, simple_loss=0.2727, pruned_loss=0.04287, ctc_loss=0.08197, over 19622.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2702, pruned_loss=0.04695, ctc_loss=0.08814, over 3450368.11 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:50:15,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=175141.33333333334, ans=0.125
+2024-08-26 22:50:16,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=175141.33333333334, ans=0.0
+2024-08-26 22:50:27,953 INFO [train.py:1114] (1/4) Epoch 14, batch 500, loss[loss=0.2043, simple_loss=0.2772, pruned_loss=0.04804, ctc_loss=0.08808, over 19650.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2691, pruned_loss=0.04636, ctc_loss=0.08713, over 3545951.97 frames. ], batch size: 63, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:50:30,941 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:50:30,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=175248.0, ans=0.09899494936611666
+2024-08-26 22:50:32,540 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.438e+02 1.690e+02 1.988e+02 3.244e+02, threshold=3.379e+02, percent-clipped=0.0
+2024-08-26 22:50:48,349 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=175354.66666666666, ans=0.0
+2024-08-26 22:51:00,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=175408.0, ans=0.125
+2024-08-26 22:51:01,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=175408.0, ans=0.125
+2024-08-26 22:51:10,646 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=175461.33333333334, ans=0.015
+2024-08-26 22:51:15,972 INFO [train.py:1114] (1/4) Epoch 14, batch 550, loss[loss=0.2016, simple_loss=0.2761, pruned_loss=0.04671, ctc_loss=0.08404, over 19266.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2694, pruned_loss=0.04659, ctc_loss=0.08747, over 3608026.80 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:51:51,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=175674.66666666666, ans=0.2
+2024-08-26 22:52:06,895 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=175728.0, ans=0.0
+2024-08-26 22:52:15,516 INFO [train.py:1114] (1/4) Epoch 14, batch 600, loss[loss=0.22, simple_loss=0.2904, pruned_loss=0.05395, ctc_loss=0.1044, over 19321.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2698, pruned_loss=0.04683, ctc_loss=0.08787, over 3664666.00 frames. ], batch size: 67, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:52:20,038 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.101e+02 1.434e+02 1.658e+02 1.951e+02 2.764e+02, threshold=3.317e+02, percent-clipped=0.0
+2024-08-26 22:52:22,084 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=175781.33333333334, ans=0.125
+2024-08-26 22:52:22,959 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=175781.33333333334, ans=0.125
+2024-08-26 22:52:29,135 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.58 vs. limit=6.0
+2024-08-26 22:52:31,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-26 22:52:32,530 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=175834.66666666666, ans=0.125
+2024-08-26 22:52:37,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=175888.0, ans=0.125
+2024-08-26 22:52:37,585 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.55 vs. limit=15.0
+2024-08-26 22:52:40,268 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.25 vs. limit=15.0
+2024-08-26 22:52:49,915 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 22:52:51,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=175941.33333333334, ans=0.025
+2024-08-26 22:52:56,505 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.54 vs. limit=15.0
+2024-08-26 22:53:17,371 INFO [train.py:1114] (1/4) Epoch 14, batch 650, loss[loss=0.1971, simple_loss=0.2737, pruned_loss=0.04367, ctc_loss=0.08269, over 19752.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2691, pruned_loss=0.04653, ctc_loss=0.08702, over 3715941.04 frames. ], batch size: 54, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:53:44,994 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.39 vs. limit=12.0
+2024-08-26 22:53:50,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176208.0, ans=0.1
+2024-08-26 22:53:52,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=176208.0, ans=0.1
+2024-08-26 22:54:13,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176208.0, ans=0.1
+2024-08-26 22:54:13,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=176208.0, ans=0.125
+2024-08-26 22:54:15,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=176261.33333333334, ans=10.0
+2024-08-26 22:54:29,470 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=176261.33333333334, ans=0.0
+2024-08-26 22:54:41,337 INFO [train.py:1114] (1/4) Epoch 14, batch 700, loss[loss=0.1925, simple_loss=0.2596, pruned_loss=0.04659, ctc_loss=0.08022, over 19721.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2699, pruned_loss=0.04694, ctc_loss=0.08756, over 3748643.73 frames. ], batch size: 51, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 22:54:52,792 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.446e+02 1.597e+02 2.123e+02 3.826e+02, threshold=3.195e+02, percent-clipped=1.0
+2024-08-26 22:55:06,990 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176368.0, ans=0.1
+2024-08-26 22:55:13,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=176368.0, ans=0.0
+2024-08-26 22:55:18,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=176368.0, ans=0.125
+2024-08-26 22:58:22,191 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.26 vs. limit=15.0
+2024-08-26 22:58:22,906 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=176528.0, ans=0.125
+2024-08-26 22:58:30,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=176528.0, ans=0.125
+2024-08-26 22:59:23,413 INFO [train.py:1114] (1/4) Epoch 14, batch 750, loss[loss=0.1848, simple_loss=0.2654, pruned_loss=0.03727, ctc_loss=0.07415, over 19855.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2695, pruned_loss=0.04672, ctc_loss=0.08728, over 3776206.67 frames. ], batch size: 55, lr: 1.08e-02, grad_scale: 16.0
+2024-08-26 22:59:23,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 22:59:56,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=176581.33333333334, ans=0.1
+2024-08-26 23:00:26,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=176581.33333333334, ans=0.125
+2024-08-26 23:00:59,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=176634.66666666666, ans=0.125
+2024-08-26 23:01:37,498 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.07 vs. limit=15.0
+2024-08-26 23:01:58,992 INFO [train.py:1114] (1/4) Epoch 14, batch 800, loss[loss=0.1771, simple_loss=0.2462, pruned_loss=0.03949, ctc_loss=0.07239, over 19804.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2698, pruned_loss=0.04703, ctc_loss=0.08783, over 3797126.98 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:02:11,900 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.464e+02 1.718e+02 2.120e+02 3.590e+02, threshold=3.437e+02, percent-clipped=3.0
+2024-08-26 23:05:03,816 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=176954.66666666666, ans=0.1
+2024-08-26 23:05:52,828 INFO [train.py:1114] (1/4) Epoch 14, batch 850, loss[loss=0.2045, simple_loss=0.2771, pruned_loss=0.04754, ctc_loss=0.09177, over 19658.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2692, pruned_loss=0.04653, ctc_loss=0.08707, over 3815224.61 frames. ], batch size: 59, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:06:08,306 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=177168.0, ans=0.0
+2024-08-26 23:06:16,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=177221.33333333334, ans=0.125
+2024-08-26 23:06:34,232 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=177328.0, ans=0.0
+2024-08-26 23:06:46,754 INFO [train.py:1114] (1/4) Epoch 14, batch 900, loss[loss=0.1826, simple_loss=0.2532, pruned_loss=0.04082, ctc_loss=0.076, over 19433.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2695, pruned_loss=0.0468, ctc_loss=0.08754, over 3818547.13 frames. ], batch size: 48, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:06:48,831 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=177381.33333333334, ans=0.0
+2024-08-26 23:06:52,132 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.084e+02 1.429e+02 1.657e+02 1.986e+02 3.410e+02, threshold=3.315e+02, percent-clipped=0.0
+2024-08-26 23:07:06,006 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.64 vs. limit=22.5
+2024-08-26 23:07:14,851 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=177488.0, ans=0.125
+2024-08-26 23:07:19,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=177541.33333333334, ans=0.0
+2024-08-26 23:07:20,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-26 23:07:21,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=177541.33333333334, ans=0.0
+2024-08-26 23:07:23,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=177541.33333333334, ans=0.125
+2024-08-26 23:07:29,352 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=177594.66666666666, ans=0.1
+2024-08-26 23:07:38,547 INFO [train.py:1114] (1/4) Epoch 14, batch 950, loss[loss=0.1764, simple_loss=0.246, pruned_loss=0.03883, ctc_loss=0.07281, over 19512.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2697, pruned_loss=0.04697, ctc_loss=0.08789, over 3821519.80 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 32.0
+2024-08-26 23:07:39,067 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.73 vs. limit=22.5
+2024-08-26 23:07:51,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=177701.33333333334, ans=0.0
+2024-08-26 23:07:55,890 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.34 vs. limit=22.5
+2024-08-26 23:08:00,182 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=177754.66666666666, ans=0.125
+2024-08-26 23:08:01,175 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=177754.66666666666, ans=0.2
+2024-08-26 23:08:01,185 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=177754.66666666666, ans=0.04949747468305833
+2024-08-26 23:08:03,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=177754.66666666666, ans=0.07
+2024-08-26 23:08:04,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=177754.66666666666, ans=0.125
+2024-08-26 23:08:11,267 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=177808.0, ans=0.125
+2024-08-26 23:08:21,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=177861.33333333334, ans=0.125
+2024-08-26 23:08:26,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=177861.33333333334, ans=0.125
+2024-08-26 23:08:28,929 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.94 vs. limit=15.0
+2024-08-26 23:08:35,564 INFO [train.py:1114] (1/4) Epoch 14, batch 1000, loss[loss=0.1924, simple_loss=0.2608, pruned_loss=0.04522, ctc_loss=0.08417, over 19863.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2703, pruned_loss=0.04715, ctc_loss=0.08816, over 3818921.51 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:08:41,166 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.435e+02 1.639e+02 1.944e+02 3.185e+02, threshold=3.279e+02, percent-clipped=0.0
+2024-08-26 23:09:00,481 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=178021.33333333334, ans=0.0
+2024-08-26 23:09:03,311 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=178021.33333333334, ans=0.2
+2024-08-26 23:09:11,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=178074.66666666666, ans=0.125
+2024-08-26 23:09:12,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=178074.66666666666, ans=0.0
+2024-08-26 23:09:29,122 INFO [train.py:1114] (1/4) Epoch 14, batch 1050, loss[loss=0.1997, simple_loss=0.2747, pruned_loss=0.04489, ctc_loss=0.08741, over 19861.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2696, pruned_loss=0.04709, ctc_loss=0.08795, over 3824616.59 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:09:31,688 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.72 vs. limit=6.0
+2024-08-26 23:09:34,295 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.28 vs. limit=15.0
+2024-08-26 23:09:40,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=178234.66666666666, ans=0.025
+2024-08-26 23:10:03,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=178288.0, ans=0.0
+2024-08-26 23:10:07,386 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=178341.33333333334, ans=0.0
+2024-08-26 23:10:47,603 INFO [train.py:1114] (1/4) Epoch 14, batch 1100, loss[loss=0.2232, simple_loss=0.2834, pruned_loss=0.0605, ctc_loss=0.105, over 19567.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2692, pruned_loss=0.0468, ctc_loss=0.08751, over 3832045.48 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:10:53,001 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.389e+02 1.598e+02 1.774e+02 3.032e+02, threshold=3.197e+02, percent-clipped=0.0
+2024-08-26 23:11:01,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=178501.33333333334, ans=0.1
+2024-08-26 23:11:10,333 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.15 vs. limit=15.0
+2024-08-26 23:11:35,540 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=178661.33333333334, ans=0.125
+2024-08-26 23:11:37,950 INFO [train.py:1114] (1/4) Epoch 14, batch 1150, loss[loss=0.1744, simple_loss=0.2459, pruned_loss=0.03779, ctc_loss=0.06807, over 19594.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2685, pruned_loss=0.04637, ctc_loss=0.08683, over 3830601.15 frames. ], batch size: 52, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:11:38,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=178714.66666666666, ans=0.125
+2024-08-26 23:11:40,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=178714.66666666666, ans=0.125
+2024-08-26 23:11:42,084 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=178714.66666666666, ans=0.125
+2024-08-26 23:11:45,852 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:11:48,908 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=178768.0, ans=0.2
+2024-08-26 23:12:00,650 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.62 vs. limit=12.0
+2024-08-26 23:12:13,307 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.15 vs. limit=15.0
+2024-08-26 23:12:31,156 INFO [train.py:1114] (1/4) Epoch 14, batch 1200, loss[loss=0.2085, simple_loss=0.2842, pruned_loss=0.04781, ctc_loss=0.09325, over 19831.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2696, pruned_loss=0.04666, ctc_loss=0.08729, over 3826118.36 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:12:34,032 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=178981.33333333334, ans=0.015
+2024-08-26 23:12:36,804 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.458e+02 1.687e+02 2.139e+02 4.936e+02, threshold=3.375e+02, percent-clipped=2.0
+2024-08-26 23:12:38,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=178981.33333333334, ans=0.125
+2024-08-26 23:12:44,192 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.91 vs. limit=15.0
+2024-08-26 23:12:47,262 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=179034.66666666666, ans=0.125
+2024-08-26 23:12:58,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=179088.0, ans=0.125
+2024-08-26 23:13:05,966 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:13:14,545 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=179194.66666666666, ans=0.0
+2024-08-26 23:13:17,803 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.19 vs. limit=15.0
+2024-08-26 23:13:17,807 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.59 vs. limit=10.0
+2024-08-26 23:13:20,037 INFO [train.py:1114] (1/4) Epoch 14, batch 1250, loss[loss=0.2213, simple_loss=0.2908, pruned_loss=0.05558, ctc_loss=0.1018, over 19505.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.27, pruned_loss=0.04655, ctc_loss=0.08682, over 3843744.84 frames. ], batch size: 61, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:13:22,338 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.52 vs. limit=10.0
+2024-08-26 23:13:25,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=179248.0, ans=0.0
+2024-08-26 23:13:30,587 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff3.min_abs, batch_count=179301.33333333334, ans=0.2
+2024-08-26 23:13:45,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=179354.66666666666, ans=0.125
+2024-08-26 23:13:49,931 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=179354.66666666666, ans=0.125
+2024-08-26 23:13:49,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=179354.66666666666, ans=0.09899494936611666
+2024-08-26 23:13:55,634 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.32 vs. limit=10.0
+2024-08-26 23:13:57,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=179408.0, ans=0.0
+2024-08-26 23:14:01,067 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=179461.33333333334, ans=0.0
+2024-08-26 23:14:12,534 INFO [train.py:1114] (1/4) Epoch 14, batch 1300, loss[loss=0.2336, simple_loss=0.2956, pruned_loss=0.063, ctc_loss=0.114, over 18980.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2692, pruned_loss=0.04618, ctc_loss=0.0863, over 3845894.64 frames. ], batch size: 76, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:14:15,378 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=179514.66666666666, ans=0.125
+2024-08-26 23:14:19,137 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.184e+02 1.402e+02 1.628e+02 1.914e+02 2.926e+02, threshold=3.256e+02, percent-clipped=0.0
+2024-08-26 23:14:24,916 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=179568.0, ans=0.125
+2024-08-26 23:14:29,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=179568.0, ans=0.125
+2024-08-26 23:14:46,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=179674.66666666666, ans=0.0
+2024-08-26 23:14:47,909 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=179674.66666666666, ans=0.0
+2024-08-26 23:14:55,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=179728.0, ans=0.125
+2024-08-26 23:14:58,668 INFO [train.py:1114] (1/4) Epoch 14, batch 1350, loss[loss=0.1856, simple_loss=0.2651, pruned_loss=0.03838, ctc_loss=0.07338, over 19772.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.269, pruned_loss=0.0462, ctc_loss=0.086, over 3856351.84 frames. ], batch size: 54, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:16:00,222 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=179781.33333333334, ans=0.125
+2024-08-26 23:16:06,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=179834.66666666666, ans=0.0
+2024-08-26 23:16:34,901 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=179888.0, ans=0.125
+2024-08-26 23:16:49,730 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=179994.66666666666, ans=0.2
+2024-08-26 23:16:59,391 INFO [train.py:1114] (1/4) Epoch 14, batch 1400, loss[loss=0.1703, simple_loss=0.2354, pruned_loss=0.03861, ctc_loss=0.06984, over 19666.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2687, pruned_loss=0.04619, ctc_loss=0.08591, over 3863430.61 frames. ], batch size: 46, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:17:07,631 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.482e+02 1.624e+02 2.003e+02 3.142e+02, threshold=3.248e+02, percent-clipped=0.0
+2024-08-26 23:17:08,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=180048.0, ans=0.125
+2024-08-26 23:17:11,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=180101.33333333334, ans=0.125
+2024-08-26 23:17:24,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=180154.66666666666, ans=0.125
+2024-08-26 23:17:30,238 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.03 vs. limit=15.0
+2024-08-26 23:17:34,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180208.0, ans=0.125
+2024-08-26 23:17:34,937 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=180208.0, ans=0.0
+2024-08-26 23:17:35,125 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.20 vs. limit=10.0
+2024-08-26 23:17:37,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=180208.0, ans=0.0
+2024-08-26 23:17:45,995 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=180261.33333333334, ans=0.125
+2024-08-26 23:17:50,487 INFO [train.py:1114] (1/4) Epoch 14, batch 1450, loss[loss=0.2233, simple_loss=0.2894, pruned_loss=0.05783, ctc_loss=0.1039, over 19663.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2692, pruned_loss=0.04623, ctc_loss=0.08624, over 3861905.87 frames. ], batch size: 63, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:18:25,054 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=180421.33333333334, ans=0.0
+2024-08-26 23:18:36,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=180528.0, ans=0.125
+2024-08-26 23:18:37,031 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=180528.0, ans=0.125
+2024-08-26 23:18:40,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=180528.0, ans=0.0
+2024-08-26 23:18:46,184 INFO [train.py:1114] (1/4) Epoch 14, batch 1500, loss[loss=0.2126, simple_loss=0.2832, pruned_loss=0.05226, ctc_loss=0.0936, over 19586.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2698, pruned_loss=0.04645, ctc_loss=0.08659, over 3861529.31 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:18:52,954 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.115e+02 1.461e+02 1.607e+02 1.928e+02 3.862e+02, threshold=3.214e+02, percent-clipped=2.0
+2024-08-26 23:18:55,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=180634.66666666666, ans=0.125
+2024-08-26 23:19:14,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=180741.33333333334, ans=0.125
+2024-08-26 23:20:19,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=180741.33333333334, ans=0.0
+2024-08-26 23:20:20,533 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.93 vs. limit=22.5
+2024-08-26 23:20:30,224 INFO [train.py:1114] (1/4) Epoch 14, batch 1550, loss[loss=0.2114, simple_loss=0.2897, pruned_loss=0.04883, ctc_loss=0.08849, over 19621.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2701, pruned_loss=0.04685, ctc_loss=0.08736, over 3846645.55 frames. ], batch size: 60, lr: 1.07e-02, grad_scale: 16.0
+2024-08-26 23:20:32,403 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=180848.0, ans=0.125
+2024-08-26 23:21:01,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=181008.0, ans=0.0
+2024-08-26 23:21:20,684 INFO [train.py:1114] (1/4) Epoch 14, batch 1600, loss[loss=0.1982, simple_loss=0.2714, pruned_loss=0.04506, ctc_loss=0.08718, over 19843.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2699, pruned_loss=0.04697, ctc_loss=0.08779, over 3835823.69 frames. ], batch size: 57, lr: 1.07e-02, grad_scale: 32.0
+2024-08-26 23:21:27,134 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.461e+02 1.627e+02 1.971e+02 3.033e+02, threshold=3.255e+02, percent-clipped=0.0
+2024-08-26 23:21:29,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=181168.0, ans=0.0
+2024-08-26 23:21:34,948 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=181168.0, ans=0.0
+2024-08-26 23:21:39,199 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.42 vs. limit=6.0
+2024-08-26 23:23:41,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=181274.66666666666, ans=0.0
+2024-08-26 23:23:43,646 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.49 vs. limit=15.0
+2024-08-26 23:23:53,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=181381.33333333334, ans=0.0
+2024-08-26 23:23:54,652 INFO [train.py:1114] (1/4) Epoch 14, batch 1650, loss[loss=0.1944, simple_loss=0.2734, pruned_loss=0.04171, ctc_loss=0.07986, over 19621.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2694, pruned_loss=0.04669, ctc_loss=0.08736, over 3832200.11 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:23:57,064 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.37 vs. limit=22.5
+2024-08-26 23:23:58,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=181381.33333333334, ans=0.025
+2024-08-26 23:24:07,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=181434.66666666666, ans=0.125
+2024-08-26 23:24:10,557 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=181434.66666666666, ans=0.1
+2024-08-26 23:24:21,923 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.53 vs. limit=22.5
+2024-08-26 23:24:40,741 INFO [train.py:1114] (1/4) Epoch 14, batch 1700, loss[loss=0.1831, simple_loss=0.2475, pruned_loss=0.04285, ctc_loss=0.0825, over 19669.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2691, pruned_loss=0.04631, ctc_loss=0.08671, over 3846575.53 frames. ], batch size: 46, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:24:47,151 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.441e+02 1.691e+02 2.079e+02 3.382e+02, threshold=3.381e+02, percent-clipped=3.0
+2024-08-26 23:24:50,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=181701.33333333334, ans=0.2
+2024-08-26 23:25:02,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181754.66666666666, ans=0.1
+2024-08-26 23:25:11,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181808.0, ans=0.1
+2024-08-26 23:25:13,119 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181808.0, ans=0.1
+2024-08-26 23:25:13,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=181808.0, ans=0.0
+2024-08-26 23:25:18,151 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=181861.33333333334, ans=0.1
+2024-08-26 23:25:25,113 INFO [train.py:1114] (1/4) Epoch 14, batch 1750, loss[loss=0.1958, simple_loss=0.2563, pruned_loss=0.04991, ctc_loss=0.08861, over 19673.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2689, pruned_loss=0.04608, ctc_loss=0.08628, over 3852118.27 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:25:42,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=182021.33333333334, ans=0.1
+2024-08-26 23:25:46,989 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.67 vs. limit=12.0
+2024-08-26 23:25:52,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=182074.66666666666, ans=0.125
+2024-08-26 23:26:19,465 INFO [train.py:1114] (1/4) Epoch 14, batch 1800, loss[loss=0.1974, simple_loss=0.2744, pruned_loss=0.04312, ctc_loss=0.08539, over 19615.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2693, pruned_loss=0.04622, ctc_loss=0.08651, over 3853355.10 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:26:20,445 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=182181.33333333334, ans=0.125
+2024-08-26 23:26:24,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=182181.33333333334, ans=0.125
+2024-08-26 23:26:26,552 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.533e+02 1.884e+02 2.505e+02 4.097e+02, threshold=3.767e+02, percent-clipped=5.0
+2024-08-26 23:26:32,180 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=182234.66666666666, ans=0.09899494936611666
+2024-08-26 23:26:47,085 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.45 vs. limit=15.0
+2024-08-26 23:26:52,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=182341.33333333334, ans=0.125
+2024-08-26 23:26:59,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=182394.66666666666, ans=0.125
+2024-08-26 23:27:05,187 INFO [train.py:1114] (1/4) Epoch 14, batch 1850, loss[loss=0.1983, simple_loss=0.2749, pruned_loss=0.04382, ctc_loss=0.08533, over 19584.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2691, pruned_loss=0.04627, ctc_loss=0.08654, over 3856493.44 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:27:07,417 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.60 vs. limit=15.0
+2024-08-26 23:27:08,834 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=182448.0, ans=0.2
+2024-08-26 23:28:46,380 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.21 vs. limit=12.0
+2024-08-26 23:28:55,597 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.81 vs. limit=15.0
+2024-08-26 23:29:14,713 INFO [train.py:1114] (1/4) Epoch 14, batch 1900, loss[loss=0.1943, simple_loss=0.2731, pruned_loss=0.04191, ctc_loss=0.07916, over 19663.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2696, pruned_loss=0.04628, ctc_loss=0.08664, over 3860795.35 frames. ], batch size: 59, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:29:16,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=182714.66666666666, ans=0.2
+2024-08-26 23:29:21,598 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.217e+02 1.441e+02 1.690e+02 2.071e+02 3.452e+02, threshold=3.379e+02, percent-clipped=0.0
+2024-08-26 23:29:33,696 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=182821.33333333334, ans=0.0
+2024-08-26 23:29:35,058 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.74 vs. limit=15.0
+2024-08-26 23:29:47,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=182874.66666666666, ans=0.125
+2024-08-26 23:29:55,246 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=182928.0, ans=0.125
+2024-08-26 23:29:57,807 INFO [train.py:1114] (1/4) Epoch 14, batch 1950, loss[loss=0.1749, simple_loss=0.2519, pruned_loss=0.03525, ctc_loss=0.06837, over 19584.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2701, pruned_loss=0.04613, ctc_loss=0.08635, over 3869924.75 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:30:07,159 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=183034.66666666666, ans=0.0
+2024-08-26 23:30:08,386 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.23 vs. limit=15.0
+2024-08-26 23:30:10,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=183034.66666666666, ans=0.125
+2024-08-26 23:30:16,135 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.52 vs. limit=22.5
+2024-08-26 23:30:16,808 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 23:30:30,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=183141.33333333334, ans=0.0
+2024-08-26 23:30:33,076 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.13 vs. limit=15.0
+2024-08-26 23:30:44,846 INFO [train.py:1114] (1/4) Epoch 14, batch 2000, loss[loss=0.1704, simple_loss=0.2377, pruned_loss=0.0383, ctc_loss=0.06639, over 19667.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2708, pruned_loss=0.04694, ctc_loss=0.08763, over 3854683.60 frames. ], batch size: 45, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:30:52,059 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.411e+02 1.571e+02 1.845e+02 2.838e+02, threshold=3.143e+02, percent-clipped=0.0
+2024-08-26 23:30:54,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=183301.33333333334, ans=0.2
+2024-08-26 23:31:14,055 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.81 vs. limit=15.0
+2024-08-26 23:31:14,090 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.68 vs. limit=22.5
+2024-08-26 23:32:08,143 INFO [train.py:1114] (1/4) Epoch 14, batch 2050, loss[loss=0.1704, simple_loss=0.2402, pruned_loss=0.0362, ctc_loss=0.07049, over 19696.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2696, pruned_loss=0.0467, ctc_loss=0.08712, over 3850423.23 frames. ], batch size: 47, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:32:09,092 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=183514.66666666666, ans=0.1
+2024-08-26 23:32:09,135 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=183514.66666666666, ans=0.2
+2024-08-26 23:32:20,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=183568.0, ans=0.0
+2024-08-26 23:32:28,624 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.33 vs. limit=10.0
+2024-08-26 23:32:35,748 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.13 vs. limit=22.5
+2024-08-26 23:32:51,519 INFO [train.py:1114] (1/4) Epoch 14, batch 2100, loss[loss=0.1947, simple_loss=0.2725, pruned_loss=0.04232, ctc_loss=0.08047, over 19783.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2687, pruned_loss=0.046, ctc_loss=0.08583, over 3857559.04 frames. ], batch size: 54, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:32:57,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=183781.33333333334, ans=0.125
+2024-08-26 23:32:58,374 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.193e+02 1.491e+02 1.652e+02 1.860e+02 2.729e+02, threshold=3.304e+02, percent-clipped=0.0
+2024-08-26 23:34:16,403 INFO [train.py:1114] (1/4) Epoch 14, batch 2150, loss[loss=0.1995, simple_loss=0.2718, pruned_loss=0.04662, ctc_loss=0.08499, over 19581.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2683, pruned_loss=0.04605, ctc_loss=0.0858, over 3868055.93 frames. ], batch size: 52, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:34:37,682 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.80 vs. limit=6.0
+2024-08-26 23:34:53,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=184261.33333333334, ans=0.125
+2024-08-26 23:34:56,710 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=184261.33333333334, ans=0.0
+2024-08-26 23:34:57,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=184261.33333333334, ans=0.2
+2024-08-26 23:34:58,806 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=4.66 vs. limit=15.0
+2024-08-26 23:34:59,295 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=184314.66666666666, ans=0.0
+2024-08-26 23:34:59,941 INFO [train.py:1114] (1/4) Epoch 14, batch 2200, loss[loss=0.1944, simple_loss=0.2689, pruned_loss=0.04333, ctc_loss=0.08319, over 19577.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.268, pruned_loss=0.04578, ctc_loss=0.08545, over 3867342.29 frames. ], batch size: 57, lr: 1.06e-02, grad_scale: 32.0
+2024-08-26 23:35:02,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=184314.66666666666, ans=0.125
+2024-08-26 23:35:06,950 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.447e+02 1.750e+02 2.552e+02 4.295e+02, threshold=3.499e+02, percent-clipped=8.0
+2024-08-26 23:35:17,653 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=184421.33333333334, ans=0.0
+2024-08-26 23:35:30,973 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.80 vs. limit=12.0
+2024-08-26 23:35:39,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=184528.0, ans=0.125
+2024-08-26 23:35:41,541 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=184528.0, ans=0.125
+2024-08-26 23:35:43,863 INFO [train.py:1114] (1/4) Epoch 14, batch 2250, loss[loss=0.2049, simple_loss=0.2787, pruned_loss=0.04828, ctc_loss=0.08628, over 19609.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2679, pruned_loss=0.0457, ctc_loss=0.08529, over 3867767.48 frames. ], batch size: 55, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:35:56,779 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=184634.66666666666, ans=0.125
+2024-08-26 23:36:05,242 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=184688.0, ans=0.125
+2024-08-26 23:36:14,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=184741.33333333334, ans=0.1
+2024-08-26 23:36:27,381 INFO [train.py:1114] (1/4) Epoch 14, batch 2300, loss[loss=0.1767, simple_loss=0.2469, pruned_loss=0.03868, ctc_loss=0.07262, over 19503.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2669, pruned_loss=0.04551, ctc_loss=0.08491, over 3861437.78 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 16.0
+2024-08-26 23:36:28,868 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.37 vs. limit=6.0
+2024-08-26 23:36:35,134 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.458e+02 1.662e+02 2.114e+02 3.033e+02, threshold=3.324e+02, percent-clipped=0.0
+2024-08-26 23:36:39,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=184901.33333333334, ans=0.0
+2024-08-26 23:36:55,880 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=185008.0, ans=0.05
+2024-08-26 23:37:00,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=185008.0, ans=0.07
+2024-08-26 23:37:01,663 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=185061.33333333334, ans=0.0
+2024-08-26 23:37:10,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=185114.66666666666, ans=10.0
+2024-08-26 23:37:10,969 INFO [train.py:1114] (1/4) Epoch 14, batch 2350, loss[loss=0.2007, simple_loss=0.2717, pruned_loss=0.04773, ctc_loss=0.08549, over 19691.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2673, pruned_loss=0.04591, ctc_loss=0.08549, over 3864115.47 frames. ], batch size: 63, lr: 1.05e-02, grad_scale: 16.0
+2024-08-26 23:37:19,364 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=185114.66666666666, ans=0.1
+2024-08-26 23:37:24,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=185168.0, ans=0.125
+2024-08-26 23:37:42,683 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.50 vs. limit=6.0
+2024-08-26 23:37:44,392 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.49 vs. limit=15.0
+2024-08-26 23:37:50,152 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=185328.0, ans=0.0
+2024-08-26 23:37:55,070 INFO [train.py:1114] (1/4) Epoch 14, batch 2400, loss[loss=0.2141, simple_loss=0.2786, pruned_loss=0.05432, ctc_loss=0.1023, over 19263.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2697, pruned_loss=0.04684, ctc_loss=0.08704, over 3858838.15 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 32.0
+2024-08-26 23:37:55,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=185381.33333333334, ans=0.0
+2024-08-26 23:38:00,442 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=185381.33333333334, ans=0.125
+2024-08-26 23:38:02,857 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.081e+02 1.569e+02 1.843e+02 2.357e+02 3.475e+02, threshold=3.685e+02, percent-clipped=2.0
+2024-08-26 23:38:06,597 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=185434.66666666666, ans=0.0
+2024-08-26 23:38:07,545 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=185434.66666666666, ans=0.0
+2024-08-26 23:38:15,934 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=185488.0, ans=0.2
+2024-08-26 23:38:39,327 INFO [train.py:1114] (1/4) Epoch 14, batch 2450, loss[loss=0.2837, simple_loss=0.3195, pruned_loss=0.08939, ctc_loss=0.1728, over 13328.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2738, pruned_loss=0.04964, ctc_loss=0.09255, over 3731819.87 frames. ], batch size: 140, lr: 1.05e-02, grad_scale: 16.0
+2024-08-26 23:38:41,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=185648.0, ans=0.125
+2024-08-26 23:38:50,138 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=185701.33333333334, ans=0.125
+2024-08-26 23:38:59,792 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.03 vs. limit=15.0
+2024-08-26 23:39:00,851 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.13 vs. limit=15.0
+2024-08-26 23:40:44,471 INFO [train.py:1114] (1/4) Epoch 15, batch 0, loss[loss=0.1895, simple_loss=0.255, pruned_loss=0.04425, ctc_loss=0.08872, over 19792.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.255, pruned_loss=0.04425, ctc_loss=0.08872, over 19792.00 frames. ], batch size: 49, lr: 1.02e-02, grad_scale: 32.0
+2024-08-26 23:40:46,074 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-26 23:41:25,236 INFO [train.py:1146] (1/4) Epoch 15, validation: loss=0.1751, simple_loss=0.2686, pruned_loss=0.03035, ctc_loss=0.05216, over 944034.00 frames.
+2024-08-26 23:41:29,769 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-26 23:41:30,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=185856.0, ans=0.0
+2024-08-26 23:41:32,505 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=185856.0, ans=0.125
+2024-08-26 23:41:32,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=185856.0, ans=0.2
+2024-08-26 23:42:34,508 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.661e+02 1.811e+02 2.041e+02 3.400e+02, threshold=3.623e+02, percent-clipped=0.0
+2024-08-26 23:42:54,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=185962.66666666666, ans=0.2
+2024-08-26 23:47:49,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=186069.33333333334, ans=0.025
+2024-08-26 23:49:22,219 INFO [train.py:1114] (1/4) Epoch 15, batch 50, loss[loss=0.1653, simple_loss=0.2339, pruned_loss=0.03466, ctc_loss=0.06842, over 19715.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2687, pruned_loss=0.04665, ctc_loss=0.08806, over 844130.59 frames. ], batch size: 47, lr: 1.02e-02, grad_scale: 16.0
+2024-08-26 23:49:22,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=186122.66666666666, ans=0.0
+2024-08-26 23:51:28,092 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=186122.66666666666, ans=0.125
+2024-08-26 23:52:09,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=186229.33333333334, ans=0.2
+2024-08-26 23:53:58,855 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.98 vs. limit=15.0
+2024-08-26 23:57:10,319 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=186336.0, ans=0.0
+2024-08-26 23:58:27,655 INFO [train.py:1114] (1/4) Epoch 15, batch 100, loss[loss=0.1844, simple_loss=0.2564, pruned_loss=0.0417, ctc_loss=0.07263, over 19731.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2708, pruned_loss=0.04635, ctc_loss=0.08704, over 1499265.19 frames. ], batch size: 51, lr: 1.02e-02, grad_scale: 16.0
+2024-08-27 00:06:36,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=186442.66666666666, ans=0.2
+2024-08-27 00:06:52,657 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.493e+02 1.771e+02 2.166e+02 3.428e+02, threshold=3.543e+02, percent-clipped=0.0
+2024-08-27 00:09:12,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=186496.0, ans=0.2
+2024-08-27 00:12:03,622 INFO [train.py:1114] (1/4) Epoch 15, batch 150, loss[loss=0.183, simple_loss=0.2431, pruned_loss=0.04453, ctc_loss=0.08473, over 19736.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2685, pruned_loss=0.04576, ctc_loss=0.08584, over 2027143.42 frames. ], batch size: 47, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:12:28,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=186709.33333333334, ans=0.1
+2024-08-27 00:12:43,984 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.33 vs. limit=12.0
+2024-08-27 00:14:08,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=186762.66666666666, ans=0.1
+2024-08-27 00:14:15,833 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=186816.0, ans=0.1
+2024-08-27 00:14:42,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=186816.0, ans=0.0
+2024-08-27 00:17:10,134 INFO [train.py:1114] (1/4) Epoch 15, batch 200, loss[loss=0.2127, simple_loss=0.287, pruned_loss=0.05025, ctc_loss=0.09467, over 18371.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2668, pruned_loss=0.04493, ctc_loss=0.08443, over 2433712.99 frames. ], batch size: 85, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:17:42,952 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=186976.0, ans=0.125
+2024-08-27 00:17:55,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=187029.33333333334, ans=0.125
+2024-08-27 00:17:59,318 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.145e+02 1.435e+02 1.602e+02 1.959e+02 3.588e+02, threshold=3.205e+02, percent-clipped=1.0
+2024-08-27 00:18:07,276 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.70 vs. limit=6.0
+2024-08-27 00:18:16,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187082.66666666666, ans=0.1
+2024-08-27 00:18:47,710 INFO [train.py:1114] (1/4) Epoch 15, batch 250, loss[loss=0.2093, simple_loss=0.2893, pruned_loss=0.04677, ctc_loss=0.08957, over 19426.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2669, pruned_loss=0.04467, ctc_loss=0.08361, over 2754856.41 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:19:47,226 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff3.min_abs, batch_count=187189.33333333334, ans=0.2
+2024-08-27 00:19:50,610 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.72 vs. limit=6.0
+2024-08-27 00:20:13,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=187296.0, ans=0.0
+2024-08-27 00:20:18,154 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=187349.33333333334, ans=0.125
+2024-08-27 00:20:31,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=187349.33333333334, ans=0.0
+2024-08-27 00:21:09,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=187402.66666666666, ans=10.0
+2024-08-27 00:21:12,355 INFO [train.py:1114] (1/4) Epoch 15, batch 300, loss[loss=0.1994, simple_loss=0.2727, pruned_loss=0.0469, ctc_loss=0.08062, over 19546.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2661, pruned_loss=0.04443, ctc_loss=0.08299, over 2999580.57 frames. ], batch size: 61, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:21:17,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=187456.0, ans=0.125
+2024-08-27 00:21:17,291 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.33 vs. limit=15.0
+2024-08-27 00:22:03,852 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.482e+02 1.757e+02 2.250e+02 4.561e+02, threshold=3.514e+02, percent-clipped=7.0
+2024-08-27 00:22:31,075 INFO [train.py:1114] (1/4) Epoch 15, batch 350, loss[loss=0.173, simple_loss=0.2404, pruned_loss=0.03828, ctc_loss=0.0727, over 19736.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2671, pruned_loss=0.04464, ctc_loss=0.08334, over 3189607.36 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 16.0
+2024-08-27 00:24:46,299 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.99 vs. limit=15.0
+2024-08-27 00:24:57,885 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=187829.33333333334, ans=0.125
+2024-08-27 00:25:01,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=187829.33333333334, ans=0.125
+2024-08-27 00:25:01,968 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.81 vs. limit=15.0
+2024-08-27 00:25:05,348 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=187829.33333333334, ans=0.125
+2024-08-27 00:25:07,171 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=187882.66666666666, ans=0.125
+2024-08-27 00:25:25,431 INFO [train.py:1114] (1/4) Epoch 15, batch 400, loss[loss=0.203, simple_loss=0.2783, pruned_loss=0.04685, ctc_loss=0.08517, over 19491.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2664, pruned_loss=0.04423, ctc_loss=0.08272, over 3341688.53 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:25:26,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=187989.33333333334, ans=0.1
+2024-08-27 00:25:28,518 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=187989.33333333334, ans=0.05
+2024-08-27 00:25:46,849 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.414e+02 1.733e+02 2.120e+02 3.671e+02, threshold=3.466e+02, percent-clipped=1.0
+2024-08-27 00:25:50,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=188096.0, ans=0.2
+2024-08-27 00:26:30,475 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=188202.66666666666, ans=0.2
+2024-08-27 00:26:33,890 INFO [train.py:1114] (1/4) Epoch 15, batch 450, loss[loss=0.1822, simple_loss=0.2633, pruned_loss=0.03631, ctc_loss=0.07122, over 19602.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2665, pruned_loss=0.0446, ctc_loss=0.08326, over 3448438.69 frames. ], batch size: 55, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:26:40,287 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=188256.0, ans=0.2
+2024-08-27 00:27:29,690 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:27:51,552 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=188469.33333333334, ans=0.125
+2024-08-27 00:27:58,673 INFO [train.py:1114] (1/4) Epoch 15, batch 500, loss[loss=0.1995, simple_loss=0.2787, pruned_loss=0.0444, ctc_loss=0.07869, over 19663.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2657, pruned_loss=0.04419, ctc_loss=0.08263, over 3544209.62 frames. ], batch size: 63, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:28:09,837 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.89 vs. limit=15.0
+2024-08-27 00:28:12,345 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=188576.0, ans=0.125
+2024-08-27 00:28:18,674 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=188576.0, ans=0.125
+2024-08-27 00:28:18,686 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=188576.0, ans=0.125
+2024-08-27 00:28:19,528 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=188629.33333333334, ans=0.5
+2024-08-27 00:28:25,434 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.423e+02 1.716e+02 2.052e+02 3.766e+02, threshold=3.431e+02, percent-clipped=1.0
+2024-08-27 00:28:40,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=188682.66666666666, ans=0.125
+2024-08-27 00:28:42,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=188682.66666666666, ans=0.0
+2024-08-27 00:28:44,407 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=188682.66666666666, ans=0.1
+2024-08-27 00:28:45,249 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=188682.66666666666, ans=0.0
+2024-08-27 00:28:47,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=188736.0, ans=0.07
+2024-08-27 00:28:50,794 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=188736.0, ans=0.04949747468305833
+2024-08-27 00:28:56,799 INFO [train.py:1114] (1/4) Epoch 15, batch 550, loss[loss=0.2295, simple_loss=0.2905, pruned_loss=0.06243, ctc_loss=0.109, over 19161.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2663, pruned_loss=0.04451, ctc_loss=0.08327, over 3606403.89 frames. ], batch size: 71, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:29:52,010 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=188896.0, ans=0.125
+2024-08-27 00:29:56,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=188896.0, ans=0.2
+2024-08-27 00:29:59,447 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=188949.33333333334, ans=0.125
+2024-08-27 00:30:17,658 INFO [train.py:1114] (1/4) Epoch 15, batch 600, loss[loss=0.2147, simple_loss=0.2955, pruned_loss=0.04818, ctc_loss=0.09373, over 19405.00 frames. ], tot_loss[loss=0.195, simple_loss=0.267, pruned_loss=0.04472, ctc_loss=0.08367, over 3665565.83 frames. ], batch size: 67, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:30:23,877 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.26 vs. limit=15.0
+2024-08-27 00:31:06,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=189109.33333333334, ans=0.125
+2024-08-27 00:31:12,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=189109.33333333334, ans=0.125
+2024-08-27 00:31:15,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=189162.66666666666, ans=0.125
+2024-08-27 00:31:18,192 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.139e+02 1.468e+02 1.719e+02 2.297e+02 4.329e+02, threshold=3.438e+02, percent-clipped=2.0
+2024-08-27 00:31:52,656 INFO [train.py:1114] (1/4) Epoch 15, batch 650, loss[loss=0.1784, simple_loss=0.2599, pruned_loss=0.0346, ctc_loss=0.0692, over 19759.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2662, pruned_loss=0.04451, ctc_loss=0.08316, over 3715728.51 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:31:58,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=189322.66666666666, ans=0.125
+2024-08-27 00:31:59,556 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.57 vs. limit=12.0
+2024-08-27 00:31:59,618 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.69 vs. limit=15.0
+2024-08-27 00:32:24,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=189482.66666666666, ans=0.125
+2024-08-27 00:32:31,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=189536.0, ans=0.125
+2024-08-27 00:33:04,286 INFO [train.py:1114] (1/4) Epoch 15, batch 700, loss[loss=0.1649, simple_loss=0.2393, pruned_loss=0.03314, ctc_loss=0.06051, over 19748.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2664, pruned_loss=0.04483, ctc_loss=0.08376, over 3748378.29 frames. ], batch size: 51, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:33:07,326 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=189589.33333333334, ans=0.125
+2024-08-27 00:33:09,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=189589.33333333334, ans=0.025
+2024-08-27 00:33:13,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=189642.66666666666, ans=0.1
+2024-08-27 00:33:58,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=189642.66666666666, ans=0.5
+2024-08-27 00:34:01,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=189696.0, ans=0.5
+2024-08-27 00:34:01,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=189696.0, ans=0.0
+2024-08-27 00:34:03,643 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.167e+02 1.548e+02 1.878e+02 2.334e+02 4.066e+02, threshold=3.756e+02, percent-clipped=4.0
+2024-08-27 00:34:11,292 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=189749.33333333334, ans=0.125
+2024-08-27 00:34:11,468 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.58 vs. limit=22.5
+2024-08-27 00:35:10,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=189802.66666666666, ans=0.1
+2024-08-27 00:35:17,154 INFO [train.py:1114] (1/4) Epoch 15, batch 750, loss[loss=0.1772, simple_loss=0.2573, pruned_loss=0.03454, ctc_loss=0.07018, over 19478.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2661, pruned_loss=0.04463, ctc_loss=0.08356, over 3775378.62 frames. ], batch size: 54, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:35:24,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=189856.0, ans=0.0
+2024-08-27 00:35:24,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=189856.0, ans=10.0
+2024-08-27 00:35:27,726 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=189909.33333333334, ans=0.5
+2024-08-27 00:35:36,913 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=189962.66666666666, ans=0.0
+2024-08-27 00:35:38,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=189962.66666666666, ans=0.0
+2024-08-27 00:35:48,557 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.11 vs. limit=22.5
+2024-08-27 00:36:02,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=190122.66666666666, ans=0.125
+2024-08-27 00:36:06,228 INFO [train.py:1114] (1/4) Epoch 15, batch 800, loss[loss=0.1664, simple_loss=0.2392, pruned_loss=0.03354, ctc_loss=0.06657, over 19404.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2666, pruned_loss=0.04475, ctc_loss=0.08373, over 3797168.46 frames. ], batch size: 48, lr: 1.01e-02, grad_scale: 32.0
+2024-08-27 00:36:22,547 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=190176.0, ans=0.125
+2024-08-27 00:36:29,662 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.516e+02 1.778e+02 2.217e+02 3.654e+02, threshold=3.555e+02, percent-clipped=0.0
+2024-08-27 00:36:29,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=190229.33333333334, ans=10.0
+2024-08-27 00:36:41,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=190282.66666666666, ans=0.125
+2024-08-27 00:36:54,402 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.20 vs. limit=15.0
+2024-08-27 00:36:54,883 INFO [train.py:1114] (1/4) Epoch 15, batch 850, loss[loss=0.2108, simple_loss=0.2831, pruned_loss=0.05007, ctc_loss=0.09585, over 19649.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2665, pruned_loss=0.04468, ctc_loss=0.08364, over 3816716.82 frames. ], batch size: 59, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:36:55,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=190389.33333333334, ans=0.125
+2024-08-27 00:36:58,746 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=190389.33333333334, ans=0.125
+2024-08-27 00:37:04,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=190389.33333333334, ans=0.2
+2024-08-27 00:37:17,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=190496.0, ans=0.125
+2024-08-27 00:37:39,070 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=190602.66666666666, ans=0.125
+2024-08-27 00:37:40,090 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=190602.66666666666, ans=0.07
+2024-08-27 00:37:46,588 INFO [train.py:1114] (1/4) Epoch 15, batch 900, loss[loss=0.1682, simple_loss=0.2403, pruned_loss=0.03487, ctc_loss=0.06585, over 19801.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.267, pruned_loss=0.04504, ctc_loss=0.08396, over 3819125.90 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:38:08,117 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=190762.66666666666, ans=0.2
+2024-08-27 00:38:12,640 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.206e+02 1.396e+02 1.546e+02 1.855e+02 3.193e+02, threshold=3.091e+02, percent-clipped=0.0
+2024-08-27 00:38:32,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=190869.33333333334, ans=0.0
+2024-08-27 00:38:42,121 INFO [train.py:1114] (1/4) Epoch 15, batch 950, loss[loss=0.1849, simple_loss=0.2589, pruned_loss=0.04046, ctc_loss=0.07486, over 19487.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2672, pruned_loss=0.04507, ctc_loss=0.08418, over 3820889.42 frames. ], batch size: 49, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:38:49,097 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=190922.66666666666, ans=0.2
+2024-08-27 00:39:04,507 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.92 vs. limit=15.0
+2024-08-27 00:39:11,487 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=5.81 vs. limit=15.0
+2024-08-27 00:39:13,982 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=191029.33333333334, ans=0.125
+2024-08-27 00:39:22,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=191082.66666666666, ans=0.125
+2024-08-27 00:39:22,804 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.09 vs. limit=12.0
+2024-08-27 00:39:36,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=191189.33333333334, ans=0.125
+2024-08-27 00:39:37,092 INFO [train.py:1114] (1/4) Epoch 15, batch 1000, loss[loss=0.1683, simple_loss=0.2403, pruned_loss=0.0355, ctc_loss=0.06316, over 19861.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2677, pruned_loss=0.04529, ctc_loss=0.08456, over 3815966.68 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:39:54,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=191242.66666666666, ans=0.125
+2024-08-27 00:40:00,305 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.061e+02 1.403e+02 1.586e+02 1.924e+02 3.101e+02, threshold=3.172e+02, percent-clipped=1.0
+2024-08-27 00:40:01,001 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.81 vs. limit=22.5
+2024-08-27 00:40:01,554 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=191296.0, ans=0.025
+2024-08-27 00:40:16,368 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=191402.66666666666, ans=0.0
+2024-08-27 00:40:25,497 INFO [train.py:1114] (1/4) Epoch 15, batch 1050, loss[loss=0.1849, simple_loss=0.2669, pruned_loss=0.03686, ctc_loss=0.07286, over 19833.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2673, pruned_loss=0.04512, ctc_loss=0.08431, over 3823073.04 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:40:41,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=191509.33333333334, ans=0.2
+2024-08-27 00:40:42,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=191509.33333333334, ans=0.05
+2024-08-27 00:40:46,767 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=191562.66666666666, ans=0.125
+2024-08-27 00:41:12,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=191669.33333333334, ans=0.0
+2024-08-27 00:41:14,678 INFO [train.py:1114] (1/4) Epoch 15, batch 1100, loss[loss=0.1829, simple_loss=0.2649, pruned_loss=0.03613, ctc_loss=0.07167, over 19583.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2669, pruned_loss=0.04495, ctc_loss=0.08402, over 3830141.95 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:41:17,666 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=191722.66666666666, ans=0.2
+2024-08-27 00:41:20,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=191722.66666666666, ans=0.0
+2024-08-27 00:41:23,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=191776.0, ans=0.0
+2024-08-27 00:41:36,203 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 1.518e+02 1.811e+02 2.066e+02 3.149e+02, threshold=3.622e+02, percent-clipped=0.0
+2024-08-27 00:41:57,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=191936.0, ans=0.09899494936611666
+2024-08-27 00:42:07,801 INFO [train.py:1114] (1/4) Epoch 15, batch 1150, loss[loss=0.1834, simple_loss=0.2632, pruned_loss=0.03721, ctc_loss=0.07311, over 19589.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2666, pruned_loss=0.04485, ctc_loss=0.08393, over 3829266.93 frames. ], batch size: 52, lr: 1.00e-02, grad_scale: 16.0
+2024-08-27 00:42:08,083 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=191989.33333333334, ans=0.0
+2024-08-27 00:42:23,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=191989.33333333334, ans=0.125
+2024-08-27 00:42:42,068 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=192096.0, ans=0.0
+2024-08-27 00:42:55,005 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=192202.66666666666, ans=0.0
+2024-08-27 00:43:04,223 INFO [train.py:1114] (1/4) Epoch 15, batch 1200, loss[loss=0.1859, simple_loss=0.2721, pruned_loss=0.03625, ctc_loss=0.06805, over 19846.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2674, pruned_loss=0.04509, ctc_loss=0.08444, over 3825173.40 frames. ], batch size: 57, lr: 1.00e-02, grad_scale: 32.0
+2024-08-27 00:43:06,792 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.52 vs. limit=6.0
+2024-08-27 00:43:09,064 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=192256.0, ans=0.0
+2024-08-27 00:44:32,779 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.478e+02 1.729e+02 2.216e+02 4.347e+02, threshold=3.458e+02, percent-clipped=1.0
+2024-08-27 00:45:40,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=192416.0, ans=0.1
+2024-08-27 00:46:05,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=192469.33333333334, ans=0.0
+2024-08-27 00:46:05,582 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=192469.33333333334, ans=0.125
+2024-08-27 00:46:12,630 INFO [train.py:1114] (1/4) Epoch 15, batch 1250, loss[loss=0.2014, simple_loss=0.2689, pruned_loss=0.04918, ctc_loss=0.08879, over 19519.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2677, pruned_loss=0.04508, ctc_loss=0.08435, over 3843000.09 frames. ], batch size: 61, lr: 1.00e-02, grad_scale: 32.0
+2024-08-27 00:47:15,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=192576.0, ans=0.125
+2024-08-27 00:48:15,204 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.19 vs. limit=22.5
+2024-08-27 00:48:21,072 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=192736.0, ans=0.125
+2024-08-27 00:48:26,358 INFO [train.py:1114] (1/4) Epoch 15, batch 1300, loss[loss=0.2117, simple_loss=0.2834, pruned_loss=0.05026, ctc_loss=0.09879, over 18934.00 frames. ], tot_loss[loss=0.195, simple_loss=0.267, pruned_loss=0.04475, ctc_loss=0.08359, over 3846476.85 frames. ], batch size: 76, lr: 9.99e-03, grad_scale: 32.0
+2024-08-27 00:49:50,655 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.210e+02 1.421e+02 1.669e+02 2.080e+02 3.869e+02, threshold=3.339e+02, percent-clipped=2.0
+2024-08-27 00:50:09,960 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=192949.33333333334, ans=0.125
+2024-08-27 00:50:40,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=193002.66666666666, ans=0.125
+2024-08-27 00:50:43,622 INFO [train.py:1114] (1/4) Epoch 15, batch 1350, loss[loss=0.1839, simple_loss=0.2645, pruned_loss=0.03744, ctc_loss=0.07106, over 19755.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2665, pruned_loss=0.0444, ctc_loss=0.08277, over 3858434.80 frames. ], batch size: 54, lr: 9.98e-03, grad_scale: 32.0
+2024-08-27 00:50:43,780 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:50:48,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=193056.0, ans=0.025
+2024-08-27 00:51:56,719 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=193109.33333333334, ans=0.0
+2024-08-27 00:51:56,824 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:52:07,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=193162.66666666666, ans=0.0
+2024-08-27 00:52:08,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=193162.66666666666, ans=10.0
+2024-08-27 00:53:17,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=193269.33333333334, ans=0.125
+2024-08-27 00:53:24,207 INFO [train.py:1114] (1/4) Epoch 15, batch 1400, loss[loss=0.1766, simple_loss=0.236, pruned_loss=0.04286, ctc_loss=0.07899, over 19677.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.266, pruned_loss=0.04419, ctc_loss=0.08245, over 3864695.51 frames. ], batch size: 46, lr: 9.98e-03, grad_scale: 32.0
+2024-08-27 00:53:34,994 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.69 vs. limit=15.0
+2024-08-27 00:53:56,715 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=193429.33333333334, ans=0.0
+2024-08-27 00:53:57,413 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.449e+02 1.647e+02 2.125e+02 3.032e+02, threshold=3.293e+02, percent-clipped=0.0
+2024-08-27 00:54:13,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=193429.33333333334, ans=0.2
+2024-08-27 00:54:22,536 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=193482.66666666666, ans=0.125
+2024-08-27 00:54:50,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=193482.66666666666, ans=0.1
+2024-08-27 00:54:57,444 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.57 vs. limit=15.0
+2024-08-27 00:55:08,678 INFO [train.py:1114] (1/4) Epoch 15, batch 1450, loss[loss=0.2191, simple_loss=0.2879, pruned_loss=0.05465, ctc_loss=0.1027, over 19683.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2668, pruned_loss=0.04439, ctc_loss=0.08285, over 3862898.74 frames. ], batch size: 63, lr: 9.97e-03, grad_scale: 32.0
+2024-08-27 00:55:34,969 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=193589.33333333334, ans=0.0
+2024-08-27 00:56:15,857 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=193749.33333333334, ans=0.2
+2024-08-27 00:56:20,314 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.71 vs. limit=10.0
+2024-08-27 00:56:36,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=193802.66666666666, ans=0.0
+2024-08-27 00:56:37,088 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=193802.66666666666, ans=0.125
+2024-08-27 00:56:39,804 INFO [train.py:1114] (1/4) Epoch 15, batch 1500, loss[loss=0.2041, simple_loss=0.277, pruned_loss=0.04792, ctc_loss=0.08831, over 19570.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.267, pruned_loss=0.04429, ctc_loss=0.08269, over 3862677.43 frames. ], batch size: 57, lr: 9.96e-03, grad_scale: 32.0
+2024-08-27 00:57:54,153 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=193909.33333333334, ans=0.125
+2024-08-27 00:58:20,413 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.504e+02 1.720e+02 2.138e+02 3.076e+02, threshold=3.439e+02, percent-clipped=0.0
+2024-08-27 00:58:45,618 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:58:50,644 INFO [train.py:1114] (1/4) Epoch 15, batch 1550, loss[loss=0.2196, simple_loss=0.29, pruned_loss=0.05431, ctc_loss=0.1012, over 19586.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2672, pruned_loss=0.04466, ctc_loss=0.08362, over 3845845.14 frames. ], batch size: 60, lr: 9.96e-03, grad_scale: 32.0
+2024-08-27 00:58:55,863 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.12 vs. limit=22.5
+2024-08-27 00:59:00,353 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=194176.0, ans=0.025
+2024-08-27 00:59:16,147 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=194229.33333333334, ans=0.125
+2024-08-27 00:59:16,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=194229.33333333334, ans=0.2
+2024-08-27 00:59:17,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.65 vs. limit=15.0
+2024-08-27 00:59:17,951 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=194282.66666666666, ans=0.0
+2024-08-27 00:59:18,788 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=194282.66666666666, ans=0.1
+2024-08-27 00:59:37,711 INFO [train.py:1114] (1/4) Epoch 15, batch 1600, loss[loss=0.202, simple_loss=0.2802, pruned_loss=0.04468, ctc_loss=0.08603, over 19833.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2674, pruned_loss=0.04494, ctc_loss=0.0841, over 3835219.53 frames. ], batch size: 57, lr: 9.95e-03, grad_scale: 32.0
+2024-08-27 00:59:43,515 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 00:59:50,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=194442.66666666666, ans=0.1
+2024-08-27 00:59:57,061 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.23 vs. limit=15.0
+2024-08-27 00:59:57,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=194496.0, ans=0.125
+2024-08-27 01:00:17,421 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.455e+02 1.710e+02 2.060e+02 3.831e+02, threshold=3.419e+02, percent-clipped=3.0
+2024-08-27 01:00:41,451 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=194602.66666666666, ans=0.05
+2024-08-27 01:00:50,799 INFO [train.py:1114] (1/4) Epoch 15, batch 1650, loss[loss=0.2059, simple_loss=0.2801, pruned_loss=0.0476, ctc_loss=0.09133, over 19646.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2676, pruned_loss=0.04505, ctc_loss=0.08421, over 3831815.30 frames. ], batch size: 59, lr: 9.94e-03, grad_scale: 16.0
+2024-08-27 01:01:25,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=194762.66666666666, ans=0.125
+2024-08-27 01:01:32,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=194762.66666666666, ans=0.07
+2024-08-27 01:01:42,661 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=194816.0, ans=0.125
+2024-08-27 01:01:43,614 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=194816.0, ans=0.0
+2024-08-27 01:02:11,998 INFO [train.py:1114] (1/4) Epoch 15, batch 1700, loss[loss=0.1831, simple_loss=0.2453, pruned_loss=0.04425, ctc_loss=0.081, over 19675.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2672, pruned_loss=0.04468, ctc_loss=0.08362, over 3846198.71 frames. ], batch size: 46, lr: 9.94e-03, grad_scale: 16.0
+2024-08-27 01:02:23,189 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.72 vs. limit=22.5
+2024-08-27 01:02:25,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=194976.0, ans=0.0
+2024-08-27 01:02:27,380 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=194976.0, ans=0.2
+2024-08-27 01:02:27,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=194976.0, ans=0.0
+2024-08-27 01:02:36,963 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.414e+02 1.817e+02 2.372e+02 3.799e+02, threshold=3.634e+02, percent-clipped=1.0
+2024-08-27 01:02:39,004 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=195029.33333333334, ans=0.0
+2024-08-27 01:02:43,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=195082.66666666666, ans=0.1
+2024-08-27 01:03:00,189 INFO [train.py:1114] (1/4) Epoch 15, batch 1750, loss[loss=0.1675, simple_loss=0.2364, pruned_loss=0.03547, ctc_loss=0.06936, over 19658.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2666, pruned_loss=0.04451, ctc_loss=0.08329, over 3851342.47 frames. ], batch size: 45, lr: 9.93e-03, grad_scale: 16.0
+2024-08-27 01:03:06,918 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.16 vs. limit=15.0
+2024-08-27 01:03:12,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=195242.66666666666, ans=0.125
+2024-08-27 01:03:16,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=195242.66666666666, ans=0.125
+2024-08-27 01:03:22,901 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.32 vs. limit=22.5
+2024-08-27 01:03:32,350 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=195349.33333333334, ans=0.0
+2024-08-27 01:03:44,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195402.66666666666, ans=0.1
+2024-08-27 01:03:49,209 INFO [train.py:1114] (1/4) Epoch 15, batch 1800, loss[loss=0.1971, simple_loss=0.2737, pruned_loss=0.04399, ctc_loss=0.08131, over 19593.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2665, pruned_loss=0.0446, ctc_loss=0.08337, over 3852161.23 frames. ], batch size: 55, lr: 9.92e-03, grad_scale: 16.0
+2024-08-27 01:03:51,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=195456.0, ans=0.025
+2024-08-27 01:03:53,304 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.75 vs. limit=15.0
+2024-08-27 01:03:55,668 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=195456.0, ans=0.025
+2024-08-27 01:04:05,589 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=195509.33333333334, ans=0.2
+2024-08-27 01:04:34,019 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.99 vs. limit=15.0
+2024-08-27 01:04:34,457 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.516e+02 1.927e+02 2.557e+02 3.874e+02, threshold=3.854e+02, percent-clipped=2.0
+2024-08-27 01:04:36,418 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=195562.66666666666, ans=0.025
+2024-08-27 01:05:42,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=195669.33333333334, ans=0.09899494936611666
+2024-08-27 01:05:54,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=195722.66666666666, ans=0.125
+2024-08-27 01:05:54,931 INFO [train.py:1114] (1/4) Epoch 15, batch 1850, loss[loss=0.1998, simple_loss=0.2837, pruned_loss=0.04142, ctc_loss=0.0827, over 19563.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2665, pruned_loss=0.04459, ctc_loss=0.08333, over 3854557.53 frames. ], batch size: 57, lr: 9.92e-03, grad_scale: 16.0
+2024-08-27 01:06:23,503 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195829.33333333334, ans=0.1
+2024-08-27 01:06:38,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=195882.66666666666, ans=0.125
+2024-08-27 01:06:40,166 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=195936.0, ans=0.1
+2024-08-27 01:06:44,578 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.38 vs. limit=15.0
+2024-08-27 01:06:49,443 INFO [train.py:1114] (1/4) Epoch 15, batch 1900, loss[loss=0.1971, simple_loss=0.2749, pruned_loss=0.04327, ctc_loss=0.0818, over 19630.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2671, pruned_loss=0.04469, ctc_loss=0.08356, over 3859475.40 frames. ], batch size: 59, lr: 9.91e-03, grad_scale: 16.0
+2024-08-27 01:06:57,608 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=196042.66666666666, ans=0.125
+2024-08-27 01:07:14,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=196096.0, ans=0.0
+2024-08-27 01:07:43,096 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.422e+02 1.649e+02 2.231e+02 4.535e+02, threshold=3.297e+02, percent-clipped=1.0
+2024-08-27 01:07:52,694 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=196149.33333333334, ans=0.125
+2024-08-27 01:08:04,606 INFO [train.py:1114] (1/4) Epoch 15, batch 1950, loss[loss=0.1763, simple_loss=0.2519, pruned_loss=0.03652, ctc_loss=0.06949, over 19582.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2677, pruned_loss=0.04458, ctc_loss=0.08333, over 3868676.38 frames. ], batch size: 52, lr: 9.90e-03, grad_scale: 16.0
+2024-08-27 01:08:26,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=196362.66666666666, ans=0.2
+2024-08-27 01:08:42,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=196469.33333333334, ans=0.035
+2024-08-27 01:08:49,900 INFO [train.py:1114] (1/4) Epoch 15, batch 2000, loss[loss=0.1768, simple_loss=0.2417, pruned_loss=0.04098, ctc_loss=0.07476, over 19662.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2687, pruned_loss=0.04514, ctc_loss=0.08444, over 3854088.50 frames. ], batch size: 45, lr: 9.90e-03, grad_scale: 32.0
+2024-08-27 01:08:50,386 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.48 vs. limit=12.0
+2024-08-27 01:09:33,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=196576.0, ans=0.125
+2024-08-27 01:09:34,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=196576.0, ans=0.2
+2024-08-27 01:09:45,504 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.11 vs. limit=15.0
+2024-08-27 01:09:46,684 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.403e+02 1.640e+02 2.044e+02 3.050e+02, threshold=3.279e+02, percent-clipped=0.0
+2024-08-27 01:09:55,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=196682.66666666666, ans=0.0
+2024-08-27 01:10:01,037 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:10:10,633 INFO [train.py:1114] (1/4) Epoch 15, batch 2050, loss[loss=0.1657, simple_loss=0.2326, pruned_loss=0.03669, ctc_loss=0.06346, over 19731.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2674, pruned_loss=0.04484, ctc_loss=0.08386, over 3851358.94 frames. ], batch size: 47, lr: 9.89e-03, grad_scale: 32.0
+2024-08-27 01:10:14,625 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.26 vs. limit=22.5
+2024-08-27 01:10:27,713 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=196842.66666666666, ans=0.125
+2024-08-27 01:10:44,729 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=196949.33333333334, ans=0.1
+2024-08-27 01:10:45,133 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.99 vs. limit=6.0
+2024-08-27 01:10:54,857 INFO [train.py:1114] (1/4) Epoch 15, batch 2100, loss[loss=0.1919, simple_loss=0.2645, pruned_loss=0.04237, ctc_loss=0.08653, over 19772.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2663, pruned_loss=0.0442, ctc_loss=0.08275, over 3859328.21 frames. ], batch size: 54, lr: 9.88e-03, grad_scale: 32.0
+2024-08-27 01:11:11,449 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=197056.0, ans=0.125
+2024-08-27 01:11:20,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=197109.33333333334, ans=0.0
+2024-08-27 01:11:22,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=197162.66666666666, ans=0.125
+2024-08-27 01:11:26,635 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.442e+02 1.703e+02 2.065e+02 4.080e+02, threshold=3.406e+02, percent-clipped=2.0
+2024-08-27 01:11:39,946 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=197269.33333333334, ans=0.125
+2024-08-27 01:11:46,867 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197269.33333333334, ans=0.1
+2024-08-27 01:11:48,561 INFO [train.py:1114] (1/4) Epoch 15, batch 2150, loss[loss=0.1896, simple_loss=0.2676, pruned_loss=0.04115, ctc_loss=0.07331, over 19579.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.266, pruned_loss=0.04414, ctc_loss=0.08234, over 3869107.04 frames. ], batch size: 52, lr: 9.88e-03, grad_scale: 32.0
+2024-08-27 01:11:51,470 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=197322.66666666666, ans=0.5
+2024-08-27 01:11:51,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=197322.66666666666, ans=0.1
+2024-08-27 01:11:58,797 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.32 vs. limit=15.0
+2024-08-27 01:11:59,956 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197376.0, ans=0.1
+2024-08-27 01:12:06,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=197429.33333333334, ans=0.025
+2024-08-27 01:12:16,400 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=197482.66666666666, ans=0.0
+2024-08-27 01:12:31,747 INFO [train.py:1114] (1/4) Epoch 15, batch 2200, loss[loss=0.2125, simple_loss=0.2891, pruned_loss=0.04924, ctc_loss=0.09371, over 19604.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2661, pruned_loss=0.04418, ctc_loss=0.08242, over 3868156.74 frames. ], batch size: 57, lr: 9.87e-03, grad_scale: 16.0
+2024-08-27 01:12:42,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=197642.66666666666, ans=0.09899494936611666
+2024-08-27 01:12:44,812 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=197642.66666666666, ans=0.1
+2024-08-27 01:12:50,774 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=197696.0, ans=0.0
+2024-08-27 01:12:52,455 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:12:54,923 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.477e+02 1.816e+02 2.262e+02 3.833e+02, threshold=3.631e+02, percent-clipped=4.0
+2024-08-27 01:13:01,041 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=197749.33333333334, ans=0.125
+2024-08-27 01:13:01,976 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=197749.33333333334, ans=0.125
+2024-08-27 01:13:02,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=197749.33333333334, ans=0.125
+2024-08-27 01:13:15,797 INFO [train.py:1114] (1/4) Epoch 15, batch 2250, loss[loss=0.2028, simple_loss=0.2852, pruned_loss=0.04299, ctc_loss=0.08587, over 19619.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2662, pruned_loss=0.04407, ctc_loss=0.08234, over 3868504.27 frames. ], batch size: 55, lr: 9.87e-03, grad_scale: 16.0
+2024-08-27 01:13:32,193 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=197962.66666666666, ans=0.1
+2024-08-27 01:13:46,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=198016.0, ans=0.125
+2024-08-27 01:13:47,362 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=198016.0, ans=0.0
+2024-08-27 01:13:58,193 INFO [train.py:1114] (1/4) Epoch 15, batch 2300, loss[loss=0.1863, simple_loss=0.2558, pruned_loss=0.04274, ctc_loss=0.07831, over 19482.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2652, pruned_loss=0.044, ctc_loss=0.0821, over 3861569.81 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 16.0
+2024-08-27 01:14:03,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=198122.66666666666, ans=0.125
+2024-08-27 01:14:05,076 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=198122.66666666666, ans=0.0
+2024-08-27 01:14:58,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=198229.33333333334, ans=0.05
+2024-08-27 01:14:59,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=198229.33333333334, ans=0.0
+2024-08-27 01:15:02,203 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.441e+02 1.617e+02 1.954e+02 3.129e+02, threshold=3.235e+02, percent-clipped=0.0
+2024-08-27 01:15:03,875 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=8.06 vs. limit=15.0
+2024-08-27 01:15:07,117 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:15:07,372 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.33 vs. limit=15.0
+2024-08-27 01:15:13,928 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.39 vs. limit=15.0
+2024-08-27 01:15:16,490 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=198336.0, ans=0.2
+2024-08-27 01:15:23,091 INFO [train.py:1114] (1/4) Epoch 15, batch 2350, loss[loss=0.2061, simple_loss=0.2783, pruned_loss=0.04861, ctc_loss=0.09162, over 19666.00 frames. ], tot_loss[loss=0.1938, simple_loss=0.2657, pruned_loss=0.04443, ctc_loss=0.08275, over 3863692.55 frames. ], batch size: 63, lr: 9.85e-03, grad_scale: 16.0
+2024-08-27 01:15:35,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=198442.66666666666, ans=0.1
+2024-08-27 01:15:40,559 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=198496.0, ans=0.0
+2024-08-27 01:15:41,375 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=198496.0, ans=0.0
+2024-08-27 01:15:44,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=198496.0, ans=0.125
+2024-08-27 01:16:00,728 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=198602.66666666666, ans=0.5
+2024-08-27 01:16:01,609 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=198602.66666666666, ans=0.125
+2024-08-27 01:16:31,894 INFO [train.py:1114] (1/4) Epoch 15, batch 2400, loss[loss=0.2074, simple_loss=0.2824, pruned_loss=0.04795, ctc_loss=0.09097, over 19295.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2677, pruned_loss=0.04523, ctc_loss=0.0843, over 3856804.39 frames. ], batch size: 71, lr: 9.85e-03, grad_scale: 32.0
+2024-08-27 01:17:15,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=198656.0, ans=0.0
+2024-08-27 01:17:16,029 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:17:22,003 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=198709.33333333334, ans=0.125
+2024-08-27 01:17:35,485 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.164e+02 1.452e+02 1.605e+02 2.004e+02 3.213e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-27 01:17:35,631 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=198762.66666666666, ans=0.2
+2024-08-27 01:17:37,140 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.72 vs. limit=22.5
+2024-08-27 01:17:38,664 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=198816.0, ans=0.0
+2024-08-27 01:17:45,418 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.26 vs. limit=6.0
+2024-08-27 01:17:50,935 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=6.39 vs. limit=12.0
+2024-08-27 01:17:57,656 INFO [train.py:1114] (1/4) Epoch 15, batch 2450, loss[loss=0.2848, simple_loss=0.3193, pruned_loss=0.09325, ctc_loss=0.1596, over 12649.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2714, pruned_loss=0.04782, ctc_loss=0.08919, over 3727997.94 frames. ], batch size: 140, lr: 9.84e-03, grad_scale: 32.0
+2024-08-27 01:18:33,819 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=199082.66666666666, ans=0.125
+2024-08-27 01:20:20,975 INFO [train.py:1114] (1/4) Epoch 16, batch 0, loss[loss=0.1877, simple_loss=0.2555, pruned_loss=0.0445, ctc_loss=0.07718, over 19798.00 frames. ], tot_loss[loss=0.1877, simple_loss=0.2555, pruned_loss=0.0445, ctc_loss=0.07718, over 19798.00 frames. ], batch size: 49, lr: 9.52e-03, grad_scale: 32.0
+2024-08-27 01:20:20,976 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-27 01:21:17,371 INFO [train.py:1146] (1/4) Epoch 16, validation: loss=0.1744, simple_loss=0.2673, pruned_loss=0.03034, ctc_loss=0.05204, over 944034.00 frames.
+2024-08-27 01:21:17,372 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-27 01:21:17,569 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=199130.66666666666, ans=0.0
+2024-08-27 01:21:24,987 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=199130.66666666666, ans=0.125
+2024-08-27 01:21:25,828 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=199130.66666666666, ans=0.0
+2024-08-27 01:21:32,290 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=11.70 vs. limit=22.5
+2024-08-27 01:21:40,040 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=199237.33333333334, ans=0.125
+2024-08-27 01:21:54,897 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 1.674e+02 1.811e+02 2.106e+02 3.737e+02, threshold=3.622e+02, percent-clipped=2.0
+2024-08-27 01:22:07,226 INFO [train.py:1114] (1/4) Epoch 16, batch 50, loss[loss=0.1713, simple_loss=0.243, pruned_loss=0.03632, ctc_loss=0.06736, over 19724.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.267, pruned_loss=0.04568, ctc_loss=0.08508, over 845315.06 frames. ], batch size: 47, lr: 9.51e-03, grad_scale: 32.0
+2024-08-27 01:22:08,322 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=199397.33333333334, ans=0.125
+2024-08-27 01:22:11,153 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:22:45,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=199610.66666666666, ans=0.0
+2024-08-27 01:22:53,661 INFO [train.py:1114] (1/4) Epoch 16, batch 100, loss[loss=0.1751, simple_loss=0.2482, pruned_loss=0.0371, ctc_loss=0.06941, over 19711.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2687, pruned_loss=0.04544, ctc_loss=0.08474, over 1500370.32 frames. ], batch size: 51, lr: 9.51e-03, grad_scale: 32.0
+2024-08-27 01:23:11,998 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.41 vs. limit=12.0
+2024-08-27 01:23:15,282 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=199770.66666666666, ans=0.035
+2024-08-27 01:23:17,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=199770.66666666666, ans=0.07
+2024-08-27 01:23:29,475 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.40 vs. limit=22.5
+2024-08-27 01:23:33,430 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.159e+02 1.434e+02 1.536e+02 1.885e+02 3.287e+02, threshold=3.072e+02, percent-clipped=0.0
+2024-08-27 01:23:45,324 INFO [train.py:1114] (1/4) Epoch 16, batch 150, loss[loss=0.161, simple_loss=0.2273, pruned_loss=0.0343, ctc_loss=0.06503, over 19726.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2669, pruned_loss=0.04466, ctc_loss=0.08313, over 2028336.53 frames. ], batch size: 47, lr: 9.50e-03, grad_scale: 32.0
+2024-08-27 01:23:45,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=199930.66666666666, ans=0.0
+2024-08-27 01:23:56,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=199984.0, ans=0.125
+2024-08-27 01:23:57,633 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=199984.0, ans=0.125
+2024-08-27 01:24:01,213 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=199984.0, ans=0.1
+2024-08-27 01:24:06,145 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=200037.33333333334, ans=0.0
+2024-08-27 01:24:11,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=200037.33333333334, ans=0.125
+2024-08-27 01:24:22,433 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.30 vs. limit=22.5
+2024-08-27 01:24:34,903 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=200197.33333333334, ans=10.0
+2024-08-27 01:24:35,675 INFO [train.py:1114] (1/4) Epoch 16, batch 200, loss[loss=0.2173, simple_loss=0.2796, pruned_loss=0.05651, ctc_loss=0.1051, over 17985.00 frames. ], tot_loss[loss=0.1928, simple_loss=0.265, pruned_loss=0.04391, ctc_loss=0.08185, over 2435728.98 frames. ], batch size: 85, lr: 9.49e-03, grad_scale: 32.0
+2024-08-27 01:24:42,594 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=200197.33333333334, ans=0.1
+2024-08-27 01:24:58,390 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.23 vs. limit=15.0
+2024-08-27 01:25:14,233 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.526e+02 1.826e+02 2.235e+02 3.925e+02, threshold=3.652e+02, percent-clipped=6.0
+2024-08-27 01:25:52,471 INFO [train.py:1114] (1/4) Epoch 16, batch 250, loss[loss=0.182, simple_loss=0.2636, pruned_loss=0.03624, ctc_loss=0.06983, over 19429.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2651, pruned_loss=0.04374, ctc_loss=0.08172, over 2754843.08 frames. ], batch size: 67, lr: 9.49e-03, grad_scale: 32.0
+2024-08-27 01:26:00,213 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=200464.0, ans=0.125
+2024-08-27 01:26:07,119 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.62 vs. limit=15.0
+2024-08-27 01:26:11,568 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=200517.33333333334, ans=0.125
+2024-08-27 01:26:11,673 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=200517.33333333334, ans=0.0
+2024-08-27 01:26:23,742 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=200570.66666666666, ans=0.125
+2024-08-27 01:26:33,821 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=200624.0, ans=0.125
+2024-08-27 01:26:34,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=200624.0, ans=0.0
+2024-08-27 01:26:44,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=200677.33333333334, ans=0.0
+2024-08-27 01:26:44,888 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=200677.33333333334, ans=0.05
+2024-08-27 01:26:45,014 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=200677.33333333334, ans=0.125
+2024-08-27 01:26:46,567 INFO [train.py:1114] (1/4) Epoch 16, batch 300, loss[loss=0.2118, simple_loss=0.2766, pruned_loss=0.05364, ctc_loss=0.0992, over 19518.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2644, pruned_loss=0.04337, ctc_loss=0.0811, over 3000764.20 frames. ], batch size: 61, lr: 9.48e-03, grad_scale: 32.0
+2024-08-27 01:26:47,752 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=200730.66666666666, ans=0.2
+2024-08-27 01:27:03,419 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=200784.0, ans=0.125
+2024-08-27 01:27:22,589 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.450e+02 1.677e+02 2.025e+02 3.129e+02, threshold=3.354e+02, percent-clipped=0.0
+2024-08-27 01:27:29,424 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=200944.0, ans=10.0
+2024-08-27 01:27:36,611 INFO [train.py:1114] (1/4) Epoch 16, batch 350, loss[loss=0.1894, simple_loss=0.2546, pruned_loss=0.04471, ctc_loss=0.08673, over 19773.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2653, pruned_loss=0.04368, ctc_loss=0.08155, over 3190855.31 frames. ], batch size: 48, lr: 9.48e-03, grad_scale: 32.0
+2024-08-27 01:27:44,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=200997.33333333334, ans=10.0
+2024-08-27 01:27:51,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=201050.66666666666, ans=0.125
+2024-08-27 01:28:17,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=201210.66666666666, ans=0.2
+2024-08-27 01:28:24,285 INFO [train.py:1114] (1/4) Epoch 16, batch 400, loss[loss=0.1821, simple_loss=0.2586, pruned_loss=0.03893, ctc_loss=0.0694, over 19477.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2651, pruned_loss=0.04356, ctc_loss=0.08137, over 3342904.94 frames. ], batch size: 54, lr: 9.47e-03, grad_scale: 32.0
+2024-08-27 01:28:28,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=201264.0, ans=0.07
+2024-08-27 01:28:30,214 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=201264.0, ans=0.125
+2024-08-27 01:28:31,053 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=201264.0, ans=0.0
+2024-08-27 01:28:55,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=201424.0, ans=0.125
+2024-08-27 01:28:57,024 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=201424.0, ans=0.2
+2024-08-27 01:28:58,541 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.444e+02 1.663e+02 2.108e+02 3.293e+02, threshold=3.326e+02, percent-clipped=0.0
+2024-08-27 01:29:00,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=201477.33333333334, ans=0.125
+2024-08-27 01:29:10,026 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=201530.66666666666, ans=0.125
+2024-08-27 01:29:10,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=201530.66666666666, ans=0.0
+2024-08-27 01:29:10,813 INFO [train.py:1114] (1/4) Epoch 16, batch 450, loss[loss=0.1965, simple_loss=0.2716, pruned_loss=0.04466, ctc_loss=0.07996, over 19611.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2657, pruned_loss=0.04395, ctc_loss=0.08207, over 3450336.79 frames. ], batch size: 55, lr: 9.46e-03, grad_scale: 32.0
+2024-08-27 01:29:13,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=201530.66666666666, ans=0.1
+2024-08-27 01:29:40,722 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.07 vs. limit=15.0
+2024-08-27 01:29:48,770 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=201690.66666666666, ans=0.2
+2024-08-27 01:29:59,019 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=201744.0, ans=0.0
+2024-08-27 01:30:01,622 INFO [train.py:1114] (1/4) Epoch 16, batch 500, loss[loss=0.2138, simple_loss=0.2891, pruned_loss=0.05196, ctc_loss=0.08651, over 19649.00 frames. ], tot_loss[loss=0.1923, simple_loss=0.2648, pruned_loss=0.04358, ctc_loss=0.0815, over 3545536.02 frames. ], batch size: 63, lr: 9.46e-03, grad_scale: 32.0
+2024-08-27 01:30:33,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=201957.33333333334, ans=0.125
+2024-08-27 01:30:39,494 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.484e+02 1.746e+02 2.096e+02 4.072e+02, threshold=3.492e+02, percent-clipped=1.0
+2024-08-27 01:30:40,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=201957.33333333334, ans=0.125
+2024-08-27 01:30:51,384 INFO [train.py:1114] (1/4) Epoch 16, batch 550, loss[loss=0.2214, simple_loss=0.2956, pruned_loss=0.05388, ctc_loss=0.0989, over 19295.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2651, pruned_loss=0.0437, ctc_loss=0.08173, over 3607433.72 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 32.0
+2024-08-27 01:30:53,671 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=202064.0, ans=0.0
+2024-08-27 01:31:18,365 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=202224.0, ans=0.1
+2024-08-27 01:31:27,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=202277.33333333334, ans=0.125
+2024-08-27 01:31:37,738 INFO [train.py:1114] (1/4) Epoch 16, batch 600, loss[loss=0.2041, simple_loss=0.2821, pruned_loss=0.04643, ctc_loss=0.08311, over 19341.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2657, pruned_loss=0.04391, ctc_loss=0.08203, over 3665042.59 frames. ], batch size: 67, lr: 9.45e-03, grad_scale: 32.0
+2024-08-27 01:31:45,224 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:31:46,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=202384.0, ans=0.2
+2024-08-27 01:31:55,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=202437.33333333334, ans=0.125
+2024-08-27 01:31:58,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.24 vs. limit=15.0
+2024-08-27 01:32:14,251 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.474e+02 1.879e+02 2.462e+02 5.922e+02, threshold=3.759e+02, percent-clipped=13.0
+2024-08-27 01:32:23,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=202544.0, ans=0.2
+2024-08-27 01:32:25,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-27 01:32:26,188 INFO [train.py:1114] (1/4) Epoch 16, batch 650, loss[loss=0.1778, simple_loss=0.2559, pruned_loss=0.03581, ctc_loss=0.07024, over 19775.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2651, pruned_loss=0.04359, ctc_loss=0.08148, over 3716361.67 frames. ], batch size: 54, lr: 9.44e-03, grad_scale: 32.0
+2024-08-27 01:32:30,407 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.90 vs. limit=15.0
+2024-08-27 01:32:31,927 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-27 01:32:35,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=202597.33333333334, ans=0.125
+2024-08-27 01:32:37,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=202650.66666666666, ans=0.0
+2024-08-27 01:32:41,359 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=202650.66666666666, ans=0.125
+2024-08-27 01:32:55,207 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=202757.33333333334, ans=0.0
+2024-08-27 01:32:57,866 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=202757.33333333334, ans=0.2
+2024-08-27 01:33:04,442 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.01 vs. limit=15.0
+2024-08-27 01:33:05,501 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.09 vs. limit=15.0
+2024-08-27 01:33:18,144 INFO [train.py:1114] (1/4) Epoch 16, batch 700, loss[loss=0.1861, simple_loss=0.2645, pruned_loss=0.03945, ctc_loss=0.07205, over 19726.00 frames. ], tot_loss[loss=0.1928, simple_loss=0.2655, pruned_loss=0.04377, ctc_loss=0.08163, over 3748555.01 frames. ], batch size: 51, lr: 9.43e-03, grad_scale: 32.0
+2024-08-27 01:33:26,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.96 vs. limit=22.5
+2024-08-27 01:33:38,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=202970.66666666666, ans=0.1
+2024-08-27 01:33:40,055 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.53 vs. limit=22.5
+2024-08-27 01:33:52,574 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.223e+02 1.460e+02 1.707e+02 2.152e+02 4.812e+02, threshold=3.413e+02, percent-clipped=3.0
+2024-08-27 01:34:04,697 INFO [train.py:1114] (1/4) Epoch 16, batch 750, loss[loss=0.178, simple_loss=0.2597, pruned_loss=0.03552, ctc_loss=0.06282, over 19527.00 frames. ], tot_loss[loss=0.1919, simple_loss=0.2646, pruned_loss=0.04337, ctc_loss=0.08095, over 3773192.21 frames. ], batch size: 54, lr: 9.43e-03, grad_scale: 32.0
+2024-08-27 01:34:11,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=203130.66666666666, ans=0.0
+2024-08-27 01:34:18,044 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=203184.0, ans=0.0
+2024-08-27 01:34:31,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=203237.33333333334, ans=0.125
+2024-08-27 01:34:31,989 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=203290.66666666666, ans=0.125
+2024-08-27 01:34:43,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=203344.0, ans=0.125
+2024-08-27 01:34:43,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.29 vs. limit=15.0
+2024-08-27 01:34:57,323 INFO [train.py:1114] (1/4) Epoch 16, batch 800, loss[loss=0.1687, simple_loss=0.2405, pruned_loss=0.03562, ctc_loss=0.06433, over 19808.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2646, pruned_loss=0.04337, ctc_loss=0.08091, over 3794738.18 frames. ], batch size: 49, lr: 9.42e-03, grad_scale: 32.0
+2024-08-27 01:35:29,038 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.03 vs. limit=22.5
+2024-08-27 01:35:32,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=203504.0, ans=0.125
+2024-08-27 01:35:48,034 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=203557.33333333334, ans=0.125
+2024-08-27 01:35:49,633 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.508e+02 1.846e+02 2.334e+02 3.502e+02, threshold=3.692e+02, percent-clipped=1.0
+2024-08-27 01:35:56,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=203610.66666666666, ans=0.025
+2024-08-27 01:36:01,625 INFO [train.py:1114] (1/4) Epoch 16, batch 850, loss[loss=0.2128, simple_loss=0.2824, pruned_loss=0.05209, ctc_loss=0.09754, over 19655.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2644, pruned_loss=0.04341, ctc_loss=0.08111, over 3814579.97 frames. ], batch size: 59, lr: 9.42e-03, grad_scale: 32.0
+2024-08-27 01:36:14,172 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.38 vs. limit=22.5
+2024-08-27 01:36:16,933 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.47 vs. limit=12.0
+2024-08-27 01:36:23,100 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=203770.66666666666, ans=0.125
+2024-08-27 01:36:38,100 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.67 vs. limit=15.0
+2024-08-27 01:36:41,794 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=203877.33333333334, ans=0.0
+2024-08-27 01:36:51,749 INFO [train.py:1114] (1/4) Epoch 16, batch 900, loss[loss=0.1673, simple_loss=0.2392, pruned_loss=0.03485, ctc_loss=0.06402, over 19423.00 frames. ], tot_loss[loss=0.1923, simple_loss=0.2647, pruned_loss=0.04366, ctc_loss=0.0816, over 3817507.33 frames. ], batch size: 48, lr: 9.41e-03, grad_scale: 32.0
+2024-08-27 01:36:55,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=203930.66666666666, ans=0.1
+2024-08-27 01:36:56,950 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.22 vs. limit=12.0
+2024-08-27 01:37:02,184 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:37:03,924 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=203984.0, ans=0.125
+2024-08-27 01:37:09,042 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.71 vs. limit=12.0
+2024-08-27 01:37:26,147 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.118e+02 1.398e+02 1.563e+02 1.898e+02 3.698e+02, threshold=3.126e+02, percent-clipped=1.0
+2024-08-27 01:37:31,298 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.29 vs. limit=15.0
+2024-08-27 01:37:32,773 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=204144.0, ans=0.125
+2024-08-27 01:37:38,123 INFO [train.py:1114] (1/4) Epoch 16, batch 950, loss[loss=0.1797, simple_loss=0.253, pruned_loss=0.03834, ctc_loss=0.07424, over 19489.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.265, pruned_loss=0.04374, ctc_loss=0.08184, over 3820500.71 frames. ], batch size: 49, lr: 9.40e-03, grad_scale: 32.0
+2024-08-27 01:37:46,155 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.81 vs. limit=15.0
+2024-08-27 01:37:54,914 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=204250.66666666666, ans=0.2
+2024-08-27 01:38:16,520 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=204357.33333333334, ans=0.125
+2024-08-27 01:38:27,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=204410.66666666666, ans=0.1
+2024-08-27 01:38:29,230 INFO [train.py:1114] (1/4) Epoch 16, batch 1000, loss[loss=0.1765, simple_loss=0.2565, pruned_loss=0.03432, ctc_loss=0.06953, over 19869.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.266, pruned_loss=0.04403, ctc_loss=0.08238, over 3817813.39 frames. ], batch size: 52, lr: 9.40e-03, grad_scale: 32.0
+2024-08-27 01:38:46,383 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.91 vs. limit=15.0
+2024-08-27 01:38:53,281 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.whiten.whitening_limit, batch_count=204570.66666666666, ans=15.0
+2024-08-27 01:39:07,622 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.409e+02 1.616e+02 2.034e+02 3.159e+02, threshold=3.231e+02, percent-clipped=1.0
+2024-08-27 01:39:19,857 INFO [train.py:1114] (1/4) Epoch 16, batch 1050, loss[loss=0.1852, simple_loss=0.2667, pruned_loss=0.03749, ctc_loss=0.07148, over 19829.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2651, pruned_loss=0.04373, ctc_loss=0.08166, over 3823563.85 frames. ], batch size: 57, lr: 9.39e-03, grad_scale: 32.0
+2024-08-27 01:39:23,133 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.36 vs. limit=15.0
+2024-08-27 01:39:26,596 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=204730.66666666666, ans=0.025
+2024-08-27 01:39:39,062 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=204837.33333333334, ans=0.2
+2024-08-27 01:39:47,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=204890.66666666666, ans=0.0
+2024-08-27 01:39:47,205 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=204890.66666666666, ans=0.125
+2024-08-27 01:39:49,109 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=204890.66666666666, ans=0.0
+2024-08-27 01:39:56,489 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=204944.0, ans=0.125
+2024-08-27 01:39:59,727 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=204944.0, ans=0.2
+2024-08-27 01:40:03,540 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=204944.0, ans=0.0
+2024-08-27 01:40:07,055 INFO [train.py:1114] (1/4) Epoch 16, batch 1100, loss[loss=0.1806, simple_loss=0.2556, pruned_loss=0.03838, ctc_loss=0.07207, over 19592.00 frames. ], tot_loss[loss=0.1919, simple_loss=0.2647, pruned_loss=0.04334, ctc_loss=0.08119, over 3829492.88 frames. ], batch size: 52, lr: 9.39e-03, grad_scale: 32.0
+2024-08-27 01:40:09,301 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.71 vs. limit=15.0
+2024-08-27 01:40:21,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=205050.66666666666, ans=0.1
+2024-08-27 01:40:29,434 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=205104.0, ans=0.0
+2024-08-27 01:40:43,784 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:40:44,420 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.474e+02 1.664e+02 2.002e+02 3.685e+02, threshold=3.328e+02, percent-clipped=2.0
+2024-08-27 01:40:51,463 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.60 vs. limit=6.0
+2024-08-27 01:40:59,572 INFO [train.py:1114] (1/4) Epoch 16, batch 1150, loss[loss=0.1838, simple_loss=0.2555, pruned_loss=0.03989, ctc_loss=0.08074, over 19567.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2645, pruned_loss=0.04328, ctc_loss=0.08112, over 3828230.62 frames. ], batch size: 52, lr: 9.38e-03, grad_scale: 32.0
+2024-08-27 01:42:50,465 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.21 vs. limit=15.0
+2024-08-27 01:42:51,257 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=205264.0, ans=0.125
+2024-08-27 01:42:52,536 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.75 vs. limit=15.0
+2024-08-27 01:43:07,255 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=205317.33333333334, ans=0.1
+2024-08-27 01:43:25,009 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=205424.0, ans=0.0
+2024-08-27 01:43:33,491 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=205477.33333333334, ans=0.125
+2024-08-27 01:43:36,415 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=205477.33333333334, ans=0.025
+2024-08-27 01:43:37,545 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.04 vs. limit=15.0
+2024-08-27 01:43:37,694 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.37 vs. limit=22.5
+2024-08-27 01:43:40,100 INFO [train.py:1114] (1/4) Epoch 16, batch 1200, loss[loss=0.1948, simple_loss=0.2749, pruned_loss=0.04128, ctc_loss=0.08043, over 19850.00 frames. ], tot_loss[loss=0.193, simple_loss=0.266, pruned_loss=0.04366, ctc_loss=0.08173, over 3824031.93 frames. ], batch size: 57, lr: 9.38e-03, grad_scale: 32.0
+2024-08-27 01:43:47,630 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=205530.66666666666, ans=0.0
+2024-08-27 01:43:49,539 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer_ff2.min_abs, batch_count=205530.66666666666, ans=0.1
+2024-08-27 01:43:51,690 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.93 vs. limit=15.0
+2024-08-27 01:43:55,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=205584.0, ans=0.0
+2024-08-27 01:43:59,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=205637.33333333334, ans=0.0
+2024-08-27 01:44:05,431 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=205637.33333333334, ans=0.125
+2024-08-27 01:44:09,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=205690.66666666666, ans=0.125
+2024-08-27 01:44:09,940 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=205690.66666666666, ans=0.0
+2024-08-27 01:44:16,054 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.206e+02 1.520e+02 1.803e+02 2.158e+02 3.897e+02, threshold=3.606e+02, percent-clipped=2.0
+2024-08-27 01:44:21,910 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=205744.0, ans=0.125
+2024-08-27 01:44:22,884 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=205744.0, ans=0.125
+2024-08-27 01:44:28,170 INFO [train.py:1114] (1/4) Epoch 16, batch 1250, loss[loss=0.2107, simple_loss=0.2887, pruned_loss=0.04757, ctc_loss=0.09367, over 19502.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2659, pruned_loss=0.04347, ctc_loss=0.08145, over 3842506.11 frames. ], batch size: 61, lr: 9.37e-03, grad_scale: 32.0
+2024-08-27 01:44:29,414 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=205797.33333333334, ans=0.0
+2024-08-27 01:44:45,398 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=205850.66666666666, ans=0.1
+2024-08-27 01:44:50,036 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=205904.0, ans=0.125
+2024-08-27 01:45:17,709 INFO [train.py:1114] (1/4) Epoch 16, batch 1300, loss[loss=0.2049, simple_loss=0.2796, pruned_loss=0.04817, ctc_loss=0.08484, over 18933.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2654, pruned_loss=0.04346, ctc_loss=0.08112, over 3846574.33 frames. ], batch size: 76, lr: 9.36e-03, grad_scale: 32.0
+2024-08-27 01:45:19,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=206064.0, ans=0.2
+2024-08-27 01:45:26,183 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=206117.33333333334, ans=0.125
+2024-08-27 01:45:36,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=206170.66666666666, ans=0.125
+2024-08-27 01:45:38,744 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=206170.66666666666, ans=0.125
+2024-08-27 01:45:40,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=206170.66666666666, ans=0.125
+2024-08-27 01:45:42,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=206170.66666666666, ans=0.1
+2024-08-27 01:45:52,826 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.516e+02 1.773e+02 2.282e+02 3.618e+02, threshold=3.546e+02, percent-clipped=1.0
+2024-08-27 01:45:56,213 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.75 vs. limit=15.0
+2024-08-27 01:45:58,963 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.39 vs. limit=12.0
+2024-08-27 01:46:03,388 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=206277.33333333334, ans=0.125
+2024-08-27 01:46:06,815 INFO [train.py:1114] (1/4) Epoch 16, batch 1350, loss[loss=0.1797, simple_loss=0.2591, pruned_loss=0.03645, ctc_loss=0.06831, over 19737.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2649, pruned_loss=0.04312, ctc_loss=0.08032, over 3857255.26 frames. ], batch size: 54, lr: 9.36e-03, grad_scale: 32.0
+2024-08-27 01:46:10,682 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=206330.66666666666, ans=0.0
+2024-08-27 01:46:19,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=206384.0, ans=0.125
+2024-08-27 01:46:22,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=206384.0, ans=0.0
+2024-08-27 01:46:25,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=206384.0, ans=0.0
+2024-08-27 01:46:27,443 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=206437.33333333334, ans=0.125
+2024-08-27 01:46:31,108 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:46:31,954 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=206437.33333333334, ans=0.125
+2024-08-27 01:46:56,732 INFO [train.py:1114] (1/4) Epoch 16, batch 1400, loss[loss=0.1827, simple_loss=0.2494, pruned_loss=0.04183, ctc_loss=0.08076, over 19665.00 frames. ], tot_loss[loss=0.1916, simple_loss=0.2648, pruned_loss=0.04317, ctc_loss=0.08031, over 3863890.49 frames. ], batch size: 46, lr: 9.35e-03, grad_scale: 32.0
+2024-08-27 01:46:57,266 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.17 vs. limit=12.0
+2024-08-27 01:47:02,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=206597.33333333334, ans=0.125
+2024-08-27 01:47:15,494 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=206704.0, ans=0.125
+2024-08-27 01:47:19,176 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=1.94 vs. limit=15.0
+2024-08-27 01:48:25,293 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.410e+02 1.569e+02 1.892e+02 4.037e+02, threshold=3.138e+02, percent-clipped=1.0
+2024-08-27 01:48:26,509 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=206757.33333333334, ans=0.07
+2024-08-27 01:48:36,161 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.31 vs. limit=15.0
+2024-08-27 01:48:37,438 INFO [train.py:1114] (1/4) Epoch 16, batch 1450, loss[loss=0.2029, simple_loss=0.2722, pruned_loss=0.04841, ctc_loss=0.09191, over 19670.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2653, pruned_loss=0.0434, ctc_loss=0.08088, over 3862124.09 frames. ], batch size: 63, lr: 9.35e-03, grad_scale: 32.0
+2024-08-27 01:48:39,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=206864.0, ans=15.0
+2024-08-27 01:48:44,825 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:48:51,338 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=206917.33333333334, ans=0.125
+2024-08-27 01:49:00,392 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=206970.66666666666, ans=0.125
+2024-08-27 01:49:05,816 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=206970.66666666666, ans=0.125
+2024-08-27 01:49:07,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=207024.0, ans=0.025
+2024-08-27 01:49:21,210 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=207077.33333333334, ans=0.05
+2024-08-27 01:49:25,843 INFO [train.py:1114] (1/4) Epoch 16, batch 1500, loss[loss=0.2057, simple_loss=0.2833, pruned_loss=0.04722, ctc_loss=0.08391, over 19580.00 frames. ], tot_loss[loss=0.1922, simple_loss=0.2655, pruned_loss=0.04328, ctc_loss=0.08071, over 3862803.67 frames. ], batch size: 57, lr: 9.34e-03, grad_scale: 32.0
+2024-08-27 01:49:34,519 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=207130.66666666666, ans=0.0
+2024-08-27 01:49:42,817 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=207184.0, ans=0.125
+2024-08-27 01:49:47,323 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:50:02,522 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.65 vs. limit=6.0
+2024-08-27 01:50:03,768 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.422e+02 1.666e+02 2.042e+02 4.208e+02, threshold=3.332e+02, percent-clipped=3.0
+2024-08-27 01:50:19,544 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=207344.0, ans=0.0
+2024-08-27 01:50:22,137 INFO [train.py:1114] (1/4) Epoch 16, batch 1550, loss[loss=0.2043, simple_loss=0.2785, pruned_loss=0.04758, ctc_loss=0.08708, over 19604.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2661, pruned_loss=0.0436, ctc_loss=0.08156, over 3847412.94 frames. ], batch size: 60, lr: 9.33e-03, grad_scale: 32.0
+2024-08-27 01:50:29,967 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.43 vs. limit=15.0
+2024-08-27 01:51:10,035 INFO [train.py:1114] (1/4) Epoch 16, batch 1600, loss[loss=0.1967, simple_loss=0.2766, pruned_loss=0.04252, ctc_loss=0.07921, over 19845.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2658, pruned_loss=0.04367, ctc_loss=0.08175, over 3835699.28 frames. ], batch size: 57, lr: 9.33e-03, grad_scale: 32.0
+2024-08-27 01:51:29,334 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=9.94 vs. limit=22.5
+2024-08-27 01:51:55,658 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.401e+02 1.606e+02 1.975e+02 3.175e+02, threshold=3.213e+02, percent-clipped=0.0
+2024-08-27 01:51:59,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=207877.33333333334, ans=0.2
+2024-08-27 01:52:00,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=207877.33333333334, ans=0.125
+2024-08-27 01:52:06,859 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=207930.66666666666, ans=0.125
+2024-08-27 01:52:14,354 INFO [train.py:1114] (1/4) Epoch 16, batch 1650, loss[loss=0.2042, simple_loss=0.2844, pruned_loss=0.04518, ctc_loss=0.08399, over 19618.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2655, pruned_loss=0.04366, ctc_loss=0.08171, over 3831939.66 frames. ], batch size: 59, lr: 9.32e-03, grad_scale: 32.0
+2024-08-27 01:52:15,779 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.19 vs. limit=15.0
+2024-08-27 01:52:41,321 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=208037.33333333334, ans=0.1
+2024-08-27 01:52:56,765 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=8.03 vs. limit=15.0
+2024-08-27 01:52:57,453 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:53:04,339 INFO [train.py:1114] (1/4) Epoch 16, batch 1700, loss[loss=0.1736, simple_loss=0.2436, pruned_loss=0.03693, ctc_loss=0.07451, over 19656.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2649, pruned_loss=0.04317, ctc_loss=0.08076, over 3846742.17 frames. ], batch size: 46, lr: 9.32e-03, grad_scale: 64.0
+2024-08-27 01:53:04,487 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=208197.33333333334, ans=0.125
+2024-08-27 01:53:17,477 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=208250.66666666666, ans=0.0
+2024-08-27 01:53:19,158 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=208250.66666666666, ans=0.025
+2024-08-27 01:53:21,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_positive, batch_count=208304.0, ans=0.05
+2024-08-27 01:53:26,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.17 vs. limit=22.5
+2024-08-27 01:53:27,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=208304.0, ans=0.025
+2024-08-27 01:53:33,996 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=208357.33333333334, ans=0.125
+2024-08-27 01:53:39,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=208357.33333333334, ans=0.125
+2024-08-27 01:53:42,381 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.468e+02 1.742e+02 2.214e+02 3.607e+02, threshold=3.484e+02, percent-clipped=2.0
+2024-08-27 01:53:46,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=208410.66666666666, ans=0.125
+2024-08-27 01:53:53,048 INFO [train.py:1114] (1/4) Epoch 16, batch 1750, loss[loss=0.1697, simple_loss=0.2343, pruned_loss=0.038, ctc_loss=0.07301, over 19637.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.264, pruned_loss=0.04282, ctc_loss=0.08028, over 3850980.22 frames. ], batch size: 45, lr: 9.31e-03, grad_scale: 32.0
+2024-08-27 01:54:09,237 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=208517.33333333334, ans=0.2
+2024-08-27 01:54:19,688 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=208624.0, ans=0.015
+2024-08-27 01:54:32,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=208677.33333333334, ans=0.1
+2024-08-27 01:54:34,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=208677.33333333334, ans=0.09899494936611666
+2024-08-27 01:54:37,048 INFO [train.py:1114] (1/4) Epoch 16, batch 1800, loss[loss=0.1869, simple_loss=0.2677, pruned_loss=0.03824, ctc_loss=0.07439, over 19619.00 frames. ], tot_loss[loss=0.1912, simple_loss=0.2642, pruned_loss=0.04296, ctc_loss=0.08044, over 3853296.31 frames. ], batch size: 55, lr: 9.31e-03, grad_scale: 32.0
+2024-08-27 01:54:38,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=208730.66666666666, ans=0.2
+2024-08-27 01:54:45,114 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=208784.0, ans=0.025
+2024-08-27 01:54:46,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=208784.0, ans=0.0
+2024-08-27 01:54:48,822 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=208784.0, ans=0.2
+2024-08-27 01:54:52,918 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=208784.0, ans=0.0
+2024-08-27 01:55:10,175 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.155e+02 1.563e+02 1.995e+02 2.578e+02 4.186e+02, threshold=3.991e+02, percent-clipped=7.0
+2024-08-27 01:55:16,624 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=208944.0, ans=0.1
+2024-08-27 01:55:17,725 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.42 vs. limit=22.5
+2024-08-27 01:55:20,663 INFO [train.py:1114] (1/4) Epoch 16, batch 1850, loss[loss=0.211, simple_loss=0.285, pruned_loss=0.04946, ctc_loss=0.09487, over 19586.00 frames. ], tot_loss[loss=0.191, simple_loss=0.264, pruned_loss=0.04289, ctc_loss=0.08025, over 3856883.61 frames. ], batch size: 57, lr: 9.30e-03, grad_scale: 32.0
+2024-08-27 01:55:21,722 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=208997.33333333334, ans=0.0
+2024-08-27 01:55:30,500 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 01:55:43,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=209104.0, ans=0.125
+2024-08-27 01:55:48,314 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=209157.33333333334, ans=0.025
+2024-08-27 01:55:55,711 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.43 vs. limit=15.0
+2024-08-27 01:56:04,472 INFO [train.py:1114] (1/4) Epoch 16, batch 1900, loss[loss=0.1941, simple_loss=0.2742, pruned_loss=0.04174, ctc_loss=0.07616, over 19647.00 frames. ], tot_loss[loss=0.1917, simple_loss=0.2648, pruned_loss=0.0432, ctc_loss=0.08059, over 3860956.87 frames. ], batch size: 59, lr: 9.29e-03, grad_scale: 32.0
+2024-08-27 01:56:07,288 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=209264.0, ans=0.0
+2024-08-27 01:56:17,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=209317.33333333334, ans=0.125
+2024-08-27 01:56:37,687 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.165e+02 1.418e+02 1.626e+02 2.079e+02 4.675e+02, threshold=3.252e+02, percent-clipped=2.0
+2024-08-27 01:56:42,238 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=209477.33333333334, ans=0.07
+2024-08-27 01:56:48,329 INFO [train.py:1114] (1/4) Epoch 16, batch 1950, loss[loss=0.1777, simple_loss=0.2607, pruned_loss=0.0343, ctc_loss=0.06538, over 19605.00 frames. ], tot_loss[loss=0.1924, simple_loss=0.2657, pruned_loss=0.04336, ctc_loss=0.08082, over 3870048.19 frames. ], batch size: 52, lr: 9.29e-03, grad_scale: 32.0
+2024-08-27 01:56:54,190 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.60 vs. limit=10.0
+2024-08-27 01:57:08,872 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209637.33333333334, ans=0.1
+2024-08-27 01:57:13,248 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=209637.33333333334, ans=0.2
+2024-08-27 01:57:22,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=209690.66666666666, ans=0.1
+2024-08-27 01:57:35,865 INFO [train.py:1114] (1/4) Epoch 16, batch 2000, loss[loss=0.1683, simple_loss=0.2366, pruned_loss=0.03622, ctc_loss=0.0688, over 19660.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.2666, pruned_loss=0.04384, ctc_loss=0.08169, over 3855245.04 frames. ], batch size: 45, lr: 9.28e-03, grad_scale: 32.0
+2024-08-27 01:57:36,435 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.whiten.whitening_limit, batch_count=209797.33333333334, ans=15.0
+2024-08-27 01:58:00,108 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=209904.0, ans=0.0
+2024-08-27 01:58:09,440 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.401e+02 1.655e+02 2.254e+02 4.011e+02, threshold=3.310e+02, percent-clipped=6.0
+2024-08-27 01:58:13,425 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.83 vs. limit=22.5
+2024-08-27 01:58:20,009 INFO [train.py:1114] (1/4) Epoch 16, batch 2050, loss[loss=0.1625, simple_loss=0.2339, pruned_loss=0.03279, ctc_loss=0.06347, over 19737.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2655, pruned_loss=0.04367, ctc_loss=0.08133, over 3852853.59 frames. ], batch size: 47, lr: 9.28e-03, grad_scale: 32.0
+2024-08-27 01:58:41,806 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=210170.66666666666, ans=0.0
+2024-08-27 01:58:41,911 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=210170.66666666666, ans=0.0
+2024-08-27 01:58:57,216 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=210277.33333333334, ans=0.125
+2024-08-27 01:59:02,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=210330.66666666666, ans=0.0
+2024-08-27 01:59:03,129 INFO [train.py:1114] (1/4) Epoch 16, batch 2100, loss[loss=0.1817, simple_loss=0.2616, pruned_loss=0.03686, ctc_loss=0.0702, over 19766.00 frames. ], tot_loss[loss=0.1913, simple_loss=0.2646, pruned_loss=0.04298, ctc_loss=0.08018, over 3859400.07 frames. ], batch size: 54, lr: 9.27e-03, grad_scale: 32.0
+2024-08-27 01:59:08,396 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=210330.66666666666, ans=0.125
+2024-08-27 01:59:08,412 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=210330.66666666666, ans=0.125
+2024-08-27 01:59:11,047 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=210384.0, ans=0.125
+2024-08-27 01:59:11,920 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=210384.0, ans=0.125
+2024-08-27 01:59:16,367 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=210384.0, ans=22.5
+2024-08-27 01:59:20,420 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=210437.33333333334, ans=0.05
+2024-08-27 01:59:35,722 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.547e+02 1.892e+02 2.472e+02 4.594e+02, threshold=3.784e+02, percent-clipped=3.0
+2024-08-27 01:59:47,029 INFO [train.py:1114] (1/4) Epoch 16, batch 2150, loss[loss=0.1736, simple_loss=0.251, pruned_loss=0.03525, ctc_loss=0.06423, over 19576.00 frames. ], tot_loss[loss=0.1907, simple_loss=0.264, pruned_loss=0.04273, ctc_loss=0.07982, over 3869308.08 frames. ], batch size: 52, lr: 9.27e-03, grad_scale: 32.0
+2024-08-27 01:59:56,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=210650.66666666666, ans=0.05
+2024-08-27 01:59:57,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=210650.66666666666, ans=0.1
+2024-08-27 02:00:09,762 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=210704.0, ans=0.0
+2024-08-27 02:00:30,378 INFO [train.py:1114] (1/4) Epoch 16, batch 2200, loss[loss=0.1891, simple_loss=0.2618, pruned_loss=0.04175, ctc_loss=0.08196, over 19591.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2636, pruned_loss=0.04239, ctc_loss=0.07913, over 3867258.39 frames. ], batch size: 57, lr: 9.26e-03, grad_scale: 32.0
+2024-08-27 02:00:40,525 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.19 vs. limit=15.0
+2024-08-27 02:00:59,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=211024.0, ans=0.125
+2024-08-27 02:01:04,658 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=211024.0, ans=0.125
+2024-08-27 02:01:04,839 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=211024.0, ans=0.125
+2024-08-27 02:01:06,364 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.493e+02 1.671e+02 2.113e+02 4.070e+02, threshold=3.342e+02, percent-clipped=1.0
+2024-08-27 02:01:17,555 INFO [train.py:1114] (1/4) Epoch 16, batch 2250, loss[loss=0.1918, simple_loss=0.272, pruned_loss=0.04161, ctc_loss=0.07096, over 19622.00 frames. ], tot_loss[loss=0.1903, simple_loss=0.2637, pruned_loss=0.04255, ctc_loss=0.07936, over 3866351.87 frames. ], batch size: 55, lr: 9.25e-03, grad_scale: 32.0
+2024-08-27 02:01:36,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=211237.33333333334, ans=0.125
+2024-08-27 02:01:38,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=211237.33333333334, ans=0.07
+2024-08-27 02:01:41,771 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=211237.33333333334, ans=0.0
+2024-08-27 02:01:41,839 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=7.73 vs. limit=12.0
+2024-08-27 02:01:46,888 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=211290.66666666666, ans=0.2
+2024-08-27 02:01:48,076 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.79 vs. limit=22.5
+2024-08-27 02:02:00,445 INFO [train.py:1114] (1/4) Epoch 16, batch 2300, loss[loss=0.1781, simple_loss=0.2507, pruned_loss=0.03824, ctc_loss=0.07247, over 19515.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2628, pruned_loss=0.04249, ctc_loss=0.07931, over 3859699.18 frames. ], batch size: 49, lr: 9.25e-03, grad_scale: 32.0
+2024-08-27 02:02:18,740 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211504.0, ans=0.1
+2024-08-27 02:02:27,340 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=211557.33333333334, ans=0.125
+2024-08-27 02:02:33,264 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.186e+02 1.480e+02 1.722e+02 2.096e+02 3.640e+02, threshold=3.444e+02, percent-clipped=3.0
+2024-08-27 02:02:36,389 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.40 vs. limit=6.0
+2024-08-27 02:02:44,151 INFO [train.py:1114] (1/4) Epoch 16, batch 2350, loss[loss=0.2117, simple_loss=0.2832, pruned_loss=0.05129, ctc_loss=0.09434, over 19677.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2631, pruned_loss=0.04292, ctc_loss=0.08003, over 3862859.07 frames. ], batch size: 63, lr: 9.24e-03, grad_scale: 32.0
+2024-08-27 02:02:59,600 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=211717.33333333334, ans=10.0
+2024-08-27 02:03:00,902 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.67 vs. limit=15.0
+2024-08-27 02:03:12,256 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.23 vs. limit=15.0
+2024-08-27 02:03:12,999 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.39 vs. limit=15.0
+2024-08-27 02:03:28,716 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=211877.33333333334, ans=0.0
+2024-08-27 02:03:29,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=211877.33333333334, ans=0.07
+2024-08-27 02:03:32,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=211877.33333333334, ans=0.1
+2024-08-27 02:03:34,532 INFO [train.py:1114] (1/4) Epoch 16, batch 2400, loss[loss=0.21, simple_loss=0.2809, pruned_loss=0.05103, ctc_loss=0.09248, over 19323.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2654, pruned_loss=0.04387, ctc_loss=0.08184, over 3858282.58 frames. ], batch size: 71, lr: 9.24e-03, grad_scale: 32.0
+2024-08-27 02:03:36,884 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.36 vs. limit=22.5
+2024-08-27 02:03:41,795 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.98 vs. limit=15.0
+2024-08-27 02:03:43,336 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=211984.0, ans=0.1
+2024-08-27 02:03:50,372 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=211984.0, ans=0.125
+2024-08-27 02:03:54,669 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=212037.33333333334, ans=0.125
+2024-08-27 02:04:01,437 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=212090.66666666666, ans=0.0
+2024-08-27 02:04:07,965 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.276e+02 1.442e+02 1.653e+02 2.239e+02 3.362e+02, threshold=3.307e+02, percent-clipped=0.0
+2024-08-27 02:04:11,898 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=212144.0, ans=0.1
+2024-08-27 02:04:18,803 INFO [train.py:1114] (1/4) Epoch 16, batch 2450, loss[loss=0.2534, simple_loss=0.2987, pruned_loss=0.07543, ctc_loss=0.1432, over 13173.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2688, pruned_loss=0.04605, ctc_loss=0.08615, over 3731705.48 frames. ], batch size: 143, lr: 9.23e-03, grad_scale: 32.0
+2024-08-27 02:04:23,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=212197.33333333334, ans=0.125
+2024-08-27 02:04:24,141 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.94 vs. limit=5.0
+2024-08-27 02:04:42,273 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=212304.0, ans=0.125
+2024-08-27 02:05:43,528 INFO [train.py:1114] (1/4) Epoch 17, batch 0, loss[loss=0.1764, simple_loss=0.2461, pruned_loss=0.03837, ctc_loss=0.07496, over 19820.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2461, pruned_loss=0.03837, ctc_loss=0.07496, over 19820.00 frames. ], batch size: 49, lr: 8.95e-03, grad_scale: 32.0
+2024-08-27 02:05:43,529 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-27 02:05:51,185 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.6209, 4.8802, 5.5564, 5.4054], device='cuda:1')
+2024-08-27 02:05:53,282 INFO [train.py:1146] (1/4) Epoch 17, validation: loss=0.172, simple_loss=0.265, pruned_loss=0.02949, ctc_loss=0.04976, over 944034.00 frames.
+2024-08-27 02:05:53,283 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
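+
+At each epoch boundary the trainer runs a full validation pass (here `loss=0.172 ... over 944034.00 frames`) and logs the entropy of selected attention-weight distributions as a diagnostic: low entropy means heads attend sharply, high entropy diffusely. A sketch of that entropy computation (tensor shapes are assumptions):
+
+```python
+import torch
+
+def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
+    """Mean entropy per head of attention weights shaped
+    (heads, batch, query, key), with weights summing to 1 over keys."""
+    eps = 1.0e-20
+    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (heads, batch, query)
+    return ent.mean(dim=(1, 2))                     # one value per head
+
+# Uniform attention over 100 keys gives entropy log(100) ~= 4.61 per head,
+# the same order as the tensor([5.62, 4.88, 5.55, 5.40]) logged above.
+attn = torch.full((4, 2, 10, 100), 1 / 100)
+print(attn_weights_entropy(attn))
+```
+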
+2024-08-27 02:06:02,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=212458.66666666666, ans=0.125
+2024-08-27 02:06:05,872 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.60 vs. limit=15.0
+2024-08-27 02:06:13,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=212512.0, ans=0.125
+2024-08-27 02:06:13,260 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.90 vs. limit=15.0
+2024-08-27 02:06:28,675 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=212565.33333333334, ans=0.0
+2024-08-27 02:06:29,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=212618.66666666666, ans=0.125
+2024-08-27 02:06:40,307 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.204e+02 1.629e+02 1.801e+02 2.001e+02 3.255e+02, threshold=3.602e+02, percent-clipped=0.0
+2024-08-27 02:06:40,342 INFO [train.py:1114] (1/4) Epoch 17, batch 50, loss[loss=0.1839, simple_loss=0.2532, pruned_loss=0.04171, ctc_loss=0.07791, over 19696.00 frames. ], tot_loss[loss=0.1927, simple_loss=0.2659, pruned_loss=0.04336, ctc_loss=0.0816, over 844645.99 frames. ], batch size: 47, lr: 8.94e-03, grad_scale: 16.0
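+
+The learning rate decays smoothly within an epoch (9.34e-03 down to 9.23e-03 across epoch 16 above) and steps down at the epoch boundary (9.23e-03 to 8.95e-03 at epoch 17, batch 0). One scheduler with exactly this shape is an Eden-style rule that discounts by both batch and epoch counts, as used in icefall recipes; a sketch, with `base_lr`, `lr_batches`, and `lr_epochs` chosen only for illustration:
+
+```python
+def eden_lr(base_lr: float, batch: int, epoch: int,
+            lr_batches: float = 5000.0, lr_epochs: float = 6.0) -> float:
+    """LR with smooth in-epoch decay plus a drop whenever `epoch` ticks."""
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+
+print(eden_lr(0.045, batch=212_000, epoch=16))  # in-epoch value
+print(eden_lr(0.045, batch=212_000, epoch=17))  # lower: the boundary step
+```
+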
+2024-08-27 02:06:43,219 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=212672.0, ans=0.2
+2024-08-27 02:06:55,397 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212725.33333333334, ans=0.1
+2024-08-27 02:07:00,628 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=212778.66666666666, ans=10.0
+2024-08-27 02:07:04,463 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=212778.66666666666, ans=0.125
+2024-08-27 02:07:29,668 INFO [train.py:1114] (1/4) Epoch 17, batch 100, loss[loss=0.1831, simple_loss=0.2592, pruned_loss=0.03877, ctc_loss=0.0735, over 19711.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2673, pruned_loss=0.04376, ctc_loss=0.08169, over 1499050.01 frames. ], batch size: 51, lr: 8.94e-03, grad_scale: 16.0
+2024-08-27 02:07:40,852 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=212992.0, ans=0.1
+2024-08-27 02:08:17,651 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=213152.0, ans=0.125
+2024-08-27 02:08:20,137 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.458e+02 1.665e+02 2.006e+02 3.256e+02, threshold=3.330e+02, percent-clipped=0.0
+2024-08-27 02:08:20,171 INFO [train.py:1114] (1/4) Epoch 17, batch 150, loss[loss=0.163, simple_loss=0.2278, pruned_loss=0.0362, ctc_loss=0.06461, over 19703.00 frames. ], tot_loss[loss=0.1918, simple_loss=0.2652, pruned_loss=0.04315, ctc_loss=0.08039, over 2028977.84 frames. ], batch size: 47, lr: 8.93e-03, grad_scale: 16.0
+2024-08-27 02:09:18,734 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=213365.33333333334, ans=0.1
+2024-08-27 02:10:25,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=213418.66666666666, ans=0.125
+2024-08-27 02:10:30,589 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=6.61 vs. limit=15.0
+2024-08-27 02:10:50,169 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=213418.66666666666, ans=0.2
+2024-08-27 02:10:53,110 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=213418.66666666666, ans=0.025
+2024-08-27 02:10:55,453 INFO [train.py:1114] (1/4) Epoch 17, batch 200, loss[loss=0.1965, simple_loss=0.2734, pruned_loss=0.04333, ctc_loss=0.08217, over 18150.00 frames. ], tot_loss[loss=0.191, simple_loss=0.2644, pruned_loss=0.04279, ctc_loss=0.07978, over 2437042.07 frames. ], batch size: 85, lr: 8.93e-03, grad_scale: 16.0
+2024-08-27 02:10:55,636 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=213472.0, ans=0.0
+2024-08-27 02:11:06,712 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=213525.33333333334, ans=0.1
+2024-08-27 02:11:06,845 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=213525.33333333334, ans=0.125
+2024-08-27 02:11:09,942 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.19 vs. limit=15.0
+2024-08-27 02:11:19,721 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=213578.66666666666, ans=0.125
+2024-08-27 02:11:30,603 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=213632.0, ans=0.125
+2024-08-27 02:11:41,052 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=213685.33333333334, ans=0.1
+2024-08-27 02:11:42,062 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.66 vs. limit=15.0
+2024-08-27 02:11:49,176 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.098e+02 1.468e+02 1.730e+02 2.457e+02 4.645e+02, threshold=3.460e+02, percent-clipped=6.0
+2024-08-27 02:11:49,210 INFO [train.py:1114] (1/4) Epoch 17, batch 250, loss[loss=0.205, simple_loss=0.2787, pruned_loss=0.04874, ctc_loss=0.08484, over 19395.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2639, pruned_loss=0.04229, ctc_loss=0.07893, over 2757247.20 frames. ], batch size: 67, lr: 8.92e-03, grad_scale: 16.0
+2024-08-27 02:11:56,936 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.14 vs. limit=15.0
+2024-08-27 02:12:18,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_na.min_abs, batch_count=213845.33333333334, ans=0.02
+2024-08-27 02:12:39,243 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:14:34,927 INFO [train.py:1114] (1/4) Epoch 17, batch 300, loss[loss=0.2064, simple_loss=0.2821, pruned_loss=0.04741, ctc_loss=0.08968, over 19510.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2632, pruned_loss=0.04205, ctc_loss=0.07864, over 3001398.30 frames. ], batch size: 61, lr: 8.92e-03, grad_scale: 16.0
+2024-08-27 02:14:48,194 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=214058.66666666666, ans=0.125
+2024-08-27 02:16:42,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=214218.66666666666, ans=10.0
+2024-08-27 02:16:48,690 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.450e+02 1.705e+02 2.074e+02 4.169e+02, threshold=3.410e+02, percent-clipped=2.0
+2024-08-27 02:16:48,724 INFO [train.py:1114] (1/4) Epoch 17, batch 350, loss[loss=0.1737, simple_loss=0.2474, pruned_loss=0.03663, ctc_loss=0.06692, over 19756.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2639, pruned_loss=0.04237, ctc_loss=0.07898, over 3191743.97 frames. ], batch size: 48, lr: 8.91e-03, grad_scale: 16.0
+2024-08-27 02:16:48,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=214272.0, ans=0.0
+2024-08-27 02:16:50,847 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=214272.0, ans=0.125
+2024-08-27 02:16:57,301 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=214325.33333333334, ans=0.125
+2024-08-27 02:17:25,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=214432.0, ans=0.125
+2024-08-27 02:17:30,945 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=214485.33333333334, ans=0.0
+2024-08-27 02:17:31,826 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=214485.33333333334, ans=0.125
+2024-08-27 02:17:36,075 INFO [train.py:1114] (1/4) Epoch 17, batch 400, loss[loss=0.1792, simple_loss=0.2641, pruned_loss=0.03435, ctc_loss=0.06392, over 19487.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2635, pruned_loss=0.04228, ctc_loss=0.07886, over 3343628.60 frames. ], batch size: 54, lr: 8.91e-03, grad_scale: 32.0
+2024-08-27 02:17:39,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_na.min_abs, batch_count=214538.66666666666, ans=0.02
+2024-08-27 02:17:42,939 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214538.66666666666, ans=0.1
+2024-08-27 02:17:55,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=214645.33333333334, ans=0.125
+2024-08-27 02:18:01,965 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214645.33333333334, ans=0.1
+2024-08-27 02:18:07,387 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=214698.66666666666, ans=0.125
+2024-08-27 02:18:23,095 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=214752.0, ans=0.125
+2024-08-27 02:18:25,574 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.479e+02 1.707e+02 2.031e+02 4.496e+02, threshold=3.413e+02, percent-clipped=2.0
+2024-08-27 02:18:25,608 INFO [train.py:1114] (1/4) Epoch 17, batch 450, loss[loss=0.2009, simple_loss=0.2815, pruned_loss=0.04467, ctc_loss=0.07735, over 19605.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2637, pruned_loss=0.04243, ctc_loss=0.07913, over 3450831.97 frames. ], batch size: 55, lr: 8.90e-03, grad_scale: 32.0
+2024-08-27 02:18:28,575 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=214805.33333333334, ans=0.1
+2024-08-27 02:19:13,500 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:19:15,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=215018.66666666666, ans=0.125
+2024-08-27 02:19:18,862 INFO [train.py:1114] (1/4) Epoch 17, batch 500, loss[loss=0.195, simple_loss=0.274, pruned_loss=0.04208, ctc_loss=0.07983, over 19661.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2625, pruned_loss=0.04183, ctc_loss=0.07812, over 3546821.36 frames. ], batch size: 63, lr: 8.90e-03, grad_scale: 32.0
+2024-08-27 02:19:43,549 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.53 vs. limit=15.0
+2024-08-27 02:19:43,645 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.06 vs. limit=10.0
+2024-08-27 02:19:44,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=215178.66666666666, ans=0.125
+2024-08-27 02:20:25,034 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:20:26,999 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.04 vs. limit=10.0
+2024-08-27 02:20:44,559 INFO [train.py:1114] (1/4) Epoch 17, batch 550, loss[loss=0.1933, simple_loss=0.2681, pruned_loss=0.04352, ctc_loss=0.07893, over 19299.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2622, pruned_loss=0.04185, ctc_loss=0.07809, over 3608064.98 frames. ], batch size: 71, lr: 8.89e-03, grad_scale: 16.0
+2024-08-27 02:20:45,395 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.201e+02 1.446e+02 1.711e+02 2.254e+02 3.980e+02, threshold=3.422e+02, percent-clipped=2.0
+2024-08-27 02:20:47,513 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=215338.66666666666, ans=0.125
+2024-08-27 02:20:50,274 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:20:53,039 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=215392.0, ans=0.0
+2024-08-27 02:20:53,319 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.84 vs. limit=10.0
+2024-08-27 02:20:55,749 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=215392.0, ans=0.125
+2024-08-27 02:21:09,343 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.84 vs. limit=22.5
+2024-08-27 02:21:10,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=215445.33333333334, ans=0.125
+2024-08-27 02:21:43,267 INFO [train.py:1114] (1/4) Epoch 17, batch 600, loss[loss=0.2073, simple_loss=0.2854, pruned_loss=0.04758, ctc_loss=0.08538, over 19430.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2622, pruned_loss=0.04163, ctc_loss=0.07763, over 3665994.82 frames. ], batch size: 67, lr: 8.88e-03, grad_scale: 16.0
+2024-08-27 02:21:43,462 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=215605.33333333334, ans=0.2
+2024-08-27 02:21:44,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=215605.33333333334, ans=0.0
+2024-08-27 02:21:47,272 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=215605.33333333334, ans=0.125
+2024-08-27 02:21:57,082 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=215658.66666666666, ans=0.1
+2024-08-27 02:22:34,200 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=215818.66666666666, ans=0.1
+2024-08-27 02:22:35,837 INFO [train.py:1114] (1/4) Epoch 17, batch 650, loss[loss=0.1736, simple_loss=0.2501, pruned_loss=0.03527, ctc_loss=0.06642, over 19764.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2614, pruned_loss=0.04126, ctc_loss=0.07701, over 3716311.99 frames. ], batch size: 54, lr: 8.88e-03, grad_scale: 16.0
+2024-08-27 02:22:36,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=215872.0, ans=0.025
+2024-08-27 02:22:36,655 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.454e+02 1.765e+02 2.281e+02 4.784e+02, threshold=3.530e+02, percent-clipped=4.0
+2024-08-27 02:22:51,049 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=5.89 vs. limit=15.0
+2024-08-27 02:23:09,260 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=216032.0, ans=0.05
+2024-08-27 02:23:11,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=216032.0, ans=0.125
+2024-08-27 02:23:14,629 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=216032.0, ans=0.125
+2024-08-27 02:23:16,757 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.37 vs. limit=15.0
+2024-08-27 02:23:23,711 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=216085.33333333334, ans=0.125
+2024-08-27 02:23:25,366 INFO [train.py:1114] (1/4) Epoch 17, batch 700, loss[loss=0.169, simple_loss=0.2512, pruned_loss=0.03185, ctc_loss=0.05768, over 19726.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2624, pruned_loss=0.0416, ctc_loss=0.07762, over 3749338.22 frames. ], batch size: 51, lr: 8.87e-03, grad_scale: 16.0
+2024-08-27 02:23:34,645 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=216192.0, ans=0.125
+2024-08-27 02:23:50,588 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=216245.33333333334, ans=0.0
+2024-08-27 02:27:51,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=216352.0, ans=0.025
+2024-08-27 02:28:51,342 INFO [train.py:1114] (1/4) Epoch 17, batch 750, loss[loss=0.1814, simple_loss=0.2661, pruned_loss=0.03495, ctc_loss=0.06723, over 19479.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2619, pruned_loss=0.04149, ctc_loss=0.07744, over 3775726.61 frames. ], batch size: 54, lr: 8.87e-03, grad_scale: 16.0
+2024-08-27 02:29:21,550 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.483e+02 1.820e+02 2.509e+02 4.091e+02, threshold=3.640e+02, percent-clipped=8.0
+2024-08-27 02:32:03,670 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=216405.33333333334, ans=0.1
+2024-08-27 02:32:31,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=216458.66666666666, ans=0.025
+2024-08-27 02:33:17,948 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.53 vs. limit=12.0
+2024-08-27 02:36:16,316 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216565.33333333334, ans=0.1
+2024-08-27 02:38:07,119 INFO [train.py:1114] (1/4) Epoch 17, batch 800, loss[loss=0.1785, simple_loss=0.237, pruned_loss=0.0438, ctc_loss=0.08114, over 19834.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.262, pruned_loss=0.0417, ctc_loss=0.07799, over 3795773.92 frames. ], batch size: 49, lr: 8.86e-03, grad_scale: 32.0
+2024-08-27 02:40:02,446 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=216778.66666666666, ans=0.125
+2024-08-27 02:40:07,784 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=216778.66666666666, ans=0.2
+2024-08-27 02:40:22,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=216832.0, ans=0.1
+2024-08-27 02:40:43,403 INFO [train.py:1114] (1/4) Epoch 17, batch 850, loss[loss=0.213, simple_loss=0.2864, pruned_loss=0.0506, ctc_loss=0.09607, over 19643.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.262, pruned_loss=0.04171, ctc_loss=0.07798, over 3815745.90 frames. ], batch size: 59, lr: 8.86e-03, grad_scale: 32.0
+2024-08-27 02:40:44,270 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.490e+02 1.788e+02 2.181e+02 3.218e+02, threshold=3.576e+02, percent-clipped=0.0
+2024-08-27 02:40:46,577 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=216938.66666666666, ans=0.1
+2024-08-27 02:40:53,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=216992.0, ans=0.2
+2024-08-27 02:40:56,136 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=216992.0, ans=0.125
+2024-08-27 02:41:19,759 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=217045.33333333334, ans=0.0
+2024-08-27 02:41:23,395 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=217045.33333333334, ans=0.125
+2024-08-27 02:41:29,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=217098.66666666666, ans=0.1
+2024-08-27 02:41:32,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=217098.66666666666, ans=0.125
+2024-08-27 02:41:34,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=217098.66666666666, ans=0.125
+2024-08-27 02:41:35,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=217098.66666666666, ans=0.125
+2024-08-27 02:41:41,829 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=217152.0, ans=0.1
+2024-08-27 02:41:48,135 INFO [train.py:1114] (1/4) Epoch 17, batch 900, loss[loss=0.1688, simple_loss=0.2415, pruned_loss=0.03507, ctc_loss=0.06492, over 19805.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2625, pruned_loss=0.04219, ctc_loss=0.07889, over 3819499.53 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-27 02:41:54,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=217205.33333333334, ans=0.0
+2024-08-27 02:42:02,981 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=217258.66666666666, ans=0.125
+2024-08-27 02:42:23,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=217365.33333333334, ans=0.125
+2024-08-27 02:42:24,955 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=217365.33333333334, ans=0.125
+2024-08-27 02:42:42,362 INFO [train.py:1114] (1/4) Epoch 17, batch 950, loss[loss=0.1733, simple_loss=0.2381, pruned_loss=0.03921, ctc_loss=0.07513, over 19497.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2632, pruned_loss=0.0425, ctc_loss=0.07945, over 3821559.39 frames. ], batch size: 49, lr: 8.85e-03, grad_scale: 32.0
+2024-08-27 02:42:43,222 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.442e+02 1.596e+02 1.963e+02 3.277e+02, threshold=3.193e+02, percent-clipped=0.0
+2024-08-27 02:44:41,769 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=217632.0, ans=0.0
+2024-08-27 02:44:46,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=217632.0, ans=0.125
+2024-08-27 02:45:22,498 INFO [train.py:1114] (1/4) Epoch 17, batch 1000, loss[loss=0.1736, simple_loss=0.2529, pruned_loss=0.03391, ctc_loss=0.06621, over 19883.00 frames. ], tot_loss[loss=0.1904, simple_loss=0.2638, pruned_loss=0.04258, ctc_loss=0.0797, over 3817867.61 frames. ], batch size: 52, lr: 8.84e-03, grad_scale: 32.0
+2024-08-27 02:45:24,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=217738.66666666666, ans=0.0
+2024-08-27 02:45:24,719 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=217738.66666666666, ans=0.125
+2024-08-27 02:45:27,422 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=217738.66666666666, ans=0.1
+2024-08-27 02:45:48,980 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=217845.33333333334, ans=0.025
+2024-08-27 02:46:02,020 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=217845.33333333334, ans=0.2
+2024-08-27 02:46:02,105 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=217845.33333333334, ans=0.125
+2024-08-27 02:46:09,020 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=217898.66666666666, ans=0.125
+2024-08-27 02:46:23,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=217952.0, ans=0.125
+2024-08-27 02:46:28,538 INFO [train.py:1114] (1/4) Epoch 17, batch 1050, loss[loss=0.1878, simple_loss=0.2718, pruned_loss=0.0382, ctc_loss=0.0686, over 19821.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2628, pruned_loss=0.04214, ctc_loss=0.07894, over 3824747.58 frames. ], batch size: 57, lr: 8.84e-03, grad_scale: 32.0
+2024-08-27 02:46:29,437 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.403e+02 1.586e+02 2.025e+02 2.959e+02, threshold=3.171e+02, percent-clipped=1.0
+2024-08-27 02:46:35,383 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.46 vs. limit=15.0
+2024-08-27 02:47:09,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=218112.0, ans=0.125
+2024-08-27 02:47:38,611 INFO [train.py:1114] (1/4) Epoch 17, batch 1100, loss[loss=0.1841, simple_loss=0.2633, pruned_loss=0.03834, ctc_loss=0.07028, over 19584.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2628, pruned_loss=0.04197, ctc_loss=0.07849, over 3831537.03 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-27 02:47:38,797 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=218272.0, ans=0.0
+2024-08-27 02:47:43,312 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=218272.0, ans=0.0
+2024-08-27 02:47:45,097 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=218272.0, ans=0.125
+2024-08-27 02:48:25,344 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=218325.33333333334, ans=0.125
+2024-08-27 02:48:47,994 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.12 vs. limit=15.0
+2024-08-27 02:48:57,235 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=218378.66666666666, ans=0.1
+2024-08-27 02:49:11,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=218432.0, ans=0.1
+2024-08-27 02:49:14,900 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.13 vs. limit=15.0
+2024-08-27 02:49:18,616 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=218485.33333333334, ans=0.125
+2024-08-27 02:49:27,451 INFO [train.py:1114] (1/4) Epoch 17, batch 1150, loss[loss=0.1793, simple_loss=0.2528, pruned_loss=0.03918, ctc_loss=0.06846, over 19583.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.2629, pruned_loss=0.04213, ctc_loss=0.07881, over 3831079.30 frames. ], batch size: 52, lr: 8.83e-03, grad_scale: 32.0
+2024-08-27 02:49:28,306 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.459e+02 1.619e+02 1.965e+02 3.390e+02, threshold=3.239e+02, percent-clipped=1.0
+2024-08-27 02:49:29,562 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:49:46,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=218645.33333333334, ans=0.125
+2024-08-27 02:49:49,367 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:49:53,307 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=218645.33333333334, ans=0.125
+2024-08-27 02:49:59,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=218698.66666666666, ans=0.125
+2024-08-27 02:50:02,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=218698.66666666666, ans=0.0
+2024-08-27 02:50:03,439 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.78 vs. limit=15.0
+2024-08-27 02:50:14,225 INFO [train.py:1114] (1/4) Epoch 17, batch 1200, loss[loss=0.188, simple_loss=0.2691, pruned_loss=0.03891, ctc_loss=0.07259, over 19841.00 frames. ], tot_loss[loss=0.1898, simple_loss=0.2635, pruned_loss=0.04227, ctc_loss=0.0791, over 3825453.22 frames. ], batch size: 57, lr: 8.82e-03, grad_scale: 32.0
+2024-08-27 02:50:38,571 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=218912.0, ans=0.0
+2024-08-27 02:51:04,355 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=219018.66666666666, ans=10.0
+2024-08-27 02:51:28,548 INFO [train.py:1114] (1/4) Epoch 17, batch 1250, loss[loss=0.2159, simple_loss=0.2799, pruned_loss=0.05595, ctc_loss=0.09992, over 19520.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2637, pruned_loss=0.04238, ctc_loss=0.07915, over 3843634.46 frames. ], batch size: 61, lr: 8.82e-03, grad_scale: 32.0
+2024-08-27 02:51:29,444 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.488e+02 1.826e+02 2.228e+02 3.440e+02, threshold=3.652e+02, percent-clipped=1.0
+2024-08-27 02:51:46,591 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.37 vs. limit=15.0
+2024-08-27 02:52:01,650 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=219232.0, ans=0.125
+2024-08-27 02:52:40,149 INFO [train.py:1114] (1/4) Epoch 17, batch 1300, loss[loss=0.2098, simple_loss=0.2784, pruned_loss=0.05153, ctc_loss=0.09535, over 18867.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2627, pruned_loss=0.04199, ctc_loss=0.07846, over 3847381.66 frames. ], batch size: 76, lr: 8.81e-03, grad_scale: 32.0
+2024-08-27 02:52:41,266 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 02:52:48,875 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=5.00 vs. limit=15.0
+2024-08-27 02:53:40,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=219552.0, ans=0.2
+2024-08-27 02:53:49,257 INFO [train.py:1114] (1/4) Epoch 17, batch 1350, loss[loss=0.1913, simple_loss=0.2746, pruned_loss=0.03869, ctc_loss=0.0765, over 19769.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2621, pruned_loss=0.04161, ctc_loss=0.07779, over 3857863.66 frames. ], batch size: 54, lr: 8.81e-03, grad_scale: 32.0
+2024-08-27 02:53:50,132 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.111e+02 1.487e+02 1.709e+02 2.118e+02 3.687e+02, threshold=3.418e+02, percent-clipped=1.0
+2024-08-27 02:53:53,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=219605.33333333334, ans=0.125
+2024-08-27 02:54:05,777 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=219658.66666666666, ans=0.0
+2024-08-27 02:54:06,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=219712.0, ans=0.125
+2024-08-27 02:54:14,196 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=219712.0, ans=0.125
+2024-08-27 02:54:47,051 INFO [train.py:1114] (1/4) Epoch 17, batch 1400, loss[loss=0.189, simple_loss=0.2543, pruned_loss=0.04492, ctc_loss=0.08475, over 19679.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2617, pruned_loss=0.04153, ctc_loss=0.07754, over 3864231.98 frames. ], batch size: 46, lr: 8.80e-03, grad_scale: 32.0
+2024-08-27 02:54:49,360 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.35 vs. limit=6.0
+2024-08-27 02:54:52,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=219872.0, ans=0.1
+2024-08-27 02:54:59,317 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=219925.33333333334, ans=0.125
+2024-08-27 02:55:18,313 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.79 vs. limit=15.0
+2024-08-27 02:55:30,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=220085.33333333334, ans=0.04949747468305833
+2024-08-27 02:55:40,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=220138.66666666666, ans=0.0
+2024-08-27 02:55:41,669 INFO [train.py:1114] (1/4) Epoch 17, batch 1450, loss[loss=0.2176, simple_loss=0.2908, pruned_loss=0.05324, ctc_loss=0.09474, over 19695.00 frames. ], tot_loss[loss=0.189, simple_loss=0.2627, pruned_loss=0.04197, ctc_loss=0.0783, over 3862167.13 frames. ], batch size: 63, lr: 8.80e-03, grad_scale: 32.0
+2024-08-27 02:55:42,529 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.445e+02 1.654e+02 2.032e+02 3.496e+02, threshold=3.307e+02, percent-clipped=1.0
+2024-08-27 02:55:57,401 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=220192.0, ans=0.1
+2024-08-27 02:56:22,402 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.12 vs. limit=15.0
+2024-08-27 02:56:24,842 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=220352.0, ans=0.0
+2024-08-27 02:56:27,192 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=220352.0, ans=22.5
+2024-08-27 02:56:33,717 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.whiten.whitening_limit, batch_count=220352.0, ans=12.0
+2024-08-27 02:56:35,344 INFO [train.py:1114] (1/4) Epoch 17, batch 1500, loss[loss=0.1867, simple_loss=0.2669, pruned_loss=0.03867, ctc_loss=0.07303, over 19565.00 frames. ], tot_loss[loss=0.1888, simple_loss=0.263, pruned_loss=0.04177, ctc_loss=0.07783, over 3862445.51 frames. ], batch size: 57, lr: 8.79e-03, grad_scale: 32.0
+2024-08-27 02:56:46,163 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=220458.66666666666, ans=0.025
+2024-08-27 02:56:51,676 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=220458.66666666666, ans=0.125
+2024-08-27 02:57:21,153 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=220618.66666666666, ans=0.0
+2024-08-27 02:57:22,839 INFO [train.py:1114] (1/4) Epoch 17, batch 1550, loss[loss=0.2014, simple_loss=0.2758, pruned_loss=0.04633, ctc_loss=0.08596, over 19604.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2634, pruned_loss=0.0421, ctc_loss=0.07865, over 3847588.66 frames. ], batch size: 60, lr: 8.79e-03, grad_scale: 32.0
+2024-08-27 02:57:23,802 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.186e+02 1.433e+02 1.700e+02 2.311e+02 3.923e+02, threshold=3.401e+02, percent-clipped=1.0
+2024-08-27 02:57:24,130 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=220672.0, ans=0.025
+2024-08-27 02:57:53,234 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=220725.33333333334, ans=0.125
+2024-08-27 02:57:55,868 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=220725.33333333334, ans=0.125
+2024-08-27 02:58:16,979 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_ff3.min_abs, batch_count=220832.0, ans=0.2
+2024-08-27 02:58:27,693 INFO [train.py:1114] (1/4) Epoch 17, batch 1600, loss[loss=0.1911, simple_loss=0.2706, pruned_loss=0.03965, ctc_loss=0.08044, over 19845.00 frames. ], tot_loss[loss=0.19, simple_loss=0.2636, pruned_loss=0.04233, ctc_loss=0.07907, over 3836188.10 frames. ], batch size: 57, lr: 8.78e-03, grad_scale: 32.0
+2024-08-27 02:59:27,396 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.76 vs. limit=22.5
+2024-08-27 02:59:31,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=220992.0, ans=0.125
+2024-08-27 03:00:06,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=221045.33333333334, ans=0.125
+2024-08-27 03:00:29,356 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.88 vs. limit=22.5
+2024-08-27 03:00:53,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=221152.0, ans=0.2
+2024-08-27 03:00:55,470 INFO [train.py:1114] (1/4) Epoch 17, batch 1650, loss[loss=0.1881, simple_loss=0.2653, pruned_loss=0.04009, ctc_loss=0.07687, over 19679.00 frames. ], tot_loss[loss=0.1896, simple_loss=0.2634, pruned_loss=0.0422, ctc_loss=0.07877, over 3831818.43 frames. ], batch size: 59, lr: 8.77e-03, grad_scale: 32.0
+2024-08-27 03:00:56,097 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.74 vs. limit=15.0
+2024-08-27 03:00:58,237 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.431e+02 1.952e+02 2.452e+02 3.980e+02, threshold=3.905e+02, percent-clipped=5.0
+2024-08-27 03:01:02,239 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=221205.33333333334, ans=0.0
+2024-08-27 03:01:07,531 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=221258.66666666666, ans=0.2
+2024-08-27 03:01:19,926 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=13.22 vs. limit=15.0
+2024-08-27 03:01:22,384 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.06 vs. limit=15.0
+2024-08-27 03:01:39,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=221365.33333333334, ans=0.125
+2024-08-27 03:01:51,807 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten.whitening_limit, batch_count=221418.66666666666, ans=22.5
+2024-08-27 03:01:57,110 INFO [train.py:1114] (1/4) Epoch 17, batch 1700, loss[loss=0.1792, simple_loss=0.247, pruned_loss=0.04043, ctc_loss=0.0765, over 19675.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2632, pruned_loss=0.04213, ctc_loss=0.07863, over 3845734.92 frames. ], batch size: 46, lr: 8.77e-03, grad_scale: 32.0
+2024-08-27 03:02:05,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=221525.33333333334, ans=0.125
+2024-08-27 03:02:07,512 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.30 vs. limit=6.0
+2024-08-27 03:02:08,204 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=221525.33333333334, ans=0.125
+2024-08-27 03:02:16,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=221525.33333333334, ans=0.125
+2024-08-27 03:02:34,528 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=221632.0, ans=0.0
+2024-08-27 03:02:39,757 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=221685.33333333334, ans=0.0
+2024-08-27 03:02:45,944 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=221685.33333333334, ans=0.04949747468305833
+2024-08-27 03:02:48,263 INFO [train.py:1114] (1/4) Epoch 17, batch 1750, loss[loss=0.1753, simple_loss=0.2377, pruned_loss=0.0417, ctc_loss=0.07389, over 19647.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.2626, pruned_loss=0.04207, ctc_loss=0.07852, over 3850469.71 frames. ], batch size: 45, lr: 8.76e-03, grad_scale: 16.0
+2024-08-27 03:02:49,980 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.526e+02 1.896e+02 2.459e+02 4.889e+02, threshold=3.791e+02, percent-clipped=1.0
+2024-08-27 03:03:10,977 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=221792.0, ans=0.0
+2024-08-27 03:03:13,644 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=221792.0, ans=0.2
+2024-08-27 03:03:20,161 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.42 vs. limit=15.0
+2024-08-27 03:03:29,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=221898.66666666666, ans=0.125
+2024-08-27 03:03:33,155 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=221898.66666666666, ans=0.1
+2024-08-27 03:03:42,702 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=221952.0, ans=0.1
+2024-08-27 03:03:46,723 INFO [train.py:1114] (1/4) Epoch 17, batch 1800, loss[loss=0.1955, simple_loss=0.2737, pruned_loss=0.04183, ctc_loss=0.08431, over 19604.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2629, pruned_loss=0.04203, ctc_loss=0.0785, over 3851798.91 frames. ], batch size: 55, lr: 8.76e-03, grad_scale: 16.0
+2024-08-27 03:03:47,813 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=222005.33333333334, ans=0.1
+2024-08-27 03:03:48,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=222005.33333333334, ans=0.125
+2024-08-27 03:03:48,835 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.51 vs. limit=6.0
+2024-08-27 03:03:53,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=222005.33333333334, ans=0.1
+2024-08-27 03:03:56,555 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=222058.66666666666, ans=0.125
+2024-08-27 03:03:56,852 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.65 vs. limit=15.0
+2024-08-27 03:04:10,896 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.55 vs. limit=22.5
+2024-08-27 03:04:30,761 INFO [train.py:1114] (1/4) Epoch 17, batch 1850, loss[loss=0.1973, simple_loss=0.2775, pruned_loss=0.04331, ctc_loss=0.07593, over 19592.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2627, pruned_loss=0.04175, ctc_loss=0.07781, over 3854949.06 frames. ], batch size: 57, lr: 8.75e-03, grad_scale: 16.0
+2024-08-27 03:04:32,493 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.249e+02 1.484e+02 1.846e+02 2.436e+02 4.218e+02, threshold=3.691e+02, percent-clipped=2.0
+2024-08-27 03:04:46,120 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=222325.33333333334, ans=0.125
+2024-08-27 03:04:50,586 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.56 vs. limit=22.5
+2024-08-27 03:05:13,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=222485.33333333334, ans=0.125
+2024-08-27 03:05:14,599 INFO [train.py:1114] (1/4) Epoch 17, batch 1900, loss[loss=0.2078, simple_loss=0.2871, pruned_loss=0.04752, ctc_loss=0.08366, over 19647.00 frames. ], tot_loss[loss=0.1885, simple_loss=0.2628, pruned_loss=0.04158, ctc_loss=0.07751, over 3860703.66 frames. ], batch size: 59, lr: 8.75e-03, grad_scale: 16.0
+2024-08-27 03:05:21,703 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.60 vs. limit=15.0
+2024-08-27 03:05:45,593 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=222698.66666666666, ans=0.125
+2024-08-27 03:05:57,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=222752.0, ans=0.0
+2024-08-27 03:06:00,580 INFO [train.py:1114] (1/4) Epoch 17, batch 1950, loss[loss=0.1742, simple_loss=0.2538, pruned_loss=0.0338, ctc_loss=0.06768, over 19595.00 frames. ], tot_loss[loss=0.1892, simple_loss=0.2637, pruned_loss=0.04178, ctc_loss=0.07793, over 3870458.50 frames. ], batch size: 52, lr: 8.74e-03, grad_scale: 16.0
+2024-08-27 03:06:02,414 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.462e+02 1.715e+02 2.122e+02 4.504e+02, threshold=3.430e+02, percent-clipped=1.0
+2024-08-27 03:06:11,300 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=222858.66666666666, ans=0.0
+2024-08-27 03:06:27,567 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=222965.33333333334, ans=0.2
+2024-08-27 03:06:27,690 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=222965.33333333334, ans=0.125
+2024-08-27 03:06:38,197 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.60 vs. limit=22.5
+2024-08-27 03:06:48,020 INFO [train.py:1114] (1/4) Epoch 17, batch 2000, loss[loss=0.159, simple_loss=0.2304, pruned_loss=0.03214, ctc_loss=0.05836, over 19625.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2647, pruned_loss=0.04246, ctc_loss=0.0792, over 3856646.93 frames. ], batch size: 45, lr: 8.74e-03, grad_scale: 32.0
+2024-08-27 03:06:49,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=223072.0, ans=0.0
+2024-08-27 03:06:50,248 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.65 vs. limit=15.0
+2024-08-27 03:09:30,168 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=223285.33333333334, ans=0.125
+2024-08-27 03:09:33,652 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=223285.33333333334, ans=0.035
+2024-08-27 03:09:41,585 INFO [train.py:1114] (1/4) Epoch 17, batch 2050, loss[loss=0.1648, simple_loss=0.2348, pruned_loss=0.03492, ctc_loss=0.06243, over 19729.00 frames. ], tot_loss[loss=0.1895, simple_loss=0.2634, pruned_loss=0.04212, ctc_loss=0.07865, over 3852531.93 frames. ], batch size: 47, lr: 8.73e-03, grad_scale: 32.0
+2024-08-27 03:09:43,284 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.433e+02 1.718e+02 2.194e+02 3.489e+02, threshold=3.436e+02, percent-clipped=1.0
+2024-08-27 03:09:52,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=223338.66666666666, ans=0.2
+2024-08-27 03:10:35,993 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.22 vs. limit=15.0
+2024-08-27 03:13:12,279 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=223552.0, ans=0.125
+2024-08-27 03:13:17,715 INFO [train.py:1114] (1/4) Epoch 17, batch 2100, loss[loss=0.1713, simple_loss=0.2498, pruned_loss=0.03353, ctc_loss=0.06419, over 19766.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2626, pruned_loss=0.04169, ctc_loss=0.0779, over 3859529.90 frames. ], batch size: 54, lr: 8.73e-03, grad_scale: 32.0
+2024-08-27 03:13:55,360 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=223658.66666666666, ans=0.125
+2024-08-27 03:23:47,215 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.61 vs. limit=10.0
+2024-08-27 03:24:00,140 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=223818.66666666666, ans=0.125
+2024-08-27 03:24:47,085 INFO [train.py:1114] (1/4) Epoch 17, batch 2150, loss[loss=0.1697, simple_loss=0.2489, pruned_loss=0.03314, ctc_loss=0.06055, over 19570.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.2621, pruned_loss=0.04169, ctc_loss=0.07785, over 3870502.16 frames. ], batch size: 52, lr: 8.72e-03, grad_scale: 32.0
+2024-08-27 03:24:47,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=223872.0, ans=0.1
+2024-08-27 03:24:49,700 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.181e+02 1.464e+02 1.691e+02 2.317e+02 5.931e+02, threshold=3.382e+02, percent-clipped=6.0
+2024-08-27 03:24:54,278 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=223872.0, ans=0.125
+2024-08-27 03:25:03,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=223872.0, ans=0.0
+2024-08-27 03:25:30,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.61 vs. limit=12.0
+2024-08-27 03:26:13,187 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.61 vs. limit=10.0
+2024-08-27 03:26:31,804 INFO [train.py:1114] (1/4) Epoch 17, batch 2200, loss[loss=0.1934, simple_loss=0.2725, pruned_loss=0.0416, ctc_loss=0.07767, over 19579.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.262, pruned_loss=0.04165, ctc_loss=0.07763, over 3867850.87 frames. ], batch size: 57, lr: 8.72e-03, grad_scale: 32.0
+2024-08-27 03:27:07,334 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=224298.66666666666, ans=0.2
+2024-08-27 03:27:07,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.61 vs. limit=15.0
+2024-08-27 03:27:09,958 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=224298.66666666666, ans=0.125
+2024-08-27 03:27:26,236 INFO [train.py:1114] (1/4) Epoch 17, batch 2250, loss[loss=0.1844, simple_loss=0.2647, pruned_loss=0.0377, ctc_loss=0.07199, over 19616.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2624, pruned_loss=0.04178, ctc_loss=0.07801, over 3867771.57 frames. ], batch size: 55, lr: 8.71e-03, grad_scale: 32.0
+2024-08-27 03:27:29,878 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.517e+02 1.774e+02 2.256e+02 3.791e+02, threshold=3.548e+02, percent-clipped=1.0
+2024-08-27 03:27:50,796 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=224458.66666666666, ans=0.2
+2024-08-27 03:27:52,900 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.88 vs. limit=15.0
+2024-08-27 03:29:04,920 INFO [train.py:1114] (1/4) Epoch 17, batch 2300, loss[loss=0.1631, simple_loss=0.2336, pruned_loss=0.03348, ctc_loss=0.06434, over 19506.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.262, pruned_loss=0.04194, ctc_loss=0.07841, over 3861446.76 frames. ], batch size: 49, lr: 8.71e-03, grad_scale: 16.0
+2024-08-27 03:29:10,983 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=224672.0, ans=0.2
+2024-08-27 03:29:16,425 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=224725.33333333334, ans=0.0
+2024-08-27 03:29:24,977 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.01 vs. limit=6.0
+2024-08-27 03:30:49,129 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=224778.66666666666, ans=0.125
+2024-08-27 03:32:21,079 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=6.97 vs. limit=15.0
+2024-08-27 03:36:20,904 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=224885.33333333334, ans=0.1
+2024-08-27 03:36:29,127 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=224885.33333333334, ans=0.1
+2024-08-27 03:36:49,581 INFO [train.py:1114] (1/4) Epoch 17, batch 2350, loss[loss=0.2005, simple_loss=0.2766, pruned_loss=0.04618, ctc_loss=0.08034, over 19670.00 frames. ], tot_loss[loss=0.1888, simple_loss=0.2621, pruned_loss=0.04206, ctc_loss=0.07835, over 3863977.51 frames. ], batch size: 63, lr: 8.70e-03, grad_scale: 16.0
+2024-08-27 03:37:01,829 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.439e+02 1.647e+02 2.102e+02 4.091e+02, threshold=3.295e+02, percent-clipped=1.0
+2024-08-27 03:37:22,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=224992.0, ans=0.125
+2024-08-27 03:37:30,274 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=224992.0, ans=0.125
+2024-08-27 03:37:37,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=224992.0, ans=0.0
+2024-08-27 03:37:40,711 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.04 vs. limit=15.0
+2024-08-27 03:37:53,817 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.68 vs. limit=6.0
+2024-08-27 03:38:20,256 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=225098.66666666666, ans=0.125
+2024-08-27 03:39:26,221 INFO [train.py:1114] (1/4) Epoch 17, batch 2400, loss[loss=0.2131, simple_loss=0.2857, pruned_loss=0.05097, ctc_loss=0.09628, over 19262.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2636, pruned_loss=0.04247, ctc_loss=0.07909, over 3858306.35 frames. ], batch size: 71, lr: 8.70e-03, grad_scale: 32.0
+2024-08-27 03:41:34,585 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.98 vs. limit=12.0
+2024-08-27 03:42:54,161 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=225365.33333333334, ans=0.125
+2024-08-27 03:43:47,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=225365.33333333334, ans=0.125
+2024-08-27 03:43:56,868 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.44 vs. limit=15.0
+2024-08-27 03:44:00,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=225418.66666666666, ans=0.125
+2024-08-27 03:44:22,632 INFO [train.py:1114] (1/4) Epoch 17, batch 2450, loss[loss=0.2586, simple_loss=0.3022, pruned_loss=0.07836, ctc_loss=0.1458, over 13116.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2674, pruned_loss=0.04508, ctc_loss=0.08415, over 3731084.46 frames. ], batch size: 140, lr: 8.69e-03, grad_scale: 32.0
+2024-08-27 03:44:30,564 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.519e+02 1.805e+02 2.064e+02 2.900e+02, threshold=3.609e+02, percent-clipped=0.0
+2024-08-27 03:44:39,150 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.95 vs. limit=22.5
+2024-08-27 03:45:47,704 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=225525.33333333334, ans=0.5
+2024-08-27 03:45:47,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=225525.33333333334, ans=0.1
+2024-08-27 03:47:09,516 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=225578.66666666666, ans=0.04949747468305833
+2024-08-27 03:50:08,498 INFO [train.py:1114] (1/4) Epoch 18, batch 0, loss[loss=0.1598, simple_loss=0.2341, pruned_loss=0.03049, ctc_loss=0.06103, over 19410.00 frames. ], tot_loss[loss=0.1598, simple_loss=0.2341, pruned_loss=0.03049, ctc_loss=0.06103, over 19410.00 frames. ], batch size: 48, lr: 8.44e-03, grad_scale: 32.0
+2024-08-27 03:50:08,499 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-27 03:56:59,334 INFO [train.py:1146] (1/4) Epoch 18, validation: loss=0.1731, simple_loss=0.2653, pruned_loss=0.0303, ctc_loss=0.05087, over 944034.00 frames.
+2024-08-27 03:56:59,336 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
+2024-08-27 03:58:10,825 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=15.0
+2024-08-27 03:58:49,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=225786.66666666666, ans=0.0
+2024-08-27 03:59:36,538 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=225893.33333333334, ans=0.07
+2024-08-27 03:59:40,036 INFO [train.py:1114] (1/4) Epoch 18, batch 50, loss[loss=0.1642, simple_loss=0.2337, pruned_loss=0.03461, ctc_loss=0.06345, over 19735.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2632, pruned_loss=0.04249, ctc_loss=0.08033, over 843693.80 frames. ], batch size: 47, lr: 8.44e-03, grad_scale: 32.0
+2024-08-27 03:59:51,394 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=226000.0, ans=0.0
+2024-08-27 03:59:52,928 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.620e+02 1.870e+02 2.127e+02 3.474e+02, threshold=3.740e+02, percent-clipped=0.0
+2024-08-27 04:00:01,622 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=226053.33333333334, ans=0.1
+2024-08-27 04:00:09,701 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.02 vs. limit=12.0
+2024-08-27 04:00:10,201 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=226106.66666666666, ans=0.125
+2024-08-27 04:00:23,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=226160.0, ans=0.0
+2024-08-27 04:00:34,086 INFO [train.py:1114] (1/4) Epoch 18, batch 100, loss[loss=0.1819, simple_loss=0.2559, pruned_loss=0.03934, ctc_loss=0.07288, over 19719.00 frames. ], tot_loss[loss=0.1905, simple_loss=0.2648, pruned_loss=0.04222, ctc_loss=0.07928, over 1498452.85 frames. ], batch size: 51, lr: 8.43e-03, grad_scale: 32.0
+2024-08-27 04:04:08,964 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=226266.66666666666, ans=0.125
+2024-08-27 04:05:20,886 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.53 vs. limit=22.5
+2024-08-27 04:05:27,963 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=226320.0, ans=0.1
+2024-08-27 04:05:34,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=226320.0, ans=0.125
+2024-08-27 04:05:38,011 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=226320.0, ans=0.025
+2024-08-27 04:05:56,309 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.08 vs. limit=15.0
+2024-08-27 04:05:57,971 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=226426.66666666666, ans=0.125
+2024-08-27 04:05:58,783 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:06:00,492 INFO [train.py:1114] (1/4) Epoch 18, batch 150, loss[loss=0.1786, simple_loss=0.2444, pruned_loss=0.04118, ctc_loss=0.07631, over 19694.00 frames. ], tot_loss[loss=0.1882, simple_loss=0.2624, pruned_loss=0.04137, ctc_loss=0.07804, over 2027899.17 frames. ], batch size: 47, lr: 8.43e-03, grad_scale: 32.0
+2024-08-27 04:06:14,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-27 04:06:16,208 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.128e+02 1.462e+02 1.764e+02 2.186e+02 3.977e+02, threshold=3.529e+02, percent-clipped=1.0
+2024-08-27 04:06:16,329 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-27 04:06:18,252 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=226533.33333333334, ans=0.125
+2024-08-27 04:06:49,832 INFO [train.py:1114] (1/4) Epoch 18, batch 200, loss[loss=0.194, simple_loss=0.2699, pruned_loss=0.04249, ctc_loss=0.08252, over 18231.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2619, pruned_loss=0.0414, ctc_loss=0.0779, over 2435641.98 frames. ], batch size: 85, lr: 8.42e-03, grad_scale: 32.0
+2024-08-27 04:06:58,230 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=226800.0, ans=0.125
+2024-08-27 04:07:11,176 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=226853.33333333334, ans=0.05
+2024-08-27 04:07:13,992 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=226853.33333333334, ans=0.2
+2024-08-27 04:07:22,393 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=226906.66666666666, ans=0.025
+2024-08-27 04:07:22,471 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=226906.66666666666, ans=0.125
+2024-08-27 04:07:22,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=226906.66666666666, ans=0.0
+2024-08-27 04:07:35,896 INFO [train.py:1114] (1/4) Epoch 18, batch 250, loss[loss=0.2155, simple_loss=0.2881, pruned_loss=0.05219, ctc_loss=0.09606, over 19375.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2615, pruned_loss=0.04104, ctc_loss=0.07697, over 2757155.58 frames. ], batch size: 67, lr: 8.42e-03, grad_scale: 32.0
+2024-08-27 04:07:41,196 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.83 vs. limit=15.0
+2024-08-27 04:07:44,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=227013.33333333334, ans=0.125
+2024-08-27 04:07:46,586 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=227066.66666666666, ans=0.125
+2024-08-27 04:07:50,816 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.244e+02 1.521e+02 1.873e+02 2.606e+02 4.367e+02, threshold=3.746e+02, percent-clipped=8.0
+2024-08-27 04:08:03,150 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227120.0, ans=0.1
+2024-08-27 04:08:16,564 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff3.min_abs, batch_count=227173.33333333334, ans=0.2
+2024-08-27 04:08:17,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=227173.33333333334, ans=0.1
+2024-08-27 04:08:31,219 INFO [train.py:1114] (1/4) Epoch 18, batch 300, loss[loss=0.1896, simple_loss=0.2678, pruned_loss=0.03941, ctc_loss=0.08146, over 19505.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2607, pruned_loss=0.04062, ctc_loss=0.07598, over 3002465.67 frames. ], batch size: 61, lr: 8.41e-03, grad_scale: 32.0
+2024-08-27 04:08:44,725 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=227333.33333333334, ans=0.1
+2024-08-27 04:08:54,808 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:09:04,882 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227440.0, ans=0.1
+2024-08-27 04:09:06,140 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.18 vs. limit=15.0
+2024-08-27 04:09:17,362 INFO [train.py:1114] (1/4) Epoch 18, batch 350, loss[loss=0.1519, simple_loss=0.2285, pruned_loss=0.02798, ctc_loss=0.04824, over 19745.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2612, pruned_loss=0.04093, ctc_loss=0.07636, over 3192448.38 frames. ], batch size: 48, lr: 8.41e-03, grad_scale: 32.0
+2024-08-27 04:09:17,517 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=227546.66666666666, ans=0.1
+2024-08-27 04:09:18,094 INFO [scaling.py:1024] (1/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.47 vs. limit=8.0
+2024-08-27 04:09:30,336 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.460e+02 1.643e+02 1.956e+02 3.165e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-27 04:10:17,470 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=227706.66666666666, ans=0.0
+2024-08-27 04:10:33,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=227760.0, ans=0.125
+2024-08-27 04:10:35,077 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=227760.0, ans=0.0
+2024-08-27 04:10:40,379 INFO [train.py:1114] (1/4) Epoch 18, batch 400, loss[loss=0.1711, simple_loss=0.2574, pruned_loss=0.03035, ctc_loss=0.0601, over 19499.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.261, pruned_loss=0.04068, ctc_loss=0.07573, over 3342545.79 frames. ], batch size: 54, lr: 8.40e-03, grad_scale: 32.0
+2024-08-27 04:10:48,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=227813.33333333334, ans=0.025
+2024-08-27 04:12:28,881 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.08 vs. limit=15.0
+2024-08-27 04:12:32,842 INFO [train.py:1114] (1/4) Epoch 18, batch 450, loss[loss=0.2, simple_loss=0.2776, pruned_loss=0.0441, ctc_loss=0.08526, over 19601.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.2612, pruned_loss=0.04081, ctc_loss=0.07615, over 3451509.51 frames. ], batch size: 55, lr: 8.40e-03, grad_scale: 32.0
+2024-08-27 04:12:34,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=228080.0, ans=0.1
+2024-08-27 04:12:59,754 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.135e+02 1.395e+02 1.673e+02 2.305e+02 3.910e+02, threshold=3.347e+02, percent-clipped=3.0
+2024-08-27 04:13:04,469 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=228186.66666666666, ans=0.0
+2024-08-27 04:13:15,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=228240.0, ans=0.0
+2024-08-27 04:13:17,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=228240.0, ans=0.04949747468305833
+2024-08-27 04:13:18,404 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=228240.0, ans=0.125
+2024-08-27 04:13:32,855 INFO [train.py:1114] (1/4) Epoch 18, batch 500, loss[loss=0.1954, simple_loss=0.2743, pruned_loss=0.0424, ctc_loss=0.07934, over 19696.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2604, pruned_loss=0.04038, ctc_loss=0.07559, over 3546801.94 frames. ], batch size: 63, lr: 8.39e-03, grad_scale: 32.0
+2024-08-27 04:13:37,472 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=228346.66666666666, ans=0.1
+2024-08-27 04:13:39,446 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.92 vs. limit=15.0
+2024-08-27 04:13:43,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=228400.0, ans=0.0
+2024-08-27 04:13:45,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=228400.0, ans=0.0
+2024-08-27 04:13:46,874 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=228400.0, ans=0.125
+2024-08-27 04:13:51,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=228400.0, ans=0.125
+2024-08-27 04:14:18,481 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=228560.0, ans=0.2
+2024-08-27 04:14:20,883 INFO [train.py:1114] (1/4) Epoch 18, batch 550, loss[loss=0.2117, simple_loss=0.2833, pruned_loss=0.05063, ctc_loss=0.09699, over 19259.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2604, pruned_loss=0.04042, ctc_loss=0.07544, over 3609220.25 frames. ], batch size: 71, lr: 8.39e-03, grad_scale: 32.0
+2024-08-27 04:14:28,546 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=228613.33333333334, ans=0.025
+2024-08-27 04:14:34,525 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.436e+02 1.681e+02 2.031e+02 3.505e+02, threshold=3.363e+02, percent-clipped=1.0
+2024-08-27 04:14:38,025 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.07 vs. limit=22.5
+2024-08-27 04:14:38,697 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=228666.66666666666, ans=0.125
+2024-08-27 04:14:43,520 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.93 vs. limit=15.0
+2024-08-27 04:14:47,012 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=228720.0, ans=0.0
+2024-08-27 04:15:07,918 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.50 vs. limit=15.0
+2024-08-27 04:15:08,605 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=228826.66666666666, ans=0.2
+2024-08-27 04:15:14,825 INFO [train.py:1114] (1/4) Epoch 18, batch 600, loss[loss=0.2116, simple_loss=0.2891, pruned_loss=0.04919, ctc_loss=0.08946, over 19439.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2609, pruned_loss=0.04061, ctc_loss=0.07597, over 3667203.93 frames. ], batch size: 67, lr: 8.38e-03, grad_scale: 32.0
+2024-08-27 04:15:20,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.57 vs. limit=15.0
+2024-08-27 04:15:20,642 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=228880.0, ans=0.125
+2024-08-27 04:15:46,891 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=228986.66666666666, ans=0.125
+2024-08-27 04:16:57,877 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=229093.33333333334, ans=0.5
+2024-08-27 04:17:06,962 INFO [train.py:1114] (1/4) Epoch 18, batch 650, loss[loss=0.1856, simple_loss=0.2645, pruned_loss=0.03798, ctc_loss=0.077, over 19779.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2601, pruned_loss=0.04027, ctc_loss=0.07543, over 3716775.23 frames. ], batch size: 54, lr: 8.38e-03, grad_scale: 32.0
+2024-08-27 04:17:20,099 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.567e+02 1.955e+02 2.726e+02 4.189e+02, threshold=3.909e+02, percent-clipped=6.0
+2024-08-27 04:17:24,016 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=229200.0, ans=0.125
+2024-08-27 04:17:24,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=229200.0, ans=0.1
+2024-08-27 04:17:43,516 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=229306.66666666666, ans=0.125
+2024-08-27 04:17:54,858 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.33 vs. limit=15.0
+2024-08-27 04:18:45,811 INFO [train.py:1114] (1/4) Epoch 18, batch 700, loss[loss=0.1805, simple_loss=0.2536, pruned_loss=0.03803, ctc_loss=0.07829, over 19707.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2607, pruned_loss=0.04048, ctc_loss=0.07587, over 3748465.26 frames. ], batch size: 51, lr: 8.37e-03, grad_scale: 32.0
+2024-08-27 04:18:54,811 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=229413.33333333334, ans=0.025
+2024-08-27 04:18:59,641 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=229466.66666666666, ans=10.0
+2024-08-27 04:19:10,055 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=229520.0, ans=0.035
+2024-08-27 04:19:15,467 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=229520.0, ans=0.125
+2024-08-27 04:19:19,302 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=229573.33333333334, ans=0.125
+2024-08-27 04:19:35,533 INFO [train.py:1114] (1/4) Epoch 18, batch 750, loss[loss=0.1908, simple_loss=0.2711, pruned_loss=0.04018, ctc_loss=0.0756, over 19498.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2604, pruned_loss=0.0405, ctc_loss=0.07585, over 3775010.57 frames. ], batch size: 54, lr: 8.37e-03, grad_scale: 16.0
+2024-08-27 04:19:38,429 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=229680.0, ans=0.125
+2024-08-27 04:19:45,091 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=3.99 vs. limit=6.0
+2024-08-27 04:19:49,144 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.146e+02 1.399e+02 1.632e+02 2.193e+02 3.721e+02, threshold=3.263e+02, percent-clipped=0.0
+2024-08-27 04:19:58,562 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=229786.66666666666, ans=0.0
+2024-08-27 04:20:00,070 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.28 vs. limit=15.0
+2024-08-27 04:20:10,683 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=229840.0, ans=0.125
+2024-08-27 04:20:16,346 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.26 vs. limit=15.0
+2024-08-27 04:20:24,098 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=229893.33333333334, ans=0.125
+2024-08-27 04:20:27,826 INFO [train.py:1114] (1/4) Epoch 18, batch 800, loss[loss=0.181, simple_loss=0.2536, pruned_loss=0.03955, ctc_loss=0.07314, over 19409.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2605, pruned_loss=0.04061, ctc_loss=0.07588, over 3795416.46 frames. ], batch size: 48, lr: 8.37e-03, grad_scale: 32.0
+2024-08-27 04:20:56,574 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.97 vs. limit=10.0
+2024-08-27 04:21:05,008 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=230053.33333333334, ans=0.125
+2024-08-27 04:21:06,885 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.30 vs. limit=15.0
+2024-08-27 04:21:31,530 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=230160.0, ans=0.125
+2024-08-27 04:21:31,585 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=230160.0, ans=0.0
+2024-08-27 04:21:33,171 INFO [train.py:1114] (1/4) Epoch 18, batch 850, loss[loss=0.1857, simple_loss=0.2663, pruned_loss=0.03807, ctc_loss=0.07233, over 19648.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2606, pruned_loss=0.04058, ctc_loss=0.07596, over 3814604.01 frames. ], batch size: 59, lr: 8.36e-03, grad_scale: 32.0
+2024-08-27 04:21:34,431 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.24 vs. limit=22.5
+2024-08-27 04:21:42,610 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=230266.66666666666, ans=0.07
+2024-08-27 04:21:57,960 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.249e+02 1.452e+02 1.736e+02 2.395e+02 3.551e+02, threshold=3.472e+02, percent-clipped=2.0
+2024-08-27 04:22:05,146 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.29 vs. limit=15.0
+2024-08-27 04:22:18,482 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=230373.33333333334, ans=0.1
+2024-08-27 04:22:28,708 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=230426.66666666666, ans=0.125
+2024-08-27 04:22:30,022 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.04 vs. limit=15.0
+2024-08-27 04:22:31,228 INFO [train.py:1114] (1/4) Epoch 18, batch 900, loss[loss=0.1752, simple_loss=0.2427, pruned_loss=0.03854, ctc_loss=0.07642, over 19809.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2611, pruned_loss=0.04112, ctc_loss=0.07684, over 3819625.95 frames. ], batch size: 49, lr: 8.36e-03, grad_scale: 32.0
+2024-08-27 04:22:38,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=230480.0, ans=0.125
+2024-08-27 04:23:01,731 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.45 vs. limit=15.0
+2024-08-27 04:23:08,840 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=230693.33333333334, ans=0.0
+2024-08-27 04:23:11,612 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=230693.33333333334, ans=0.125
+2024-08-27 04:23:13,432 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=230693.33333333334, ans=0.2
+2024-08-27 04:23:17,852 INFO [train.py:1114] (1/4) Epoch 18, batch 950, loss[loss=0.1746, simple_loss=0.2474, pruned_loss=0.03717, ctc_loss=0.06862, over 19518.00 frames. ], tot_loss[loss=0.1868, simple_loss=0.2609, pruned_loss=0.04102, ctc_loss=0.07669, over 3819326.72 frames. ], batch size: 49, lr: 8.35e-03, grad_scale: 32.0
+2024-08-27 04:23:18,317 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.83 vs. limit=15.0
+2024-08-27 04:23:21,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=230746.66666666666, ans=0.0
+2024-08-27 04:23:22,543 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=230746.66666666666, ans=0.0
+2024-08-27 04:23:36,279 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.393e+02 1.674e+02 2.227e+02 4.492e+02, threshold=3.349e+02, percent-clipped=5.0
+2024-08-27 04:24:03,324 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=230906.66666666666, ans=10.0
+2024-08-27 04:24:04,553 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.84 vs. limit=15.0
+2024-08-27 04:24:09,111 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=230960.0, ans=0.5
+2024-08-27 04:24:09,515 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.67 vs. limit=22.5
+2024-08-27 04:24:15,458 INFO [train.py:1114] (1/4) Epoch 18, batch 1000, loss[loss=0.1642, simple_loss=0.2425, pruned_loss=0.03138, ctc_loss=0.05785, over 19849.00 frames. ], tot_loss[loss=0.187, simple_loss=0.2613, pruned_loss=0.04102, ctc_loss=0.07668, over 3815683.01 frames. ], batch size: 52, lr: 8.35e-03, grad_scale: 32.0
+2024-08-27 04:24:27,871 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231066.66666666666, ans=0.1
+2024-08-27 04:24:30,753 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=231066.66666666666, ans=0.1
+2024-08-27 04:24:33,479 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=231120.0, ans=0.0
+2024-08-27 04:24:39,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=231120.0, ans=0.125
+2024-08-27 04:24:51,732 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.55 vs. limit=12.0
+2024-08-27 04:24:57,125 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=231226.66666666666, ans=0.1
+2024-08-27 04:25:01,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=231226.66666666666, ans=0.125
+2024-08-27 04:25:11,444 INFO [train.py:1114] (1/4) Epoch 18, batch 1050, loss[loss=0.1873, simple_loss=0.2631, pruned_loss=0.04084, ctc_loss=0.07462, over 19852.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2606, pruned_loss=0.04085, ctc_loss=0.07631, over 3823270.16 frames. ], batch size: 57, lr: 8.34e-03, grad_scale: 32.0
+2024-08-27 04:25:11,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=231280.0, ans=0.2
+2024-08-27 04:25:18,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=231280.0, ans=0.125
+2024-08-27 04:25:25,222 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.118e+02 1.375e+02 1.549e+02 1.865e+02 3.480e+02, threshold=3.097e+02, percent-clipped=1.0
+2024-08-27 04:25:35,342 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=231386.66666666666, ans=0.025
+2024-08-27 04:25:37,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=231386.66666666666, ans=0.0
+2024-08-27 04:25:57,488 INFO [train.py:1114] (1/4) Epoch 18, batch 1100, loss[loss=0.1846, simple_loss=0.2592, pruned_loss=0.03952, ctc_loss=0.07737, over 19594.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.26, pruned_loss=0.04049, ctc_loss=0.07566, over 3830526.83 frames. ], batch size: 52, lr: 8.34e-03, grad_scale: 16.0
+2024-08-27 04:26:01,350 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=231546.66666666666, ans=0.0
+2024-08-27 04:26:03,761 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.29 vs. limit=6.0
+2024-08-27 04:26:16,848 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=231600.0, ans=0.125
+2024-08-27 04:26:20,956 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.51 vs. limit=10.0
+2024-08-27 04:26:23,793 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:26:48,672 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=231706.66666666666, ans=0.125
+2024-08-27 04:27:14,051 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.40 vs. limit=15.0
+2024-08-27 04:27:28,523 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.83 vs. limit=12.0
+2024-08-27 04:27:31,647 INFO [train.py:1114] (1/4) Epoch 18, batch 1150, loss[loss=0.1796, simple_loss=0.2578, pruned_loss=0.03755, ctc_loss=0.06559, over 19588.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2602, pruned_loss=0.04064, ctc_loss=0.07588, over 3830167.85 frames. ], batch size: 52, lr: 8.33e-03, grad_scale: 16.0
+2024-08-27 04:27:44,227 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=231866.66666666666, ans=0.2
+2024-08-27 04:27:45,263 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=231866.66666666666, ans=0.0
+2024-08-27 04:27:50,646 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.153e+02 1.426e+02 1.640e+02 2.078e+02 3.185e+02, threshold=3.280e+02, percent-clipped=3.0
+2024-08-27 04:28:05,808 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=231920.0, ans=0.2
+2024-08-27 04:28:27,033 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=232026.66666666666, ans=0.025
+2024-08-27 04:28:27,957 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:28:32,362 INFO [train.py:1114] (1/4) Epoch 18, batch 1200, loss[loss=0.1881, simple_loss=0.2722, pruned_loss=0.03766, ctc_loss=0.07181, over 19831.00 frames. ], tot_loss[loss=0.1871, simple_loss=0.2618, pruned_loss=0.04091, ctc_loss=0.07657, over 3825669.59 frames. ], batch size: 57, lr: 8.33e-03, grad_scale: 32.0
+2024-08-27 04:28:37,059 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=232080.0, ans=0.0
+2024-08-27 04:28:38,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232080.0, ans=0.1
+2024-08-27 04:28:45,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=232133.33333333334, ans=0.125
+2024-08-27 04:29:04,438 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232240.0, ans=0.1
+2024-08-27 04:29:07,215 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=232240.0, ans=0.0
+2024-08-27 04:29:07,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=232240.0, ans=0.125
+2024-08-27 04:29:08,133 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=232240.0, ans=0.025
+2024-08-27 04:29:18,069 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=232293.33333333334, ans=0.0
+2024-08-27 04:29:19,801 INFO [train.py:1114] (1/4) Epoch 18, batch 1250, loss[loss=0.2066, simple_loss=0.2834, pruned_loss=0.04705, ctc_loss=0.08935, over 19525.00 frames. ], tot_loss[loss=0.1874, simple_loss=0.262, pruned_loss=0.04101, ctc_loss=0.07675, over 3842781.75 frames. ], batch size: 61, lr: 8.32e-03, grad_scale: 32.0
+2024-08-27 04:29:27,343 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=232346.66666666666, ans=0.1
+2024-08-27 04:29:33,894 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=232400.0, ans=0.1
+2024-08-27 04:29:34,586 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.452e+02 1.815e+02 2.295e+02 4.200e+02, threshold=3.630e+02, percent-clipped=5.0
+2024-08-27 04:29:39,333 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=232453.33333333334, ans=0.125
+2024-08-27 04:29:51,094 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=232506.66666666666, ans=0.125
+2024-08-27 04:30:22,747 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=232506.66666666666, ans=0.05
+2024-08-27 04:30:43,404 INFO [train.py:1114] (1/4) Epoch 18, batch 1300, loss[loss=0.2056, simple_loss=0.28, pruned_loss=0.04803, ctc_loss=0.08797, over 18867.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2609, pruned_loss=0.0405, ctc_loss=0.07588, over 3846959.32 frames. ], batch size: 76, lr: 8.32e-03, grad_scale: 16.0
+2024-08-27 04:31:02,836 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=232666.66666666666, ans=0.2
+2024-08-27 04:31:09,280 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=232720.0, ans=0.0
+2024-08-27 04:31:13,231 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.31 vs. limit=15.0
+2024-08-27 04:31:18,560 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=232773.33333333334, ans=0.1
+2024-08-27 04:31:20,430 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=232773.33333333334, ans=0.0
+2024-08-27 04:31:26,128 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.75 vs. limit=15.0
+2024-08-27 04:31:29,668 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=232826.66666666666, ans=0.125
+2024-08-27 04:31:33,098 INFO [train.py:1114] (1/4) Epoch 18, batch 1350, loss[loss=0.1838, simple_loss=0.2614, pruned_loss=0.03857, ctc_loss=0.07276, over 19771.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2608, pruned_loss=0.0405, ctc_loss=0.07566, over 3856082.84 frames. ], batch size: 54, lr: 8.31e-03, grad_scale: 16.0
+2024-08-27 04:31:48,899 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.176e+02 1.387e+02 1.655e+02 2.106e+02 4.022e+02, threshold=3.310e+02, percent-clipped=4.0
+2024-08-27 04:32:17,933 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=233093.33333333334, ans=0.125
+2024-08-27 04:32:19,576 INFO [train.py:1114] (1/4) Epoch 18, batch 1400, loss[loss=0.1715, simple_loss=0.2319, pruned_loss=0.04048, ctc_loss=0.07535, over 19659.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2607, pruned_loss=0.04047, ctc_loss=0.07567, over 3863302.16 frames. ], batch size: 46, lr: 8.31e-03, grad_scale: 16.0
+2024-08-27 04:32:19,889 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.const_attention_rate, batch_count=233146.66666666666, ans=0.025
+2024-08-27 04:32:58,305 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=233253.33333333334, ans=0.1
+2024-08-27 04:33:20,363 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=233306.66666666666, ans=0.125
+2024-08-27 04:33:33,181 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=233360.0, ans=0.125
+2024-08-27 04:33:40,130 INFO [train.py:1114] (1/4) Epoch 18, batch 1450, loss[loss=0.1953, simple_loss=0.2737, pruned_loss=0.04223, ctc_loss=0.08092, over 19656.00 frames. ], tot_loss[loss=0.1861, simple_loss=0.2611, pruned_loss=0.04042, ctc_loss=0.07562, over 3861502.71 frames. ], batch size: 63, lr: 8.30e-03, grad_scale: 16.0
+2024-08-27 04:33:56,221 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=233413.33333333334, ans=0.0
+2024-08-27 04:34:13,118 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=233466.66666666666, ans=0.125
+2024-08-27 04:34:21,008 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.457e+02 1.713e+02 1.981e+02 3.848e+02, threshold=3.426e+02, percent-clipped=1.0
+2024-08-27 04:34:41,029 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.96 vs. limit=22.5
+2024-08-27 04:34:45,461 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=233573.33333333334, ans=0.125
+2024-08-27 04:35:28,123 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=233626.66666666666, ans=0.1
+2024-08-27 04:36:44,890 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=233626.66666666666, ans=0.125
+2024-08-27 04:36:59,905 INFO [train.py:1114] (1/4) Epoch 18, batch 1500, loss[loss=0.2036, simple_loss=0.2781, pruned_loss=0.04691, ctc_loss=0.08834, over 19574.00 frames. ], tot_loss[loss=0.1862, simple_loss=0.2614, pruned_loss=0.04041, ctc_loss=0.07565, over 3860727.16 frames. ], batch size: 57, lr: 8.30e-03, grad_scale: 16.0
+2024-08-27 04:37:05,968 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=233680.0, ans=0.125
+2024-08-27 04:37:38,021 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.74 vs. limit=15.0
+2024-08-27 04:37:45,144 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=233733.33333333334, ans=0.0
+2024-08-27 04:38:23,578 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.29 vs. limit=15.0
+2024-08-27 04:38:29,623 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=233840.0, ans=0.1
+2024-08-27 04:38:29,786 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=233840.0, ans=0.1
+2024-08-27 04:38:58,814 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=233893.33333333334, ans=0.1
+2024-08-27 04:39:00,338 INFO [train.py:1114] (1/4) Epoch 18, batch 1550, loss[loss=0.2094, simple_loss=0.2786, pruned_loss=0.05201, ctc_loss=0.09038, over 19621.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2614, pruned_loss=0.04077, ctc_loss=0.07619, over 3845155.05 frames. ], batch size: 60, lr: 8.30e-03, grad_scale: 16.0
+2024-08-27 04:39:35,298 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=234000.0, ans=0.025
+2024-08-27 04:39:36,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:39:36,269 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:39:39,113 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:39:39,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:39:49,441 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=234000.0, ans=0.125
+2024-08-27 04:39:51,778 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.195e+02 1.454e+02 1.713e+02 2.109e+02 3.815e+02, threshold=3.426e+02, percent-clipped=1.0
+2024-08-27 04:39:58,573 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=234053.33333333334, ans=0.125
+2024-08-27 04:40:27,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=234160.0, ans=0.125
+2024-08-27 04:40:35,439 INFO [train.py:1114] (1/4) Epoch 18, batch 1600, loss[loss=0.1922, simple_loss=0.2717, pruned_loss=0.04127, ctc_loss=0.07536, over 19841.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2611, pruned_loss=0.04086, ctc_loss=0.07632, over 3834361.49 frames. ], batch size: 57, lr: 8.29e-03, grad_scale: 32.0
+2024-08-27 04:40:59,620 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=234320.0, ans=0.1
+2024-08-27 04:41:02,785 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.59 vs. limit=22.5
+2024-08-27 04:41:21,970 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=234373.33333333334, ans=0.1
+2024-08-27 04:41:23,800 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=234373.33333333334, ans=0.025
+2024-08-27 04:41:44,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=234426.66666666666, ans=0.04949747468305833
+2024-08-27 04:41:53,483 INFO [train.py:1114] (1/4) Epoch 18, batch 1650, loss[loss=0.1874, simple_loss=0.2671, pruned_loss=0.03934, ctc_loss=0.07225, over 19662.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2608, pruned_loss=0.04099, ctc_loss=0.07638, over 3831307.67 frames. ], batch size: 59, lr: 8.29e-03, grad_scale: 32.0
+2024-08-27 04:41:59,089 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=234480.0, ans=0.07
+2024-08-27 04:42:16,284 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.209e+02 1.559e+02 1.894e+02 2.296e+02 3.896e+02, threshold=3.788e+02, percent-clipped=3.0
+2024-08-27 04:42:20,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.11 vs. limit=22.5
+2024-08-27 04:42:47,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=234640.0, ans=0.125
+2024-08-27 04:42:52,838 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=234693.33333333334, ans=0.125
+2024-08-27 04:43:00,751 INFO [train.py:1114] (1/4) Epoch 18, batch 1700, loss[loss=0.1797, simple_loss=0.2412, pruned_loss=0.04357, ctc_loss=0.07747, over 19665.00 frames. ], tot_loss[loss=0.1863, simple_loss=0.2608, pruned_loss=0.04073, ctc_loss=0.07591, over 3846215.94 frames. ], batch size: 46, lr: 8.28e-03, grad_scale: 32.0
+2024-08-27 04:43:14,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=234800.0, ans=0.125
+2024-08-27 04:43:14,731 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=234800.0, ans=0.0
+2024-08-27 04:43:15,869 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.35 vs. limit=10.0
+2024-08-27 04:43:21,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=234853.33333333334, ans=0.125
+2024-08-27 04:43:24,254 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=234853.33333333334, ans=0.04949747468305833
+2024-08-27 04:43:26,042 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=234853.33333333334, ans=0.125
+2024-08-27 04:43:33,244 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=234906.66666666666, ans=0.0
+2024-08-27 04:43:59,954 INFO [train.py:1114] (1/4) Epoch 18, batch 1750, loss[loss=0.1649, simple_loss=0.2327, pruned_loss=0.03542, ctc_loss=0.06589, over 19628.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2605, pruned_loss=0.04058, ctc_loss=0.0757, over 3851893.90 frames. ], batch size: 45, lr: 8.28e-03, grad_scale: 32.0
+2024-08-27 04:44:02,792 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=235013.33333333334, ans=0.0
+2024-08-27 04:44:10,732 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=235066.66666666666, ans=0.025
+2024-08-27 04:44:16,982 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.479e+02 1.670e+02 2.161e+02 3.908e+02, threshold=3.340e+02, percent-clipped=1.0
+2024-08-27 04:44:18,967 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=235120.0, ans=0.1
+2024-08-27 04:44:31,116 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=235173.33333333334, ans=0.0
+2024-08-27 04:44:50,512 INFO [train.py:1114] (1/4) Epoch 18, batch 1800, loss[loss=0.1868, simple_loss=0.2673, pruned_loss=0.0379, ctc_loss=0.07633, over 19601.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2607, pruned_loss=0.04043, ctc_loss=0.07533, over 3852585.51 frames. ], batch size: 55, lr: 8.27e-03, grad_scale: 16.0
+2024-08-27 04:44:55,025 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=235280.0, ans=0.025
+2024-08-27 04:44:57,803 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=235280.0, ans=0.025
+2024-08-27 04:44:58,883 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.90 vs. limit=22.5
+2024-08-27 04:45:01,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=235333.33333333334, ans=0.1
+2024-08-27 04:45:02,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=235333.33333333334, ans=0.2
+2024-08-27 04:45:09,854 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=235386.66666666666, ans=0.0
+2024-08-27 04:45:25,251 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=235386.66666666666, ans=0.1
+2024-08-27 04:45:48,225 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.76 vs. limit=15.0
+2024-08-27 04:45:48,631 INFO [train.py:1114] (1/4) Epoch 18, batch 1850, loss[loss=0.1801, simple_loss=0.2627, pruned_loss=0.03511, ctc_loss=0.06815, over 19574.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2609, pruned_loss=0.04043, ctc_loss=0.07535, over 3855922.66 frames. ], batch size: 57, lr: 8.27e-03, grad_scale: 8.0
+2024-08-27 04:45:48,863 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=235546.66666666666, ans=0.125
+2024-08-27 04:45:55,114 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.18 vs. limit=15.0
+2024-08-27 04:46:31,674 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=235600.0, ans=0.1
+2024-08-27 04:46:38,524 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.122e+02 1.500e+02 1.800e+02 2.247e+02 4.177e+02, threshold=3.601e+02, percent-clipped=3.0
+2024-08-27 04:46:47,134 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=235653.33333333334, ans=0.125
+2024-08-27 04:47:01,309 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=235760.0, ans=0.0
+2024-08-27 04:47:03,149 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=235760.0, ans=0.07
+2024-08-27 04:47:05,548 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=235813.33333333334, ans=0.035
+2024-08-27 04:47:06,322 INFO [train.py:1114] (1/4) Epoch 18, batch 1900, loss[loss=0.1915, simple_loss=0.2715, pruned_loss=0.03961, ctc_loss=0.08052, over 19667.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2613, pruned_loss=0.04058, ctc_loss=0.07567, over 3860504.63 frames. ], batch size: 59, lr: 8.26e-03, grad_scale: 8.0
+2024-08-27 04:47:09,262 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.39 vs. limit=15.0
+2024-08-27 04:47:16,298 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.13 vs. limit=22.5
+2024-08-27 04:47:42,501 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=235973.33333333334, ans=0.125
+2024-08-27 04:47:42,511 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=235973.33333333334, ans=0.1
+2024-08-27 04:47:53,603 INFO [train.py:1114] (1/4) Epoch 18, batch 1950, loss[loss=0.1778, simple_loss=0.2612, pruned_loss=0.03481, ctc_loss=0.0621, over 19581.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2624, pruned_loss=0.04086, ctc_loss=0.07612, over 3869594.56 frames. ], batch size: 52, lr: 8.26e-03, grad_scale: 8.0
+2024-08-27 04:48:12,685 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.245e+02 1.481e+02 1.697e+02 2.159e+02 5.555e+02, threshold=3.394e+02, percent-clipped=1.0
+2024-08-27 04:48:15,558 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=236186.66666666666, ans=0.0
+2024-08-27 04:48:27,139 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=236240.0, ans=0.125
+2024-08-27 04:48:50,298 INFO [train.py:1114] (1/4) Epoch 18, batch 2000, loss[loss=0.1688, simple_loss=0.2354, pruned_loss=0.03744, ctc_loss=0.06822, over 19639.00 frames. ], tot_loss[loss=0.1881, simple_loss=0.2631, pruned_loss=0.04116, ctc_loss=0.07679, over 3853760.36 frames. ], batch size: 45, lr: 8.25e-03, grad_scale: 8.0
+2024-08-27 04:48:51,370 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=236346.66666666666, ans=0.125
+2024-08-27 04:49:10,259 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=236400.0, ans=0.125
+2024-08-27 04:49:53,379 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=236453.33333333334, ans=0.025
+2024-08-27 04:50:00,896 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=236506.66666666666, ans=0.1
+2024-08-27 04:50:16,421 INFO [train.py:1114] (1/4) Epoch 18, batch 2050, loss[loss=0.1598, simple_loss=0.2318, pruned_loss=0.03164, ctc_loss=0.06135, over 19710.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2619, pruned_loss=0.04096, ctc_loss=0.07642, over 3849371.92 frames. ], batch size: 47, lr: 8.25e-03, grad_scale: 8.0
+2024-08-27 04:50:47,343 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.127e+02 1.468e+02 1.842e+02 2.423e+02 4.039e+02, threshold=3.684e+02, percent-clipped=4.0
+2024-08-27 04:51:00,801 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=236773.33333333334, ans=0.0
+2024-08-27 04:51:03,450 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.49 vs. limit=15.0
+2024-08-27 04:51:09,417 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=236826.66666666666, ans=0.0
+2024-08-27 04:51:11,178 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=236826.66666666666, ans=0.125
+2024-08-27 04:51:12,081 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=236826.66666666666, ans=0.0
+2024-08-27 04:51:12,216 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.29 vs. limit=15.0
+2024-08-27 04:51:12,849 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=236880.0, ans=0.125
+2024-08-27 04:51:13,547 INFO [train.py:1114] (1/4) Epoch 18, batch 2100, loss[loss=0.192, simple_loss=0.2669, pruned_loss=0.04314, ctc_loss=0.07705, over 19787.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2608, pruned_loss=0.04026, ctc_loss=0.07524, over 3857332.35 frames. ], batch size: 54, lr: 8.25e-03, grad_scale: 8.0
+2024-08-27 04:51:30,272 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.93 vs. limit=15.0
+2024-08-27 04:51:31,587 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:51:40,598 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.05 vs. limit=15.0
+2024-08-27 04:51:50,804 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=236986.66666666666, ans=0.125
+2024-08-27 04:51:51,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=236986.66666666666, ans=0.0
+2024-08-27 04:51:58,028 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=237040.0, ans=0.125
+2024-08-27 04:51:59,171 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.29 vs. limit=15.0
+2024-08-27 04:52:02,293 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=237040.0, ans=0.0
+2024-08-27 04:52:07,264 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=237093.33333333334, ans=0.2
+2024-08-27 04:52:09,085 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 04:52:11,797 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=237093.33333333334, ans=0.125
+2024-08-27 04:52:13,291 INFO [train.py:1114] (1/4) Epoch 18, batch 2150, loss[loss=0.1791, simple_loss=0.2578, pruned_loss=0.03655, ctc_loss=0.06826, over 19581.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2601, pruned_loss=0.04014, ctc_loss=0.07497, over 3867655.65 frames. ], batch size: 52, lr: 8.24e-03, grad_scale: 8.0
+2024-08-27 04:52:14,122 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=237146.66666666666, ans=0.125
+2024-08-27 04:52:20,929 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=237146.66666666666, ans=0.07
+2024-08-27 04:52:23,377 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=237200.0, ans=0.0
+2024-08-27 04:52:31,104 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.418e+02 1.667e+02 2.145e+02 4.483e+02, threshold=3.333e+02, percent-clipped=3.0
+2024-08-27 04:52:31,728 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=12.30 vs. limit=22.5
+2024-08-27 04:52:34,441 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.75 vs. limit=15.0
+2024-08-27 04:52:41,735 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=237306.66666666666, ans=0.125
+2024-08-27 04:52:42,565 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=237306.66666666666, ans=0.125
+2024-08-27 04:52:47,915 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=237360.0, ans=0.125
+2024-08-27 04:52:57,217 INFO [train.py:1114] (1/4) Epoch 18, batch 2200, loss[loss=0.1938, simple_loss=0.2727, pruned_loss=0.04177, ctc_loss=0.07861, over 19570.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2601, pruned_loss=0.04006, ctc_loss=0.07485, over 3867394.06 frames. ], batch size: 57, lr: 8.24e-03, grad_scale: 8.0
+2024-08-27 04:53:12,709 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=237466.66666666666, ans=0.0
+2024-08-27 04:53:16,177 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=237520.0, ans=0.125
+2024-08-27 04:53:28,697 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.46 vs. limit=10.0
+2024-08-27 04:53:44,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=237680.0, ans=0.2
+2024-08-27 04:53:45,567 INFO [train.py:1114] (1/4) Epoch 18, batch 2250, loss[loss=0.1989, simple_loss=0.2707, pruned_loss=0.04627, ctc_loss=0.08627, over 19609.00 frames. ], tot_loss[loss=0.1855, simple_loss=0.2605, pruned_loss=0.04024, ctc_loss=0.07513, over 3866849.03 frames. ], batch size: 55, lr: 8.23e-03, grad_scale: 8.0
+2024-08-27 04:57:16,413 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.60 vs. limit=15.0
+2024-08-27 04:58:07,208 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.445e+02 1.673e+02 2.181e+02 3.635e+02, threshold=3.347e+02, percent-clipped=1.0
+2024-08-27 05:00:01,296 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=237893.33333333334, ans=0.1
+2024-08-27 05:00:07,279 INFO [train.py:1114] (1/4) Epoch 18, batch 2300, loss[loss=0.1803, simple_loss=0.2571, pruned_loss=0.03774, ctc_loss=0.06986, over 19507.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2597, pruned_loss=0.0402, ctc_loss=0.07505, over 3861725.30 frames. ], batch size: 49, lr: 8.23e-03, grad_scale: 8.0
+2024-08-27 05:00:10,465 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=237946.66666666666, ans=0.025
+2024-08-27 05:05:01,202 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=238053.33333333334, ans=0.0
+2024-08-27 05:06:05,450 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=238160.0, ans=0.07
+2024-08-27 05:06:14,069 INFO [train.py:1114] (1/4) Epoch 18, batch 2350, loss[loss=0.2003, simple_loss=0.2779, pruned_loss=0.0447, ctc_loss=0.08317, over 19669.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2599, pruned_loss=0.04034, ctc_loss=0.07529, over 3864437.57 frames. ], batch size: 63, lr: 8.22e-03, grad_scale: 8.0
+2024-08-27 05:06:15,474 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=238213.33333333334, ans=10.0
+2024-08-27 05:08:17,559 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.50 vs. limit=22.5
+2024-08-27 05:09:45,142 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.152e+02 1.379e+02 1.605e+02 2.102e+02 3.614e+02, threshold=3.209e+02, percent-clipped=2.0
+2024-08-27 05:10:04,775 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=238320.0, ans=0.07
+2024-08-27 05:10:23,143 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=238373.33333333334, ans=0.125
+2024-08-27 05:11:08,693 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=238426.66666666666, ans=0.95
+2024-08-27 05:11:24,997 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=238426.66666666666, ans=0.0
+2024-08-27 05:11:54,342 INFO [train.py:1114] (1/4) Epoch 18, batch 2400, loss[loss=0.1898, simple_loss=0.2641, pruned_loss=0.04213, ctc_loss=0.07792, over 19258.00 frames. ], tot_loss[loss=0.1872, simple_loss=0.2618, pruned_loss=0.04098, ctc_loss=0.07654, over 3859107.89 frames. ], batch size: 71, lr: 8.22e-03, grad_scale: 16.0
+2024-08-27 05:13:07,727 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.13 vs. limit=22.5
+2024-08-27 05:13:24,228 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=238586.66666666666, ans=0.0
+2024-08-27 05:13:43,087 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=238586.66666666666, ans=0.125
+2024-08-27 05:14:36,116 INFO [train.py:1114] (1/4) Epoch 18, batch 2450, loss[loss=0.2318, simple_loss=0.2845, pruned_loss=0.06462, ctc_loss=0.1248, over 12966.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2657, pruned_loss=0.04347, ctc_loss=0.08155, over 3729718.85 frames. ], batch size: 141, lr: 8.21e-03, grad_scale: 16.0
+2024-08-27 05:14:44,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=238800.0, ans=0.0
+2024-08-27 05:14:44,862 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.88 vs. limit=10.0
+2024-08-27 05:15:11,772 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.08 vs. limit=15.0
+2024-08-27 05:15:19,821 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.338e+02 1.631e+02 1.872e+02 2.220e+02 3.951e+02, threshold=3.743e+02, percent-clipped=5.0
+2024-08-27 05:15:29,764 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=238853.33333333334, ans=0.0
+2024-08-27 05:15:31,998 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=238853.33333333334, ans=0.125
+2024-08-27 05:15:32,142 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=238853.33333333334, ans=0.1
+2024-08-27 05:19:02,747 INFO [train.py:1114] (1/4) Epoch 19, batch 0, loss[loss=0.1767, simple_loss=0.248, pruned_loss=0.03835, ctc_loss=0.07156, over 19417.00 frames. ], tot_loss[loss=0.1767, simple_loss=0.248, pruned_loss=0.03835, ctc_loss=0.07156, over 19417.00 frames. ], batch size: 48, lr: 7.99e-03, grad_scale: 32.0
+2024-08-27 05:19:02,748 INFO [train.py:1137] (1/4) Computing validation loss
+2024-08-27 05:19:44,311 INFO [zipformer.py:1858] (1/4) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([3.9009, 3.3176, 3.8041, 3.7220], device='cuda:1')
+2024-08-27 05:20:05,937 INFO [train.py:1146] (1/4) Epoch 19, validation: loss=0.1709, simple_loss=0.2636, pruned_loss=0.02933, ctc_loss=0.04896, over 944034.00 frames.
+2024-08-27 05:20:05,939 INFO [train.py:1147] (1/4) Maximum memory allocated so far is 12945MB
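+
+[log note] The validation block above also dumps, via zipformer.py, the entropy of one layer's attention-weight distributions (four values, presumably one per head): near-uniform attention over a long context gives a high value, a collapsed head a value near zero, so it serves as a quick health check on the heads. A minimal sketch of the statistic, assuming the weights are already softmax-normalized over the source axis:
+
+    import torch
+
+    def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
+        # attn: (num_heads, tgt_len, src_len), rows summing to 1 over the
+        # last axis. Returns the mean entropy (in nats) per head.
+        return -(attn * (attn + 1e-20).log()).sum(dim=-1).mean(dim=-1)
+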
+2024-08-27 05:20:07,986 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=238954.66666666666, ans=10.0
+2024-08-27 05:20:09,876 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=238954.66666666666, ans=0.0
+2024-08-27 05:20:23,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=239061.33333333334, ans=0.1
+2024-08-27 05:20:27,301 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.63 vs. limit=15.0
+2024-08-27 05:21:15,032 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:21:28,837 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=239114.66666666666, ans=0.125
+2024-08-27 05:21:28,972 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.21 vs. limit=22.5
+2024-08-27 05:21:32,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=239114.66666666666, ans=0.125
+2024-08-27 05:22:55,134 INFO [train.py:1114] (1/4) Epoch 19, batch 50, loss[loss=0.1651, simple_loss=0.2366, pruned_loss=0.03335, ctc_loss=0.06743, over 19694.00 frames. ], tot_loss[loss=0.1884, simple_loss=0.2621, pruned_loss=0.04168, ctc_loss=0.07858, over 843816.80 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-27 05:22:55,275 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=239221.33333333334, ans=0.1
+2024-08-27 05:23:20,755 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=239221.33333333334, ans=0.0
+2024-08-27 05:23:20,851 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:23:21,186 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=9.69 vs. limit=22.5
+2024-08-27 05:23:24,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=239221.33333333334, ans=0.1
+2024-08-27 05:23:24,700 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=239221.33333333334, ans=0.1
+2024-08-27 05:23:33,294 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=239274.66666666666, ans=0.125
+2024-08-27 05:23:56,393 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=239328.0, ans=0.0
+2024-08-27 05:23:59,567 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.492e+02 1.734e+02 2.135e+02 3.431e+02, threshold=3.468e+02, percent-clipped=0.0
+2024-08-27 05:23:59,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=239381.33333333334, ans=0.125
+2024-08-27 05:24:00,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=239381.33333333334, ans=0.0
+2024-08-27 05:24:08,748 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=239381.33333333334, ans=0.125
+2024-08-27 05:24:16,943 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=239434.66666666666, ans=0.2
+2024-08-27 05:24:19,427 INFO [train.py:1114] (1/4) Epoch 19, batch 100, loss[loss=0.1616, simple_loss=0.2359, pruned_loss=0.03153, ctc_loss=0.06082, over 19715.00 frames. ], tot_loss[loss=0.1879, simple_loss=0.2626, pruned_loss=0.04111, ctc_loss=0.07727, over 1498509.86 frames. ], batch size: 51, lr: 7.98e-03, grad_scale: 32.0
+2024-08-27 05:24:19,643 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=239488.0, ans=0.125
+2024-08-27 05:24:24,269 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:24:28,046 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=239541.33333333334, ans=0.125
+2024-08-27 05:24:28,862 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=239541.33333333334, ans=0.0
+2024-08-27 05:24:30,889 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.72 vs. limit=22.5
+2024-08-27 05:24:39,897 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=239594.66666666666, ans=0.07
+2024-08-27 05:25:47,354 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=239648.0, ans=0.1
+2024-08-27 05:25:48,985 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=239648.0, ans=0.125
+2024-08-27 05:26:04,440 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=239754.66666666666, ans=0.1
+2024-08-27 05:26:05,121 INFO [train.py:1114] (1/4) Epoch 19, batch 150, loss[loss=0.1553, simple_loss=0.2303, pruned_loss=0.0297, ctc_loss=0.05245, over 19722.00 frames. ], tot_loss[loss=0.1859, simple_loss=0.2609, pruned_loss=0.04033, ctc_loss=0.0755, over 2027222.49 frames. ], batch size: 47, lr: 7.98e-03, grad_scale: 32.0
+2024-08-27 05:26:27,820 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.92 vs. limit=22.5
+2024-08-27 05:26:28,468 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=239754.66666666666, ans=0.125
+2024-08-27 05:26:38,970 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:27:19,957 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=239914.66666666666, ans=0.1
+2024-08-27 05:27:20,689 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.059e+02 1.500e+02 1.966e+02 2.497e+02 3.604e+02, threshold=3.932e+02, percent-clipped=3.0
+2024-08-27 05:27:20,925 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=239914.66666666666, ans=0.0
+2024-08-27 05:27:33,799 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=239914.66666666666, ans=0.125
+2024-08-27 05:27:48,357 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=239968.0, ans=0.125
+2024-08-27 05:28:02,391 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=239968.0, ans=0.0
+2024-08-27 05:28:10,144 INFO [train.py:1114] (1/4) Epoch 19, batch 200, loss[loss=0.2, simple_loss=0.2754, pruned_loss=0.04517, ctc_loss=0.08544, over 18110.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2599, pruned_loss=0.03987, ctc_loss=0.07445, over 2435690.74 frames. ], batch size: 85, lr: 7.97e-03, grad_scale: 32.0
+2024-08-27 05:28:11,337 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=240021.33333333334, ans=0.2
+2024-08-27 05:28:15,802 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=240021.33333333334, ans=0.125
+2024-08-27 05:28:30,800 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.07 vs. limit=10.0
+2024-08-27 05:29:18,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=240181.33333333334, ans=0.0
+2024-08-27 05:29:28,875 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=240234.66666666666, ans=0.125
+2024-08-27 05:29:34,216 INFO [train.py:1114] (1/4) Epoch 19, batch 250, loss[loss=0.1992, simple_loss=0.2738, pruned_loss=0.04522, ctc_loss=0.0853, over 19370.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2596, pruned_loss=0.03955, ctc_loss=0.07407, over 2755982.86 frames. ], batch size: 67, lr: 7.97e-03, grad_scale: 32.0
+2024-08-27 05:29:40,860 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=240288.0, ans=0.125
+2024-08-27 05:29:49,480 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.50 vs. limit=10.0
+2024-08-27 05:30:02,560 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.191e+02 1.446e+02 1.683e+02 2.499e+02 4.574e+02, threshold=3.367e+02, percent-clipped=7.0
+2024-08-27 05:30:09,310 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=240448.0, ans=0.125
+2024-08-27 05:30:10,187 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=240448.0, ans=0.2
+2024-08-27 05:30:11,075 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240448.0, ans=0.1
+2024-08-27 05:30:13,790 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=240501.33333333334, ans=0.125
+2024-08-27 05:30:15,599 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=240501.33333333334, ans=0.125
+2024-08-27 05:30:22,806 INFO [train.py:1114] (1/4) Epoch 19, batch 300, loss[loss=0.2165, simple_loss=0.2865, pruned_loss=0.05358, ctc_loss=0.09816, over 19524.00 frames. ], tot_loss[loss=0.1838, simple_loss=0.2591, pruned_loss=0.03949, ctc_loss=0.0738, over 3000158.22 frames. ], batch size: 61, lr: 7.96e-03, grad_scale: 32.0
+2024-08-27 05:30:29,297 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=240554.66666666666, ans=0.125
+2024-08-27 05:30:43,195 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240661.33333333334, ans=0.1
+2024-08-27 05:30:54,879 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=240714.66666666666, ans=0.1
+2024-08-27 05:30:55,825 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=240714.66666666666, ans=0.0
+2024-08-27 05:31:02,337 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.30 vs. limit=15.0
+2024-08-27 05:31:09,980 INFO [train.py:1114] (1/4) Epoch 19, batch 350, loss[loss=0.1633, simple_loss=0.2358, pruned_loss=0.03241, ctc_loss=0.06489, over 19750.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2598, pruned_loss=0.03967, ctc_loss=0.07412, over 3190941.52 frames. ], batch size: 48, lr: 7.96e-03, grad_scale: 32.0
+2024-08-27 05:31:19,729 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.04 vs. limit=15.0
+2024-08-27 05:31:36,772 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.91 vs. limit=22.5
+2024-08-27 05:31:39,936 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.453e+02 1.753e+02 2.405e+02 3.677e+02, threshold=3.507e+02, percent-clipped=2.0
+2024-08-27 05:31:40,250 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=240981.33333333334, ans=0.125
+2024-08-27 05:31:41,107 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=240981.33333333334, ans=0.125
+2024-08-27 05:31:50,241 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=241034.66666666666, ans=0.0
+2024-08-27 05:31:51,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=241034.66666666666, ans=0.125
+2024-08-27 05:31:57,316 INFO [train.py:1114] (1/4) Epoch 19, batch 400, loss[loss=0.1771, simple_loss=0.2609, pruned_loss=0.03339, ctc_loss=0.06637, over 19516.00 frames. ], tot_loss[loss=0.184, simple_loss=0.2594, pruned_loss=0.03953, ctc_loss=0.07393, over 3342063.45 frames. ], batch size: 54, lr: 7.95e-03, grad_scale: 32.0
+2024-08-27 05:32:27,288 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:32:51,167 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=241301.33333333334, ans=0.0
+2024-08-27 05:33:00,037 INFO [train.py:1114] (1/4) Epoch 19, batch 450, loss[loss=0.2003, simple_loss=0.2845, pruned_loss=0.04158, ctc_loss=0.0821, over 19611.00 frames. ], tot_loss[loss=0.1852, simple_loss=0.2603, pruned_loss=0.04006, ctc_loss=0.07484, over 3450262.93 frames. ], batch size: 55, lr: 7.95e-03, grad_scale: 32.0
+2024-08-27 05:33:07,524 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=241354.66666666666, ans=0.125
+2024-08-27 05:33:09,358 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=241354.66666666666, ans=0.125
+2024-08-27 05:33:15,651 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=241408.0, ans=0.1
+2024-08-27 05:33:16,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=241408.0, ans=0.125
+2024-08-27 05:33:16,595 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=241408.0, ans=0.125
+2024-08-27 05:33:19,651 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=6.10 vs. limit=15.0
+2024-08-27 05:33:22,865 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=241461.33333333334, ans=0.125
+2024-08-27 05:33:23,842 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=241461.33333333334, ans=0.125
+2024-08-27 05:33:30,920 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.397e+02 1.631e+02 2.046e+02 3.175e+02, threshold=3.262e+02, percent-clipped=0.0
+2024-08-27 05:33:32,994 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=241514.66666666666, ans=0.1
+2024-08-27 05:33:39,225 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=241568.0, ans=0.2
+2024-08-27 05:33:49,280 INFO [train.py:1114] (1/4) Epoch 19, batch 500, loss[loss=0.1956, simple_loss=0.2702, pruned_loss=0.04423, ctc_loss=0.08139, over 19638.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2595, pruned_loss=0.03966, ctc_loss=0.07414, over 3545831.45 frames. ], batch size: 63, lr: 7.95e-03, grad_scale: 32.0
+2024-08-27 05:33:57,255 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.66 vs. limit=15.0
+2024-08-27 05:34:04,579 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.62 vs. limit=15.0
+2024-08-27 05:34:07,191 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=241674.66666666666, ans=0.05
+2024-08-27 05:34:10,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=241728.0, ans=0.125
+2024-08-27 05:34:11,182 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=10.10 vs. limit=10.0
+2024-08-27 05:34:18,104 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=241781.33333333334, ans=0.0
+2024-08-27 05:34:21,131 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.00 vs. limit=15.0
+2024-08-27 05:34:24,750 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=241781.33333333334, ans=0.125
+2024-08-27 05:34:30,984 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-27 05:34:31,065 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-27 05:34:31,766 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-27 05:34:37,346 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=241834.66666666666, ans=0.125
+2024-08-27 05:34:39,019 INFO [train.py:1114] (1/4) Epoch 19, batch 550, loss[loss=0.2263, simple_loss=0.2927, pruned_loss=0.05874, ctc_loss=0.1061, over 19283.00 frames. ], tot_loss[loss=0.185, simple_loss=0.2599, pruned_loss=0.04014, ctc_loss=0.07482, over 3608551.17 frames. ], batch size: 71, lr: 7.94e-03, grad_scale: 32.0
+2024-08-27 05:34:59,602 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=241994.66666666666, ans=0.2
+2024-08-27 05:34:59,621 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=241994.66666666666, ans=0.0
+2024-08-27 05:35:03,768 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.45 vs. limit=6.0
+2024-08-27 05:35:09,354 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.148e+02 1.385e+02 1.667e+02 1.980e+02 3.512e+02, threshold=3.334e+02, percent-clipped=2.0
+2024-08-27 05:35:13,268 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=242048.0, ans=0.5
+2024-08-27 05:35:16,063 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=242048.0, ans=0.07
+2024-08-27 05:35:18,701 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=242101.33333333334, ans=0.0
+2024-08-27 05:35:21,648 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=242101.33333333334, ans=0.1
+2024-08-27 05:35:27,105 INFO [train.py:1114] (1/4) Epoch 19, batch 600, loss[loss=0.213, simple_loss=0.292, pruned_loss=0.04848, ctc_loss=0.09245, over 19427.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2601, pruned_loss=0.04014, ctc_loss=0.07469, over 3666336.13 frames. ], batch size: 67, lr: 7.94e-03, grad_scale: 32.0
+2024-08-27 05:35:33,883 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=242154.66666666666, ans=0.0
+2024-08-27 05:35:39,657 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=4.58 vs. limit=15.0
+2024-08-27 05:35:40,093 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=242208.0, ans=0.125
+2024-08-27 05:35:54,895 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=242261.33333333334, ans=0.125
+2024-08-27 05:35:57,775 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.36 vs. limit=15.0
+2024-08-27 05:36:09,562 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=242314.66666666666, ans=0.0
+2024-08-27 05:36:14,308 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=242368.0, ans=0.2
+2024-08-27 05:36:15,190 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=242368.0, ans=0.2
+2024-08-27 05:36:18,225 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.78 vs. limit=22.5
+2024-08-27 05:36:23,052 INFO [train.py:1114] (1/4) Epoch 19, batch 650, loss[loss=0.1912, simple_loss=0.2711, pruned_loss=0.04014, ctc_loss=0.07745, over 19773.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2595, pruned_loss=0.03994, ctc_loss=0.07437, over 3717066.29 frames. ], batch size: 54, lr: 7.93e-03, grad_scale: 32.0
+2024-08-27 05:36:28,724 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=242421.33333333334, ans=0.0
+2024-08-27 05:36:28,781 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=242421.33333333334, ans=0.125
+2024-08-27 05:36:38,637 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=242474.66666666666, ans=0.025
+2024-08-27 05:36:46,075 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:36:53,254 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.055e+02 1.470e+02 1.907e+02 2.471e+02 4.129e+02, threshold=3.814e+02, percent-clipped=9.0
+2024-08-27 05:36:55,276 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=242581.33333333334, ans=0.125
+2024-08-27 05:36:56,284 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=242581.33333333334, ans=0.125
+2024-08-27 05:37:01,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=242634.66666666666, ans=0.125
+2024-08-27 05:37:03,601 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=242634.66666666666, ans=0.125
+2024-08-27 05:37:31,526 INFO [scaling.py:1120] (1/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-27 05:37:33,298 INFO [train.py:1114] (1/4) Epoch 19, batch 700, loss[loss=0.1793, simple_loss=0.2503, pruned_loss=0.03897, ctc_loss=0.07591, over 19732.00 frames. ], tot_loss[loss=0.1853, simple_loss=0.2603, pruned_loss=0.04019, ctc_loss=0.07492, over 3749165.96 frames. ], batch size: 51, lr: 7.93e-03, grad_scale: 32.0
+2024-08-27 05:37:43,507 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=242741.33333333334, ans=0.07
+2024-08-27 05:37:56,382 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=242794.66666666666, ans=0.125
+2024-08-27 05:38:02,743 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=242848.0, ans=0.0
+2024-08-27 05:38:17,716 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=242901.33333333334, ans=0.2
+2024-08-27 05:38:23,030 INFO [train.py:1114] (1/4) Epoch 19, batch 750, loss[loss=0.1849, simple_loss=0.2667, pruned_loss=0.038, ctc_loss=0.06795, over 19487.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2598, pruned_loss=0.03999, ctc_loss=0.0747, over 3774464.38 frames. ], batch size: 54, lr: 7.92e-03, grad_scale: 32.0
+2024-08-27 05:38:32,538 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=8.24 vs. limit=15.0
+2024-08-27 05:38:45,993 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=243061.33333333334, ans=0.0
+2024-08-27 05:38:51,402 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.489e+02 1.823e+02 2.314e+02 3.772e+02, threshold=3.647e+02, percent-clipped=0.0
+2024-08-27 05:38:58,146 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=243114.66666666666, ans=0.0
+2024-08-27 05:39:11,754 INFO [train.py:1114] (1/4) Epoch 19, batch 800, loss[loss=0.1569, simple_loss=0.2316, pruned_loss=0.0305, ctc_loss=0.05289, over 19841.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2595, pruned_loss=0.03995, ctc_loss=0.0746, over 3796520.43 frames. ], batch size: 49, lr: 7.92e-03, grad_scale: 32.0
+2024-08-27 05:39:19,141 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=243221.33333333334, ans=0.2
+2024-08-27 05:39:58,029 INFO [train.py:1114] (1/4) Epoch 19, batch 850, loss[loss=0.1994, simple_loss=0.2759, pruned_loss=0.045, ctc_loss=0.08212, over 19650.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2592, pruned_loss=0.03987, ctc_loss=0.07447, over 3814948.74 frames. ], batch size: 59, lr: 7.92e-03, grad_scale: 32.0
+2024-08-27 05:40:04,667 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=243488.0, ans=0.025
+2024-08-27 05:40:04,963 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.67 vs. limit=15.0
+2024-08-27 05:40:10,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=243541.33333333334, ans=0.0
+2024-08-27 05:40:17,606 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=243594.66666666666, ans=0.0
+2024-08-27 05:40:25,247 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=243594.66666666666, ans=0.2
+2024-08-27 05:40:28,701 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.388e+02 1.609e+02 2.074e+02 4.897e+02, threshold=3.218e+02, percent-clipped=1.0
+2024-08-27 05:40:34,662 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=243648.0, ans=0.125
+2024-08-27 05:40:35,970 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=6.73 vs. limit=15.0
+2024-08-27 05:40:51,626 INFO [train.py:1114] (1/4) Epoch 19, batch 900, loss[loss=0.162, simple_loss=0.2366, pruned_loss=0.0325, ctc_loss=0.05594, over 19823.00 frames. ], tot_loss[loss=0.1845, simple_loss=0.2593, pruned_loss=0.03994, ctc_loss=0.07445, over 3819183.23 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-27 05:41:21,071 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=243808.0, ans=0.025
+2024-08-27 05:41:47,172 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=243861.33333333334, ans=0.1
+2024-08-27 05:45:50,347 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=243968.0, ans=0.125
+2024-08-27 05:46:16,460 INFO [train.py:1114] (1/4) Epoch 19, batch 950, loss[loss=0.1614, simple_loss=0.2411, pruned_loss=0.02919, ctc_loss=0.05808, over 19491.00 frames. ], tot_loss[loss=0.1846, simple_loss=0.2595, pruned_loss=0.04, ctc_loss=0.07454, over 3820498.17 frames. ], batch size: 49, lr: 7.91e-03, grad_scale: 32.0
+2024-08-27 05:46:35,572 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=244074.66666666666, ans=0.1
+2024-08-27 05:46:39,555 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.whiten.whitening_limit, batch_count=244074.66666666666, ans=12.0
+2024-08-27 05:46:50,917 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=244074.66666666666, ans=0.2
+2024-08-27 05:46:57,905 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=244128.0, ans=0.125
+2024-08-27 05:47:05,276 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.185e+02 1.465e+02 1.729e+02 2.037e+02 3.385e+02, threshold=3.459e+02, percent-clipped=1.0
+2024-08-27 05:47:06,488 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=244181.33333333334, ans=0.0
+2024-08-27 05:47:24,477 INFO [train.py:1114] (1/4) Epoch 19, batch 1000, loss[loss=0.1651, simple_loss=0.2413, pruned_loss=0.03151, ctc_loss=0.06477, over 19867.00 frames. ], tot_loss[loss=0.1857, simple_loss=0.2604, pruned_loss=0.0404, ctc_loss=0.07532, over 3816829.39 frames. ], batch size: 52, lr: 7.90e-03, grad_scale: 32.0
+2024-08-27 05:47:26,640 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=244288.0, ans=0.0
+2024-08-27 05:47:33,096 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=244341.33333333334, ans=0.1
+2024-08-27 05:47:33,197 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=244341.33333333334, ans=0.125
+2024-08-27 05:47:55,348 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=244448.0, ans=0.125
+2024-08-27 05:48:00,058 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=244448.0, ans=0.125
+2024-08-27 05:48:12,810 INFO [train.py:1114] (1/4) Epoch 19, batch 1050, loss[loss=0.1821, simple_loss=0.2633, pruned_loss=0.03661, ctc_loss=0.06956, over 19850.00 frames. ], tot_loss[loss=0.1849, simple_loss=0.2595, pruned_loss=0.04015, ctc_loss=0.07499, over 3822541.22 frames. ], batch size: 57, lr: 7.90e-03, grad_scale: 32.0
+2024-08-27 05:48:13,974 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=244554.66666666666, ans=0.0
+2024-08-27 05:48:23,329 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.13 vs. limit=22.5
+2024-08-27 05:48:42,881 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.407e+02 1.559e+02 1.901e+02 2.565e+02, threshold=3.118e+02, percent-clipped=0.0
+2024-08-27 05:48:51,782 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.28 vs. limit=15.0
+2024-08-27 05:49:01,678 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=244821.33333333334, ans=0.2
+2024-08-27 05:49:02,416 INFO [train.py:1114] (1/4) Epoch 19, batch 1100, loss[loss=0.1792, simple_loss=0.2538, pruned_loss=0.03824, ctc_loss=0.07008, over 19592.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2593, pruned_loss=0.03989, ctc_loss=0.07443, over 3830857.76 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-27 05:49:04,640 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.97 vs. limit=12.0
+2024-08-27 05:49:39,795 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=244981.33333333334, ans=0.125
+2024-08-27 05:49:51,779 INFO [train.py:1114] (1/4) Epoch 19, batch 1150, loss[loss=0.1707, simple_loss=0.2541, pruned_loss=0.03195, ctc_loss=0.05838, over 19598.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2592, pruned_loss=0.03963, ctc_loss=0.07412, over 3830026.08 frames. ], batch size: 52, lr: 7.89e-03, grad_scale: 32.0
+2024-08-27 05:49:53,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=245088.0, ans=0.0
+2024-08-27 05:51:12,231 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=245141.33333333334, ans=0.0
+2024-08-27 05:52:27,615 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.437e+02 1.648e+02 2.100e+02 3.411e+02, threshold=3.296e+02, percent-clipped=3.0
+2024-08-27 05:52:32,320 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=245248.0, ans=0.1
+2024-08-27 05:52:47,006 INFO [train.py:1114] (1/4) Epoch 19, batch 1200, loss[loss=0.1987, simple_loss=0.2734, pruned_loss=0.04526, ctc_loss=0.08359, over 19840.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2601, pruned_loss=0.03985, ctc_loss=0.07457, over 3824926.60 frames. ], batch size: 57, lr: 7.89e-03, grad_scale: 32.0
+2024-08-27 05:53:09,074 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=245461.33333333334, ans=0.0
+2024-08-27 05:53:14,611 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=245461.33333333334, ans=0.0
+2024-08-27 05:53:14,655 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=245461.33333333334, ans=10.0
+2024-08-27 05:53:18,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=245514.66666666666, ans=0.0
+2024-08-27 05:53:34,959 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.25 vs. limit=10.0
+2024-08-27 05:53:35,240 INFO [train.py:1114] (1/4) Epoch 19, batch 1250, loss[loss=0.1986, simple_loss=0.2734, pruned_loss=0.04488, ctc_loss=0.08499, over 19547.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2608, pruned_loss=0.03998, ctc_loss=0.07477, over 3843041.14 frames. ], batch size: 61, lr: 7.88e-03, grad_scale: 32.0
+2024-08-27 05:53:51,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=245674.66666666666, ans=0.125
+2024-08-27 05:53:54,961 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=245728.0, ans=0.1
+2024-08-27 05:54:05,855 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.471e+02 1.735e+02 2.173e+02 3.319e+02, threshold=3.470e+02, percent-clipped=1.0
+2024-08-27 05:54:19,361 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=245834.66666666666, ans=0.125
+2024-08-27 05:54:26,195 INFO [train.py:1114] (1/4) Epoch 19, batch 1300, loss[loss=0.2091, simple_loss=0.2791, pruned_loss=0.05029, ctc_loss=0.09621, over 18776.00 frames. ], tot_loss[loss=0.1844, simple_loss=0.2601, pruned_loss=0.03957, ctc_loss=0.07399, over 3846494.21 frames. ], batch size: 76, lr: 7.88e-03, grad_scale: 32.0
+2024-08-27 05:54:26,427 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=245888.0, ans=0.1
+2024-08-27 05:54:36,021 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=245941.33333333334, ans=0.125
+2024-08-27 05:54:39,099 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.93 vs. limit=15.0
+2024-08-27 05:54:47,111 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.77 vs. limit=15.0
+2024-08-27 05:54:52,692 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=245994.66666666666, ans=0.125
+2024-08-27 05:55:05,844 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=246101.33333333334, ans=0.1
+2024-08-27 05:55:13,893 INFO [train.py:1114] (1/4) Epoch 19, batch 1350, loss[loss=0.1647, simple_loss=0.245, pruned_loss=0.03108, ctc_loss=0.05566, over 19783.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2594, pruned_loss=0.03924, ctc_loss=0.07331, over 3856904.11 frames. ], batch size: 54, lr: 7.87e-03, grad_scale: 16.0
+2024-08-27 05:55:29,264 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.40 vs. limit=15.0
+2024-08-27 05:55:42,922 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=246314.66666666666, ans=0.2
+2024-08-27 05:55:45,497 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.133e+02 1.414e+02 1.634e+02 2.144e+02 3.359e+02, threshold=3.268e+02, percent-clipped=0.0
+2024-08-27 05:56:02,662 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.75 vs. limit=12.0
+2024-08-27 05:56:03,131 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=246421.33333333334, ans=0.0
+2024-08-27 05:56:03,864 INFO [train.py:1114] (1/4) Epoch 19, batch 1400, loss[loss=0.1435, simple_loss=0.2172, pruned_loss=0.0251, ctc_loss=0.0489, over 19689.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2589, pruned_loss=0.03911, ctc_loss=0.07298, over 3864018.00 frames. ], batch size: 46, lr: 7.87e-03, grad_scale: 16.0
+2024-08-27 05:56:11,532 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=246421.33333333334, ans=0.125
+2024-08-27 05:56:15,208 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=246474.66666666666, ans=0.1
+2024-08-27 05:56:22,972 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.56 vs. limit=10.0
+2024-08-27 05:56:33,121 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=246581.33333333334, ans=0.125
+2024-08-27 05:56:42,126 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=246634.66666666666, ans=0.2
+2024-08-27 05:56:53,086 INFO [train.py:1114] (1/4) Epoch 19, batch 1450, loss[loss=0.1951, simple_loss=0.2699, pruned_loss=0.0439, ctc_loss=0.08096, over 19664.00 frames. ], tot_loss[loss=0.1836, simple_loss=0.2595, pruned_loss=0.03924, ctc_loss=0.07339, over 3863145.24 frames. ], batch size: 63, lr: 7.87e-03, grad_scale: 16.0
+2024-08-27 05:56:55,635 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=246688.0, ans=0.2
+2024-08-27 05:57:07,566 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=246741.33333333334, ans=0.0
+2024-08-27 05:57:08,521 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=246741.33333333334, ans=0.125
+2024-08-27 05:57:25,427 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.422e+02 1.608e+02 1.963e+02 3.546e+02, threshold=3.216e+02, percent-clipped=4.0
+2024-08-27 05:57:35,341 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.53 vs. limit=15.0
+2024-08-27 05:57:42,305 INFO [train.py:1114] (1/4) Epoch 19, batch 1500, loss[loss=0.1988, simple_loss=0.274, pruned_loss=0.0451, ctc_loss=0.08363, over 19575.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.2596, pruned_loss=0.03909, ctc_loss=0.07321, over 3862569.89 frames. ], batch size: 57, lr: 7.86e-03, grad_scale: 16.0
+2024-08-27 05:57:42,607 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=246954.66666666666, ans=0.0
+2024-08-27 05:58:48,132 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=247114.66666666666, ans=0.5
+2024-08-27 05:58:52,798 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=247168.0, ans=0.0
+2024-08-27 05:58:58,206 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=247168.0, ans=0.1
+2024-08-27 05:59:00,945 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=247221.33333333334, ans=0.125
+2024-08-27 05:59:01,745 INFO [train.py:1114] (1/4) Epoch 19, batch 1550, loss[loss=0.2112, simple_loss=0.2783, pruned_loss=0.05219, ctc_loss=0.09906, over 19583.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2597, pruned_loss=0.03942, ctc_loss=0.07387, over 3847411.22 frames. ], batch size: 60, lr: 7.86e-03, grad_scale: 16.0
+2024-08-27 05:59:03,057 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=247221.33333333334, ans=0.0
+2024-08-27 05:59:06,969 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.03 vs. limit=15.0
+2024-08-27 05:59:39,712 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.73 vs. limit=6.0
+2024-08-27 05:59:43,877 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.416e+02 1.634e+02 2.007e+02 4.215e+02, threshold=3.267e+02, percent-clipped=2.0
+2024-08-27 05:59:52,656 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=247434.66666666666, ans=0.0
+2024-08-27 06:00:02,731 INFO [train.py:1114] (1/4) Epoch 19, batch 1600, loss[loss=0.1846, simple_loss=0.2701, pruned_loss=0.03613, ctc_loss=0.06736, over 19843.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2596, pruned_loss=0.03956, ctc_loss=0.07423, over 3836078.74 frames. ], batch size: 57, lr: 7.85e-03, grad_scale: 32.0
+2024-08-27 06:00:07,720 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=247488.0, ans=0.1
+2024-08-27 06:00:20,167 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.94 vs. limit=22.5
+2024-08-27 06:00:26,514 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=247594.66666666666, ans=0.0
+2024-08-27 06:00:29,975 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=247594.66666666666, ans=0.125
+2024-08-27 06:00:45,570 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=247701.33333333334, ans=0.125
+2024-08-27 06:00:47,353 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=247701.33333333334, ans=0.2
+2024-08-27 06:00:47,484 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=247701.33333333334, ans=0.0
+2024-08-27 06:00:51,744 INFO [train.py:1114] (1/4) Epoch 19, batch 1650, loss[loss=0.1831, simple_loss=0.2687, pruned_loss=0.03558, ctc_loss=0.06585, over 19660.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2594, pruned_loss=0.03959, ctc_loss=0.07415, over 3832558.49 frames. ], batch size: 59, lr: 7.85e-03, grad_scale: 32.0
+2024-08-27 06:01:21,526 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.147e+02 1.539e+02 1.985e+02 2.467e+02 4.637e+02, threshold=3.969e+02, percent-clipped=10.0
+2024-08-27 06:01:21,864 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=247914.66666666666, ans=0.125
+2024-08-27 06:01:23,684 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=247914.66666666666, ans=0.025
+2024-08-27 06:01:39,978 INFO [train.py:1114] (1/4) Epoch 19, batch 1700, loss[loss=0.1644, simple_loss=0.2325, pruned_loss=0.03514, ctc_loss=0.06511, over 19679.00 frames. ], tot_loss[loss=0.1834, simple_loss=0.2591, pruned_loss=0.03913, ctc_loss=0.07347, over 3846785.53 frames. ], batch size: 46, lr: 7.84e-03, grad_scale: 32.0
+2024-08-27 06:01:49,243 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=248074.66666666666, ans=0.2
+2024-08-27 06:01:57,835 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=248128.0, ans=0.1
+2024-08-27 06:02:23,933 INFO [train.py:1114] (1/4) Epoch 19, batch 1750, loss[loss=0.1738, simple_loss=0.2372, pruned_loss=0.03995, ctc_loss=0.07625, over 19659.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2589, pruned_loss=0.03911, ctc_loss=0.07339, over 3852102.13 frames. ], batch size: 45, lr: 7.84e-03, grad_scale: 32.0
+2024-08-27 06:02:31,189 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=248288.0, ans=0.125
+2024-08-27 06:02:38,160 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=248341.33333333334, ans=0.125
+2024-08-27 06:02:46,886 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=248394.66666666666, ans=0.125
+2024-08-27 06:02:57,020 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.179e+02 1.492e+02 1.808e+02 2.313e+02 3.735e+02, threshold=3.616e+02, percent-clipped=0.0
+2024-08-27 06:02:58,043 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=248448.0, ans=0.0
+2024-08-27 06:03:09,265 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=248501.33333333334, ans=0.2
+2024-08-27 06:03:18,749 INFO [train.py:1114] (1/4) Epoch 19, batch 1800, loss[loss=0.184, simple_loss=0.2659, pruned_loss=0.03734, ctc_loss=0.06864, over 19617.00 frames. ], tot_loss[loss=0.1833, simple_loss=0.2589, pruned_loss=0.03915, ctc_loss=0.07344, over 3853513.81 frames. ], batch size: 55, lr: 7.84e-03, grad_scale: 16.0
+2024-08-27 06:03:25,468 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.29 vs. limit=15.0
+2024-08-27 06:04:02,719 INFO [train.py:1114] (1/4) Epoch 19, batch 1850, loss[loss=0.2017, simple_loss=0.2797, pruned_loss=0.04516, ctc_loss=0.08326, over 19581.00 frames. ], tot_loss[loss=0.1835, simple_loss=0.2592, pruned_loss=0.03924, ctc_loss=0.07351, over 3856194.78 frames. ], batch size: 57, lr: 7.83e-03, grad_scale: 16.0
+2024-08-27 06:04:02,870 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=248821.33333333334, ans=0.2
+2024-08-27 06:04:25,991 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=248928.0, ans=0.09899494936611666
+2024-08-27 06:04:32,247 INFO [scaling.py:1024] (1/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.32 vs. limit=15.0
+2024-08-27 06:04:32,744 WARNING [optim.py:487] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.120e+02 1.498e+02 2.037e+02 3.063e+02 6.275e+02, threshold=4.074e+02, percent-clipped=13.0
+2024-08-27 06:04:34,761 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=248981.33333333334, ans=0.1
+2024-08-27 06:04:46,101 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=249034.66666666666, ans=0.125
+2024-08-27 06:04:47,723 INFO [train.py:1114] (1/4) Epoch 19, batch 1900, loss[loss=0.1751, simple_loss=0.2584, pruned_loss=0.03333, ctc_loss=0.06278, over 19643.00 frames. ], tot_loss[loss=0.1842, simple_loss=0.2599, pruned_loss=0.03951, ctc_loss=0.07383, over 3860254.22 frames. ], batch size: 59, lr: 7.83e-03, grad_scale: 16.0
+2024-08-27 06:04:49,696 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=249088.0, ans=0.025
+2024-08-27 06:05:00,128 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=249141.33333333334, ans=0.0
+2024-08-27 06:05:06,051 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=249194.66666666666, ans=0.0
+2024-08-27 06:05:53,928 INFO [scaling.py:214] (1/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=249301.33333333334, ans=0.2
+2024-08-27 06:05:58,956 INFO [train.py:1114] (1/4) Epoch 19, batch 1950, loss[loss=0.1726, simple_loss=0.2519, pruned_loss=0.03352, ctc_loss=0.06567, over 19609.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2612, pruned_loss=0.0397, ctc_loss=0.07427, over 3869695.05 frames. ], batch size: 52, lr: 7.82e-03, grad_scale: 16.0
+2024-08-27 06:16:15,092 INFO [train.py:1050] (1/4) Caught exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=471644, OpType=ALLREDUCE, NumelIn=745, NumelOut=745, Timeout(ms)=600000) ran for 600000 milliseconds before timing out..
+2024-08-27 06:16:15,094 INFO [checkpoint.py:75] (1/4) Saving checkpoint to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/bad-model-1.pt
+2024-08-27 06:16:22,126 INFO [train.py:1413] (1/4) Saving batch to /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/batch-277582f0-93f5-0c2c-488e-44f94ecc6c7f.pt
+2024-08-27 06:16:22,170 INFO [train.py:1419] (1/4) features shape: torch.Size([50, 1582, 80])
+2024-08-27 06:16:22,172 INFO [train.py:1423] (1/4) num tokens: 4046
diff --git a/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-03-2 b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-03-2
new file mode 100644
index 0000000000000000000000000000000000000000..083b0b6bd960faebe4e487f492dcca369c0e07ab
--- /dev/null
+++ b/zipformer/pretrained/ctc/non_causal/exp/log/log-train-2024-08-26-14-14-03-2
@@ -0,0 +1,5473 @@
+2024-08-26 14:14:06,049 INFO [train.py:1182] (2/4) Training started
+2024-08-26 14:14:09,228 INFO [train.py:1192] (2/4) Device: cuda:2
+2024-08-26 14:14:11,783 INFO [train.py:1210] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'ignore_id': -1, 'label_smoothing': 0.1, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'ff1d435a8d3c4eaa15828a84a7240678a70539a7', 'k2-git-date': 'Fri Feb 23 01:48:38 2024', 'lhotse-version': '1.25.0.dev+git.012532f.clean', 'torch-version': '2.2.1', 'torch-cuda-available': True, 'torch-cuda-version': '12.1', 'python-version': '3.10', 'icefall-git-branch': 'master', 'icefall-git-sha1': '201257e-dirty', 'icefall-git-date': 'Tue Aug 20 00:02:11 2024', 'icefall-path': '/workspace/icefall', 'k2-path': '/opt/conda/lib/python3.10/site-packages/k2/__init__.py', 'lhotse-path': '/opt/conda/lib/python3.10/site-packages/lhotse/__init__.py', 'hostname': 'cdr2652.int.cedar.computecanada.ca', 'IP address': '172.16.146.89'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 4, 'start_batch': 0, 'exp_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp'), 'bpe_model': '/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/lang_bpe_500/bpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 3.5, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'ctc_loss_scale': 0.2, 'attention_decoder_loss_scale': 0.8, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'attention_decoder_dim': 512, 'attention_decoder_num_layers': 6, 'attention_decoder_attention_dim': 512, 'attention_decoder_num_heads': 8, 'attention_decoder_feedforward_dim': 2048, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'use_attention_decoder': False, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('/home/liqihan/scratch/git/icefall/egs/librispeech/ASR/data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': False, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'sos_id': 1, 'eos_id': 1, 'vocab_size': 500}
+2024-08-26 14:14:11,784 INFO [train.py:1212] (2/4) About to create model
+2024-08-26 14:14:12,458 INFO [train.py:1216] (2/4) Number of model parameters: 65805511
+2024-08-26 14:14:12,459 INFO [checkpoint.py:112] (2/4) Loading checkpoint from /home/liqihan/scratch/git/icefall/egs/librispeech/ASR/zipformer/ctc/exp/epoch-3.pt
+2024-08-26 14:14:19,979 INFO [train.py:1231] (2/4) Using DDP
+2024-08-26 14:14:24,082 INFO [train.py:1243] (2/4) Loading optimizer state dict
+2024-08-26 14:14:24,276 INFO [train.py:1251] (2/4) Loading scheduler state dict
+2024-08-26 14:14:24,276 INFO [asr_datamodule.py:894] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:696] (2/4) Disable MUSAN
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:714] (2/4) Enable SpecAugment
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:715] (2/4) Time warp factor: 80
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:725] (2/4) Num frame mask: 10
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:738] (2/4) About to create train dataset
+2024-08-26 14:14:27,296 INFO [asr_datamodule.py:765] (2/4) Using DynamicBucketingSampler.
+2024-08-26 14:14:28,855 INFO [asr_datamodule.py:782] (2/4) About to create train dataloader
+2024-08-26 14:14:28,856 INFO [asr_datamodule.py:911] (2/4) About to get dev-clean cuts
+2024-08-26 14:14:31,125 INFO [asr_datamodule.py:918] (2/4) About to get dev-other cuts
+2024-08-26 14:14:32,027 INFO [asr_datamodule.py:814] (2/4) About to create dev dataset
+2024-08-26 14:14:32,332 INFO [asr_datamodule.py:831] (2/4) About to create dev dataloader
+2024-08-26 14:14:32,333 INFO [train.py:1435] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
+2024-08-26 14:18:38,883 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=512, metric=3.11 vs. limit=7.5
+2024-08-26 14:18:40,630 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12109MB
+2024-08-26 14:18:41,872 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12109MB
+2024-08-26 14:18:49,642 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12109MB
+2024-08-26 14:18:50,842 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12109MB
+2024-08-26 14:19:04,366 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=15.55 vs. limit=7.5
+2024-08-26 14:19:04,870 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12109MB
+2024-08-26 14:19:05,660 INFO [scaling.py:1024] (2/4) Whitening: name=None, num_groups=1, num_channels=384, metric=14.04 vs. limit=7.5
+2024-08-26 14:19:06,160 INFO [train.py:1463] (2/4) Maximum memory allocated so far is 12109MB
+2024-08-26 14:19:06,179 INFO [train.py:1344] (2/4) Loading grad scaler state dict
+2024-08-26 14:19:49,550 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.12 vs. limit=15.0
+2024-08-26 14:19:52,359 INFO [train.py:1114] (2/4) Epoch 4, batch 0, loss[loss=0.2774, simple_loss=0.3163, pruned_loss=0.08761, ctc_loss=0.1582, over 19424.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3163, pruned_loss=0.08761, ctc_loss=0.1582, over 19424.00 frames. ], batch size: 48, lr: 3.30e-02, grad_scale: 32.0
+2024-08-26 14:19:52,359 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 14:20:25,782 INFO [train.py:1146] (2/4) Epoch 4, validation: loss=0.2421, simple_loss=0.3218, pruned_loss=0.05945, ctc_loss=0.1086, over 944034.00 frames.
+2024-08-26 14:20:25,783 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12216MB
+2024-08-26 14:22:00,889 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=39936.0, ans=0.125
+2024-08-26 14:22:01,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=39936.0, ans=0.125
+2024-08-26 14:22:21,367 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=39989.333333333336, ans=0.125
+2024-08-26 14:22:42,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=39989.333333333336, ans=0.125
+2024-08-26 14:22:48,269 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.53 vs. limit=22.5
+2024-08-26 14:23:04,537 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.845e+02 2.126e+02 2.642e+02 4.004e+02, threshold=4.252e+02, percent-clipped=0.0
+2024-08-26 14:23:20,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=40042.666666666664, ans=0.1
+2024-08-26 14:23:26,406 INFO [train.py:1114] (2/4) Epoch 4, batch 50, loss[loss=0.2577, simple_loss=0.2973, pruned_loss=0.07975, ctc_loss=0.1468, over 19707.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3364, pruned_loss=0.09795, ctc_loss=0.183, over 844557.52 frames. ], batch size: 47, lr: 3.30e-02, grad_scale: 32.0
+2024-08-26 14:23:41,696 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:23:59,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=40149.333333333336, ans=0.0
+2024-08-26 14:24:08,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=40149.333333333336, ans=0.125
+2024-08-26 14:24:52,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=40256.0, ans=0.125
+2024-08-26 14:25:11,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=40256.0, ans=0.0
+2024-08-26 14:25:30,069 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=40309.333333333336, ans=0.125
+2024-08-26 14:25:33,108 INFO [train.py:1114] (2/4) Epoch 4, batch 100, loss[loss=0.2827, simple_loss=0.3253, pruned_loss=0.08744, ctc_loss=0.163, over 19712.00 frames. ], tot_loss[loss=0.3, simple_loss=0.3356, pruned_loss=0.09612, ctc_loss=0.1802, over 1498880.38 frames. ], batch size: 51, lr: 3.29e-02, grad_scale: 32.0
+2024-08-26 14:25:35,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=40362.666666666664, ans=0.125
+2024-08-26 14:25:37,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40362.666666666664, ans=0.1
+2024-08-26 14:25:58,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=40416.0, ans=0.125
+2024-08-26 14:26:03,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=40416.0, ans=0.0020834782608695653
+2024-08-26 14:26:31,059 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=15.52 vs. limit=15.0
+2024-08-26 14:26:35,005 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.77 vs. limit=22.5
+2024-08-26 14:26:40,724 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 1.662e+02 1.906e+02 2.226e+02 3.245e+02, threshold=3.812e+02, percent-clipped=0.0
+2024-08-26 14:26:41,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=40576.0, ans=0.0
+2024-08-26 14:26:48,533 INFO [train.py:1114] (2/4) Epoch 4, batch 150, loss[loss=0.2754, simple_loss=0.3013, pruned_loss=0.09238, ctc_loss=0.1616, over 19732.00 frames. ], tot_loss[loss=0.295, simple_loss=0.3316, pruned_loss=0.09405, ctc_loss=0.1759, over 2028245.15 frames. ], batch size: 47, lr: 3.28e-02, grad_scale: 32.0
+2024-08-26 14:26:49,800 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:26:54,201 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.41 vs. limit=12.0
+2024-08-26 14:27:38,555 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=40736.0, ans=0.5
+2024-08-26 14:27:41,565 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=40736.0, ans=0.2
+2024-08-26 14:27:52,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=40789.333333333336, ans=0.0
+2024-08-26 14:28:04,924 INFO [train.py:1114] (2/4) Epoch 4, batch 200, loss[loss=0.3447, simple_loss=0.3668, pruned_loss=0.1183, ctc_loss=0.2149, over 18389.00 frames. ], tot_loss[loss=0.2924, simple_loss=0.3295, pruned_loss=0.0929, ctc_loss=0.1739, over 2436426.00 frames. ], batch size: 86, lr: 3.28e-02, grad_scale: 32.0
+2024-08-26 14:28:07,106 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:28:08,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=40896.0, ans=0.00197913043478261
+2024-08-26 14:28:10,976 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.61 vs. limit=15.0
+2024-08-26 14:28:16,510 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.14 vs. limit=22.5
+2024-08-26 14:28:39,478 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=41056.0, ans=0.125
+2024-08-26 14:28:49,778 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.824e+02 2.102e+02 2.533e+02 3.992e+02, threshold=4.203e+02, percent-clipped=3.0
+2024-08-26 14:28:55,766 INFO [train.py:1114] (2/4) Epoch 4, batch 250, loss[loss=0.2885, simple_loss=0.3306, pruned_loss=0.08993, ctc_loss=0.1663, over 19404.00 frames. ], tot_loss[loss=0.2904, simple_loss=0.3286, pruned_loss=0.09171, ctc_loss=0.1719, over 2756072.04 frames. ], batch size: 67, lr: 3.27e-02, grad_scale: 32.0
+2024-08-26 14:29:07,101 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.65 vs. limit=15.0
+2024-08-26 14:29:07,112 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.70 vs. limit=22.5
+2024-08-26 14:29:09,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=41216.0, ans=0.0019095652173913048
+2024-08-26 14:29:11,776 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.67 vs. limit=15.0
+2024-08-26 14:29:22,091 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=41269.333333333336, ans=0.125
+2024-08-26 14:29:38,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=41376.0, ans=0.025
+2024-08-26 14:29:46,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=41429.333333333336, ans=0.125
+2024-08-26 14:29:46,779 INFO [train.py:1114] (2/4) Epoch 4, batch 300, loss[loss=0.3234, simple_loss=0.3615, pruned_loss=0.1047, ctc_loss=0.1899, over 19523.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.3284, pruned_loss=0.09172, ctc_loss=0.1718, over 3001487.19 frames. ], batch size: 61, lr: 3.27e-02, grad_scale: 32.0
+2024-08-26 14:30:07,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=41536.0, ans=0.125
+2024-08-26 14:30:32,090 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 1.674e+02 1.880e+02 2.161e+02 3.950e+02, threshold=3.761e+02, percent-clipped=0.0
+2024-08-26 14:30:32,507 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=41642.666666666664, ans=0.125
+2024-08-26 14:30:37,854 INFO [train.py:1114] (2/4) Epoch 4, batch 350, loss[loss=0.2616, simple_loss=0.3019, pruned_loss=0.08109, ctc_loss=0.1478, over 19751.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3289, pruned_loss=0.09177, ctc_loss=0.1717, over 3191000.54 frames. ], batch size: 48, lr: 3.26e-02, grad_scale: 32.0
+2024-08-26 14:30:42,013 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=41696.0, ans=0.5
+2024-08-26 14:30:49,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=41749.333333333336, ans=0.0
+2024-08-26 14:31:04,529 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.07 vs. limit=22.5
+2024-08-26 14:31:10,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=41856.0, ans=0.125
+2024-08-26 14:31:34,672 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:31:35,386 INFO [train.py:1114] (2/4) Epoch 4, batch 400, loss[loss=0.2696, simple_loss=0.3227, pruned_loss=0.07871, ctc_loss=0.1473, over 19489.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3278, pruned_loss=0.09106, ctc_loss=0.1705, over 3342031.38 frames. ], batch size: 54, lr: 3.26e-02, grad_scale: 32.0
+2024-08-26 14:31:49,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=42016.0, ans=0.1
+2024-08-26 14:31:49,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=42016.0, ans=0.125
+2024-08-26 14:31:55,544 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=42069.333333333336, ans=0.125
+2024-08-26 14:32:03,204 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=42122.666666666664, ans=0.125
+2024-08-26 14:32:11,151 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.48 vs. limit=22.5
+2024-08-26 14:32:11,783 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=42122.666666666664, ans=0.0
+2024-08-26 14:32:19,256 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 1.828e+02 2.157e+02 2.598e+02 8.551e+02, threshold=4.314e+02, percent-clipped=2.0
+2024-08-26 14:32:23,144 INFO [train.py:1114] (2/4) Epoch 4, batch 450, loss[loss=0.2845, simple_loss=0.3305, pruned_loss=0.08639, ctc_loss=0.1646, over 19613.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.3279, pruned_loss=0.09109, ctc_loss=0.1706, over 3449509.33 frames. ], batch size: 55, lr: 3.25e-02, grad_scale: 8.0
+2024-08-26 14:32:37,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=42282.666666666664, ans=0.125
+2024-08-26 14:32:37,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=42282.666666666664, ans=0.0
+2024-08-26 14:32:38,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=42282.666666666664, ans=0.0
+2024-08-26 14:32:40,892 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=42282.666666666664, ans=0.1
+2024-08-26 14:32:51,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=42336.0, ans=0.07
+2024-08-26 14:32:58,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42389.333333333336, ans=0.1
+2024-08-26 14:33:00,495 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.44 vs. limit=15.0
+2024-08-26 14:33:05,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=42442.666666666664, ans=0.05
+2024-08-26 14:33:10,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=42442.666666666664, ans=0.025
+2024-08-26 14:33:14,184 INFO [train.py:1114] (2/4) Epoch 4, batch 500, loss[loss=0.32, simple_loss=0.3539, pruned_loss=0.1031, ctc_loss=0.1999, over 19651.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3261, pruned_loss=0.09002, ctc_loss=0.169, over 3545956.63 frames. ], batch size: 63, lr: 3.25e-02, grad_scale: 8.0
+2024-08-26 14:33:55,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=42656.0, ans=0.0
+2024-08-26 14:34:05,703 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.84 vs. limit=22.5
+2024-08-26 14:34:07,927 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.676e+02 1.857e+02 2.171e+02 5.331e+02, threshold=3.714e+02, percent-clipped=2.0
+2024-08-26 14:34:11,017 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=42762.666666666664, ans=0.07
+2024-08-26 14:34:11,746 INFO [train.py:1114] (2/4) Epoch 4, batch 550, loss[loss=0.3036, simple_loss=0.3439, pruned_loss=0.09598, ctc_loss=0.1785, over 19266.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3264, pruned_loss=0.09027, ctc_loss=0.1691, over 3607040.34 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 8.0
+2024-08-26 14:34:20,682 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=42816.0, ans=0.015
+2024-08-26 14:34:22,662 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=42816.0, ans=0.125
+2024-08-26 14:34:26,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=42816.0, ans=0.0
+2024-08-26 14:34:34,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=42869.333333333336, ans=0.125
+2024-08-26 14:34:38,795 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=9.39 vs. limit=15.0
+2024-08-26 14:35:02,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=43029.333333333336, ans=0.00151536231884058
+2024-08-26 14:35:03,328 INFO [train.py:1114] (2/4) Epoch 4, batch 600, loss[loss=0.2917, simple_loss=0.3371, pruned_loss=0.08824, ctc_loss=0.1743, over 19372.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3266, pruned_loss=0.09026, ctc_loss=0.169, over 3664404.59 frames. ], batch size: 67, lr: 3.24e-02, grad_scale: 8.0
+2024-08-26 14:35:13,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=43082.666666666664, ans=0.125
+2024-08-26 14:35:24,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=43136.0, ans=0.125
+2024-08-26 14:35:31,478 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=43136.0, ans=0.125
+2024-08-26 14:35:41,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=43189.333333333336, ans=0.07
+2024-08-26 14:35:42,901 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=43189.333333333336, ans=0.125
+2024-08-26 14:35:50,399 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 1.699e+02 1.953e+02 2.270e+02 5.390e+02, threshold=3.906e+02, percent-clipped=1.0
+2024-08-26 14:35:54,192 INFO [train.py:1114] (2/4) Epoch 4, batch 650, loss[loss=0.2682, simple_loss=0.3152, pruned_loss=0.08114, ctc_loss=0.1475, over 19765.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3257, pruned_loss=0.08992, ctc_loss=0.1681, over 3714588.22 frames. ], batch size: 54, lr: 3.23e-02, grad_scale: 8.0
+2024-08-26 14:36:07,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=43349.333333333336, ans=0.0
+2024-08-26 14:36:10,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=43349.333333333336, ans=0.0
+2024-08-26 14:36:12,169 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.25 vs. limit=6.0
+2024-08-26 14:36:37,657 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=43509.333333333336, ans=0.125
+2024-08-26 14:36:39,799 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.53 vs. limit=22.5
+2024-08-26 14:36:45,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=43509.333333333336, ans=0.1
+2024-08-26 14:36:47,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=43562.666666666664, ans=0.125
+2024-08-26 14:36:48,328 INFO [train.py:1114] (2/4) Epoch 4, batch 700, loss[loss=0.2722, simple_loss=0.3122, pruned_loss=0.08519, ctc_loss=0.1546, over 19721.00 frames. ], tot_loss[loss=0.2868, simple_loss=0.3264, pruned_loss=0.08996, ctc_loss=0.1682, over 3746358.54 frames. ], batch size: 51, lr: 3.22e-02, grad_scale: 8.0
+2024-08-26 14:37:16,007 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.50 vs. limit=22.5
+2024-08-26 14:37:17,627 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=43722.666666666664, ans=0.125
+2024-08-26 14:37:32,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=43776.0, ans=0.04949747468305833
+2024-08-26 14:37:36,039 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.717e+02 1.974e+02 2.287e+02 3.794e+02, threshold=3.948e+02, percent-clipped=0.0
+2024-08-26 14:37:36,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=43776.0, ans=0.00135304347826087
+2024-08-26 14:37:37,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=43776.0, ans=0.2
+2024-08-26 14:37:39,960 INFO [train.py:1114] (2/4) Epoch 4, batch 750, loss[loss=0.2522, simple_loss=0.3095, pruned_loss=0.07073, ctc_loss=0.1335, over 19518.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3256, pruned_loss=0.08945, ctc_loss=0.1669, over 3773031.37 frames. ], batch size: 54, lr: 3.22e-02, grad_scale: 8.0
+2024-08-26 14:37:53,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=43882.666666666664, ans=0.125
+2024-08-26 14:38:04,575 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=43936.0, ans=0.5
+2024-08-26 14:38:16,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=43989.333333333336, ans=0.05
+2024-08-26 14:38:24,335 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=44042.666666666664, ans=0.2
+2024-08-26 14:38:31,794 INFO [train.py:1114] (2/4) Epoch 4, batch 800, loss[loss=0.2249, simple_loss=0.278, pruned_loss=0.06311, ctc_loss=0.1139, over 19794.00 frames. ], tot_loss[loss=0.285, simple_loss=0.3251, pruned_loss=0.0892, ctc_loss=0.1663, over 3794584.01 frames. ], batch size: 49, lr: 3.21e-02, grad_scale: 16.0
+2024-08-26 14:38:42,079 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=4.04 vs. limit=12.0
+2024-08-26 14:38:48,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=44149.333333333336, ans=0.0012718840579710143
+2024-08-26 14:38:53,339 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=44202.666666666664, ans=0.0012602898550724637
+2024-08-26 14:38:56,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=44202.666666666664, ans=0.125
+2024-08-26 14:39:07,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=44256.0, ans=0.015
+2024-08-26 14:39:16,263 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.706e+02 1.876e+02 2.197e+02 5.470e+02, threshold=3.751e+02, percent-clipped=2.0
+2024-08-26 14:39:19,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=44309.333333333336, ans=0.125
+2024-08-26 14:39:21,090 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=44309.333333333336, ans=0.125
+2024-08-26 14:39:22,913 INFO [train.py:1114] (2/4) Epoch 4, batch 850, loss[loss=0.2984, simple_loss=0.3384, pruned_loss=0.09253, ctc_loss=0.183, over 19663.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3248, pruned_loss=0.08905, ctc_loss=0.1659, over 3814262.28 frames. ], batch size: 59, lr: 3.21e-02, grad_scale: 16.0
+2024-08-26 14:39:36,473 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=44416.0, ans=0.0012139130434782597
+2024-08-26 14:39:46,527 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.10 vs. limit=22.5
+2024-08-26 14:39:50,156 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=44469.333333333336, ans=0.0
+2024-08-26 14:39:53,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=44522.666666666664, ans=0.0011907246376811603
+2024-08-26 14:40:01,868 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=44576.0, ans=0.0011791304347826097
+2024-08-26 14:40:03,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=44576.0, ans=0.125
+2024-08-26 14:40:11,454 INFO [train.py:1114] (2/4) Epoch 4, batch 900, loss[loss=0.2829, simple_loss=0.3121, pruned_loss=0.0929, ctc_loss=0.1699, over 19806.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3254, pruned_loss=0.08956, ctc_loss=0.167, over 3817793.98 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 16.0
+2024-08-26 14:40:15,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=44629.333333333336, ans=0.0011675362318840574
+2024-08-26 14:40:27,052 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:40:27,867 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=44682.666666666664, ans=0.025
+2024-08-26 14:40:28,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=44682.666666666664, ans=0.025
+2024-08-26 14:40:28,798 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=44682.666666666664, ans=0.125
+2024-08-26 14:40:28,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=44682.666666666664, ans=0.125
+2024-08-26 14:40:37,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=44736.0, ans=0.1
+2024-08-26 14:40:59,419 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.686e+02 1.871e+02 2.157e+02 4.639e+02, threshold=3.742e+02, percent-clipped=1.0
+2024-08-26 14:41:03,414 INFO [train.py:1114] (2/4) Epoch 4, batch 950, loss[loss=0.2617, simple_loss=0.3051, pruned_loss=0.0801, ctc_loss=0.1453, over 19494.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3256, pruned_loss=0.08963, ctc_loss=0.1675, over 3819918.60 frames. ], batch size: 49, lr: 3.20e-02, grad_scale: 16.0
+2024-08-26 14:41:03,977 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.55 vs. limit=15.0
+2024-08-26 14:41:07,947 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.58 vs. limit=15.0
+2024-08-26 14:41:54,729 INFO [train.py:1114] (2/4) Epoch 4, batch 1000, loss[loss=0.2526, simple_loss=0.3039, pruned_loss=0.07229, ctc_loss=0.1419, over 19849.00 frames. ], tot_loss[loss=0.2865, simple_loss=0.3262, pruned_loss=0.08982, ctc_loss=0.1677, over 3814838.08 frames. ], batch size: 52, lr: 3.19e-02, grad_scale: 16.0
+2024-08-26 14:42:14,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=45216.0, ans=0.0
+2024-08-26 14:42:16,008 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.22 vs. limit=22.5
+2024-08-26 14:42:16,824 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.00 vs. limit=15.0
+2024-08-26 14:42:42,492 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.702e+02 1.844e+02 2.187e+02 3.225e+02, threshold=3.689e+02, percent-clipped=0.0
+2024-08-26 14:42:46,505 INFO [train.py:1114] (2/4) Epoch 4, batch 1050, loss[loss=0.2782, simple_loss=0.3321, pruned_loss=0.08121, ctc_loss=0.1548, over 19836.00 frames. ], tot_loss[loss=0.285, simple_loss=0.325, pruned_loss=0.08913, ctc_loss=0.1666, over 3822005.55 frames. ], batch size: 57, lr: 3.19e-02, grad_scale: 16.0
+2024-08-26 14:42:55,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=45482.666666666664, ans=0.125
+2024-08-26 14:42:57,312 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=45482.666666666664, ans=0.025
+2024-08-26 14:43:10,071 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.66 vs. limit=10.0
+2024-08-26 14:43:10,915 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.93 vs. limit=15.0
+2024-08-26 14:43:18,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45589.333333333336, ans=0.1
+2024-08-26 14:43:38,133 INFO [train.py:1114] (2/4) Epoch 4, batch 1100, loss[loss=0.2661, simple_loss=0.3024, pruned_loss=0.08305, ctc_loss=0.1595, over 19606.00 frames. ], tot_loss[loss=0.2837, simple_loss=0.3242, pruned_loss=0.08854, ctc_loss=0.1656, over 3829079.95 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-26 14:43:44,188 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=45696.0, ans=0.1
+2024-08-26 14:43:46,107 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=45696.0, ans=0.2
+2024-08-26 14:43:53,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=45749.333333333336, ans=0.125
+2024-08-26 14:43:56,619 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=45802.666666666664, ans=0.0
+2024-08-26 14:44:12,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=45856.0, ans=0.1
+2024-08-26 14:44:25,686 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.748e+02 1.997e+02 2.350e+02 6.199e+02, threshold=3.995e+02, percent-clipped=5.0
+2024-08-26 14:44:29,537 INFO [train.py:1114] (2/4) Epoch 4, batch 1150, loss[loss=0.2546, simple_loss=0.3053, pruned_loss=0.07517, ctc_loss=0.1339, over 19592.00 frames. ], tot_loss[loss=0.2833, simple_loss=0.3238, pruned_loss=0.08832, ctc_loss=0.1653, over 3830223.75 frames. ], batch size: 52, lr: 3.18e-02, grad_scale: 16.0
+2024-08-26 14:44:33,880 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=45962.666666666664, ans=0.125
+2024-08-26 14:44:33,917 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=45962.666666666664, ans=0.1
+2024-08-26 14:45:33,152 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=46016.0, ans=0.5
+2024-08-26 14:46:30,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=46176.0, ans=0.0
+2024-08-26 14:46:33,611 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.97 vs. limit=10.0
+2024-08-26 14:46:38,936 INFO [train.py:1114] (2/4) Epoch 4, batch 1200, loss[loss=0.2851, simple_loss=0.3284, pruned_loss=0.08826, ctc_loss=0.1634, over 19845.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.325, pruned_loss=0.08892, ctc_loss=0.1663, over 3825950.41 frames. ], batch size: 57, lr: 3.17e-02, grad_scale: 32.0
+2024-08-26 14:46:42,487 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.79 vs. limit=15.0
+2024-08-26 14:46:49,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=46282.666666666664, ans=0.0
+2024-08-26 14:46:53,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=46282.666666666664, ans=0.1
+2024-08-26 14:46:55,318 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:46:59,324 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=46336.0, ans=0.125
+2024-08-26 14:47:13,665 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=46389.333333333336, ans=0.125
+2024-08-26 14:47:23,211 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 1.767e+02 1.944e+02 2.283e+02 5.479e+02, threshold=3.889e+02, percent-clipped=1.0
+2024-08-26 14:47:29,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=46496.0, ans=0.05
+2024-08-26 14:47:29,955 INFO [train.py:1114] (2/4) Epoch 4, batch 1250, loss[loss=0.293, simple_loss=0.3365, pruned_loss=0.09035, ctc_loss=0.1722, over 19543.00 frames. ], tot_loss[loss=0.2854, simple_loss=0.3258, pruned_loss=0.08916, ctc_loss=0.1666, over 3843771.03 frames. ], batch size: 61, lr: 3.17e-02, grad_scale: 32.0
+2024-08-26 14:47:42,265 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=46549.333333333336, ans=0.125
+2024-08-26 14:47:43,425 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.91 vs. limit=15.0
+2024-08-26 14:47:46,848 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=46549.333333333336, ans=0.1
+2024-08-26 14:47:54,849 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=46602.666666666664, ans=0.0007385507246376825
+2024-08-26 14:48:19,288 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=46709.333333333336, ans=0.125
+2024-08-26 14:48:20,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=46709.333333333336, ans=0.125
+2024-08-26 14:48:20,811 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=14.19 vs. limit=15.0
+2024-08-26 14:48:22,039 INFO [train.py:1114] (2/4) Epoch 4, batch 1300, loss[loss=0.318, simple_loss=0.3423, pruned_loss=0.1068, ctc_loss=0.2, over 18907.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3242, pruned_loss=0.08838, ctc_loss=0.1651, over 3847068.18 frames. ], batch size: 76, lr: 3.16e-02, grad_scale: 32.0
+2024-08-26 14:48:49,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=46922.666666666664, ans=0.125
+2024-08-26 14:49:06,441 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 1.633e+02 1.793e+02 2.136e+02 4.035e+02, threshold=3.586e+02, percent-clipped=1.0
+2024-08-26 14:49:08,550 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=46976.0, ans=0.125
+2024-08-26 14:49:10,194 INFO [train.py:1114] (2/4) Epoch 4, batch 1350, loss[loss=0.2799, simple_loss=0.3279, pruned_loss=0.08458, ctc_loss=0.1566, over 19758.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.3231, pruned_loss=0.08746, ctc_loss=0.1635, over 3858266.94 frames. ], batch size: 54, lr: 3.16e-02, grad_scale: 32.0
+2024-08-26 14:49:10,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=47029.333333333336, ans=0.1
+2024-08-26 14:49:19,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=47029.333333333336, ans=0.05
+2024-08-26 14:49:23,347 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.91 vs. limit=22.5
+2024-08-26 14:49:23,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=47082.666666666664, ans=0.2
+2024-08-26 14:49:36,744 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=47136.0, ans=0.0
+2024-08-26 14:49:58,171 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.75 vs. limit=10.0
+2024-08-26 14:50:01,599 INFO [train.py:1114] (2/4) Epoch 4, batch 1400, loss[loss=0.229, simple_loss=0.2765, pruned_loss=0.06533, ctc_loss=0.1273, over 19705.00 frames. ], tot_loss[loss=0.2808, simple_loss=0.3223, pruned_loss=0.08708, ctc_loss=0.1629, over 3865064.39 frames. ], batch size: 46, lr: 3.15e-02, grad_scale: 32.0
+2024-08-26 14:50:05,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=47296.0, ans=0.0
+2024-08-26 14:50:43,665 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.07 vs. limit=15.0
+2024-08-26 14:50:45,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=47509.333333333336, ans=0.125
+2024-08-26 14:50:49,035 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.701e+02 1.930e+02 2.137e+02 5.469e+02, threshold=3.859e+02, percent-clipped=2.0
+2024-08-26 14:50:53,072 INFO [train.py:1114] (2/4) Epoch 4, batch 1450, loss[loss=0.2934, simple_loss=0.3366, pruned_loss=0.09111, ctc_loss=0.1699, over 19676.00 frames. ], tot_loss[loss=0.2818, simple_loss=0.323, pruned_loss=0.08758, ctc_loss=0.1638, over 3863127.45 frames. ], batch size: 63, lr: 3.15e-02, grad_scale: 32.0
+2024-08-26 14:51:11,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=47616.0, ans=0.125
+2024-08-26 14:51:35,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=47776.0, ans=0.00048347826086956626
+2024-08-26 14:51:43,458 INFO [train.py:1114] (2/4) Epoch 4, batch 1500, loss[loss=0.3127, simple_loss=0.3498, pruned_loss=0.1025, ctc_loss=0.1766, over 19574.00 frames. ], tot_loss[loss=0.2827, simple_loss=0.3239, pruned_loss=0.08792, ctc_loss=0.1643, over 3862632.16 frames. ], batch size: 57, lr: 3.14e-02, grad_scale: 32.0
+2024-08-26 14:52:16,478 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=47936.0, ans=0.0
+2024-08-26 14:52:19,592 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=47989.333333333336, ans=0.0
+2024-08-26 14:52:23,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=47989.333333333336, ans=0.125
+2024-08-26 14:52:34,699 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.743e+02 1.956e+02 2.243e+02 3.928e+02, threshold=3.912e+02, percent-clipped=1.0
+2024-08-26 14:52:38,440 INFO [train.py:1114] (2/4) Epoch 4, batch 1550, loss[loss=0.3179, simple_loss=0.358, pruned_loss=0.1016, ctc_loss=0.1864, over 19574.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3239, pruned_loss=0.08803, ctc_loss=0.1645, over 3847553.78 frames. ], batch size: 60, lr: 3.14e-02, grad_scale: 32.0
+2024-08-26 14:52:46,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=48096.0, ans=0.00041391304347826105
+2024-08-26 14:52:49,157 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:53:28,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=48309.333333333336, ans=0.09899494936611666
+2024-08-26 14:53:28,309 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=48309.333333333336, ans=0.125
+2024-08-26 14:53:29,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=48362.666666666664, ans=0.125
+2024-08-26 14:53:29,869 INFO [train.py:1114] (2/4) Epoch 4, batch 1600, loss[loss=0.283, simple_loss=0.3355, pruned_loss=0.08445, ctc_loss=0.154, over 19829.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.324, pruned_loss=0.08839, ctc_loss=0.165, over 3837485.55 frames. ], batch size: 57, lr: 3.13e-02, grad_scale: 32.0
+2024-08-26 14:53:44,947 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=48416.0, ans=0.2
+2024-08-26 14:53:46,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48416.0, ans=0.1
+2024-08-26 14:53:55,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=48469.333333333336, ans=0.125
+2024-08-26 14:54:07,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=48522.666666666664, ans=0.125
+2024-08-26 14:54:14,473 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.15 vs. limit=15.0
+2024-08-26 14:54:18,013 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 1.701e+02 1.882e+02 2.341e+02 4.982e+02, threshold=3.764e+02, percent-clipped=3.0
+2024-08-26 14:54:19,330 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=48576.0, ans=0.125
+2024-08-26 14:54:21,792 INFO [train.py:1114] (2/4) Epoch 4, batch 1650, loss[loss=0.3017, simple_loss=0.3417, pruned_loss=0.09456, ctc_loss=0.1814, over 19647.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3242, pruned_loss=0.08874, ctc_loss=0.1655, over 3833438.53 frames. ], batch size: 59, lr: 3.13e-02, grad_scale: 32.0
+2024-08-26 14:55:17,034 INFO [train.py:1114] (2/4) Epoch 4, batch 1700, loss[loss=0.2531, simple_loss=0.2898, pruned_loss=0.07885, ctc_loss=0.1466, over 19645.00 frames. ], tot_loss[loss=0.2831, simple_loss=0.3238, pruned_loss=0.08827, ctc_loss=0.1645, over 3847276.12 frames. ], batch size: 46, lr: 3.12e-02, grad_scale: 32.0
+2024-08-26 14:55:27,819 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.10 vs. limit=15.0
+2024-08-26 14:55:40,547 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.62 vs. limit=15.0
+2024-08-26 14:55:41,247 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=49002.666666666664, ans=0.125
+2024-08-26 14:55:42,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=49002.666666666664, ans=0.0
+2024-08-26 14:55:45,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=49056.0, ans=0.125
+2024-08-26 14:55:48,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=49056.0, ans=0.00020521739130434716
+2024-08-26 14:55:51,760 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.84 vs. limit=12.0
+2024-08-26 14:55:56,434 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.66 vs. limit=15.0
+2024-08-26 14:55:59,532 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.770e+02 1.975e+02 2.193e+02 4.882e+02, threshold=3.950e+02, percent-clipped=1.0
+2024-08-26 14:56:00,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=49109.333333333336, ans=15.0
+2024-08-26 14:56:03,229 INFO [train.py:1114] (2/4) Epoch 4, batch 1750, loss[loss=0.2606, simple_loss=0.2927, pruned_loss=0.08386, ctc_loss=0.1519, over 19645.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.3225, pruned_loss=0.0877, ctc_loss=0.1636, over 3852181.27 frames. ], batch size: 45, lr: 3.11e-02, grad_scale: 32.0
+2024-08-26 14:56:06,181 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=49162.666666666664, ans=0.0
+2024-08-26 14:56:23,245 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.59 vs. limit=15.0
+2024-08-26 14:56:24,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=49269.333333333336, ans=0.2
+2024-08-26 14:56:29,180 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:56:29,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=49322.666666666664, ans=0.125
+2024-08-26 14:56:39,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=49376.0, ans=0.125
+2024-08-26 14:56:48,547 INFO [train.py:1114] (2/4) Epoch 4, batch 1800, loss[loss=0.285, simple_loss=0.3317, pruned_loss=0.08678, ctc_loss=0.1618, over 19611.00 frames. ], tot_loss[loss=0.2812, simple_loss=0.3224, pruned_loss=0.08742, ctc_loss=0.1631, over 3852946.72 frames. ], batch size: 55, lr: 3.11e-02, grad_scale: 32.0
+2024-08-26 14:56:55,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=49429.333333333336, ans=0.035
+2024-08-26 14:57:01,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=49482.666666666664, ans=0.1
+2024-08-26 14:57:02,645 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.21 vs. limit=15.0
+2024-08-26 14:57:30,228 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.664e+02 1.898e+02 2.172e+02 3.982e+02, threshold=3.795e+02, percent-clipped=1.0
+2024-08-26 14:57:33,986 INFO [train.py:1114] (2/4) Epoch 4, batch 1850, loss[loss=0.2927, simple_loss=0.3308, pruned_loss=0.09245, ctc_loss=0.174, over 19561.00 frames. ], tot_loss[loss=0.2809, simple_loss=0.322, pruned_loss=0.08735, ctc_loss=0.1629, over 3856583.60 frames. ], batch size: 57, lr: 3.10e-02, grad_scale: 32.0
+2024-08-26 14:57:39,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=49696.0, ans=0.125
+2024-08-26 14:57:54,627 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.74 vs. limit=22.5
+2024-08-26 14:58:00,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=49802.666666666664, ans=0.2
+2024-08-26 14:58:16,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=49909.333333333336, ans=0.0
+2024-08-26 14:58:21,310 INFO [train.py:1114] (2/4) Epoch 4, batch 1900, loss[loss=0.2969, simple_loss=0.3445, pruned_loss=0.09021, ctc_loss=0.1724, over 19638.00 frames. ], tot_loss[loss=0.2812, simple_loss=0.3225, pruned_loss=0.08737, ctc_loss=0.1628, over 3861482.76 frames. ], batch size: 59, lr: 3.10e-02, grad_scale: 16.0
+2024-08-26 14:58:25,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=49962.666666666664, ans=0.125
+2024-08-26 14:58:25,988 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 14:58:26,970 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=49962.666666666664, ans=0.125
+2024-08-26 14:58:48,129 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=50122.666666666664, ans=0.125
+2024-08-26 14:58:59,202 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.77 vs. limit=10.0
+2024-08-26 14:59:02,113 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.09 vs. limit=10.0
+2024-08-26 14:59:03,258 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.687e+02 1.820e+02 2.228e+02 3.741e+02, threshold=3.639e+02, percent-clipped=0.0
+2024-08-26 14:59:06,128 INFO [train.py:1114] (2/4) Epoch 4, batch 1950, loss[loss=0.2723, simple_loss=0.3201, pruned_loss=0.08182, ctc_loss=0.152, over 19573.00 frames. ], tot_loss[loss=0.2819, simple_loss=0.3237, pruned_loss=0.08752, ctc_loss=0.1628, over 3870492.34 frames. ], batch size: 52, lr: 3.09e-02, grad_scale: 16.0
+2024-08-26 14:59:10,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=50229.333333333336, ans=0.125
+2024-08-26 14:59:22,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=50282.666666666664, ans=0.125
+2024-08-26 14:59:34,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=50389.333333333336, ans=0.125
+2024-08-26 14:59:53,446 INFO [train.py:1114] (2/4) Epoch 4, batch 2000, loss[loss=0.2313, simple_loss=0.2772, pruned_loss=0.06785, ctc_loss=0.1243, over 19668.00 frames. ], tot_loss[loss=0.2828, simple_loss=0.3241, pruned_loss=0.088, ctc_loss=0.1636, over 3853499.94 frames. ], batch size: 45, lr: 3.09e-02, grad_scale: 32.0
+2024-08-26 14:59:57,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=50496.0, ans=0.125
+2024-08-26 15:00:15,273 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=50602.666666666664, ans=0.125
+2024-08-26 15:00:18,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=50602.666666666664, ans=0.125
+2024-08-26 15:00:24,115 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=50656.0, ans=0.125
+2024-08-26 15:00:35,434 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 1.722e+02 2.023e+02 2.377e+02 8.657e+02, threshold=4.047e+02, percent-clipped=4.0
+2024-08-26 15:00:37,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=50762.666666666664, ans=0.125
+2024-08-26 15:00:38,085 INFO [train.py:1114] (2/4) Epoch 4, batch 2050, loss[loss=0.2374, simple_loss=0.2826, pruned_loss=0.06884, ctc_loss=0.1362, over 19717.00 frames. ], tot_loss[loss=0.2819, simple_loss=0.3233, pruned_loss=0.08758, ctc_loss=0.1632, over 3850611.27 frames. ], batch size: 47, lr: 3.08e-02, grad_scale: 32.0
+2024-08-26 15:00:39,350 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.16 vs. limit=15.0
+2024-08-26 15:00:44,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=50762.666666666664, ans=0.0
+2024-08-26 15:00:46,642 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.89 vs. limit=15.0
+2024-08-26 15:00:52,763 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.72 vs. limit=6.0
+2024-08-26 15:01:00,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=50869.333333333336, ans=0.125
+2024-08-26 15:01:22,456 INFO [train.py:1114] (2/4) Epoch 4, batch 2100, loss[loss=0.2499, simple_loss=0.3066, pruned_loss=0.07062, ctc_loss=0.1296, over 19763.00 frames. ], tot_loss[loss=0.2801, simple_loss=0.3222, pruned_loss=0.08666, ctc_loss=0.1617, over 3858330.99 frames. ], batch size: 54, lr: 3.08e-02, grad_scale: 32.0
+2024-08-26 15:01:27,983 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=51029.333333333336, ans=0.025
+2024-08-26 15:01:29,733 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=51029.333333333336, ans=0.025
+2024-08-26 15:01:42,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=51136.0, ans=0.125
+2024-08-26 15:01:49,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=51189.333333333336, ans=0.125
+2024-08-26 15:01:51,389 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.59 vs. limit=15.0
+2024-08-26 15:02:00,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=51242.666666666664, ans=0.95
+2024-08-26 15:02:04,155 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 1.626e+02 1.780e+02 1.895e+02 2.709e+02, threshold=3.561e+02, percent-clipped=0.0
+2024-08-26 15:02:07,169 INFO [train.py:1114] (2/4) Epoch 4, batch 2150, loss[loss=0.2625, simple_loss=0.3126, pruned_loss=0.07595, ctc_loss=0.1512, over 19858.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3211, pruned_loss=0.08602, ctc_loss=0.1605, over 3869328.31 frames. ], batch size: 52, lr: 3.07e-02, grad_scale: 32.0
+2024-08-26 15:02:08,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=51296.0, ans=0.0
+2024-08-26 15:02:13,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=51296.0, ans=0.0
+2024-08-26 15:02:15,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=51349.333333333336, ans=0.2
+2024-08-26 15:02:18,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=51349.333333333336, ans=0.0
+2024-08-26 15:02:22,616 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=51349.333333333336, ans=0.0
+2024-08-26 15:02:42,534 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.73 vs. limit=6.0
+2024-08-26 15:02:45,137 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.77 vs. limit=15.0
+2024-08-26 15:02:45,784 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=51509.333333333336, ans=0.125
+2024-08-26 15:02:54,329 INFO [train.py:1114] (2/4) Epoch 4, batch 2200, loss[loss=0.2637, simple_loss=0.3129, pruned_loss=0.07824, ctc_loss=0.1452, over 19574.00 frames. ], tot_loss[loss=0.279, simple_loss=0.3213, pruned_loss=0.08624, ctc_loss=0.1606, over 3868226.85 frames. ], batch size: 57, lr: 3.07e-02, grad_scale: 32.0
+2024-08-26 15:03:05,017 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=51616.0, ans=0.125
+2024-08-26 15:03:08,596 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=51616.0, ans=0.125
+2024-08-26 15:03:08,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=51616.0, ans=0.07
+2024-08-26 15:03:28,313 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.61 vs. limit=15.0
+2024-08-26 15:03:30,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=51776.0, ans=0.0
+2024-08-26 15:03:36,544 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 1.687e+02 1.993e+02 2.251e+02 9.209e+02, threshold=3.987e+02, percent-clipped=2.0
+2024-08-26 15:03:39,213 INFO [train.py:1114] (2/4) Epoch 4, batch 2250, loss[loss=0.2934, simple_loss=0.3335, pruned_loss=0.09131, ctc_loss=0.1768, over 19617.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3216, pruned_loss=0.08638, ctc_loss=0.1609, over 3867770.48 frames. ], batch size: 55, lr: 3.06e-02, grad_scale: 32.0
+2024-08-26 15:03:41,980 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=51829.333333333336, ans=0.2
+2024-08-26 15:03:56,821 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:03:59,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1.whitening_limit, batch_count=51936.0, ans=10.0
+2024-08-26 15:04:03,837 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=51936.0, ans=0.125
+2024-08-26 15:04:23,341 INFO [train.py:1114] (2/4) Epoch 4, batch 2300, loss[loss=0.2752, simple_loss=0.3185, pruned_loss=0.08417, ctc_loss=0.1587, over 19514.00 frames. ], tot_loss[loss=0.2788, simple_loss=0.3206, pruned_loss=0.08627, ctc_loss=0.1609, over 3861902.31 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 32.0
+2024-08-26 15:04:31,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=52096.0, ans=15.0
+2024-08-26 15:04:31,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=52149.333333333336, ans=0.125
+2024-08-26 15:04:42,243 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.43 vs. limit=15.0
+2024-08-26 15:04:43,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52202.666666666664, ans=0.1
+2024-08-26 15:04:46,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=52202.666666666664, ans=0.125
+2024-08-26 15:04:46,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=52202.666666666664, ans=0.0
+2024-08-26 15:04:55,330 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=52256.0, ans=0.0
+2024-08-26 15:04:57,220 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=52256.0, ans=0.1
+2024-08-26 15:05:06,727 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 1.800e+02 1.978e+02 2.376e+02 5.904e+02, threshold=3.955e+02, percent-clipped=2.0
+2024-08-26 15:05:09,373 INFO [train.py:1114] (2/4) Epoch 4, batch 2350, loss[loss=0.321, simple_loss=0.3537, pruned_loss=0.1063, ctc_loss=0.1893, over 19663.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3205, pruned_loss=0.08641, ctc_loss=0.1613, over 3863950.36 frames. ], batch size: 63, lr: 3.05e-02, grad_scale: 32.0
+2024-08-26 15:05:10,836 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.92 vs. limit=15.0
+2024-08-26 15:05:16,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.min_positive, batch_count=52362.666666666664, ans=0.05
+2024-08-26 15:05:30,836 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.66 vs. limit=10.0
+2024-08-26 15:05:33,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=52469.333333333336, ans=0.125
+2024-08-26 15:05:46,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=52576.0, ans=0.0
+2024-08-26 15:05:47,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=52576.0, ans=0.1
+2024-08-26 15:06:03,791 INFO [train.py:1114] (2/4) Epoch 4, batch 2400, loss[loss=0.2952, simple_loss=0.3311, pruned_loss=0.09342, ctc_loss=0.1809, over 19289.00 frames. ], tot_loss[loss=0.2816, simple_loss=0.323, pruned_loss=0.0875, ctc_loss=0.1631, over 3858454.32 frames. ], batch size: 71, lr: 3.05e-02, grad_scale: 32.0
+2024-08-26 15:06:10,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=52629.333333333336, ans=0.125
+2024-08-26 15:06:24,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=52682.666666666664, ans=0.0
+2024-08-26 15:06:30,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=52736.0, ans=0.125
+2024-08-26 15:06:53,223 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.824e+02 2.127e+02 2.398e+02 5.215e+02, threshold=4.254e+02, percent-clipped=1.0
+2024-08-26 15:06:55,098 INFO [train.py:1114] (2/4) Epoch 4, batch 2450, loss[loss=0.3269, simple_loss=0.3353, pruned_loss=0.1154, ctc_loss=0.2192, over 13795.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3278, pruned_loss=0.09163, ctc_loss=0.1709, over 3732216.21 frames. ], batch size: 140, lr: 3.05e-02, grad_scale: 16.0
+2024-08-26 15:06:56,250 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=52896.0, ans=0.125
+2024-08-26 15:07:00,251 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=7.54 vs. limit=15.0
+2024-08-26 15:07:09,082 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.92 vs. limit=15.0
+2024-08-26 15:07:13,172 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=53002.666666666664, ans=0.125
+2024-08-26 15:07:15,852 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff3.min_abs, batch_count=53002.666666666664, ans=0.2
+2024-08-26 15:07:17,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=53002.666666666664, ans=0.0
+2024-08-26 15:07:22,020 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=53056.0, ans=0.1
+2024-08-26 15:07:25,782 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=53056.0, ans=0.1
+2024-08-26 15:09:12,283 INFO [train.py:1114] (2/4) Epoch 5, batch 0, loss[loss=0.2531, simple_loss=0.3026, pruned_loss=0.07468, ctc_loss=0.1354, over 19811.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3026, pruned_loss=0.07468, ctc_loss=0.1354, over 19811.00 frames. ], batch size: 49, lr: 2.83e-02, grad_scale: 32.0
+2024-08-26 15:09:12,283 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 15:09:22,088 INFO [train.py:1146] (2/4) Epoch 5, validation: loss=0.2289, simple_loss=0.3118, pruned_loss=0.05352, ctc_loss=0.09739, over 944034.00 frames.
+2024-08-26 15:09:22,763 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
+2024-08-26 15:09:30,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=53104.0, ans=0.125
+2024-08-26 15:09:36,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=53157.333333333336, ans=0.0
+2024-08-26 15:09:41,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=53210.666666666664, ans=0.1
+2024-08-26 15:09:49,784 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=53210.666666666664, ans=0.04949747468305833
+2024-08-26 15:09:59,786 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.59 vs. limit=6.0
+2024-08-26 15:10:10,883 INFO [train.py:1114] (2/4) Epoch 5, batch 50, loss[loss=0.2379, simple_loss=0.2883, pruned_loss=0.0674, ctc_loss=0.1316, over 19692.00 frames. ], tot_loss[loss=0.2798, simple_loss=0.3224, pruned_loss=0.08604, ctc_loss=0.1627, over 843425.80 frames. ], batch size: 47, lr: 2.83e-02, grad_scale: 32.0
+2024-08-26 15:10:13,091 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.45 vs. limit=15.0
+2024-08-26 15:10:22,335 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.804e+02 2.028e+02 2.297e+02 4.038e+02, threshold=4.056e+02, percent-clipped=0.0
+2024-08-26 15:10:47,417 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=53530.666666666664, ans=0.125
+2024-08-26 15:10:52,223 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=53584.0, ans=0.125
+2024-08-26 15:11:01,252 INFO [train.py:1114] (2/4) Epoch 5, batch 100, loss[loss=0.2679, simple_loss=0.3075, pruned_loss=0.08265, ctc_loss=0.1575, over 19718.00 frames. ], tot_loss[loss=0.2828, simple_loss=0.3255, pruned_loss=0.08732, ctc_loss=0.1638, over 1498153.10 frames. ], batch size: 51, lr: 2.82e-02, grad_scale: 32.0
+2024-08-26 15:11:08,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=53637.333333333336, ans=0.0
+2024-08-26 15:11:15,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=53690.666666666664, ans=0.125
+2024-08-26 15:11:17,050 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.34 vs. limit=22.5
+2024-08-26 15:11:17,759 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=53690.666666666664, ans=0.125
+2024-08-26 15:11:28,912 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=53744.0, ans=0.0
+2024-08-26 15:11:59,576 INFO [train.py:1114] (2/4) Epoch 5, batch 150, loss[loss=0.2652, simple_loss=0.3004, pruned_loss=0.08393, ctc_loss=0.1556, over 19685.00 frames. ], tot_loss[loss=0.2778, simple_loss=0.3214, pruned_loss=0.08516, ctc_loss=0.1598, over 2028120.81 frames. ], batch size: 47, lr: 2.82e-02, grad_scale: 32.0
+2024-08-26 15:12:10,018 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.696e+02 1.862e+02 2.172e+02 3.492e+02, threshold=3.724e+02, percent-clipped=0.0
+2024-08-26 15:12:10,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=53957.333333333336, ans=0.1
+2024-08-26 15:12:13,134 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=53957.333333333336, ans=0.2
+2024-08-26 15:12:18,050 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.86 vs. limit=15.0
+2024-08-26 15:12:24,350 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=54010.666666666664, ans=0.0
+2024-08-26 15:12:27,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=54010.666666666664, ans=0.025
+2024-08-26 15:12:33,970 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.67 vs. limit=22.5
+2024-08-26 15:12:43,042 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54117.333333333336, ans=0.1
+2024-08-26 15:12:48,446 INFO [train.py:1114] (2/4) Epoch 5, batch 200, loss[loss=0.2998, simple_loss=0.3371, pruned_loss=0.09541, ctc_loss=0.1795, over 18092.00 frames. ], tot_loss[loss=0.274, simple_loss=0.3183, pruned_loss=0.08353, ctc_loss=0.1565, over 2435948.10 frames. ], batch size: 85, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:12:50,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=54170.666666666664, ans=0.125
+2024-08-26 15:12:52,427 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer_ff3.min_abs, batch_count=54170.666666666664, ans=0.2
+2024-08-26 15:12:56,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=54170.666666666664, ans=0.025
+2024-08-26 15:12:57,121 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=54224.0, ans=0.125
+2024-08-26 15:13:12,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=54277.333333333336, ans=0.125
+2024-08-26 15:13:39,766 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.52 vs. limit=22.5
+2024-08-26 15:13:41,974 INFO [train.py:1114] (2/4) Epoch 5, batch 250, loss[loss=0.2819, simple_loss=0.3329, pruned_loss=0.08426, ctc_loss=0.1559, over 19349.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.3177, pruned_loss=0.0828, ctc_loss=0.1552, over 2756553.01 frames. ], batch size: 67, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:13:50,499 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.685e+02 1.803e+02 2.078e+02 3.456e+02, threshold=3.607e+02, percent-clipped=0.0
+2024-08-26 15:14:01,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=54544.0, ans=0.0
+2024-08-26 15:14:14,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=54597.333333333336, ans=0.125
+2024-08-26 15:14:28,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=54650.666666666664, ans=0.1
+2024-08-26 15:14:32,309 INFO [train.py:1114] (2/4) Epoch 5, batch 300, loss[loss=0.3053, simple_loss=0.3344, pruned_loss=0.1015, ctc_loss=0.1833, over 19510.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3163, pruned_loss=0.08202, ctc_loss=0.1537, over 3000980.14 frames. ], batch size: 61, lr: 2.81e-02, grad_scale: 32.0
+2024-08-26 15:15:13,535 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=54917.333333333336, ans=0.025
+2024-08-26 15:15:19,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=54917.333333333336, ans=0.0
+2024-08-26 15:15:22,166 INFO [train.py:1114] (2/4) Epoch 5, batch 350, loss[loss=0.2323, simple_loss=0.2911, pruned_loss=0.06314, ctc_loss=0.1183, over 19759.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3172, pruned_loss=0.08243, ctc_loss=0.154, over 3190823.52 frames. ], batch size: 48, lr: 2.80e-02, grad_scale: 32.0
+2024-08-26 15:15:24,551 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=54970.666666666664, ans=0.2
+2024-08-26 15:15:28,415 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=54970.666666666664, ans=0.1
+2024-08-26 15:15:30,997 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=55024.0, ans=0.0
+2024-08-26 15:15:31,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=55024.0, ans=0.125
+2024-08-26 15:15:31,768 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.410e+02 1.717e+02 1.933e+02 2.233e+02 3.797e+02, threshold=3.865e+02, percent-clipped=1.0
+2024-08-26 15:15:36,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=55024.0, ans=0.025
+2024-08-26 15:15:44,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=55077.333333333336, ans=0.07
+2024-08-26 15:16:15,652 INFO [train.py:1114] (2/4) Epoch 5, batch 400, loss[loss=0.2609, simple_loss=0.3212, pruned_loss=0.07224, ctc_loss=0.1406, over 19486.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3172, pruned_loss=0.08242, ctc_loss=0.1539, over 3342865.79 frames. ], batch size: 54, lr: 2.80e-02, grad_scale: 32.0
+2024-08-26 15:16:20,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=55237.333333333336, ans=0.0
+2024-08-26 15:16:21,780 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=55237.333333333336, ans=0.0
+2024-08-26 15:16:41,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=55344.0, ans=0.125
+2024-08-26 15:16:47,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55397.333333333336, ans=0.1
+2024-08-26 15:17:01,611 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=55450.666666666664, ans=0.125
+2024-08-26 15:17:07,108 INFO [train.py:1114] (2/4) Epoch 5, batch 450, loss[loss=0.27, simple_loss=0.3265, pruned_loss=0.07775, ctc_loss=0.1449, over 19619.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3171, pruned_loss=0.08229, ctc_loss=0.1536, over 3452048.21 frames. ], batch size: 55, lr: 2.79e-02, grad_scale: 16.0
+2024-08-26 15:17:07,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=55504.0, ans=0.0
+2024-08-26 15:17:17,440 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.642e+02 1.899e+02 2.179e+02 3.523e+02, threshold=3.798e+02, percent-clipped=0.0
+2024-08-26 15:17:17,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=55557.333333333336, ans=0.0
+2024-08-26 15:17:23,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55557.333333333336, ans=0.1
+2024-08-26 15:17:27,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=55610.666666666664, ans=0.125
+2024-08-26 15:17:45,240 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=55664.0, ans=0.125
+2024-08-26 15:17:46,738 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.40 vs. limit=15.0
+2024-08-26 15:18:04,475 INFO [train.py:1114] (2/4) Epoch 5, batch 500, loss[loss=0.2799, simple_loss=0.3299, pruned_loss=0.0849, ctc_loss=0.1504, over 19677.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3157, pruned_loss=0.0814, ctc_loss=0.152, over 3548439.66 frames. ], batch size: 63, lr: 2.79e-02, grad_scale: 16.0
+2024-08-26 15:18:10,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=55770.666666666664, ans=0.0
+2024-08-26 15:18:31,455 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.84 vs. limit=15.0
+2024-08-26 15:18:37,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=55877.333333333336, ans=0.125
+2024-08-26 15:18:42,584 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.75 vs. limit=5.0
+2024-08-26 15:18:52,456 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.15 vs. limit=15.0
+2024-08-26 15:18:55,138 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=55930.666666666664, ans=0.0
+2024-08-26 15:18:57,403 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=55930.666666666664, ans=0.125
+2024-08-26 15:19:22,163 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.81 vs. limit=15.0
+2024-08-26 15:19:46,992 INFO [train.py:1114] (2/4) Epoch 5, batch 550, loss[loss=0.2926, simple_loss=0.3322, pruned_loss=0.09178, ctc_loss=0.1738, over 19298.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3155, pruned_loss=0.08138, ctc_loss=0.1522, over 3609573.60 frames. ], batch size: 71, lr: 2.78e-02, grad_scale: 16.0
+2024-08-26 15:19:56,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=56037.333333333336, ans=0.0
+2024-08-26 15:20:04,959 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 1.676e+02 1.860e+02 2.053e+02 4.118e+02, threshold=3.720e+02, percent-clipped=1.0
+2024-08-26 15:20:18,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=56144.0, ans=0.0
+2024-08-26 15:20:51,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=56250.666666666664, ans=0.0
+2024-08-26 15:20:56,219 INFO [train.py:1114] (2/4) Epoch 5, batch 600, loss[loss=0.2876, simple_loss=0.3417, pruned_loss=0.0847, ctc_loss=0.1603, over 19362.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3163, pruned_loss=0.08147, ctc_loss=0.1522, over 3667641.63 frames. ], batch size: 67, lr: 2.78e-02, grad_scale: 16.0
+2024-08-26 15:21:04,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=56304.0, ans=0.125
+2024-08-26 15:21:09,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=56357.333333333336, ans=0.125
+2024-08-26 15:21:21,642 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:21:26,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=56410.666666666664, ans=0.125
+2024-08-26 15:21:26,516 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:21:44,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=56517.333333333336, ans=0.2
+2024-08-26 15:21:45,172 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.50 vs. limit=10.0
+2024-08-26 15:21:48,992 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.85 vs. limit=10.0
+2024-08-26 15:21:49,360 INFO [train.py:1114] (2/4) Epoch 5, batch 650, loss[loss=0.272, simple_loss=0.3179, pruned_loss=0.08163, ctc_loss=0.1574, over 19763.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3154, pruned_loss=0.08119, ctc_loss=0.1519, over 3717753.44 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:21:53,490 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=56570.666666666664, ans=0.1
+2024-08-26 15:21:59,894 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 1.659e+02 1.803e+02 2.095e+02 3.596e+02, threshold=3.607e+02, percent-clipped=0.0
+2024-08-26 15:22:02,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=56624.0, ans=0.0
+2024-08-26 15:22:26,772 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=56730.666666666664, ans=0.125
+2024-08-26 15:22:39,297 INFO [train.py:1114] (2/4) Epoch 5, batch 700, loss[loss=0.2524, simple_loss=0.3014, pruned_loss=0.07272, ctc_loss=0.1449, over 19738.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3153, pruned_loss=0.08069, ctc_loss=0.151, over 3749019.41 frames. ], batch size: 51, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:22:51,370 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.49 vs. limit=12.0
+2024-08-26 15:22:51,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=56890.666666666664, ans=0.2
+2024-08-26 15:23:05,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=56944.0, ans=0.5
+2024-08-26 15:23:15,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=56997.333333333336, ans=0.2
+2024-08-26 15:23:17,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=57050.666666666664, ans=0.125
+2024-08-26 15:23:29,317 INFO [train.py:1114] (2/4) Epoch 5, batch 750, loss[loss=0.2694, simple_loss=0.3227, pruned_loss=0.07811, ctc_loss=0.1499, over 19513.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3152, pruned_loss=0.08107, ctc_loss=0.1515, over 3774483.61 frames. ], batch size: 54, lr: 2.77e-02, grad_scale: 16.0
+2024-08-26 15:23:33,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=57104.0, ans=0.0
+2024-08-26 15:23:37,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=57104.0, ans=0.125
+2024-08-26 15:23:39,770 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.452e+02 1.732e+02 1.957e+02 2.375e+02 6.184e+02, threshold=3.914e+02, percent-clipped=3.0
+2024-08-26 15:23:42,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=57157.333333333336, ans=0.025
+2024-08-26 15:23:57,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=57264.0, ans=0.2
+2024-08-26 15:24:13,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.72 vs. limit=15.0
+2024-08-26 15:24:19,566 INFO [train.py:1114] (2/4) Epoch 5, batch 800, loss[loss=0.2452, simple_loss=0.2879, pruned_loss=0.07318, ctc_loss=0.1403, over 19784.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3149, pruned_loss=0.08106, ctc_loss=0.1515, over 3797225.22 frames. ], batch size: 49, lr: 2.76e-02, grad_scale: 32.0
+2024-08-26 15:24:42,656 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=57477.333333333336, ans=0.0
+2024-08-26 15:24:44,541 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=57477.333333333336, ans=0.125
+2024-08-26 15:24:51,383 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.53 vs. limit=12.0
+2024-08-26 15:24:56,265 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.63 vs. limit=22.5
+2024-08-26 15:25:10,622 INFO [train.py:1114] (2/4) Epoch 5, batch 850, loss[loss=0.2653, simple_loss=0.3196, pruned_loss=0.07573, ctc_loss=0.1489, over 19643.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.314, pruned_loss=0.08063, ctc_loss=0.1507, over 3816385.78 frames. ], batch size: 59, lr: 2.76e-02, grad_scale: 32.0
+2024-08-26 15:25:16,864 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.32 vs. limit=22.5
+2024-08-26 15:25:24,569 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.744e+02 1.971e+02 2.331e+02 4.591e+02, threshold=3.942e+02, percent-clipped=1.0
+2024-08-26 15:25:45,175 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=12.00 vs. limit=15.0
+2024-08-26 15:25:51,915 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=57797.333333333336, ans=0.0
+2024-08-26 15:25:56,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=57850.666666666664, ans=0.0
+2024-08-26 15:25:56,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.05 vs. limit=15.0
+2024-08-26 15:26:07,577 INFO [train.py:1114] (2/4) Epoch 5, batch 900, loss[loss=0.2314, simple_loss=0.2747, pruned_loss=0.0684, ctc_loss=0.128, over 19431.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3144, pruned_loss=0.08117, ctc_loss=0.1516, over 3820078.46 frames. ], batch size: 48, lr: 2.75e-02, grad_scale: 32.0
+2024-08-26 15:26:07,840 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=57904.0, ans=0.0
+2024-08-26 15:26:12,639 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1.whitening_limit, batch_count=57904.0, ans=10.0
+2024-08-26 15:26:16,805 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.63 vs. limit=15.0
+2024-08-26 15:26:26,794 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=57957.333333333336, ans=0.125
+2024-08-26 15:26:33,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=58010.666666666664, ans=0.125
+2024-08-26 15:26:41,527 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.35 vs. limit=15.0
+2024-08-26 15:26:57,564 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=58170.666666666664, ans=0.125
+2024-08-26 15:26:58,232 INFO [train.py:1114] (2/4) Epoch 5, batch 950, loss[loss=0.2288, simple_loss=0.283, pruned_loss=0.06396, ctc_loss=0.1169, over 19505.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3142, pruned_loss=0.08103, ctc_loss=0.1515, over 3818664.89 frames. ], batch size: 49, lr: 2.75e-02, grad_scale: 32.0
+2024-08-26 15:26:58,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=58170.666666666664, ans=0.0
+2024-08-26 15:27:02,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=58170.666666666664, ans=0.025
+2024-08-26 15:27:04,160 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=58170.666666666664, ans=0.125
+2024-08-26 15:27:04,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58170.666666666664, ans=0.1
+2024-08-26 15:27:11,441 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.648e+02 1.859e+02 2.135e+02 3.098e+02, threshold=3.718e+02, percent-clipped=0.0
+2024-08-26 15:27:20,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=58277.333333333336, ans=0.09899494936611666
+2024-08-26 15:27:30,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=58330.666666666664, ans=0.0
+2024-08-26 15:27:33,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=58330.666666666664, ans=0.125
+2024-08-26 15:27:42,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=58384.0, ans=0.125
+2024-08-26 15:27:43,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=58384.0, ans=0.0
+2024-08-26 15:27:44,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=58384.0, ans=0.1
+2024-08-26 15:27:48,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=58384.0, ans=0.125
+2024-08-26 15:27:49,898 INFO [train.py:1114] (2/4) Epoch 5, batch 1000, loss[loss=0.2508, simple_loss=0.3059, pruned_loss=0.07063, ctc_loss=0.1363, over 19850.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.3157, pruned_loss=0.08181, ctc_loss=0.1529, over 3815651.64 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 32.0
+2024-08-26 15:28:05,841 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=58490.666666666664, ans=0.0
+2024-08-26 15:28:40,051 INFO [train.py:1114] (2/4) Epoch 5, batch 1050, loss[loss=0.2735, simple_loss=0.3256, pruned_loss=0.08117, ctc_loss=0.1474, over 19839.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3148, pruned_loss=0.08134, ctc_loss=0.1521, over 3822385.69 frames. ], batch size: 57, lr: 2.74e-02, grad_scale: 32.0
+2024-08-26 15:28:43,367 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=58704.0, ans=0.125
+2024-08-26 15:28:45,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=58704.0, ans=0.125
+2024-08-26 15:28:50,843 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 1.680e+02 1.893e+02 2.161e+02 3.731e+02, threshold=3.786e+02, percent-clipped=1.0
+2024-08-26 15:28:51,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=58757.333333333336, ans=0.2
+2024-08-26 15:29:08,602 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.22 vs. limit=15.0
+2024-08-26 15:29:16,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=58864.0, ans=10.0
+2024-08-26 15:29:19,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=58864.0, ans=0.125
+2024-08-26 15:29:28,175 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.min_positive, batch_count=58917.333333333336, ans=0.025
+2024-08-26 15:29:33,657 INFO [train.py:1114] (2/4) Epoch 5, batch 1100, loss[loss=0.2311, simple_loss=0.2905, pruned_loss=0.06292, ctc_loss=0.1147, over 19577.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3141, pruned_loss=0.08085, ctc_loss=0.1514, over 3830046.76 frames. ], batch size: 52, lr: 2.74e-02, grad_scale: 16.0
+2024-08-26 15:29:42,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=59024.0, ans=0.5
+2024-08-26 15:29:45,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=59024.0, ans=0.2
+2024-08-26 15:29:50,244 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.82 vs. limit=15.0
+2024-08-26 15:29:56,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=59077.333333333336, ans=0.125
+2024-08-26 15:30:06,275 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten.whitening_limit, batch_count=59130.666666666664, ans=15.0
+2024-08-26 15:30:13,080 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=59130.666666666664, ans=0.125
+2024-08-26 15:30:16,255 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.45 vs. limit=15.0
+2024-08-26 15:30:24,315 INFO [train.py:1114] (2/4) Epoch 5, batch 1150, loss[loss=0.2596, simple_loss=0.3108, pruned_loss=0.07567, ctc_loss=0.1428, over 19601.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3139, pruned_loss=0.08057, ctc_loss=0.1509, over 3829557.59 frames. ], batch size: 52, lr: 2.73e-02, grad_scale: 16.0
+2024-08-26 15:30:27,450 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=59237.333333333336, ans=0.0
+2024-08-26 15:30:35,930 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 1.591e+02 1.744e+02 2.042e+02 4.394e+02, threshold=3.489e+02, percent-clipped=2.0
+2024-08-26 15:30:37,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=59290.666666666664, ans=0.125
+2024-08-26 15:30:53,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=59397.333333333336, ans=0.2
+2024-08-26 15:30:57,228 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.23 vs. limit=15.0
+2024-08-26 15:31:15,277 INFO [train.py:1114] (2/4) Epoch 5, batch 1200, loss[loss=0.2834, simple_loss=0.3324, pruned_loss=0.08593, ctc_loss=0.1566, over 19839.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3148, pruned_loss=0.08078, ctc_loss=0.1517, over 3825952.48 frames. ], batch size: 57, lr: 2.73e-02, grad_scale: 32.0
+2024-08-26 15:31:25,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=59557.333333333336, ans=0.0
+2024-08-26 15:31:40,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=59610.666666666664, ans=0.2
+2024-08-26 15:31:41,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=59610.666666666664, ans=0.125
+2024-08-26 15:32:06,258 INFO [train.py:1114] (2/4) Epoch 5, batch 1250, loss[loss=0.2748, simple_loss=0.3194, pruned_loss=0.08331, ctc_loss=0.1588, over 19528.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.3152, pruned_loss=0.08073, ctc_loss=0.1515, over 3843642.12 frames. ], batch size: 61, lr: 2.72e-02, grad_scale: 32.0
+2024-08-26 15:32:18,025 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 1.635e+02 1.798e+02 2.001e+02 4.301e+02, threshold=3.596e+02, percent-clipped=1.0
+2024-08-26 15:32:21,264 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=7.12 vs. limit=15.0
+2024-08-26 15:32:24,719 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=59877.333333333336, ans=0.07
+2024-08-26 15:32:26,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=59877.333333333336, ans=0.0
+2024-08-26 15:32:28,694 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=12.62 vs. limit=15.0
+2024-08-26 15:32:34,443 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.50 vs. limit=15.0
+2024-08-26 15:32:54,754 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=59984.0, ans=0.125
+2024-08-26 15:32:56,445 INFO [train.py:1114] (2/4) Epoch 5, batch 1300, loss[loss=0.2843, simple_loss=0.326, pruned_loss=0.08667, ctc_loss=0.1733, over 18890.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3144, pruned_loss=0.08047, ctc_loss=0.151, over 3846186.35 frames. ], batch size: 76, lr: 2.72e-02, grad_scale: 32.0
+2024-08-26 15:33:01,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=60037.333333333336, ans=0.07
+2024-08-26 15:33:01,846 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.51 vs. limit=8.0
+2024-08-26 15:33:43,912 INFO [train.py:1114] (2/4) Epoch 5, batch 1350, loss[loss=0.2573, simple_loss=0.3176, pruned_loss=0.07125, ctc_loss=0.1361, over 19786.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3135, pruned_loss=0.07976, ctc_loss=0.1495, over 3857362.45 frames. ], batch size: 54, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:33:46,967 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:33:55,384 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 1.610e+02 1.752e+02 1.989e+02 4.527e+02, threshold=3.503e+02, percent-clipped=1.0
+2024-08-26 15:34:00,217 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=60357.333333333336, ans=0.2
+2024-08-26 15:34:01,830 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.98 vs. limit=15.0
+2024-08-26 15:34:02,619 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.01 vs. limit=15.0
+2024-08-26 15:34:10,892 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=60410.666666666664, ans=0.125
+2024-08-26 15:34:17,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=60464.0, ans=0.125
+2024-08-26 15:34:19,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=60464.0, ans=0.125
+2024-08-26 15:34:21,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=60464.0, ans=0.125
+2024-08-26 15:34:34,576 INFO [train.py:1114] (2/4) Epoch 5, batch 1400, loss[loss=0.2319, simple_loss=0.2783, pruned_loss=0.06752, ctc_loss=0.1263, over 19671.00 frames. ], tot_loss[loss=0.266, simple_loss=0.313, pruned_loss=0.07963, ctc_loss=0.1492, over 3864150.13 frames. ], batch size: 46, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:34:47,082 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:34:53,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=60677.333333333336, ans=0.125
+2024-08-26 15:35:14,524 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.37 vs. limit=15.0
+2024-08-26 15:35:27,704 INFO [train.py:1114] (2/4) Epoch 5, batch 1450, loss[loss=0.2764, simple_loss=0.3277, pruned_loss=0.08245, ctc_loss=0.1504, over 19676.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3138, pruned_loss=0.07987, ctc_loss=0.1495, over 3862100.12 frames. ], batch size: 63, lr: 2.71e-02, grad_scale: 32.0
+2024-08-26 15:35:42,509 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.680e+02 1.820e+02 2.123e+02 3.172e+02, threshold=3.639e+02, percent-clipped=0.0
+2024-08-26 15:35:43,751 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=60890.666666666664, ans=0.1
+2024-08-26 15:35:45,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=60890.666666666664, ans=0.125
+2024-08-26 15:36:03,683 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=60997.333333333336, ans=0.0
+2024-08-26 15:36:06,104 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.32 vs. limit=15.0
+2024-08-26 15:36:19,844 INFO [train.py:1114] (2/4) Epoch 5, batch 1500, loss[loss=0.2893, simple_loss=0.331, pruned_loss=0.09055, ctc_loss=0.1664, over 19586.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3141, pruned_loss=0.08003, ctc_loss=0.1498, over 3861725.35 frames. ], batch size: 57, lr: 2.70e-02, grad_scale: 32.0
+2024-08-26 15:36:25,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=61104.0, ans=0.2
+2024-08-26 15:36:30,201 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.54 vs. limit=15.0
+2024-08-26 15:36:31,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=61157.333333333336, ans=0.95
+2024-08-26 15:36:45,299 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=61210.666666666664, ans=0.1
+2024-08-26 15:36:58,094 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.01 vs. limit=15.0
+2024-08-26 15:37:09,976 INFO [train.py:1114] (2/4) Epoch 5, batch 1550, loss[loss=0.2839, simple_loss=0.3312, pruned_loss=0.08654, ctc_loss=0.159, over 19584.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3142, pruned_loss=0.08029, ctc_loss=0.1501, over 3847400.37 frames. ], batch size: 60, lr: 2.70e-02, grad_scale: 16.0
+2024-08-26 15:37:11,634 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.35 vs. limit=15.0
+2024-08-26 15:37:15,411 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.80 vs. limit=22.5
+2024-08-26 15:37:17,970 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=61370.666666666664, ans=0.125
+2024-08-26 15:37:22,472 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.359e+02 1.752e+02 1.975e+02 2.269e+02 3.644e+02, threshold=3.951e+02, percent-clipped=1.0
+2024-08-26 15:37:24,702 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=61424.0, ans=0.125
+2024-08-26 15:37:34,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=61477.333333333336, ans=0.2
+2024-08-26 15:37:55,618 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=9.24 vs. limit=12.0
+2024-08-26 15:38:03,694 INFO [train.py:1114] (2/4) Epoch 5, batch 1600, loss[loss=0.2626, simple_loss=0.3109, pruned_loss=0.07877, ctc_loss=0.1419, over 19851.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3135, pruned_loss=0.08002, ctc_loss=0.1494, over 3835621.73 frames. ], batch size: 57, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:38:22,273 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=61637.333333333336, ans=0.125
+2024-08-26 15:38:25,332 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.51 vs. limit=12.0
+2024-08-26 15:38:26,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=61690.666666666664, ans=0.1
+2024-08-26 15:39:04,033 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=61797.333333333336, ans=0.0
+2024-08-26 15:39:15,814 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=61850.666666666664, ans=0.125
+2024-08-26 15:39:18,604 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=61904.0, ans=0.125
+2024-08-26 15:39:19,225 INFO [train.py:1114] (2/4) Epoch 5, batch 1650, loss[loss=0.2689, simple_loss=0.3247, pruned_loss=0.07784, ctc_loss=0.1438, over 19673.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3136, pruned_loss=0.08003, ctc_loss=0.1497, over 3832831.11 frames. ], batch size: 59, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:39:22,585 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=61904.0, ans=0.2
+2024-08-26 15:39:31,765 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.578e+02 1.738e+02 2.103e+02 3.628e+02, threshold=3.475e+02, percent-clipped=0.0
+2024-08-26 15:39:32,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=61957.333333333336, ans=0.125
+2024-08-26 15:39:41,386 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:39:45,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=62010.666666666664, ans=0.125
+2024-08-26 15:39:47,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=62064.0, ans=0.125
+2024-08-26 15:39:49,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=62064.0, ans=0.125
+2024-08-26 15:39:51,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=62064.0, ans=0.0
+2024-08-26 15:40:00,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=62117.333333333336, ans=0.0
+2024-08-26 15:40:08,741 INFO [train.py:1114] (2/4) Epoch 5, batch 1700, loss[loss=0.238, simple_loss=0.2825, pruned_loss=0.07042, ctc_loss=0.1313, over 19662.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3127, pruned_loss=0.0792, ctc_loss=0.1483, over 3847310.42 frames. ], batch size: 46, lr: 2.69e-02, grad_scale: 32.0
+2024-08-26 15:40:22,171 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=16.10 vs. limit=15.0
+2024-08-26 15:40:31,992 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=62277.333333333336, ans=0.125
+2024-08-26 15:40:50,616 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.const_attention_rate, batch_count=62384.0, ans=0.025
+2024-08-26 15:40:53,566 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.18 vs. limit=12.0
+2024-08-26 15:40:54,053 INFO [train.py:1114] (2/4) Epoch 5, batch 1750, loss[loss=0.2251, simple_loss=0.2743, pruned_loss=0.06431, ctc_loss=0.1181, over 19603.00 frames. ], tot_loss[loss=0.2641, simple_loss=0.3119, pruned_loss=0.07869, ctc_loss=0.1474, over 3851911.58 frames. ], batch size: 45, lr: 2.68e-02, grad_scale: 32.0
+2024-08-26 15:40:56,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=62437.333333333336, ans=0.0
+2024-08-26 15:41:01,325 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=62437.333333333336, ans=0.125
+2024-08-26 15:41:05,740 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.599e+02 1.842e+02 2.097e+02 3.191e+02, threshold=3.683e+02, percent-clipped=0.0
+2024-08-26 15:41:09,286 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.88 vs. limit=15.0
+2024-08-26 15:41:16,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=62544.0, ans=0.0
+2024-08-26 15:41:25,655 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.82 vs. limit=22.5
+2024-08-26 15:41:39,328 INFO [train.py:1114] (2/4) Epoch 5, batch 1800, loss[loss=0.2591, simple_loss=0.3178, pruned_loss=0.07313, ctc_loss=0.1356, over 19623.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3124, pruned_loss=0.07884, ctc_loss=0.1475, over 3853742.09 frames. ], batch size: 55, lr: 2.68e-02, grad_scale: 32.0
+2024-08-26 15:41:40,007 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=8.67 vs. limit=10.0
+2024-08-26 15:41:44,118 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=62704.0, ans=0.125
+2024-08-26 15:41:56,439 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=62810.666666666664, ans=0.125
+2024-08-26 15:41:57,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.31 vs. limit=10.0
+2024-08-26 15:42:13,498 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=62864.0, ans=0.125
+2024-08-26 15:42:19,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=62917.333333333336, ans=0.125
+2024-08-26 15:42:21,106 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.25 vs. limit=22.5
+2024-08-26 15:42:24,238 INFO [train.py:1114] (2/4) Epoch 5, batch 1850, loss[loss=0.2722, simple_loss=0.3299, pruned_loss=0.07757, ctc_loss=0.1483, over 19584.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3118, pruned_loss=0.07836, ctc_loss=0.1466, over 3856585.11 frames. ], batch size: 57, lr: 2.67e-02, grad_scale: 32.0
+2024-08-26 15:42:30,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=62970.666666666664, ans=0.1
+2024-08-26 15:42:35,855 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 1.605e+02 1.818e+02 2.016e+02 3.945e+02, threshold=3.637e+02, percent-clipped=1.0
+2024-08-26 15:42:36,564 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.09 vs. limit=15.0
+2024-08-26 15:42:38,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=63024.0, ans=0.0
+2024-08-26 15:43:16,394 INFO [train.py:1114] (2/4) Epoch 5, batch 1900, loss[loss=0.266, simple_loss=0.3167, pruned_loss=0.07749, ctc_loss=0.1508, over 19653.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3127, pruned_loss=0.07857, ctc_loss=0.1469, over 3860612.57 frames. ], batch size: 59, lr: 2.67e-02, grad_scale: 16.0
+2024-08-26 15:43:17,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=63237.333333333336, ans=0.2
+2024-08-26 15:43:20,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63237.333333333336, ans=0.1
+2024-08-26 15:43:26,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=63237.333333333336, ans=0.025
+2024-08-26 15:43:37,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=63344.0, ans=0.125
+2024-08-26 15:43:55,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=63450.666666666664, ans=0.125
+2024-08-26 15:43:55,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=63450.666666666664, ans=0.125
+2024-08-26 15:44:05,664 INFO [train.py:1114] (2/4) Epoch 5, batch 1950, loss[loss=0.2533, simple_loss=0.3059, pruned_loss=0.07344, ctc_loss=0.1344, over 19587.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3133, pruned_loss=0.07858, ctc_loss=0.147, over 3869917.03 frames. ], batch size: 52, lr: 2.67e-02, grad_scale: 16.0
+2024-08-26 15:44:11,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=63504.0, ans=0.0
+2024-08-26 15:44:12,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=63504.0, ans=0.2
+2024-08-26 15:44:20,098 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.677e+02 1.824e+02 1.963e+02 3.212e+02, threshold=3.647e+02, percent-clipped=0.0
+2024-08-26 15:44:22,053 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=63557.333333333336, ans=0.125
+2024-08-26 15:44:32,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=63610.666666666664, ans=0.125
+2024-08-26 15:44:32,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=63610.666666666664, ans=0.125
+2024-08-26 15:44:34,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=63664.0, ans=0.125
+2024-08-26 15:44:52,293 INFO [train.py:1114] (2/4) Epoch 5, batch 2000, loss[loss=0.2266, simple_loss=0.2736, pruned_loss=0.06484, ctc_loss=0.125, over 19684.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3143, pruned_loss=0.07944, ctc_loss=0.1484, over 3853024.26 frames. ], batch size: 45, lr: 2.66e-02, grad_scale: 32.0
+2024-08-26 15:44:57,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=63770.666666666664, ans=0.125
+2024-08-26 15:45:09,627 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.57 vs. limit=10.0
+2024-08-26 15:45:10,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=63877.333333333336, ans=0.125
+2024-08-26 15:45:16,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=63877.333333333336, ans=0.125
+2024-08-26 15:45:16,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=63877.333333333336, ans=0.0
+2024-08-26 15:45:23,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=63930.666666666664, ans=0.2
+2024-08-26 15:45:24,493 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=63930.666666666664, ans=0.0
+2024-08-26 15:45:42,245 INFO [train.py:1114] (2/4) Epoch 5, batch 2050, loss[loss=0.2409, simple_loss=0.2897, pruned_loss=0.07013, ctc_loss=0.1293, over 19738.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3126, pruned_loss=0.07878, ctc_loss=0.1471, over 3850417.08 frames. ], batch size: 47, lr: 2.66e-02, grad_scale: 32.0
+2024-08-26 15:45:54,606 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.354e+02 1.624e+02 1.773e+02 2.077e+02 3.322e+02, threshold=3.546e+02, percent-clipped=0.0
+2024-08-26 15:46:03,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=64144.0, ans=0.07
+2024-08-26 15:46:04,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=64144.0, ans=0.95
+2024-08-26 15:46:23,898 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=64250.666666666664, ans=0.95
+2024-08-26 15:46:26,367 INFO [train.py:1114] (2/4) Epoch 5, batch 2100, loss[loss=0.2524, simple_loss=0.3054, pruned_loss=0.07201, ctc_loss=0.1381, over 19770.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3118, pruned_loss=0.07816, ctc_loss=0.1459, over 3858122.29 frames. ], batch size: 54, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:46:30,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=64304.0, ans=0.035
+2024-08-26 15:46:30,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=64304.0, ans=0.125
+2024-08-26 15:46:31,860 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.49 vs. limit=15.0
+2024-08-26 15:46:32,613 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=64304.0, ans=0.1
+2024-08-26 15:46:32,850 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.55 vs. limit=15.0
+2024-08-26 15:46:38,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=64357.333333333336, ans=0.0
+2024-08-26 15:46:40,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=64357.333333333336, ans=0.125
+2024-08-26 15:46:52,165 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.83 vs. limit=15.0
+2024-08-26 15:47:19,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=64517.333333333336, ans=0.0
+2024-08-26 15:47:20,088 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=64517.333333333336, ans=0.0
+2024-08-26 15:47:23,493 INFO [train.py:1114] (2/4) Epoch 5, batch 2150, loss[loss=0.2602, simple_loss=0.312, pruned_loss=0.07612, ctc_loss=0.1405, over 19854.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.311, pruned_loss=0.07776, ctc_loss=0.1452, over 3869402.62 frames. ], batch size: 52, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:47:27,253 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=64570.666666666664, ans=0.0
+2024-08-26 15:47:28,974 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=64570.666666666664, ans=0.125
+2024-08-26 15:47:35,825 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.599e+02 1.757e+02 2.074e+02 2.995e+02, threshold=3.513e+02, percent-clipped=0.0
+2024-08-26 15:47:41,435 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=64677.333333333336, ans=0.125
+2024-08-26 15:47:54,333 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=64730.666666666664, ans=0.0
+2024-08-26 15:47:54,363 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=64730.666666666664, ans=0.2
+2024-08-26 15:48:07,247 INFO [train.py:1114] (2/4) Epoch 5, batch 2200, loss[loss=0.2861, simple_loss=0.3324, pruned_loss=0.08656, ctc_loss=0.1664, over 19582.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3109, pruned_loss=0.07758, ctc_loss=0.1449, over 3867300.66 frames. ], batch size: 57, lr: 2.65e-02, grad_scale: 32.0
+2024-08-26 15:48:11,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=64837.333333333336, ans=0.0
+2024-08-26 15:48:11,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=64837.333333333336, ans=0.2
+2024-08-26 15:48:19,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=64890.666666666664, ans=0.125
+2024-08-26 15:48:30,719 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.82 vs. limit=10.0
+2024-08-26 15:48:52,498 INFO [train.py:1114] (2/4) Epoch 5, batch 2250, loss[loss=0.2699, simple_loss=0.3268, pruned_loss=0.07737, ctc_loss=0.1455, over 19599.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3109, pruned_loss=0.07741, ctc_loss=0.1446, over 3867046.89 frames. ], batch size: 55, lr: 2.64e-02, grad_scale: 16.0
+2024-08-26 15:48:55,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=65104.0, ans=0.125
+2024-08-26 15:48:57,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=65104.0, ans=0.1
+2024-08-26 15:49:05,747 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.721e+02 2.056e+02 2.448e+02 6.138e+02, threshold=4.112e+02, percent-clipped=3.0
+2024-08-26 15:49:10,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=65210.666666666664, ans=0.125
+2024-08-26 15:49:26,369 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.74 vs. limit=15.0
+2024-08-26 15:49:33,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=65317.333333333336, ans=0.0
+2024-08-26 15:49:36,474 INFO [train.py:1114] (2/4) Epoch 5, batch 2300, loss[loss=0.2251, simple_loss=0.2826, pruned_loss=0.0613, ctc_loss=0.1124, over 19505.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3095, pruned_loss=0.07726, ctc_loss=0.1442, over 3860953.91 frames. ], batch size: 49, lr: 2.64e-02, grad_scale: 16.0
+2024-08-26 15:49:40,396 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.29 vs. limit=15.0
+2024-08-26 15:49:55,206 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=65477.333333333336, ans=0.2
+2024-08-26 15:50:02,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=65477.333333333336, ans=0.125
+2024-08-26 15:50:07,888 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.11 vs. limit=10.0
+2024-08-26 15:50:11,881 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=65530.666666666664, ans=0.2
+2024-08-26 15:50:23,060 INFO [train.py:1114] (2/4) Epoch 5, batch 2350, loss[loss=0.292, simple_loss=0.3418, pruned_loss=0.08822, ctc_loss=0.1646, over 19668.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3099, pruned_loss=0.07764, ctc_loss=0.1447, over 3863446.28 frames. ], batch size: 63, lr: 2.63e-02, grad_scale: 16.0
+2024-08-26 15:50:27,679 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=65637.33333333333, ans=0.125
+2024-08-26 15:50:31,205 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.03 vs. limit=15.0
+2024-08-26 15:50:36,072 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.297e+02 1.568e+02 1.781e+02 2.033e+02 3.218e+02, threshold=3.561e+02, percent-clipped=0.0
+2024-08-26 15:50:38,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65690.66666666667, ans=0.1
+2024-08-26 15:50:53,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=65797.33333333333, ans=0.125
+2024-08-26 15:50:54,047 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=65797.33333333333, ans=0.1
+2024-08-26 15:51:07,083 INFO [train.py:1114] (2/4) Epoch 5, batch 2400, loss[loss=0.2991, simple_loss=0.3423, pruned_loss=0.09362, ctc_loss=0.1714, over 19291.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3129, pruned_loss=0.07902, ctc_loss=0.147, over 3857528.14 frames. ], batch size: 71, lr: 2.63e-02, grad_scale: 32.0
+2024-08-26 15:51:16,931 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:51:17,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=65957.33333333333, ans=0.0
+2024-08-26 15:51:25,202 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=66010.66666666667, ans=0.125
+2024-08-26 15:51:49,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=66117.33333333333, ans=0.2
+2024-08-26 15:51:52,367 INFO [train.py:1114] (2/4) Epoch 5, batch 2450, loss[loss=0.332, simple_loss=0.351, pruned_loss=0.1135, ctc_loss=0.215, over 14018.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3181, pruned_loss=0.08339, ctc_loss=0.1554, over 3729975.43 frames. ], batch size: 141, lr: 2.63e-02, grad_scale: 16.0
+2024-08-26 15:51:52,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=66170.66666666667, ans=0.1
+2024-08-26 15:52:03,122 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66224.0, ans=0.1
+2024-08-26 15:52:05,942 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.26 vs. limit=10.0
+2024-08-26 15:52:07,305 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 1.716e+02 1.912e+02 2.213e+02 5.978e+02, threshold=3.825e+02, percent-clipped=3.0
+2024-08-26 15:52:18,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=66277.33333333333, ans=0.2
+2024-08-26 15:52:22,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=66330.66666666667, ans=0.0
+2024-08-26 15:52:25,023 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=66330.66666666667, ans=0.125
+2024-08-26 15:53:42,761 INFO [train.py:1114] (2/4) Epoch 6, batch 0, loss[loss=0.2499, simple_loss=0.2954, pruned_loss=0.07412, ctc_loss=0.1402, over 19792.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.2954, pruned_loss=0.07412, ctc_loss=0.1402, over 19792.00 frames. ], batch size: 49, lr: 2.45e-02, grad_scale: 32.0
+2024-08-26 15:53:42,762 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 15:54:26,133 INFO [train.py:1146] (2/4) Epoch 6, validation: loss=0.2162, simple_loss=0.3022, pruned_loss=0.04785, ctc_loss=0.08613, over 944034.00 frames.
+2024-08-26 15:54:26,134 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
+2024-08-26 15:54:29,411 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.75 vs. limit=10.0
+2024-08-26 15:54:33,606 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.97 vs. limit=6.0
+2024-08-26 15:54:51,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=66485.33333333333, ans=0.125
+2024-08-26 15:54:56,725 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=66538.66666666667, ans=0.125
+2024-08-26 15:55:13,594 INFO [train.py:1114] (2/4) Epoch 6, batch 50, loss[loss=0.2187, simple_loss=0.279, pruned_loss=0.05684, ctc_loss=0.1119, over 19723.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.3127, pruned_loss=0.07816, ctc_loss=0.1461, over 845158.26 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:55:14,701 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=66645.33333333333, ans=0.0
+2024-08-26 15:55:31,856 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=66752.0, ans=0.125
+2024-08-26 15:55:39,181 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.306e+02 1.607e+02 1.759e+02 1.997e+02 3.496e+02, threshold=3.518e+02, percent-clipped=0.0
+2024-08-26 15:55:47,258 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.30 vs. limit=15.0
+2024-08-26 15:55:49,148 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.48 vs. limit=15.0
+2024-08-26 15:55:49,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=66805.33333333333, ans=0.125
+2024-08-26 15:55:54,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=66858.66666666667, ans=0.125
+2024-08-26 15:56:03,169 INFO [train.py:1114] (2/4) Epoch 6, batch 100, loss[loss=0.2477, simple_loss=0.2973, pruned_loss=0.07142, ctc_loss=0.1381, over 19707.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.3138, pruned_loss=0.0781, ctc_loss=0.147, over 1498722.63 frames. ], batch size: 51, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:56:24,631 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.37 vs. limit=6.0
+2024-08-26 15:56:43,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=67072.0, ans=0.125
+2024-08-26 15:56:45,496 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=67072.0, ans=0.125
+2024-08-26 15:56:54,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=67125.33333333333, ans=0.125
+2024-08-26 15:56:57,272 INFO [train.py:1114] (2/4) Epoch 6, batch 150, loss[loss=0.2435, simple_loss=0.2843, pruned_loss=0.07518, ctc_loss=0.1307, over 19740.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3111, pruned_loss=0.07702, ctc_loss=0.144, over 2027884.99 frames. ], batch size: 47, lr: 2.44e-02, grad_scale: 32.0
+2024-08-26 15:57:10,656 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 15:57:22,725 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.333e+02 1.584e+02 1.709e+02 1.986e+02 2.973e+02, threshold=3.418e+02, percent-clipped=0.0
+2024-08-26 15:57:27,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=67338.66666666667, ans=0.125
+2024-08-26 15:57:28,937 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.50 vs. limit=12.0
+2024-08-26 15:57:37,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=67392.0, ans=0.125
+2024-08-26 15:57:42,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=67392.0, ans=0.1
+2024-08-26 15:57:44,424 INFO [train.py:1114] (2/4) Epoch 6, batch 200, loss[loss=0.2855, simple_loss=0.3304, pruned_loss=0.08694, ctc_loss=0.1666, over 18250.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3103, pruned_loss=0.07734, ctc_loss=0.1443, over 2435615.73 frames. ], batch size: 85, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:57:54,745 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.56 vs. limit=15.0
+2024-08-26 15:58:04,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=67552.0, ans=0.1
+2024-08-26 15:58:11,321 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer_ff3.min_abs, batch_count=67552.0, ans=0.2
+2024-08-26 15:58:28,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=67658.66666666667, ans=0.125
+2024-08-26 15:58:28,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=67658.66666666667, ans=0.125
+2024-08-26 15:58:36,026 INFO [train.py:1114] (2/4) Epoch 6, batch 250, loss[loss=0.2711, simple_loss=0.325, pruned_loss=0.07893, ctc_loss=0.1481, over 19388.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3089, pruned_loss=0.07633, ctc_loss=0.1424, over 2755871.45 frames. ], batch size: 67, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:59:07,037 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.12 vs. limit=15.0
+2024-08-26 15:59:10,418 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.559e+02 1.703e+02 1.915e+02 3.590e+02, threshold=3.407e+02, percent-clipped=1.0
+2024-08-26 15:59:12,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=67872.0, ans=0.125
+2024-08-26 15:59:17,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=67872.0, ans=0.125
+2024-08-26 15:59:32,235 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.22 vs. limit=8.0
+2024-08-26 15:59:34,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=67978.66666666667, ans=0.125
+2024-08-26 15:59:35,249 INFO [train.py:1114] (2/4) Epoch 6, batch 300, loss[loss=0.2547, simple_loss=0.3144, pruned_loss=0.07071, ctc_loss=0.1339, over 19499.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3078, pruned_loss=0.07547, ctc_loss=0.1408, over 3000609.96 frames. ], batch size: 61, lr: 2.43e-02, grad_scale: 32.0
+2024-08-26 15:59:44,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=67978.66666666667, ans=0.125
+2024-08-26 15:59:51,973 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=18.36 vs. limit=15.0
+2024-08-26 16:00:08,532 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:00:14,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=68192.0, ans=0.125
+2024-08-26 16:00:20,894 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=5.05 vs. limit=12.0
+2024-08-26 16:00:23,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=68245.33333333333, ans=0.125
+2024-08-26 16:00:24,077 INFO [train.py:1114] (2/4) Epoch 6, batch 350, loss[loss=0.2133, simple_loss=0.2678, pruned_loss=0.05747, ctc_loss=0.1095, over 19758.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3078, pruned_loss=0.07539, ctc_loss=0.1407, over 3191275.62 frames. ], batch size: 48, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:00:29,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=68245.33333333333, ans=10.0
+2024-08-26 16:00:35,791 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=68298.66666666667, ans=0.0
+2024-08-26 16:00:47,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=68352.0, ans=0.125
+2024-08-26 16:00:49,642 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 1.625e+02 1.872e+02 2.224e+02 3.924e+02, threshold=3.744e+02, percent-clipped=2.0
+2024-08-26 16:00:56,391 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=68405.33333333333, ans=0.125
+2024-08-26 16:01:05,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=68458.66666666667, ans=0.0
+2024-08-26 16:01:11,324 INFO [train.py:1114] (2/4) Epoch 6, batch 400, loss[loss=0.2484, simple_loss=0.3105, pruned_loss=0.06714, ctc_loss=0.1302, over 19497.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3075, pruned_loss=0.07517, ctc_loss=0.1402, over 3342789.81 frames. ], batch size: 54, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:01:29,621 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=68618.66666666667, ans=0.0
+2024-08-26 16:01:31,933 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=15.78 vs. limit=15.0
+2024-08-26 16:01:34,760 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.72 vs. limit=15.0
+2024-08-26 16:01:55,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=68725.33333333333, ans=0.0
+2024-08-26 16:02:07,098 INFO [train.py:1114] (2/4) Epoch 6, batch 450, loss[loss=0.2401, simple_loss=0.3076, pruned_loss=0.06252, ctc_loss=0.1189, over 19615.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3075, pruned_loss=0.07496, ctc_loss=0.1399, over 3451041.50 frames. ], batch size: 55, lr: 2.42e-02, grad_scale: 32.0
+2024-08-26 16:02:10,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=68778.66666666667, ans=0.125
+2024-08-26 16:02:23,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=68832.0, ans=0.09899494936611666
+2024-08-26 16:02:34,031 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.302e+02 1.611e+02 1.799e+02 2.140e+02 4.925e+02, threshold=3.597e+02, percent-clipped=1.0
+2024-08-26 16:02:44,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=68938.66666666667, ans=0.2
+2024-08-26 16:02:54,093 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=68992.0, ans=0.0
+2024-08-26 16:02:55,682 INFO [train.py:1114] (2/4) Epoch 6, batch 500, loss[loss=0.2617, simple_loss=0.3105, pruned_loss=0.0789, ctc_loss=0.1377, over 19687.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3063, pruned_loss=0.07452, ctc_loss=0.1389, over 3545450.38 frames. ], batch size: 63, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:03:07,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=69098.66666666667, ans=0.125
+2024-08-26 16:03:08,659 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=20.26 vs. limit=22.5
+2024-08-26 16:03:31,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=69205.33333333333, ans=0.125
+2024-08-26 16:03:32,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=69205.33333333333, ans=0.2
+2024-08-26 16:03:43,066 INFO [train.py:1114] (2/4) Epoch 6, batch 550, loss[loss=0.2793, simple_loss=0.3209, pruned_loss=0.08629, ctc_loss=0.1631, over 19302.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3062, pruned_loss=0.07444, ctc_loss=0.1389, over 3606897.15 frames. ], batch size: 71, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:04:01,851 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.55 vs. limit=15.0
+2024-08-26 16:04:08,887 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.633e+02 1.875e+02 2.080e+02 6.681e+02, threshold=3.749e+02, percent-clipped=3.0
+2024-08-26 16:04:19,506 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.98 vs. limit=15.0
+2024-08-26 16:04:25,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=69525.33333333333, ans=0.2
+2024-08-26 16:04:30,180 INFO [train.py:1114] (2/4) Epoch 6, batch 600, loss[loss=0.272, simple_loss=0.3243, pruned_loss=0.07886, ctc_loss=0.1549, over 19395.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3065, pruned_loss=0.07453, ctc_loss=0.1395, over 3664060.99 frames. ], batch size: 67, lr: 2.41e-02, grad_scale: 32.0
+2024-08-26 16:04:30,584 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:04:30,830 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.16 vs. limit=15.0
+2024-08-26 16:04:31,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=69578.66666666667, ans=0.1
+2024-08-26 16:04:33,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=69578.66666666667, ans=0.0
+2024-08-26 16:04:37,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=69578.66666666667, ans=0.125
+2024-08-26 16:04:49,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=69632.0, ans=0.1
+2024-08-26 16:05:04,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=69685.33333333333, ans=0.0
+2024-08-26 16:05:07,661 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=69738.66666666667, ans=0.125
+2024-08-26 16:05:12,606 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=69738.66666666667, ans=0.0
+2024-08-26 16:05:24,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=69792.0, ans=0.125
+2024-08-26 16:05:25,773 INFO [train.py:1114] (2/4) Epoch 6, batch 650, loss[loss=0.245, simple_loss=0.3053, pruned_loss=0.06689, ctc_loss=0.1273, over 19766.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3057, pruned_loss=0.07391, ctc_loss=0.1381, over 3714679.70 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 32.0
+2024-08-26 16:05:53,414 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.596e+02 1.734e+02 1.974e+02 3.978e+02, threshold=3.467e+02, percent-clipped=1.0
+2024-08-26 16:05:59,466 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=70005.33333333333, ans=0.2
+2024-08-26 16:06:06,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=70058.66666666667, ans=0.1
+2024-08-26 16:06:15,337 INFO [train.py:1114] (2/4) Epoch 6, batch 700, loss[loss=0.2332, simple_loss=0.2839, pruned_loss=0.06565, ctc_loss=0.1281, over 19709.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.3061, pruned_loss=0.07389, ctc_loss=0.1382, over 3746895.95 frames. ], batch size: 51, lr: 2.40e-02, grad_scale: 16.0
+2024-08-26 16:06:17,818 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.08 vs. limit=15.0
+2024-08-26 16:06:26,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=70165.33333333333, ans=0.125
+2024-08-26 16:06:29,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=70165.33333333333, ans=0.125
+2024-08-26 16:06:46,120 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=70272.0, ans=0.2
+2024-08-26 16:06:50,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=70272.0, ans=0.125
+2024-08-26 16:06:56,822 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:06:59,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=70325.33333333333, ans=0.0
+2024-08-26 16:07:02,325 INFO [train.py:1114] (2/4) Epoch 6, batch 750, loss[loss=0.2513, simple_loss=0.3145, pruned_loss=0.06829, ctc_loss=0.1288, over 19511.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3059, pruned_loss=0.07394, ctc_loss=0.1381, over 3773056.43 frames. ], batch size: 54, lr: 2.40e-02, grad_scale: 16.0
+2024-08-26 16:07:21,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=70432.0, ans=0.125
+2024-08-26 16:07:23,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=70432.0, ans=0.1
+2024-08-26 16:07:33,079 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.662e+02 1.845e+02 2.236e+02 2.956e+02, threshold=3.689e+02, percent-clipped=0.0
+2024-08-26 16:07:53,500 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=70538.66666666667, ans=0.0
+2024-08-26 16:08:06,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=70592.0, ans=0.0
+2024-08-26 16:08:25,917 INFO [train.py:1114] (2/4) Epoch 6, batch 800, loss[loss=0.2415, simple_loss=0.2809, pruned_loss=0.07371, ctc_loss=0.1366, over 19389.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3063, pruned_loss=0.07439, ctc_loss=0.1386, over 3794047.98 frames. ], batch size: 48, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:08:35,571 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=70698.66666666667, ans=0.125
+2024-08-26 16:09:29,624 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=70858.66666666667, ans=0.125
+2024-08-26 16:09:32,229 INFO [train.py:1114] (2/4) Epoch 6, batch 850, loss[loss=0.2666, simple_loss=0.3135, pruned_loss=0.07938, ctc_loss=0.1523, over 19665.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3059, pruned_loss=0.07415, ctc_loss=0.1384, over 3813304.36 frames. ], batch size: 59, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:09:42,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=70965.33333333333, ans=0.125
+2024-08-26 16:09:58,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=71018.66666666667, ans=0.125
+2024-08-26 16:09:58,790 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 1.558e+02 1.696e+02 1.888e+02 5.151e+02, threshold=3.391e+02, percent-clipped=1.0
+2024-08-26 16:10:01,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=71072.0, ans=0.125
+2024-08-26 16:10:03,213 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=71072.0, ans=0.125
+2024-08-26 16:10:30,284 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=5.16 vs. limit=12.0
+2024-08-26 16:10:34,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=71125.33333333333, ans=0.2
+2024-08-26 16:10:35,830 INFO [train.py:1114] (2/4) Epoch 6, batch 900, loss[loss=0.2167, simple_loss=0.2764, pruned_loss=0.05662, ctc_loss=0.1097, over 19785.00 frames. ], tot_loss[loss=0.255, simple_loss=0.306, pruned_loss=0.07423, ctc_loss=0.1387, over 3818368.18 frames. ], batch size: 49, lr: 2.39e-02, grad_scale: 32.0
+2024-08-26 16:10:42,002 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=71178.66666666667, ans=0.125
+2024-08-26 16:10:45,713 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=71232.0, ans=0.125
+2024-08-26 16:10:51,242 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71232.0, ans=0.1
+2024-08-26 16:11:08,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=71338.66666666667, ans=0.0
+2024-08-26 16:11:10,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.const_attention_rate, batch_count=71338.66666666667, ans=0.025
+2024-08-26 16:11:23,807 INFO [train.py:1114] (2/4) Epoch 6, batch 950, loss[loss=0.2403, simple_loss=0.2921, pruned_loss=0.06951, ctc_loss=0.1238, over 19507.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3067, pruned_loss=0.07481, ctc_loss=0.1398, over 3820269.97 frames. ], batch size: 49, lr: 2.38e-02, grad_scale: 16.0
+2024-08-26 16:11:39,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=71445.33333333333, ans=0.125
+2024-08-26 16:11:40,278 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=71498.66666666667, ans=0.2
+2024-08-26 16:11:44,123 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=71498.66666666667, ans=0.0
+2024-08-26 16:11:51,247 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.02 vs. limit=22.5
+2024-08-26 16:11:53,731 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=71552.0, ans=0.2
+2024-08-26 16:11:53,827 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=71552.0, ans=0.0
+2024-08-26 16:11:56,215 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.40 vs. limit=15.0
+2024-08-26 16:11:57,181 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.39 vs. limit=22.5
+2024-08-26 16:11:57,823 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:11:59,452 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 1.602e+02 1.780e+02 2.099e+02 5.215e+02, threshold=3.559e+02, percent-clipped=4.0
+2024-08-26 16:12:05,428 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=71605.33333333333, ans=0.0
+2024-08-26 16:12:21,537 INFO [train.py:1114] (2/4) Epoch 6, batch 1000, loss[loss=0.2239, simple_loss=0.2785, pruned_loss=0.06219, ctc_loss=0.1126, over 19872.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3073, pruned_loss=0.07514, ctc_loss=0.1404, over 3816465.51 frames. ], batch size: 52, lr: 2.38e-02, grad_scale: 16.0
+2024-08-26 16:12:24,002 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=71712.0, ans=0.125
+2024-08-26 16:12:30,114 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=6.06 vs. limit=15.0
+2024-08-26 16:12:33,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=71765.33333333333, ans=0.125
+2024-08-26 16:12:34,498 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:12:52,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=71765.33333333333, ans=0.0
+2024-08-26 16:13:03,220 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.85 vs. limit=15.0
+2024-08-26 16:13:04,873 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=71872.0, ans=0.2
+2024-08-26 16:13:05,996 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.49 vs. limit=10.0
+2024-08-26 16:13:06,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=71872.0, ans=0.125
+2024-08-26 16:13:09,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=71872.0, ans=0.125
+2024-08-26 16:13:15,493 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=71925.33333333333, ans=0.125
+2024-08-26 16:13:22,626 INFO [train.py:1114] (2/4) Epoch 6, batch 1050, loss[loss=0.2779, simple_loss=0.3243, pruned_loss=0.08441, ctc_loss=0.1566, over 19849.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3063, pruned_loss=0.07472, ctc_loss=0.1397, over 3823461.72 frames. ], batch size: 57, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:13:23,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.whiten.whitening_limit, batch_count=71978.66666666667, ans=12.0
+2024-08-26 16:13:50,113 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.587e+02 1.763e+02 2.081e+02 5.001e+02, threshold=3.526e+02, percent-clipped=1.0
+2024-08-26 16:13:55,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=72138.66666666667, ans=0.2
+2024-08-26 16:14:03,084 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=72192.0, ans=0.125
+2024-08-26 16:14:05,088 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=72192.0, ans=0.2
+2024-08-26 16:14:10,561 INFO [train.py:1114] (2/4) Epoch 6, batch 1100, loss[loss=0.2356, simple_loss=0.2918, pruned_loss=0.06509, ctc_loss=0.1229, over 19575.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3058, pruned_loss=0.07454, ctc_loss=0.1394, over 3830010.05 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:14:22,372 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.05 vs. limit=15.0
+2024-08-26 16:14:30,407 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=72298.66666666667, ans=0.125
+2024-08-26 16:14:37,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=72352.0, ans=0.1
+2024-08-26 16:14:37,610 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=72352.0, ans=0.2
+2024-08-26 16:15:13,689 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=72405.33333333333, ans=0.0
+2024-08-26 16:15:13,798 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.38 vs. limit=12.0
+2024-08-26 16:15:25,920 INFO [train.py:1114] (2/4) Epoch 6, batch 1150, loss[loss=0.2485, simple_loss=0.3036, pruned_loss=0.06973, ctc_loss=0.1349, over 19575.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3061, pruned_loss=0.07465, ctc_loss=0.1395, over 3829261.88 frames. ], batch size: 52, lr: 2.37e-02, grad_scale: 16.0
+2024-08-26 16:15:46,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_na.min_abs, batch_count=72512.0, ans=0.02
+2024-08-26 16:16:50,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=72618.66666666667, ans=0.125
+2024-08-26 16:16:51,599 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.628e+02 1.822e+02 2.077e+02 5.117e+02, threshold=3.645e+02, percent-clipped=2.0
+2024-08-26 16:16:55,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=72672.0, ans=0.125
+2024-08-26 16:17:03,559 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.57 vs. limit=22.5
+2024-08-26 16:17:15,528 INFO [train.py:1114] (2/4) Epoch 6, batch 1200, loss[loss=0.2618, simple_loss=0.3174, pruned_loss=0.07517, ctc_loss=0.1396, over 19839.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3068, pruned_loss=0.07498, ctc_loss=0.1401, over 3825281.55 frames. ], batch size: 57, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:17:22,720 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=72778.66666666667, ans=0.04949747468305833
+2024-08-26 16:17:28,598 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.89 vs. limit=15.0
+2024-08-26 16:17:31,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72832.0, ans=0.1
+2024-08-26 16:17:36,155 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.50 vs. limit=15.0
+2024-08-26 16:17:47,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=72938.66666666667, ans=0.2
+2024-08-26 16:18:04,243 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.55 vs. limit=6.0
+2024-08-26 16:18:04,585 INFO [train.py:1114] (2/4) Epoch 6, batch 1250, loss[loss=0.2701, simple_loss=0.3139, pruned_loss=0.08358, ctc_loss=0.1481, over 19541.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.307, pruned_loss=0.07454, ctc_loss=0.1392, over 3842971.79 frames. ], batch size: 61, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:18:04,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=73045.33333333333, ans=0.125
+2024-08-26 16:18:11,815 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=4.70 vs. limit=12.0
+2024-08-26 16:18:13,433 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=73098.66666666667, ans=0.0
+2024-08-26 16:18:17,642 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.57 vs. limit=22.5
+2024-08-26 16:18:31,859 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.253e+02 1.534e+02 1.709e+02 2.004e+02 3.682e+02, threshold=3.418e+02, percent-clipped=1.0
+2024-08-26 16:18:45,964 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=73205.33333333333, ans=0.0
+2024-08-26 16:18:59,489 INFO [train.py:1114] (2/4) Epoch 6, batch 1300, loss[loss=0.2691, simple_loss=0.3263, pruned_loss=0.07685, ctc_loss=0.1455, over 18813.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3062, pruned_loss=0.07379, ctc_loss=0.1378, over 3845774.18 frames. ], batch size: 76, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:19:14,154 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=73312.0, ans=0.2
+2024-08-26 16:19:15,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=73312.0, ans=0.0
+2024-08-26 16:20:00,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=73418.66666666667, ans=0.125
+2024-08-26 16:20:10,739 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.58 vs. limit=15.0
+2024-08-26 16:20:14,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=73472.0, ans=0.125
+2024-08-26 16:20:22,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=73525.33333333333, ans=0.0
+2024-08-26 16:20:25,874 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.64 vs. limit=10.0
+2024-08-26 16:20:32,211 INFO [train.py:1114] (2/4) Epoch 6, batch 1350, loss[loss=0.2761, simple_loss=0.3217, pruned_loss=0.08221, ctc_loss=0.1654, over 19767.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3056, pruned_loss=0.07331, ctc_loss=0.1367, over 3857695.64 frames. ], batch size: 54, lr: 2.36e-02, grad_scale: 32.0
+2024-08-26 16:20:32,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=73578.66666666667, ans=0.125
+2024-08-26 16:20:39,521 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.27 vs. limit=15.0
+2024-08-26 16:21:00,538 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.536e+02 1.657e+02 1.960e+02 3.055e+02, threshold=3.315e+02, percent-clipped=0.0
+2024-08-26 16:21:20,642 INFO [train.py:1114] (2/4) Epoch 6, batch 1400, loss[loss=0.2293, simple_loss=0.2819, pruned_loss=0.06424, ctc_loss=0.1205, over 19662.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3053, pruned_loss=0.07327, ctc_loss=0.1365, over 3865002.34 frames. ], batch size: 46, lr: 2.35e-02, grad_scale: 32.0
+2024-08-26 16:21:24,083 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=23.28 vs. limit=22.5
+2024-08-26 16:21:35,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=73898.66666666667, ans=0.04949747468305833
+2024-08-26 16:21:41,086 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=73952.0, ans=0.025
+2024-08-26 16:21:44,862 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=73952.0, ans=0.05
+2024-08-26 16:21:48,800 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=74005.33333333333, ans=0.09899494936611666
+2024-08-26 16:22:08,757 INFO [train.py:1114] (2/4) Epoch 6, batch 1450, loss[loss=0.2775, simple_loss=0.321, pruned_loss=0.08542, ctc_loss=0.1581, over 19682.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3062, pruned_loss=0.0741, ctc_loss=0.1381, over 3862462.85 frames. ], batch size: 63, lr: 2.35e-02, grad_scale: 16.0
+2024-08-26 16:22:20,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=74165.33333333333, ans=0.025
+2024-08-26 16:23:25,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74218.66666666667, ans=0.1
+2024-08-26 16:23:32,943 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.303e+02 1.612e+02 1.863e+02 2.093e+02 4.374e+02, threshold=3.727e+02, percent-clipped=2.0
+2024-08-26 16:23:34,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=74272.0, ans=0.125
+2024-08-26 16:23:47,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=74325.33333333333, ans=0.125
+2024-08-26 16:23:57,154 INFO [train.py:1114] (2/4) Epoch 6, batch 1500, loss[loss=0.265, simple_loss=0.3168, pruned_loss=0.07776, ctc_loss=0.1443, over 19537.00 frames. ], tot_loss[loss=0.254, simple_loss=0.306, pruned_loss=0.07355, ctc_loss=0.1373, over 3861892.67 frames. ], batch size: 57, lr: 2.35e-02, grad_scale: 16.0
+2024-08-26 16:24:00,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=74378.66666666667, ans=0.0
+2024-08-26 16:24:25,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=74378.66666666667, ans=0.0
+2024-08-26 16:24:34,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=74432.0, ans=0.125
+2024-08-26 16:24:37,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=74432.0, ans=0.125
+2024-08-26 16:24:38,115 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.12 vs. limit=12.0
+2024-08-26 16:25:02,158 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74592.0, ans=0.1
+2024-08-26 16:25:02,432 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.12 vs. limit=15.0
+2024-08-26 16:25:10,566 INFO [train.py:1114] (2/4) Epoch 6, batch 1550, loss[loss=0.2804, simple_loss=0.328, pruned_loss=0.08553, ctc_loss=0.1546, over 19637.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3063, pruned_loss=0.074, ctc_loss=0.1382, over 3847052.20 frames. ], batch size: 60, lr: 2.34e-02, grad_scale: 16.0
+2024-08-26 16:25:55,555 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=74752.0, ans=0.1
+2024-08-26 16:26:20,816 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 1.577e+02 1.696e+02 1.957e+02 2.811e+02, threshold=3.391e+02, percent-clipped=0.0
+2024-08-26 16:26:40,315 INFO [train.py:1114] (2/4) Epoch 6, batch 1600, loss[loss=0.2787, simple_loss=0.3308, pruned_loss=0.08099, ctc_loss=0.1615, over 19842.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3062, pruned_loss=0.07404, ctc_loss=0.1384, over 3836413.87 frames. ], batch size: 57, lr: 2.34e-02, grad_scale: 32.0
+2024-08-26 16:26:44,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=74912.0, ans=0.125
+2024-08-26 16:26:54,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=74965.33333333333, ans=0.125
+2024-08-26 16:26:54,937 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=74965.33333333333, ans=0.0
+2024-08-26 16:26:58,144 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.49 vs. limit=15.0
+2024-08-26 16:27:22,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=75072.0, ans=0.125
+2024-08-26 16:27:22,772 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.25 vs. limit=15.0
+2024-08-26 16:27:35,217 INFO [train.py:1114] (2/4) Epoch 6, batch 1650, loss[loss=0.2598, simple_loss=0.3133, pruned_loss=0.07418, ctc_loss=0.1451, over 19652.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3054, pruned_loss=0.07367, ctc_loss=0.1377, over 3832415.40 frames. ], batch size: 59, lr: 2.34e-02, grad_scale: 32.0
+2024-08-26 16:28:43,071 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.584e+02 1.799e+02 2.082e+02 3.549e+02, threshold=3.597e+02, percent-clipped=1.0
+2024-08-26 16:28:44,332 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=75338.66666666667, ans=0.1
+2024-08-26 16:29:36,193 INFO [train.py:1114] (2/4) Epoch 6, batch 1700, loss[loss=0.2118, simple_loss=0.2675, pruned_loss=0.05647, ctc_loss=0.1078, over 19646.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3046, pruned_loss=0.07269, ctc_loss=0.136, over 3847042.32 frames. ], batch size: 46, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:29:43,877 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=75445.33333333333, ans=0.125
+2024-08-26 16:29:45,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=75498.66666666667, ans=0.0
+2024-08-26 16:30:10,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=75605.33333333333, ans=0.125
+2024-08-26 16:30:24,061 INFO [train.py:1114] (2/4) Epoch 6, batch 1750, loss[loss=0.2065, simple_loss=0.2588, pruned_loss=0.05548, ctc_loss=0.1079, over 19618.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3035, pruned_loss=0.07198, ctc_loss=0.1347, over 3852057.66 frames. ], batch size: 45, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:31:02,149 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=75818.66666666667, ans=0.125
+2024-08-26 16:31:04,531 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.307e+02 1.524e+02 1.697e+02 1.959e+02 3.052e+02, threshold=3.394e+02, percent-clipped=0.0
+2024-08-26 16:31:04,786 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=75872.0, ans=0.125
+2024-08-26 16:31:06,560 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=75872.0, ans=0.125
+2024-08-26 16:31:06,721 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.39 vs. limit=10.0
+2024-08-26 16:31:25,742 INFO [train.py:1114] (2/4) Epoch 6, batch 1800, loss[loss=0.2583, simple_loss=0.3147, pruned_loss=0.07399, ctc_loss=0.1349, over 19612.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.304, pruned_loss=0.07237, ctc_loss=0.1353, over 3854282.85 frames. ], batch size: 55, lr: 2.33e-02, grad_scale: 32.0
+2024-08-26 16:32:00,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75978.66666666667, ans=0.1
+2024-08-26 16:32:10,874 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=76032.0, ans=0.0
+2024-08-26 16:32:56,733 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=76192.0, ans=0.125
+2024-08-26 16:33:01,906 INFO [train.py:1114] (2/4) Epoch 6, batch 1850, loss[loss=0.2589, simple_loss=0.3152, pruned_loss=0.07351, ctc_loss=0.139, over 19589.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3032, pruned_loss=0.0721, ctc_loss=0.1346, over 3857785.01 frames. ], batch size: 57, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:33:22,146 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=76245.33333333333, ans=15.0
+2024-08-26 16:33:26,885 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=76245.33333333333, ans=0.1
+2024-08-26 16:33:43,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=76298.66666666667, ans=0.125
+2024-08-26 16:33:55,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=76298.66666666667, ans=0.2
+2024-08-26 16:34:05,205 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=76405.33333333333, ans=0.125
+2024-08-26 16:34:05,736 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 1.545e+02 1.701e+02 1.893e+02 2.907e+02, threshold=3.402e+02, percent-clipped=0.0
+2024-08-26 16:34:12,599 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.81 vs. limit=15.0
+2024-08-26 16:34:20,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=76458.66666666667, ans=0.0
+2024-08-26 16:34:23,397 INFO [train.py:1114] (2/4) Epoch 6, batch 1900, loss[loss=0.2564, simple_loss=0.3183, pruned_loss=0.07072, ctc_loss=0.1326, over 19662.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.304, pruned_loss=0.07242, ctc_loss=0.135, over 3862819.37 frames. ], batch size: 59, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:34:38,743 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.92 vs. limit=15.0
+2024-08-26 16:34:40,128 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=76618.66666666667, ans=0.0
+2024-08-26 16:34:48,912 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=76672.0, ans=0.0
+2024-08-26 16:35:14,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=76672.0, ans=0.125
+2024-08-26 16:35:14,658 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=76672.0, ans=0.125
+2024-08-26 16:35:14,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=76672.0, ans=0.025
+2024-08-26 16:35:27,760 INFO [train.py:1114] (2/4) Epoch 6, batch 1950, loss[loss=0.2176, simple_loss=0.2836, pruned_loss=0.0542, ctc_loss=0.1081, over 19603.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.305, pruned_loss=0.07231, ctc_loss=0.1352, over 3871571.98 frames. ], batch size: 52, lr: 2.32e-02, grad_scale: 32.0
+2024-08-26 16:36:32,172 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.646e+02 1.808e+02 2.059e+02 4.885e+02, threshold=3.617e+02, percent-clipped=2.0
+2024-08-26 16:36:38,121 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.28 vs. limit=22.5
+2024-08-26 16:36:44,676 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=76992.0, ans=0.125
+2024-08-26 16:36:52,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=77045.33333333333, ans=0.1
+2024-08-26 16:36:53,614 INFO [train.py:1114] (2/4) Epoch 6, batch 2000, loss[loss=0.2204, simple_loss=0.265, pruned_loss=0.06447, ctc_loss=0.1169, over 19653.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3052, pruned_loss=0.07267, ctc_loss=0.1357, over 3855460.36 frames. ], batch size: 45, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:37:11,017 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=77152.0, ans=0.125
+2024-08-26 16:37:11,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=77152.0, ans=0.0
+2024-08-26 16:37:38,182 INFO [train.py:1114] (2/4) Epoch 6, batch 2050, loss[loss=0.218, simple_loss=0.2729, pruned_loss=0.05833, ctc_loss=0.116, over 19722.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3041, pruned_loss=0.07249, ctc_loss=0.1353, over 3851218.18 frames. ], batch size: 47, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:37:41,957 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:37:51,641 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=77365.33333333333, ans=0.1
+2024-08-26 16:38:04,756 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 1.566e+02 1.748e+02 2.075e+02 4.290e+02, threshold=3.497e+02, percent-clipped=1.0
+2024-08-26 16:38:07,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=77472.0, ans=0.125
+2024-08-26 16:38:11,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=77472.0, ans=0.0
+2024-08-26 16:38:12,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=77525.33333333333, ans=0.0
+2024-08-26 16:38:14,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=77525.33333333333, ans=0.07
+2024-08-26 16:38:15,611 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.67 vs. limit=22.5
+2024-08-26 16:38:18,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=77525.33333333333, ans=0.07
+2024-08-26 16:38:34,155 INFO [train.py:1114] (2/4) Epoch 6, batch 2100, loss[loss=0.2437, simple_loss=0.298, pruned_loss=0.06834, ctc_loss=0.1317, over 19787.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3032, pruned_loss=0.07182, ctc_loss=0.1343, over 3858067.76 frames. ], batch size: 54, lr: 2.31e-02, grad_scale: 32.0
+2024-08-26 16:38:36,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=77578.66666666667, ans=0.125
+2024-08-26 16:39:09,116 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=77578.66666666667, ans=0.5
+2024-08-26 16:39:35,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=77738.66666666667, ans=0.1
+2024-08-26 16:39:38,251 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=77792.0, ans=0.0
+2024-08-26 16:39:46,023 INFO [train.py:1114] (2/4) Epoch 6, batch 2150, loss[loss=0.2372, simple_loss=0.2945, pruned_loss=0.06473, ctc_loss=0.1261, over 19851.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3029, pruned_loss=0.07172, ctc_loss=0.134, over 3868909.89 frames. ], batch size: 52, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:39:52,715 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=77845.33333333333, ans=0.125
+2024-08-26 16:39:53,040 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=5.88 vs. limit=15.0
+2024-08-26 16:40:00,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=77898.66666666667, ans=0.125
+2024-08-26 16:40:03,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=77898.66666666667, ans=0.0
+2024-08-26 16:40:03,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.83 vs. limit=22.5
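The `Whitening` lines compare a per-module whiteness metric against a configured limit (in the line just above, metric=22.83 exceeds limit=22.5, which is what makes the event worth logging). A minimal sketch of one plausible such metric, assuming it measures how far the channel covariance of the activations is from a multiple of the identity (the exact formula used by the training code is not shown in these logs):

```python
import torch

def whitening_metric(x: torch.Tensor) -> float:
    """One plausible whiteness measure for activations x of shape
    (num_frames, num_channels): C * sum(eig^2) / sum(eig)^2 over the
    eigenvalues of the channel covariance. It equals 1.0 when the
    covariance is a multiple of the identity (perfectly "white")
    and grows as the activations become more anisotropic."""
    x = x - x.mean(dim=0, keepdim=True)
    num_channels = x.shape[1]
    cov = (x.t() @ x) / x.shape[0]        # (C, C) covariance
    sum_eig = torch.diagonal(cov).sum()   # trace = sum of eigenvalues
    sum_sq_eig = (cov * cov).sum()        # Frobenius^2 = sum of eig^2
    return (num_channels * sum_sq_eig / sum_eig**2).item()

# Roughly white activations give a metric near 1, far below typical
# limits such as 15.0 or 22.5, so no corrective penalty would apply.
x = torch.randn(1000, 192)
print(whitening_metric(x))
```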
+2024-08-26 16:40:13,052 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=78005.33333333333, ans=0.2
+2024-08-26 16:40:13,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_ff3.min_abs, batch_count=78005.33333333333, ans=0.2
+2024-08-26 16:40:13,759 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 1.590e+02 1.744e+02 2.019e+02 3.989e+02, threshold=3.489e+02, percent-clipped=1.0
+2024-08-26 16:40:20,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=78005.33333333333, ans=0.125
+2024-08-26 16:40:22,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=78058.66666666667, ans=0.125
+2024-08-26 16:40:29,804 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:40:31,354 INFO [train.py:1114] (2/4) Epoch 6, batch 2200, loss[loss=0.2583, simple_loss=0.3165, pruned_loss=0.07274, ctc_loss=0.1366, over 19581.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3028, pruned_loss=0.07167, ctc_loss=0.1339, over 3867615.28 frames. ], batch size: 57, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:40:40,443 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=78165.33333333333, ans=0.125
+2024-08-26 16:40:44,006 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=78165.33333333333, ans=0.1
+2024-08-26 16:40:59,624 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=78272.0, ans=0.0
+2024-08-26 16:42:04,355 INFO [train.py:1114] (2/4) Epoch 6, batch 2250, loss[loss=0.2358, simple_loss=0.2972, pruned_loss=0.0641, ctc_loss=0.1155, over 19622.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3026, pruned_loss=0.07131, ctc_loss=0.1334, over 3866433.00 frames. ], batch size: 55, lr: 2.30e-02, grad_scale: 32.0
+2024-08-26 16:42:09,172 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.16 vs. limit=10.0
+2024-08-26 16:42:30,469 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.366e+02 1.631e+02 1.850e+02 2.118e+02 4.912e+02, threshold=3.701e+02, percent-clipped=4.0
+2024-08-26 16:42:49,453 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=78592.0, ans=0.2
+2024-08-26 16:42:51,083 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=78592.0, ans=0.0
+2024-08-26 16:42:57,019 INFO [train.py:1114] (2/4) Epoch 6, batch 2300, loss[loss=0.2336, simple_loss=0.282, pruned_loss=0.06712, ctc_loss=0.1272, over 19498.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3016, pruned_loss=0.07101, ctc_loss=0.133, over 3860656.19 frames. ], batch size: 49, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:43:05,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=78698.66666666667, ans=0.125
+2024-08-26 16:43:09,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=78698.66666666667, ans=0.0
+2024-08-26 16:43:17,388 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=78752.0, ans=0.125
+2024-08-26 16:43:20,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=78752.0, ans=0.2
+2024-08-26 16:43:25,207 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=78805.33333333333, ans=0.125
+2024-08-26 16:43:39,447 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.14 vs. limit=15.0
+2024-08-26 16:43:41,596 INFO [train.py:1114] (2/4) Epoch 6, batch 2350, loss[loss=0.2781, simple_loss=0.3306, pruned_loss=0.08328, ctc_loss=0.1477, over 19700.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3017, pruned_loss=0.07109, ctc_loss=0.1329, over 3863792.78 frames. ], batch size: 63, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:43:59,285 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=78965.33333333333, ans=0.0
+2024-08-26 16:44:09,673 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.319e+02 1.571e+02 1.792e+02 2.053e+02 3.529e+02, threshold=3.585e+02, percent-clipped=0.0
+2024-08-26 16:44:13,568 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=79072.0, ans=0.0
+2024-08-26 16:44:16,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=79072.0, ans=0.125
+2024-08-26 16:44:24,140 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.48 vs. limit=10.0
+2024-08-26 16:44:27,085 INFO [train.py:1114] (2/4) Epoch 6, batch 2400, loss[loss=0.2726, simple_loss=0.3219, pruned_loss=0.08096, ctc_loss=0.1532, over 19254.00 frames. ], tot_loss[loss=0.251, simple_loss=0.304, pruned_loss=0.07217, ctc_loss=0.1343, over 3857246.83 frames. ], batch size: 71, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:44:29,349 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.52 vs. limit=22.5
+2024-08-26 16:44:33,816 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.28 vs. limit=15.0
+2024-08-26 16:44:40,832 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=79232.0, ans=0.1
+2024-08-26 16:44:41,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=79232.0, ans=0.125
+2024-08-26 16:44:43,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=79232.0, ans=0.125
+2024-08-26 16:44:54,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=79338.66666666667, ans=0.125
+2024-08-26 16:45:01,776 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=79338.66666666667, ans=0.0
+2024-08-26 16:45:12,827 INFO [train.py:1114] (2/4) Epoch 6, batch 2450, loss[loss=0.3195, simple_loss=0.3351, pruned_loss=0.1097, ctc_loss=0.2112, over 13151.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3096, pruned_loss=0.0768, ctc_loss=0.1432, over 3729667.10 frames. ], batch size: 140, lr: 2.29e-02, grad_scale: 32.0
+2024-08-26 16:45:14,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=79445.33333333333, ans=0.1
+2024-08-26 16:45:22,215 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=79498.66666666667, ans=0.025
+2024-08-26 16:45:23,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=79498.66666666667, ans=0.125
+2024-08-26 16:45:29,517 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=79498.66666666667, ans=0.125
+2024-08-26 16:45:29,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=79498.66666666667, ans=0.0
+2024-08-26 16:45:40,096 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.744e+02 1.902e+02 2.066e+02 3.652e+02, threshold=3.804e+02, percent-clipped=1.0
+2024-08-26 16:48:16,407 INFO [train.py:1114] (2/4) Epoch 7, batch 0, loss[loss=0.2543, simple_loss=0.2952, pruned_loss=0.07701, ctc_loss=0.1482, over 19811.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.2952, pruned_loss=0.07701, ctc_loss=0.1482, over 19811.00 frames. ], batch size: 49, lr: 2.14e-02, grad_scale: 32.0
+2024-08-26 16:48:16,408 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 16:48:29,568 INFO [train.py:1146] (2/4) Epoch 7, validation: loss=0.2068, simple_loss=0.2958, pruned_loss=0.04327, ctc_loss=0.07811, over 944034.00 frames.
+2024-08-26 16:48:29,569 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
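At each epoch boundary the script runs a validation pass and reports the peak GPU memory allocated so far. With PyTorch that figure comes from the caching allocator's statistics, e.g.:

```python
import torch

# Peak memory held by PyTorch's caching allocator on the current GPU
# since process start (or the last stats reset), reported in MB as in
# the "Maximum memory allocated so far is 12825MB" line above.
peak_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
print(f"Maximum memory allocated so far is {peak_mb}MB")
```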
+2024-08-26 16:49:04,254 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.90 vs. limit=15.0
+2024-08-26 16:49:08,980 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=1.87 vs. limit=15.0
+2024-08-26 16:49:19,278 INFO [train.py:1114] (2/4) Epoch 7, batch 50, loss[loss=0.226, simple_loss=0.2777, pruned_loss=0.06316, ctc_loss=0.12, over 19711.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3081, pruned_loss=0.07424, ctc_loss=0.1399, over 843578.70 frames. ], batch size: 47, lr: 2.14e-02, grad_scale: 32.0
+2024-08-26 16:49:25,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=79925.33333333333, ans=0.0
+2024-08-26 16:49:52,917 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=80085.33333333333, ans=0.125
+2024-08-26 16:49:57,477 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.282e+02 1.584e+02 1.822e+02 2.089e+02 3.575e+02, threshold=3.645e+02, percent-clipped=0.0
+2024-08-26 16:50:07,022 INFO [train.py:1114] (2/4) Epoch 7, batch 100, loss[loss=0.225, simple_loss=0.2845, pruned_loss=0.05976, ctc_loss=0.115, over 19722.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3073, pruned_loss=0.07421, ctc_loss=0.1385, over 1498623.17 frames. ], batch size: 51, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:50:13,667 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=80192.0, ans=0.0
+2024-08-26 16:50:13,753 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:50:25,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80245.33333333333, ans=0.1
+2024-08-26 16:50:36,409 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=80298.66666666667, ans=0.125
+2024-08-26 16:50:40,021 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=80352.0, ans=0.04949747468305833
+2024-08-26 16:51:01,451 INFO [train.py:1114] (2/4) Epoch 7, batch 150, loss[loss=0.234, simple_loss=0.283, pruned_loss=0.06795, ctc_loss=0.1227, over 19710.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.304, pruned_loss=0.0722, ctc_loss=0.1347, over 2027456.19 frames. ], batch size: 47, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:51:04,703 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=8.10 vs. limit=15.0
+2024-08-26 16:51:15,999 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=80512.0, ans=0.125
+2024-08-26 16:51:28,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=80565.33333333333, ans=0.125
+2024-08-26 16:51:33,811 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=80618.66666666667, ans=0.0
+2024-08-26 16:51:39,013 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.222e+02 1.525e+02 1.667e+02 1.863e+02 2.878e+02, threshold=3.334e+02, percent-clipped=0.0
+2024-08-26 16:51:41,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=80672.0, ans=0.125
+2024-08-26 16:51:48,553 INFO [train.py:1114] (2/4) Epoch 7, batch 200, loss[loss=0.2952, simple_loss=0.3365, pruned_loss=0.09293, ctc_loss=0.1699, over 18253.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3021, pruned_loss=0.07115, ctc_loss=0.1325, over 2436140.00 frames. ], batch size: 85, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:51:52,438 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=80725.33333333333, ans=0.0
+2024-08-26 16:51:58,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=80778.66666666667, ans=0.0
+2024-08-26 16:52:04,427 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:52:14,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=80832.0, ans=0.5
+2024-08-26 16:52:21,178 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=80885.33333333333, ans=0.125
+2024-08-26 16:52:35,139 INFO [train.py:1114] (2/4) Epoch 7, batch 250, loss[loss=0.266, simple_loss=0.3226, pruned_loss=0.07577, ctc_loss=0.1448, over 19380.00 frames. ], tot_loss[loss=0.2469, simple_loss=0.3011, pruned_loss=0.07016, ctc_loss=0.1309, over 2756169.51 frames. ], batch size: 67, lr: 2.13e-02, grad_scale: 32.0
+2024-08-26 16:52:42,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=80992.0, ans=0.1
+2024-08-26 16:52:50,292 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.99 vs. limit=15.0
+2024-08-26 16:53:03,208 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=81152.0, ans=0.2
+2024-08-26 16:53:06,082 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=81152.0, ans=0.0
+2024-08-26 16:53:14,098 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81152.0, ans=0.1
+2024-08-26 16:53:14,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81152.0, ans=0.1
+2024-08-26 16:53:16,584 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.591e+02 1.729e+02 1.900e+02 5.825e+02, threshold=3.457e+02, percent-clipped=1.0
+2024-08-26 16:53:21,051 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.84 vs. limit=15.0
+2024-08-26 16:53:25,916 INFO [train.py:1114] (2/4) Epoch 7, batch 300, loss[loss=0.2735, simple_loss=0.3272, pruned_loss=0.08011, ctc_loss=0.1491, over 19535.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.2996, pruned_loss=0.06913, ctc_loss=0.1292, over 3001114.53 frames. ], batch size: 61, lr: 2.12e-02, grad_scale: 32.0
+2024-08-26 16:53:49,188 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.61 vs. limit=15.0
+2024-08-26 16:53:49,857 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=81365.33333333333, ans=0.125
+2024-08-26 16:54:18,315 INFO [train.py:1114] (2/4) Epoch 7, batch 350, loss[loss=0.217, simple_loss=0.2724, pruned_loss=0.05888, ctc_loss=0.1099, over 19740.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.2995, pruned_loss=0.06893, ctc_loss=0.1288, over 3191443.24 frames. ], batch size: 48, lr: 2.12e-02, grad_scale: 16.0
+2024-08-26 16:54:29,759 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.43 vs. limit=22.5
+2024-08-26 16:54:42,489 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:54:56,442 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.574e+02 1.753e+02 2.022e+02 2.928e+02, threshold=3.506e+02, percent-clipped=0.0
+2024-08-26 16:55:04,117 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=81792.0, ans=0.125
+2024-08-26 16:55:04,697 INFO [train.py:1114] (2/4) Epoch 7, batch 400, loss[loss=0.2339, simple_loss=0.2991, pruned_loss=0.06134, ctc_loss=0.1149, over 19495.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.2988, pruned_loss=0.06832, ctc_loss=0.128, over 3343247.64 frames. ], batch size: 54, lr: 2.12e-02, grad_scale: 32.0
+2024-08-26 16:55:14,383 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=81845.33333333333, ans=0.125
+2024-08-26 16:55:18,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=81845.33333333333, ans=0.035
+2024-08-26 16:55:20,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=81845.33333333333, ans=0.1
+2024-08-26 16:55:22,208 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.84 vs. limit=12.0
+2024-08-26 16:55:26,774 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=81898.66666666667, ans=15.0
+2024-08-26 16:55:34,947 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=81952.0, ans=0.0
+2024-08-26 16:55:39,840 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=81952.0, ans=0.0
+2024-08-26 16:55:41,754 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=82005.33333333333, ans=0.09899494936611666
+2024-08-26 16:55:45,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=82005.33333333333, ans=0.2
+2024-08-26 16:55:51,736 INFO [train.py:1114] (2/4) Epoch 7, batch 450, loss[loss=0.2202, simple_loss=0.2905, pruned_loss=0.05395, ctc_loss=0.1051, over 19607.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2991, pruned_loss=0.06846, ctc_loss=0.1285, over 3450420.32 frames. ], batch size: 55, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:56:03,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=82058.66666666667, ans=0.025
+2024-08-26 16:56:11,408 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=82112.0, ans=0.07
+2024-08-26 16:56:16,496 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.23 vs. limit=15.0
+2024-08-26 16:56:17,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=82165.33333333333, ans=0.0
+2024-08-26 16:56:18,008 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=82165.33333333333, ans=0.0
+2024-08-26 16:56:19,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=82165.33333333333, ans=0.2
+2024-08-26 16:56:36,480 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.23 vs. limit=15.0
+2024-08-26 16:56:41,736 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 1.485e+02 1.753e+02 2.038e+02 3.855e+02, threshold=3.505e+02, percent-clipped=1.0
+2024-08-26 16:56:42,995 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:56:44,764 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=82272.0, ans=0.2
+2024-08-26 16:56:49,040 INFO [train.py:1114] (2/4) Epoch 7, batch 500, loss[loss=0.2508, simple_loss=0.3077, pruned_loss=0.07024, ctc_loss=0.1339, over 19666.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.2989, pruned_loss=0.06848, ctc_loss=0.1283, over 3546182.01 frames. ], batch size: 63, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:56:50,242 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82325.33333333333, ans=0.1
+2024-08-26 16:56:55,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=82325.33333333333, ans=0.2
+2024-08-26 16:57:00,418 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=82378.66666666667, ans=10.0
+2024-08-26 16:57:01,205 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 16:57:03,154 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82378.66666666667, ans=0.1
+2024-08-26 16:57:12,064 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.35 vs. limit=22.5
+2024-08-26 16:57:25,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=82538.66666666667, ans=0.1
+2024-08-26 16:57:30,377 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=82538.66666666667, ans=0.95
+2024-08-26 16:57:35,771 INFO [train.py:1114] (2/4) Epoch 7, batch 550, loss[loss=0.2807, simple_loss=0.3273, pruned_loss=0.08531, ctc_loss=0.1586, over 19277.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.2989, pruned_loss=0.06858, ctc_loss=0.1283, over 3608003.59 frames. ], batch size: 71, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:57:35,946 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=82592.0, ans=0.2
+2024-08-26 16:57:37,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=82592.0, ans=0.2
+2024-08-26 16:57:40,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=82592.0, ans=0.125
+2024-08-26 16:57:41,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=82592.0, ans=0.1
+2024-08-26 16:57:49,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82645.33333333333, ans=0.1
+2024-08-26 16:58:16,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=82805.33333333333, ans=0.025
+2024-08-26 16:58:16,877 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.530e+02 1.701e+02 1.927e+02 4.407e+02, threshold=3.402e+02, percent-clipped=1.0
+2024-08-26 16:58:30,186 INFO [train.py:1114] (2/4) Epoch 7, batch 600, loss[loss=0.2561, simple_loss=0.3157, pruned_loss=0.07194, ctc_loss=0.1317, over 19349.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.2997, pruned_loss=0.06857, ctc_loss=0.1283, over 3666110.17 frames. ], batch size: 67, lr: 2.11e-02, grad_scale: 16.0
+2024-08-26 16:58:57,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=82858.66666666667, ans=0.0
+2024-08-26 16:58:57,566 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82858.66666666667, ans=0.1
+2024-08-26 16:58:57,793 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.49 vs. limit=12.0
+2024-08-26 17:04:51,937 INFO [train.py:1114] (2/4) Epoch 7, batch 650, loss[loss=0.2473, simple_loss=0.3096, pruned_loss=0.06666, ctc_loss=0.1292, over 19768.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.2986, pruned_loss=0.06813, ctc_loss=0.1274, over 3716263.92 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:05:06,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=83125.33333333333, ans=0.125
+2024-08-26 17:05:23,613 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=83232.0, ans=0.2
+2024-08-26 17:05:34,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=83285.33333333333, ans=0.2
+2024-08-26 17:05:41,852 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.294e+02 1.502e+02 1.666e+02 1.880e+02 3.682e+02, threshold=3.331e+02, percent-clipped=2.0
+2024-08-26 17:06:20,354 INFO [train.py:1114] (2/4) Epoch 7, batch 700, loss[loss=0.2192, simple_loss=0.28, pruned_loss=0.05835, ctc_loss=0.1043, over 19700.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.2983, pruned_loss=0.06783, ctc_loss=0.1267, over 3748949.27 frames. ], batch size: 51, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:06:28,501 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=83392.0, ans=0.125
+2024-08-26 17:07:08,431 INFO [train.py:1114] (2/4) Epoch 7, batch 750, loss[loss=0.2469, simple_loss=0.3104, pruned_loss=0.06532, ctc_loss=0.1317, over 19515.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.2977, pruned_loss=0.06751, ctc_loss=0.1264, over 3774137.13 frames. ], batch size: 54, lr: 2.10e-02, grad_scale: 16.0
+2024-08-26 17:07:11,801 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.27 vs. limit=6.0
+2024-08-26 17:07:13,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=83658.66666666667, ans=0.125
+2024-08-26 17:07:15,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=83658.66666666667, ans=0.125
+2024-08-26 17:07:18,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=83712.0, ans=0.125
+2024-08-26 17:07:27,753 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=6.20 vs. limit=15.0
+2024-08-26 17:07:43,929 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=83818.66666666667, ans=0.0
+2024-08-26 17:07:48,231 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.281e+02 1.533e+02 1.678e+02 1.875e+02 3.166e+02, threshold=3.356e+02, percent-clipped=0.0
+2024-08-26 17:07:49,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=83872.0, ans=0.2
+2024-08-26 17:07:57,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=83925.33333333333, ans=0.125
+2024-08-26 17:07:58,364 INFO [train.py:1114] (2/4) Epoch 7, batch 800, loss[loss=0.2048, simple_loss=0.2656, pruned_loss=0.05269, ctc_loss=0.09683, over 19792.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2973, pruned_loss=0.06752, ctc_loss=0.1262, over 3796585.84 frames. ], batch size: 49, lr: 2.10e-02, grad_scale: 32.0
+2024-08-26 17:08:01,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=83925.33333333333, ans=0.0
+2024-08-26 17:08:13,766 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.51 vs. limit=15.0
+2024-08-26 17:08:14,384 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=83978.66666666667, ans=0.0
+2024-08-26 17:08:17,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=83978.66666666667, ans=0.025
+2024-08-26 17:08:25,333 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=84032.0, ans=0.125
+2024-08-26 17:08:36,301 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.95 vs. limit=15.0
+2024-08-26 17:08:39,215 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.53 vs. limit=15.0
+2024-08-26 17:08:43,561 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-26 17:08:45,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=84085.33333333333, ans=0.125
+2024-08-26 17:08:56,343 INFO [train.py:1114] (2/4) Epoch 7, batch 850, loss[loss=0.2527, simple_loss=0.3095, pruned_loss=0.07088, ctc_loss=0.1352, over 19644.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2971, pruned_loss=0.06745, ctc_loss=0.126, over 3815446.47 frames. ], batch size: 59, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:09:00,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=84192.0, ans=0.125
+2024-08-26 17:09:07,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=84245.33333333333, ans=0.1
+2024-08-26 17:09:11,020 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.87 vs. limit=15.0
+2024-08-26 17:09:13,392 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_na.min_abs, batch_count=84245.33333333333, ans=0.02
+2024-08-26 17:09:14,717 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.82 vs. limit=15.0
+2024-08-26 17:09:29,543 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=84298.66666666667, ans=0.0
+2024-08-26 17:09:36,483 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=84298.66666666667, ans=0.04949747468305833
+2024-08-26 17:09:55,310 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=84352.0, ans=0.0
+2024-08-26 17:09:58,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=84352.0, ans=0.0
+2024-08-26 17:10:01,941 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.310e+02 1.545e+02 1.673e+02 1.909e+02 3.259e+02, threshold=3.346e+02, percent-clipped=0.0
+2024-08-26 17:10:04,465 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.00 vs. limit=15.0
+2024-08-26 17:10:06,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=84405.33333333333, ans=0.025
+2024-08-26 17:10:09,597 INFO [train.py:1114] (2/4) Epoch 7, batch 900, loss[loss=0.2279, simple_loss=0.281, pruned_loss=0.06455, ctc_loss=0.1142, over 19388.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2975, pruned_loss=0.06788, ctc_loss=0.1267, over 3818886.99 frames. ], batch size: 48, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:10:18,583 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=84458.66666666667, ans=0.025
+2024-08-26 17:10:22,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=84512.0, ans=0.035
+2024-08-26 17:10:47,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=84618.66666666667, ans=0.0
+2024-08-26 17:10:58,444 INFO [train.py:1114] (2/4) Epoch 7, batch 950, loss[loss=0.2146, simple_loss=0.2692, pruned_loss=0.05867, ctc_loss=0.1065, over 19514.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.2977, pruned_loss=0.06794, ctc_loss=0.1268, over 3819380.73 frames. ], batch size: 49, lr: 2.09e-02, grad_scale: 32.0
+2024-08-26 17:11:01,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=84725.33333333333, ans=0.125
+2024-08-26 17:11:11,287 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.78 vs. limit=15.0
+2024-08-26 17:11:14,694 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=84778.66666666667, ans=0.1
+2024-08-26 17:11:30,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=84832.0, ans=0.0
+2024-08-26 17:11:32,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=84885.33333333333, ans=0.0
+2024-08-26 17:11:34,951 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=84885.33333333333, ans=0.0
+2024-08-26 17:11:48,304 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.332e+02 1.566e+02 1.708e+02 1.976e+02 3.572e+02, threshold=3.415e+02, percent-clipped=1.0
+2024-08-26 17:12:17,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=84992.0, ans=0.0
+2024-08-26 17:12:18,428 INFO [train.py:1114] (2/4) Epoch 7, batch 1000, loss[loss=0.2214, simple_loss=0.2841, pruned_loss=0.05811, ctc_loss=0.1062, over 19845.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.2986, pruned_loss=0.06838, ctc_loss=0.1277, over 3815233.65 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:12:27,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=85045.33333333333, ans=0.025
+2024-08-26 17:12:31,099 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=85045.33333333333, ans=0.125
+2024-08-26 17:12:58,647 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=85045.33333333333, ans=0.025
+2024-08-26 17:13:01,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=85098.66666666667, ans=0.0
+2024-08-26 17:13:44,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=85152.0, ans=0.2
+2024-08-26 17:13:56,131 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=85205.33333333333, ans=0.0
+2024-08-26 17:13:59,712 INFO [train.py:1114] (2/4) Epoch 7, batch 1050, loss[loss=0.2369, simple_loss=0.2958, pruned_loss=0.06444, ctc_loss=0.123, over 19839.00 frames. ], tot_loss[loss=0.242, simple_loss=0.2977, pruned_loss=0.06781, ctc_loss=0.1267, over 3822145.97 frames. ], batch size: 57, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:14:03,666 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85258.66666666667, ans=0.1
+2024-08-26 17:14:08,633 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=85312.0, ans=0.125
+2024-08-26 17:16:21,270 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=85365.33333333333, ans=0.025
+2024-08-26 17:16:24,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=85365.33333333333, ans=0.5
+2024-08-26 17:16:26,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=85365.33333333333, ans=0.2
+2024-08-26 17:16:33,493 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.88 vs. limit=15.0
+2024-08-26 17:16:35,269 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=85418.66666666667, ans=0.025
+2024-08-26 17:16:40,666 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.203e+02 1.449e+02 1.584e+02 1.768e+02 2.861e+02, threshold=3.169e+02, percent-clipped=0.0
+2024-08-26 17:16:43,845 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:16:44,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=85472.0, ans=0.0
+2024-08-26 17:16:48,369 INFO [train.py:1114] (2/4) Epoch 7, batch 1100, loss[loss=0.2148, simple_loss=0.2748, pruned_loss=0.05715, ctc_loss=0.1012, over 19583.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.2968, pruned_loss=0.06733, ctc_loss=0.1258, over 3830826.14 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 32.0
+2024-08-26 17:16:49,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=85525.33333333333, ans=0.1
+2024-08-26 17:16:52,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=85525.33333333333, ans=0.125
+2024-08-26 17:16:56,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=85525.33333333333, ans=0.025
+2024-08-26 17:16:56,792 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=10.33 vs. limit=15.0
+2024-08-26 17:16:57,275 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=85578.66666666667, ans=0.125
+2024-08-26 17:17:00,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=85578.66666666667, ans=0.125
+2024-08-26 17:17:16,026 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=85685.33333333333, ans=0.125
+2024-08-26 17:17:45,022 INFO [train.py:1114] (2/4) Epoch 7, batch 1150, loss[loss=0.2027, simple_loss=0.264, pruned_loss=0.05148, ctc_loss=0.09585, over 19580.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.2971, pruned_loss=0.06749, ctc_loss=0.1259, over 3830840.94 frames. ], batch size: 52, lr: 2.08e-02, grad_scale: 16.0
+2024-08-26 17:17:50,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=85792.0, ans=0.125
+2024-08-26 17:18:03,443 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=85845.33333333333, ans=0.0
+2024-08-26 17:18:12,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=85898.66666666667, ans=0.1
+2024-08-26 17:18:15,791 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=85898.66666666667, ans=0.025
+2024-08-26 17:18:15,858 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=85898.66666666667, ans=0.0
+2024-08-26 17:18:24,286 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=85952.0, ans=0.125
+2024-08-26 17:18:38,591 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=86005.33333333333, ans=0.125
+2024-08-26 17:18:41,955 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.243e+02 1.522e+02 1.667e+02 1.891e+02 3.736e+02, threshold=3.335e+02, percent-clipped=2.0
+2024-08-26 17:18:46,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=86005.33333333333, ans=0.2
+2024-08-26 17:18:48,634 INFO [train.py:1114] (2/4) Epoch 7, batch 1200, loss[loss=0.2516, simple_loss=0.3088, pruned_loss=0.07076, ctc_loss=0.1325, over 19829.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.298, pruned_loss=0.06773, ctc_loss=0.1267, over 3825576.42 frames. ], batch size: 57, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:18:48,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=86058.66666666667, ans=0.0
+2024-08-26 17:18:55,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=86058.66666666667, ans=0.025
+2024-08-26 17:19:12,871 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=86165.33333333333, ans=0.025
+2024-08-26 17:19:28,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=86218.66666666667, ans=0.2
+2024-08-26 17:19:54,874 INFO [train.py:1114] (2/4) Epoch 7, batch 1250, loss[loss=0.2462, simple_loss=0.3015, pruned_loss=0.07026, ctc_loss=0.1259, over 19526.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.2987, pruned_loss=0.06783, ctc_loss=0.1266, over 3843045.10 frames. ], batch size: 61, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:19:56,437 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.84 vs. limit=22.5
+2024-08-26 17:20:01,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=86325.33333333333, ans=0.1
+2024-08-26 17:20:20,153 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.59 vs. limit=22.5
+2024-08-26 17:20:25,664 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.33 vs. limit=22.5
+2024-08-26 17:20:30,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=86485.33333333333, ans=0.5
+2024-08-26 17:20:35,653 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.207e+02 1.476e+02 1.609e+02 1.857e+02 3.245e+02, threshold=3.218e+02, percent-clipped=0.0
+2024-08-26 17:20:44,743 INFO [train.py:1114] (2/4) Epoch 7, batch 1300, loss[loss=0.274, simple_loss=0.3234, pruned_loss=0.08246, ctc_loss=0.1494, over 18934.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.2977, pruned_loss=0.06733, ctc_loss=0.1257, over 3846312.24 frames. ], batch size: 76, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:20:44,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=86592.0, ans=0.0
+2024-08-26 17:20:59,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=86645.33333333333, ans=0.125
+2024-08-26 17:21:08,978 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.88 vs. limit=6.0
+2024-08-26 17:21:38,925 INFO [train.py:1114] (2/4) Epoch 7, batch 1350, loss[loss=0.2179, simple_loss=0.2794, pruned_loss=0.05672, ctc_loss=0.1072, over 19768.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.297, pruned_loss=0.06689, ctc_loss=0.1252, over 3856738.72 frames. ], batch size: 54, lr: 2.07e-02, grad_scale: 32.0
+2024-08-26 17:21:43,832 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=86858.66666666667, ans=0.125
+2024-08-26 17:21:49,686 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.10 vs. limit=15.0
+2024-08-26 17:21:56,716 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=86965.33333333333, ans=0.035
+2024-08-26 17:21:59,873 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.98 vs. limit=15.0
+2024-08-26 17:21:59,914 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.67 vs. limit=12.0
+2024-08-26 17:22:04,518 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=86965.33333333333, ans=0.2
+2024-08-26 17:22:06,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=87018.66666666667, ans=0.125
+2024-08-26 17:22:10,353 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=87018.66666666667, ans=0.0
+2024-08-26 17:22:19,559 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.169e+02 1.495e+02 1.726e+02 1.992e+02 3.104e+02, threshold=3.452e+02, percent-clipped=0.0
+2024-08-26 17:22:20,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=87072.0, ans=0.125
+2024-08-26 17:22:26,110 INFO [train.py:1114] (2/4) Epoch 7, batch 1400, loss[loss=0.2202, simple_loss=0.2708, pruned_loss=0.0607, ctc_loss=0.1204, over 19666.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.2971, pruned_loss=0.06704, ctc_loss=0.1255, over 3863622.39 frames. ], batch size: 46, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:22:28,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=87125.33333333333, ans=0.5
+2024-08-26 17:22:33,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=87125.33333333333, ans=0.05
+2024-08-26 17:23:02,965 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=87285.33333333333, ans=0.125
+2024-08-26 17:23:04,105 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.66 vs. limit=22.5
+2024-08-26 17:23:06,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=87285.33333333333, ans=0.125
+2024-08-26 17:23:28,848 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.81 vs. limit=6.0
+2024-08-26 17:23:31,307 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=87338.66666666667, ans=0.125
+2024-08-26 17:23:35,097 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:23:35,695 INFO [train.py:1114] (2/4) Epoch 7, batch 1450, loss[loss=0.2466, simple_loss=0.3015, pruned_loss=0.06957, ctc_loss=0.1311, over 19690.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.2981, pruned_loss=0.0676, ctc_loss=0.1264, over 3861288.08 frames. ], batch size: 63, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:23:35,992 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=87392.0, ans=0.025
+2024-08-26 17:23:41,737 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.47 vs. limit=6.0
+2024-08-26 17:23:43,417 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=87392.0, ans=0.1
+2024-08-26 17:23:44,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=87445.33333333333, ans=0.0
+2024-08-26 17:24:05,932 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=87552.0, ans=0.0
+2024-08-26 17:24:18,015 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:24:19,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=87605.33333333333, ans=0.125
+2024-08-26 17:24:20,637 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.279e+02 1.540e+02 1.669e+02 1.894e+02 3.453e+02, threshold=3.338e+02, percent-clipped=1.0
+2024-08-26 17:24:24,028 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.05 vs. limit=10.0
+2024-08-26 17:24:29,674 INFO [train.py:1114] (2/4) Epoch 7, batch 1500, loss[loss=0.238, simple_loss=0.3016, pruned_loss=0.06225, ctc_loss=0.1249, over 19598.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2979, pruned_loss=0.06728, ctc_loss=0.1259, over 3861141.13 frames. ], batch size: 57, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:24:40,336 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.44 vs. limit=15.0
+2024-08-26 17:24:44,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=87712.0, ans=0.125
+2024-08-26 17:24:56,568 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.15 vs. limit=10.0
+2024-08-26 17:25:17,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=87872.0, ans=0.09899494936611666
+2024-08-26 17:25:19,508 INFO [train.py:1114] (2/4) Epoch 7, batch 1550, loss[loss=0.2418, simple_loss=0.2963, pruned_loss=0.06815, ctc_loss=0.1276, over 19605.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.298, pruned_loss=0.06735, ctc_loss=0.1261, over 3846994.56 frames. ], batch size: 60, lr: 2.06e-02, grad_scale: 32.0
+2024-08-26 17:25:20,667 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=87925.33333333333, ans=0.125
+2024-08-26 17:25:20,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=87925.33333333333, ans=0.0
+2024-08-26 17:25:40,854 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.85 vs. limit=6.0
+2024-08-26 17:26:04,260 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.330e+02 1.559e+02 1.788e+02 2.182e+02 5.116e+02, threshold=3.576e+02, percent-clipped=3.0
+2024-08-26 17:26:10,130 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:26:10,938 INFO [train.py:1114] (2/4) Epoch 7, batch 1600, loss[loss=0.2592, simple_loss=0.3094, pruned_loss=0.07649, ctc_loss=0.1403, over 19855.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.2978, pruned_loss=0.0676, ctc_loss=0.1264, over 3836156.02 frames. ], batch size: 57, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:26:19,021 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=88192.0, ans=0.0
+2024-08-26 17:26:52,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=88405.33333333333, ans=0.125
+2024-08-26 17:26:54,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=88405.33333333333, ans=0.1
+2024-08-26 17:27:01,932 INFO [train.py:1114] (2/4) Epoch 7, batch 1650, loss[loss=0.2231, simple_loss=0.2899, pruned_loss=0.05611, ctc_loss=0.11, over 19662.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.2975, pruned_loss=0.06744, ctc_loss=0.1262, over 3832636.10 frames. ], batch size: 59, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:27:33,475 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=88458.66666666667, ans=0.125
+2024-08-26 17:28:09,928 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=11.78 vs. limit=15.0
+2024-08-26 17:28:57,879 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.94 vs. limit=6.0
+2024-08-26 17:28:58,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=88618.66666666667, ans=0.0
+2024-08-26 17:29:07,040 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=88672.0, ans=0.0
+2024-08-26 17:29:25,555 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.503e+02 1.653e+02 1.809e+02 2.992e+02, threshold=3.307e+02, percent-clipped=0.0
+2024-08-26 17:29:40,037 INFO [train.py:1114] (2/4) Epoch 7, batch 1700, loss[loss=0.217, simple_loss=0.2701, pruned_loss=0.05909, ctc_loss=0.1145, over 19706.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.297, pruned_loss=0.06692, ctc_loss=0.1255, over 3847235.14 frames. ], batch size: 46, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:29:40,272 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=88725.33333333333, ans=0.125
+2024-08-26 17:30:02,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=88725.33333333333, ans=0.2
+2024-08-26 17:30:36,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=88938.66666666667, ans=0.5
+2024-08-26 17:30:42,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=88938.66666666667, ans=0.125
+2024-08-26 17:30:44,517 INFO [train.py:1114] (2/4) Epoch 7, batch 1750, loss[loss=0.2178, simple_loss=0.2707, pruned_loss=0.06024, ctc_loss=0.1113, over 19661.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2966, pruned_loss=0.06676, ctc_loss=0.1253, over 3851361.92 frames. ], batch size: 45, lr: 2.05e-02, grad_scale: 32.0
+2024-08-26 17:31:03,632 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=89098.66666666667, ans=0.2
+2024-08-26 17:31:23,266 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.487e+02 1.622e+02 1.808e+02 3.869e+02, threshold=3.245e+02, percent-clipped=1.0
+2024-08-26 17:31:29,439 INFO [train.py:1114] (2/4) Epoch 7, batch 1800, loss[loss=0.2475, simple_loss=0.3106, pruned_loss=0.06714, ctc_loss=0.1254, over 19615.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2969, pruned_loss=0.06671, ctc_loss=0.1252, over 3853077.02 frames. ], batch size: 55, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:31:30,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=89258.66666666667, ans=0.125
+2024-08-26 17:31:42,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=89312.0, ans=0.125
+2024-08-26 17:31:45,550 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=89312.0, ans=0.125
+2024-08-26 17:31:56,673 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.17 vs. limit=15.0
+2024-08-26 17:32:02,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=89418.66666666667, ans=0.0
+2024-08-26 17:32:08,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=89472.0, ans=0.0
+2024-08-26 17:32:14,104 INFO [train.py:1114] (2/4) Epoch 7, batch 1850, loss[loss=0.2551, simple_loss=0.3181, pruned_loss=0.07032, ctc_loss=0.1285, over 19598.00 frames. ], tot_loss[loss=0.24, simple_loss=0.2966, pruned_loss=0.06674, ctc_loss=0.1251, over 3856867.39 frames. ], batch size: 57, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:32:16,831 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=89525.33333333333, ans=0.1
+2024-08-26 17:32:33,361 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=89632.0, ans=0.0
+2024-08-26 17:32:34,323 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=89632.0, ans=0.1
+2024-08-26 17:32:34,769 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.95 vs. limit=15.0
+2024-08-26 17:32:35,480 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.45 vs. limit=15.0
+2024-08-26 17:32:55,556 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.278e+02 1.590e+02 1.759e+02 2.003e+02 3.443e+02, threshold=3.517e+02, percent-clipped=1.0
+2024-08-26 17:33:01,840 INFO [train.py:1114] (2/4) Epoch 7, batch 1900, loss[loss=0.2547, simple_loss=0.3124, pruned_loss=0.07072, ctc_loss=0.1389, over 19692.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.297, pruned_loss=0.06671, ctc_loss=0.125, over 3861026.33 frames. ], batch size: 59, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:33:03,754 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=89792.0, ans=0.125
+2024-08-26 17:33:13,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten.whitening_limit, batch_count=89845.33333333333, ans=15.0
+2024-08-26 17:35:14,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=90005.33333333333, ans=0.125
+2024-08-26 17:35:23,456 INFO [train.py:1114] (2/4) Epoch 7, batch 1950, loss[loss=0.2225, simple_loss=0.2837, pruned_loss=0.05908, ctc_loss=0.1077, over 19590.00 frames. ], tot_loss[loss=0.241, simple_loss=0.2979, pruned_loss=0.06694, ctc_loss=0.1254, over 3870001.50 frames. ], batch size: 52, lr: 2.04e-02, grad_scale: 32.0
+2024-08-26 17:35:39,827 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.18 vs. limit=22.5
+2024-08-26 17:35:40,386 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=90165.33333333333, ans=0.2
+2024-08-26 17:35:42,535 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.42 vs. limit=15.0
+2024-08-26 17:35:58,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=90218.66666666667, ans=0.1
+2024-08-26 17:35:59,953 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=90272.0, ans=0.0
+2024-08-26 17:36:00,165 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.96 vs. limit=10.0
+2024-08-26 17:36:03,249 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.531e+02 1.657e+02 1.854e+02 3.915e+02, threshold=3.314e+02, percent-clipped=1.0
+2024-08-26 17:36:08,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=90325.33333333333, ans=0.125
+2024-08-26 17:36:09,474 INFO [train.py:1114] (2/4) Epoch 7, batch 2000, loss[loss=0.206, simple_loss=0.2592, pruned_loss=0.05507, ctc_loss=0.1069, over 19660.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.2984, pruned_loss=0.06741, ctc_loss=0.1262, over 3854536.84 frames. ], batch size: 45, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:36:10,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90325.33333333333, ans=0.1
+2024-08-26 17:36:19,594 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=90378.66666666667, ans=0.125
+2024-08-26 17:36:20,638 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=90378.66666666667, ans=0.125
+2024-08-26 17:36:33,334 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=90432.0, ans=0.125
+2024-08-26 17:36:36,753 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90485.33333333333, ans=0.1
+2024-08-26 17:36:53,977 INFO [train.py:1114] (2/4) Epoch 7, batch 2050, loss[loss=0.1952, simple_loss=0.2528, pruned_loss=0.0502, ctc_loss=0.09333, over 19706.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.297, pruned_loss=0.06709, ctc_loss=0.1255, over 3850158.93 frames. ], batch size: 47, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:36:54,991 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90592.0, ans=0.1
+2024-08-26 17:37:03,707 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=90645.33333333333, ans=0.2
+2024-08-26 17:37:06,449 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=90645.33333333333, ans=0.0
+2024-08-26 17:37:10,750 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=90698.66666666667, ans=0.2
+2024-08-26 17:37:10,846 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=90698.66666666667, ans=0.04949747468305833
+2024-08-26 17:37:32,300 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.477e+02 1.642e+02 1.962e+02 4.346e+02, threshold=3.284e+02, percent-clipped=3.0
+2024-08-26 17:37:32,533 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=90805.33333333333, ans=0.1
+2024-08-26 17:37:33,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=90805.33333333333, ans=0.125
+2024-08-26 17:37:38,470 INFO [train.py:1114] (2/4) Epoch 7, batch 2100, loss[loss=0.2478, simple_loss=0.3051, pruned_loss=0.06956, ctc_loss=0.1283, over 19772.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.2966, pruned_loss=0.06682, ctc_loss=0.1249, over 3857497.22 frames. ], batch size: 54, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:37:48,167 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=90858.66666666667, ans=0.125
+2024-08-26 17:37:52,054 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.30 vs. limit=12.0
+2024-08-26 17:37:52,643 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.37 vs. limit=15.0
+2024-08-26 17:38:08,931 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=91018.66666666667, ans=0.04949747468305833
+2024-08-26 17:38:10,833 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=91018.66666666667, ans=0.0
+2024-08-26 17:38:23,019 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=91072.0, ans=0.125
+2024-08-26 17:38:25,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=91125.33333333333, ans=0.125
+2024-08-26 17:38:26,423 INFO [train.py:1114] (2/4) Epoch 7, batch 2150, loss[loss=0.2215, simple_loss=0.2797, pruned_loss=0.05844, ctc_loss=0.1161, over 19856.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.2957, pruned_loss=0.06642, ctc_loss=0.124, over 3868550.33 frames. ], batch size: 52, lr: 2.03e-02, grad_scale: 32.0
+2024-08-26 17:38:26,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=91125.33333333333, ans=0.125
+2024-08-26 17:38:29,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=91125.33333333333, ans=0.125
+2024-08-26 17:38:33,709 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=91125.33333333333, ans=0.1
+2024-08-26 17:38:37,168 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=91178.66666666667, ans=0.125
+2024-08-26 17:38:40,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=91178.66666666667, ans=0.2
+2024-08-26 17:38:54,812 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=91285.33333333333, ans=0.125
+2024-08-26 17:39:02,524 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=91338.66666666667, ans=0.125
+2024-08-26 17:39:04,130 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.288e+02 1.485e+02 1.702e+02 1.931e+02 2.999e+02, threshold=3.403e+02, percent-clipped=0.0
+2024-08-26 17:39:07,113 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:39:10,354 INFO [train.py:1114] (2/4) Epoch 7, batch 2200, loss[loss=0.2577, simple_loss=0.3098, pruned_loss=0.07424, ctc_loss=0.1426, over 19579.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.2959, pruned_loss=0.06664, ctc_loss=0.1243, over 3867474.02 frames. ], batch size: 57, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:39:19,485 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=91445.33333333333, ans=0.125
+2024-08-26 17:39:22,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=91445.33333333333, ans=0.07
+2024-08-26 17:39:22,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=91445.33333333333, ans=0.035
+2024-08-26 17:39:30,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=91498.66666666667, ans=0.125
+2024-08-26 17:39:38,986 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.54 vs. limit=15.0
+2024-08-26 17:39:48,545 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91605.33333333333, ans=0.1
+2024-08-26 17:39:54,477 INFO [train.py:1114] (2/4) Epoch 7, batch 2250, loss[loss=0.2444, simple_loss=0.3035, pruned_loss=0.0677, ctc_loss=0.1249, over 19606.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2958, pruned_loss=0.06628, ctc_loss=0.1236, over 3867046.13 frames. ], batch size: 55, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:39:56,423 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=91658.66666666667, ans=0.2
+2024-08-26 17:40:03,560 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=91712.0, ans=0.125
+2024-08-26 17:40:11,443 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=91765.33333333333, ans=0.0
+2024-08-26 17:40:14,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=91765.33333333333, ans=0.125
+2024-08-26 17:40:32,442 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.538e+02 1.708e+02 1.997e+02 3.315e+02, threshold=3.416e+02, percent-clipped=0.0
+2024-08-26 17:40:36,928 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=91872.0, ans=0.025
+2024-08-26 17:40:38,572 INFO [train.py:1114] (2/4) Epoch 7, batch 2300, loss[loss=0.2163, simple_loss=0.2773, pruned_loss=0.05578, ctc_loss=0.1092, over 19512.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.2953, pruned_loss=0.06651, ctc_loss=0.1239, over 3860740.89 frames. ], batch size: 49, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:40:42,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=91925.33333333333, ans=0.0
+2024-08-26 17:40:44,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=91925.33333333333, ans=0.0
+2024-08-26 17:40:48,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=91978.66666666667, ans=0.0
+2024-08-26 17:40:51,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=91978.66666666667, ans=0.1
+2024-08-26 17:40:52,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=91978.66666666667, ans=0.125
+2024-08-26 17:40:57,130 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=92032.0, ans=0.125
+2024-08-26 17:41:00,276 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.57 vs. limit=10.0
+2024-08-26 17:41:07,682 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.90 vs. limit=6.0
+2024-08-26 17:41:14,455 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.26 vs. limit=15.0
+2024-08-26 17:41:22,883 INFO [train.py:1114] (2/4) Epoch 7, batch 2350, loss[loss=0.2625, simple_loss=0.3184, pruned_loss=0.07482, ctc_loss=0.1421, over 19682.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.2956, pruned_loss=0.0669, ctc_loss=0.1248, over 3863305.38 frames. ], batch size: 63, lr: 2.02e-02, grad_scale: 32.0
+2024-08-26 17:41:25,569 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:41:29,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=92192.0, ans=0.1
+2024-08-26 17:41:29,258 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.36 vs. limit=15.0
+2024-08-26 17:41:30,968 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.47 vs. limit=22.5
+2024-08-26 17:41:53,560 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=9.60 vs. limit=15.0
+2024-08-26 17:41:58,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=92405.33333333333, ans=0.0
+2024-08-26 17:42:01,678 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.248e+02 1.515e+02 1.664e+02 1.862e+02 3.479e+02, threshold=3.327e+02, percent-clipped=1.0
+2024-08-26 17:42:06,893 INFO [train.py:1114] (2/4) Epoch 7, batch 2400, loss[loss=0.2396, simple_loss=0.3064, pruned_loss=0.06374, ctc_loss=0.1135, over 19256.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.298, pruned_loss=0.06776, ctc_loss=0.1258, over 3857578.70 frames. ], batch size: 71, lr: 2.01e-02, grad_scale: 32.0
+2024-08-26 17:42:24,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=92565.33333333333, ans=0.0
+2024-08-26 17:42:25,793 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=92565.33333333333, ans=0.1
+2024-08-26 17:42:56,035 INFO [train.py:1114] (2/4) Epoch 7, batch 2450, loss[loss=0.3316, simple_loss=0.3528, pruned_loss=0.1134, ctc_loss=0.2088, over 13041.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3025, pruned_loss=0.07137, ctc_loss=0.1329, over 3730281.85 frames. ], batch size: 140, lr: 2.01e-02, grad_scale: 16.0
+2024-08-26 17:43:01,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=92725.33333333333, ans=0.0
+2024-08-26 17:43:05,763 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.20 vs. limit=15.0
+2024-08-26 17:43:07,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=92778.66666666667, ans=0.125
+2024-08-26 17:43:19,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=92832.0, ans=0.125
+2024-08-26 17:43:29,055 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.12 vs. limit=15.0
+2024-08-26 17:44:23,152 INFO [train.py:1114] (2/4) Epoch 8, batch 0, loss[loss=0.2338, simple_loss=0.2856, pruned_loss=0.06568, ctc_loss=0.1264, over 19399.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2856, pruned_loss=0.06568, ctc_loss=0.1264, over 19399.00 frames. ], batch size: 48, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:44:23,152 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 17:44:49,269 INFO [train.py:1146] (2/4) Epoch 8, validation: loss=0.2003, simple_loss=0.2903, pruned_loss=0.04062, ctc_loss=0.07268, over 944034.00 frames.
+2024-08-26 17:44:49,270 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
+2024-08-26 17:44:55,028 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.675e+02 1.918e+02 2.084e+02 4.365e+02, threshold=3.836e+02, percent-clipped=1.0
+2024-08-26 17:45:17,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=92986.66666666667, ans=0.125
+2024-08-26 17:45:28,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93040.0, ans=0.1
+2024-08-26 17:45:43,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=93146.66666666667, ans=0.125
+2024-08-26 17:45:48,346 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.36 vs. limit=15.0
+2024-08-26 17:45:51,634 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=93146.66666666667, ans=0.0
+2024-08-26 17:45:54,269 INFO [train.py:1114] (2/4) Epoch 8, batch 50, loss[loss=0.2261, simple_loss=0.2782, pruned_loss=0.06415, ctc_loss=0.1145, over 19716.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.2986, pruned_loss=0.06771, ctc_loss=0.127, over 843278.58 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:46:18,906 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.12 vs. limit=10.0
+2024-08-26 17:46:26,752 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.66 vs. limit=6.0
+2024-08-26 17:46:38,841 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.00 vs. limit=10.0
+2024-08-26 17:46:42,913 INFO [train.py:1114] (2/4) Epoch 8, batch 100, loss[loss=0.2141, simple_loss=0.2857, pruned_loss=0.05192, ctc_loss=0.09654, over 19700.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.2996, pruned_loss=0.06708, ctc_loss=0.1262, over 1498192.96 frames. ], batch size: 51, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:46:48,084 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.13 vs. limit=15.0
+2024-08-26 17:46:48,504 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.574e+02 1.749e+02 2.053e+02 3.512e+02, threshold=3.498e+02, percent-clipped=0.0
+2024-08-26 17:47:04,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=93573.33333333333, ans=0.0
+2024-08-26 17:47:20,577 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.84 vs. limit=6.0
+2024-08-26 17:47:32,169 INFO [train.py:1114] (2/4) Epoch 8, batch 150, loss[loss=0.2243, simple_loss=0.2773, pruned_loss=0.06198, ctc_loss=0.1185, over 19729.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.2965, pruned_loss=0.06579, ctc_loss=0.1232, over 2026296.86 frames. ], batch size: 47, lr: 1.89e-02, grad_scale: 32.0
+2024-08-26 17:47:41,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=93786.66666666667, ans=0.2
+2024-08-26 17:47:50,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=93786.66666666667, ans=0.04949747468305833
+2024-08-26 17:47:59,065 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.26 vs. limit=15.0
+2024-08-26 17:48:00,578 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=93893.33333333333, ans=0.0
+2024-08-26 17:48:06,465 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=93893.33333333333, ans=0.2
+2024-08-26 17:48:09,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=93893.33333333333, ans=0.125
+2024-08-26 17:48:09,591 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.21 vs. limit=15.0
+2024-08-26 17:48:14,177 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.80 vs. limit=12.0
+2024-08-26 17:48:20,169 INFO [train.py:1114] (2/4) Epoch 8, batch 200, loss[loss=0.2605, simple_loss=0.3166, pruned_loss=0.07396, ctc_loss=0.141, over 18115.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.2941, pruned_loss=0.06461, ctc_loss=0.121, over 2433444.82 frames. ], batch size: 85, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:48:25,556 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.434e+02 1.574e+02 1.787e+02 2.973e+02, threshold=3.148e+02, percent-clipped=0.0
+2024-08-26 17:48:30,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff2.min_abs, batch_count=94000.0, ans=0.1
+2024-08-26 17:48:31,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=94053.33333333333, ans=0.125
+2024-08-26 17:48:42,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=94053.33333333333, ans=0.125
+2024-08-26 17:48:45,032 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=94106.66666666667, ans=0.2
+2024-08-26 17:48:47,895 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=94106.66666666667, ans=0.0
+2024-08-26 17:48:47,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=94106.66666666667, ans=0.125
+2024-08-26 17:48:52,655 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=94160.0, ans=0.0
+2024-08-26 17:49:12,241 INFO [train.py:1114] (2/4) Epoch 8, batch 250, loss[loss=0.2493, simple_loss=0.3083, pruned_loss=0.06951, ctc_loss=0.1284, over 19420.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.2938, pruned_loss=0.06416, ctc_loss=0.1201, over 2753964.23 frames. ], batch size: 67, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:49:14,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=94266.66666666667, ans=0.2
+2024-08-26 17:49:35,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=94373.33333333333, ans=10.0
+2024-08-26 17:49:44,065 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=94426.66666666667, ans=0.125
+2024-08-26 17:49:48,124 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.62 vs. limit=6.0
+2024-08-26 17:49:58,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=94480.0, ans=0.05
+2024-08-26 17:50:03,381 INFO [train.py:1114] (2/4) Epoch 8, batch 300, loss[loss=0.2246, simple_loss=0.2949, pruned_loss=0.05575, ctc_loss=0.1072, over 19526.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.2939, pruned_loss=0.06447, ctc_loss=0.1204, over 2999223.72 frames. ], batch size: 61, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:50:09,194 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.273e+02 1.482e+02 1.652e+02 1.879e+02 4.693e+02, threshold=3.305e+02, percent-clipped=1.0
+2024-08-26 17:50:12,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=94586.66666666667, ans=0.2
+2024-08-26 17:50:16,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=94586.66666666667, ans=0.125
+2024-08-26 17:50:21,819 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=94640.0, ans=0.125
+2024-08-26 17:50:23,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=94640.0, ans=0.05
+2024-08-26 17:50:26,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=94640.0, ans=0.0
+2024-08-26 17:50:50,293 INFO [train.py:1114] (2/4) Epoch 8, batch 350, loss[loss=0.1903, simple_loss=0.254, pruned_loss=0.04449, ctc_loss=0.09418, over 19759.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.2938, pruned_loss=0.06407, ctc_loss=0.1199, over 3189915.13 frames. ], batch size: 48, lr: 1.88e-02, grad_scale: 32.0
+2024-08-26 17:50:59,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=94853.33333333333, ans=0.125
+2024-08-26 17:51:04,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=94853.33333333333, ans=0.0
+2024-08-26 17:51:06,355 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=94853.33333333333, ans=0.035
+2024-08-26 17:51:30,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=94960.0, ans=0.0
+2024-08-26 17:51:31,571 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:52:19,167 INFO [train.py:1114] (2/4) Epoch 8, batch 400, loss[loss=0.2254, simple_loss=0.2916, pruned_loss=0.05803, ctc_loss=0.1078, over 19503.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.2931, pruned_loss=0.06377, ctc_loss=0.1194, over 3341611.21 frames. ], batch size: 54, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:52:19,463 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=95066.66666666667, ans=0.125
+2024-08-26 17:52:22,104 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=95066.66666666667, ans=0.025
+2024-08-26 17:52:22,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=95066.66666666667, ans=0.125
+2024-08-26 17:52:24,631 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 1.574e+02 1.829e+02 2.059e+02 4.627e+02, threshold=3.659e+02, percent-clipped=2.0
+2024-08-26 17:52:35,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=95120.0, ans=0.125
+2024-08-26 17:52:41,045 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=95173.33333333333, ans=0.0
+2024-08-26 17:53:04,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=95280.0, ans=0.0
+2024-08-26 17:53:07,844 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=95333.33333333333, ans=0.125
+2024-08-26 17:53:08,491 INFO [train.py:1114] (2/4) Epoch 8, batch 450, loss[loss=0.2214, simple_loss=0.2882, pruned_loss=0.05627, ctc_loss=0.105, over 19616.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.2931, pruned_loss=0.06381, ctc_loss=0.1197, over 3450604.61 frames. ], batch size: 55, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:53:17,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=95333.33333333333, ans=0.2
+2024-08-26 17:53:21,746 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 17:53:23,570 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=95386.66666666667, ans=0.0
+2024-08-26 17:53:26,569 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.36 vs. limit=22.5
+2024-08-26 17:53:29,277 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.16 vs. limit=15.0
+2024-08-26 17:53:45,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=95493.33333333333, ans=0.125
+2024-08-26 17:53:50,188 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.60 vs. limit=10.0
+2024-08-26 17:53:58,075 INFO [train.py:1114] (2/4) Epoch 8, batch 500, loss[loss=0.2486, simple_loss=0.3093, pruned_loss=0.06969, ctc_loss=0.1214, over 19651.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2923, pruned_loss=0.06342, ctc_loss=0.1191, over 3546424.92 frames. ], batch size: 63, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:54:02,995 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=95600.0, ans=0.2
+2024-08-26 17:54:03,647 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.468e+02 1.609e+02 1.778e+02 4.606e+02, threshold=3.218e+02, percent-clipped=1.0
+2024-08-26 17:54:37,954 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=95706.66666666667, ans=0.125
+2024-08-26 17:55:43,738 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=95760.0, ans=0.125
+2024-08-26 17:55:55,630 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=95813.33333333333, ans=0.125
+2024-08-26 17:56:19,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=95813.33333333333, ans=0.1
+2024-08-26 17:56:20,600 INFO [train.py:1114] (2/4) Epoch 8, batch 550, loss[loss=0.2498, simple_loss=0.3057, pruned_loss=0.07022, ctc_loss=0.1335, over 19348.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.2922, pruned_loss=0.0634, ctc_loss=0.1191, over 3609011.82 frames. ], batch size: 71, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:57:17,688 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=95866.66666666667, ans=0.0
+2024-08-26 17:57:37,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=95973.33333333333, ans=0.125
+2024-08-26 17:57:46,031 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.77 vs. limit=15.0
+2024-08-26 17:57:52,338 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=96080.0, ans=0.1
+2024-08-26 17:57:58,513 INFO [train.py:1114] (2/4) Epoch 8, batch 600, loss[loss=0.2323, simple_loss=0.2974, pruned_loss=0.06145, ctc_loss=0.1107, over 19422.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2919, pruned_loss=0.06317, ctc_loss=0.1185, over 3667148.39 frames. ], batch size: 67, lr: 1.87e-02, grad_scale: 32.0
+2024-08-26 17:58:03,800 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.55 vs. limit=15.0
+2024-08-26 17:58:05,961 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.508e+02 1.654e+02 1.896e+02 3.415e+02, threshold=3.309e+02, percent-clipped=1.0
+2024-08-26 17:58:16,552 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=96186.66666666667, ans=0.0
+2024-08-26 17:58:39,908 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.02 vs. limit=10.0
+2024-08-26 17:58:47,812 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=96346.66666666667, ans=0.2
+2024-08-26 17:58:49,409 INFO [train.py:1114] (2/4) Epoch 8, batch 650, loss[loss=0.243, simple_loss=0.3005, pruned_loss=0.06732, ctc_loss=0.1271, over 19771.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.291, pruned_loss=0.06247, ctc_loss=0.1175, over 3717737.71 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 17:58:49,820 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=13.52 vs. limit=22.5
+2024-08-26 17:58:53,558 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=96400.0, ans=0.125
+2024-08-26 17:58:59,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=96453.33333333333, ans=0.125
+2024-08-26 17:59:08,178 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=96506.66666666667, ans=0.125
+2024-08-26 17:59:19,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=96560.0, ans=0.125
+2024-08-26 17:59:20,180 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96560.0, ans=0.1
+2024-08-26 17:59:24,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=96560.0, ans=0.2
+2024-08-26 17:59:36,098 INFO [train.py:1114] (2/4) Epoch 8, batch 700, loss[loss=0.2126, simple_loss=0.2774, pruned_loss=0.05347, ctc_loss=0.1021, over 19715.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.2921, pruned_loss=0.06291, ctc_loss=0.1183, over 3749215.44 frames. ], batch size: 51, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 17:59:41,809 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.481e+02 1.644e+02 1.817e+02 3.294e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-26 17:59:42,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=96666.66666666667, ans=0.2
+2024-08-26 18:00:07,861 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.12 vs. limit=15.0
+2024-08-26 18:00:22,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=96880.0, ans=0.125
+2024-08-26 18:00:23,570 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.53 vs. limit=15.0
+2024-08-26 18:00:27,683 INFO [train.py:1114] (2/4) Epoch 8, batch 750, loss[loss=0.2424, simple_loss=0.307, pruned_loss=0.06517, ctc_loss=0.1184, over 19518.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2922, pruned_loss=0.06304, ctc_loss=0.1182, over 3775297.67 frames. ], batch size: 54, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 18:00:36,510 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.21 vs. limit=15.0
+2024-08-26 18:00:41,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2.whitening_limit, batch_count=96986.66666666667, ans=15.0
+2024-08-26 18:01:00,080 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.67 vs. limit=15.0
+2024-08-26 18:01:07,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=97146.66666666667, ans=0.0
+2024-08-26 18:01:19,053 INFO [train.py:1114] (2/4) Epoch 8, batch 800, loss[loss=0.2039, simple_loss=0.264, pruned_loss=0.05265, ctc_loss=0.09641, over 19412.00 frames. ], tot_loss[loss=0.233, simple_loss=0.292, pruned_loss=0.06326, ctc_loss=0.1186, over 3795967.75 frames. ], batch size: 48, lr: 1.86e-02, grad_scale: 32.0
+2024-08-26 18:01:24,568 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.172e+02 1.524e+02 1.729e+02 2.039e+02 3.596e+02, threshold=3.457e+02, percent-clipped=1.0
+2024-08-26 18:01:26,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=97200.0, ans=0.125
+2024-08-26 18:01:46,674 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.42 vs. limit=15.0
+2024-08-26 18:01:57,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=97413.33333333333, ans=0.0
+2024-08-26 18:01:58,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=97413.33333333333, ans=0.2
+2024-08-26 18:02:00,626 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=97413.33333333333, ans=0.125
+2024-08-26 18:02:06,313 INFO [train.py:1114] (2/4) Epoch 8, batch 850, loss[loss=0.2671, simple_loss=0.3169, pruned_loss=0.07897, ctc_loss=0.1486, over 19658.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2918, pruned_loss=0.06314, ctc_loss=0.1184, over 3815049.30 frames. ], batch size: 59, lr: 1.85e-02, grad_scale: 32.0
+2024-08-26 18:02:10,505 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.58 vs. limit=22.5
+2024-08-26 18:02:11,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=97466.66666666667, ans=0.125
+2024-08-26 18:02:19,256 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=97520.0, ans=10.0
+2024-08-26 18:02:20,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=97520.0, ans=0.125
+2024-08-26 18:02:21,352 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=97520.0, ans=0.0
+2024-08-26 18:02:24,354 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.30 vs. limit=22.5
+2024-08-26 18:02:37,228 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=97626.66666666667, ans=0.0
+2024-08-26 18:02:38,296 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=97626.66666666667, ans=0.0
+2024-08-26 18:02:39,570 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.17 vs. limit=6.0
+2024-08-26 18:02:47,217 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.94 vs. limit=15.0
+2024-08-26 18:02:58,337 INFO [train.py:1114] (2/4) Epoch 8, batch 900, loss[loss=0.2115, simple_loss=0.2715, pruned_loss=0.05543, ctc_loss=0.1014, over 19796.00 frames. ], tot_loss[loss=0.233, simple_loss=0.2918, pruned_loss=0.06344, ctc_loss=0.1185, over 3819189.68 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 32.0
+2024-08-26 18:03:04,003 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.312e+02 1.578e+02 1.704e+02 2.106e+02 3.434e+02, threshold=3.409e+02, percent-clipped=0.0
+2024-08-26 18:03:04,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=97733.33333333333, ans=0.125
+2024-08-26 18:03:05,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=97733.33333333333, ans=15.0
+2024-08-26 18:03:13,003 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=97786.66666666667, ans=0.0
+2024-08-26 18:03:13,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=97786.66666666667, ans=0.125
+2024-08-26 18:03:23,825 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.03 vs. limit=15.0
+2024-08-26 18:03:29,971 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=97893.33333333333, ans=0.125
+2024-08-26 18:03:36,509 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=97946.66666666667, ans=0.0
+2024-08-26 18:03:41,875 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=97946.66666666667, ans=0.2
+2024-08-26 18:03:45,505 INFO [train.py:1114] (2/4) Epoch 8, batch 950, loss[loss=0.2056, simple_loss=0.2696, pruned_loss=0.05045, ctc_loss=0.1017, over 19477.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.2925, pruned_loss=0.06395, ctc_loss=0.1195, over 3820856.06 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:03:59,682 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=98053.33333333333, ans=0.015
+2024-08-26 18:03:59,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=98053.33333333333, ans=0.125
+2024-08-26 18:04:01,100 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.10 vs. limit=22.5
+2024-08-26 18:04:10,197 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=98106.66666666667, ans=0.0
+2024-08-26 18:04:31,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=98213.33333333333, ans=0.0
+2024-08-26 18:04:37,643 INFO [train.py:1114] (2/4) Epoch 8, batch 1000, loss[loss=0.1945, simple_loss=0.2638, pruned_loss=0.04504, ctc_loss=0.0878, over 19843.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.2927, pruned_loss=0.06384, ctc_loss=0.1195, over 3817543.07 frames. ], batch size: 52, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:04:44,377 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.225e+02 1.497e+02 1.652e+02 1.874e+02 4.992e+02, threshold=3.305e+02, percent-clipped=2.0
+2024-08-26 18:04:51,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=98320.0, ans=0.125
+2024-08-26 18:05:16,059 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.15 vs. limit=15.0
+2024-08-26 18:05:24,685 INFO [train.py:1114] (2/4) Epoch 8, batch 1050, loss[loss=0.2205, simple_loss=0.2866, pruned_loss=0.05488, ctc_loss=0.1118, over 19822.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.2921, pruned_loss=0.06358, ctc_loss=0.1188, over 3824746.11 frames. ], batch size: 57, lr: 1.85e-02, grad_scale: 16.0
+2024-08-26 18:05:28,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=98533.33333333333, ans=0.0
+2024-08-26 18:05:43,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=98586.66666666667, ans=0.125
+2024-08-26 18:05:47,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=98586.66666666667, ans=0.125
+2024-08-26 18:05:58,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=98693.33333333333, ans=0.0
+2024-08-26 18:06:04,073 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=98693.33333333333, ans=0.025
+2024-08-26 18:06:14,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=98746.66666666667, ans=0.0
+2024-08-26 18:06:18,149 INFO [train.py:1114] (2/4) Epoch 8, batch 1100, loss[loss=0.2233, simple_loss=0.2866, pruned_loss=0.05849, ctc_loss=0.1073, over 19583.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.2918, pruned_loss=0.06345, ctc_loss=0.1185, over 3831219.00 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 16.0
+2024-08-26 18:06:24,660 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.215e+02 1.469e+02 1.560e+02 1.744e+02 3.443e+02, threshold=3.121e+02, percent-clipped=2.0
+2024-08-26 18:06:29,529 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=98853.33333333333, ans=0.125
+2024-08-26 18:06:56,843 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=98960.0, ans=0.125
+2024-08-26 18:07:10,073 INFO [train.py:1114] (2/4) Epoch 8, batch 1150, loss[loss=0.1908, simple_loss=0.2644, pruned_loss=0.0423, ctc_loss=0.08138, over 19609.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.2915, pruned_loss=0.0631, ctc_loss=0.1181, over 3830468.15 frames. ], batch size: 52, lr: 1.84e-02, grad_scale: 16.0
+2024-08-26 18:07:17,104 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=99066.66666666667, ans=0.125
+2024-08-26 18:07:21,839 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=99120.0, ans=0.125
+2024-08-26 18:07:29,736 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.77 vs. limit=15.0
+2024-08-26 18:07:33,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=99173.33333333333, ans=0.1
+2024-08-26 18:07:51,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer_na.min_abs, batch_count=99280.0, ans=0.02
+2024-08-26 18:07:57,681 INFO [train.py:1114] (2/4) Epoch 8, batch 1200, loss[loss=0.2278, simple_loss=0.2991, pruned_loss=0.0562, ctc_loss=0.1104, over 19841.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2931, pruned_loss=0.06397, ctc_loss=0.1198, over 3825339.57 frames. ], batch size: 57, lr: 1.84e-02, grad_scale: 32.0
+2024-08-26 18:08:03,553 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=99333.33333333333, ans=0.2
+2024-08-26 18:08:04,258 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.256e+02 1.491e+02 1.608e+02 2.003e+02 2.840e+02, threshold=3.216e+02, percent-clipped=0.0
+2024-08-26 18:08:19,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=99440.0, ans=0.025
+2024-08-26 18:08:37,684 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.03 vs. limit=15.0
+2024-08-26 18:08:42,949 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=99546.66666666667, ans=0.125
+2024-08-26 18:08:49,183 INFO [train.py:1114] (2/4) Epoch 8, batch 1250, loss[loss=0.2395, simple_loss=0.3019, pruned_loss=0.06555, ctc_loss=0.115, over 19507.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.2932, pruned_loss=0.06402, ctc_loss=0.1196, over 3843318.97 frames. ], batch size: 61, lr: 1.84e-02, grad_scale: 32.0
+2024-08-26 18:08:56,173 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=99600.0, ans=0.125
+2024-08-26 18:09:40,597 INFO [train.py:1114] (2/4) Epoch 8, batch 1300, loss[loss=0.2684, simple_loss=0.3173, pruned_loss=0.07878, ctc_loss=0.1546, over 18875.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2925, pruned_loss=0.06361, ctc_loss=0.1188, over 3845959.95 frames. ], batch size: 76, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:09:41,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=99866.66666666667, ans=0.1
+2024-08-26 18:09:44,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=99866.66666666667, ans=0.2
+2024-08-26 18:09:47,132 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.481e+02 1.661e+02 1.866e+02 3.142e+02, threshold=3.323e+02, percent-clipped=0.0
+2024-08-26 18:10:05,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=99973.33333333333, ans=0.125
+2024-08-26 18:10:06,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=99973.33333333333, ans=0.1
+2024-08-26 18:10:07,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=99973.33333333333, ans=0.2
+2024-08-26 18:10:20,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100080.0, ans=0.125
+2024-08-26 18:10:20,928 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=100080.0, ans=0.125
+2024-08-26 18:10:27,283 INFO [train.py:1114] (2/4) Epoch 8, batch 1350, loss[loss=0.2075, simple_loss=0.2767, pruned_loss=0.04957, ctc_loss=0.09805, over 19755.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2917, pruned_loss=0.06284, ctc_loss=0.1175, over 3857252.32 frames. ], batch size: 54, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:10:30,776 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=12.37 vs. limit=15.0
+2024-08-26 18:10:36,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=100186.66666666667, ans=0.0
+2024-08-26 18:10:40,501 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100186.66666666667, ans=0.1
+2024-08-26 18:10:44,303 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100186.66666666667, ans=0.1
+2024-08-26 18:11:07,420 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100346.66666666667, ans=0.1
+2024-08-26 18:11:08,282 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=100346.66666666667, ans=0.125
+2024-08-26 18:11:14,664 INFO [train.py:1114] (2/4) Epoch 8, batch 1400, loss[loss=0.2057, simple_loss=0.2632, pruned_loss=0.0526, ctc_loss=0.1076, over 19645.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.291, pruned_loss=0.06246, ctc_loss=0.1167, over 3864103.57 frames. ], batch size: 46, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:11:21,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=100400.0, ans=0.125
+2024-08-26 18:11:23,739 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.577e+02 1.859e+02 2.331e+02 3.237e+02, threshold=3.718e+02, percent-clipped=0.0
+2024-08-26 18:11:24,326 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.89 vs. limit=15.0
+2024-08-26 18:11:24,814 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100400.0, ans=0.0
+2024-08-26 18:11:30,936 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=100453.33333333333, ans=0.0
+2024-08-26 18:11:31,745 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=100453.33333333333, ans=0.0
+2024-08-26 18:11:32,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100453.33333333333, ans=0.1
+2024-08-26 18:11:38,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=100453.33333333333, ans=0.125
+2024-08-26 18:11:40,434 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=100506.66666666667, ans=0.2
+2024-08-26 18:11:45,515 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=17.53 vs. limit=22.5
+2024-08-26 18:11:46,280 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=100506.66666666667, ans=0.125
+2024-08-26 18:11:52,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=100560.0, ans=0.5
+2024-08-26 18:12:09,360 INFO [train.py:1114] (2/4) Epoch 8, batch 1450, loss[loss=0.2264, simple_loss=0.2907, pruned_loss=0.0593, ctc_loss=0.1086, over 19681.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2918, pruned_loss=0.06279, ctc_loss=0.1174, over 3862316.08 frames. ], batch size: 63, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:12:21,678 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=100720.0, ans=0.2
+2024-08-26 18:12:22,563 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=100720.0, ans=0.2
+2024-08-26 18:12:33,193 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.68 vs. limit=15.0
+2024-08-26 18:12:34,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=100773.33333333333, ans=0.0
+2024-08-26 18:12:37,742 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.12 vs. limit=15.0
+2024-08-26 18:12:44,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=100826.66666666667, ans=0.025
+2024-08-26 18:13:00,705 INFO [train.py:1114] (2/4) Epoch 8, batch 1500, loss[loss=0.223, simple_loss=0.2858, pruned_loss=0.05884, ctc_loss=0.1061, over 19585.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2922, pruned_loss=0.0628, ctc_loss=0.1173, over 3861611.60 frames. ], batch size: 57, lr: 1.83e-02, grad_scale: 32.0
+2024-08-26 18:13:07,544 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.257e+02 1.450e+02 1.594e+02 1.806e+02 5.150e+02, threshold=3.189e+02, percent-clipped=1.0
+2024-08-26 18:13:15,306 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=100986.66666666667, ans=0.0
+2024-08-26 18:13:16,746 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.95 vs. limit=15.0
+2024-08-26 18:13:28,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=101093.33333333333, ans=0.125
+2024-08-26 18:13:37,211 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=101093.33333333333, ans=0.0
+2024-08-26 18:13:42,373 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.98 vs. limit=10.0
+2024-08-26 18:13:48,298 INFO [train.py:1114] (2/4) Epoch 8, batch 1550, loss[loss=0.2382, simple_loss=0.3015, pruned_loss=0.06425, ctc_loss=0.1161, over 19589.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.2925, pruned_loss=0.06332, ctc_loss=0.1184, over 3846035.29 frames. ], batch size: 60, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:13:55,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=101200.0, ans=0.2
+2024-08-26 18:14:03,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101253.33333333333, ans=0.125
+2024-08-26 18:14:21,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=101360.0, ans=0.125
+2024-08-26 18:14:28,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.const_attention_rate, batch_count=101360.0, ans=0.025
+2024-08-26 18:14:33,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=101413.33333333333, ans=0.1
+2024-08-26 18:14:37,319 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=101413.33333333333, ans=0.125
+2024-08-26 18:14:39,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=101413.33333333333, ans=0.125
+2024-08-26 18:14:40,865 INFO [train.py:1114] (2/4) Epoch 8, batch 1600, loss[loss=0.25, simple_loss=0.3137, pruned_loss=0.0675, ctc_loss=0.1283, over 19851.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.2923, pruned_loss=0.0633, ctc_loss=0.1185, over 3835025.06 frames. ], batch size: 57, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:14:47,307 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 1.562e+02 1.716e+02 2.059e+02 3.797e+02, threshold=3.431e+02, percent-clipped=2.0
+2024-08-26 18:14:48,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101466.66666666667, ans=0.1
+2024-08-26 18:14:50,474 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=101520.0, ans=0.0
+2024-08-26 18:15:18,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=101626.66666666667, ans=0.0
+2024-08-26 18:15:31,636 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.63 vs. limit=15.0
+2024-08-26 18:15:32,088 INFO [train.py:1114] (2/4) Epoch 8, batch 1650, loss[loss=0.2187, simple_loss=0.2894, pruned_loss=0.05333, ctc_loss=0.1033, over 19654.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.2917, pruned_loss=0.06303, ctc_loss=0.1181, over 3830497.38 frames. ], batch size: 59, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:15:33,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=101733.33333333333, ans=0.2
+2024-08-26 18:15:48,225 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=101786.66666666667, ans=0.125
+2024-08-26 18:16:00,060 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.11 vs. limit=15.0
+2024-08-26 18:16:04,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101893.33333333333, ans=0.1
+2024-08-26 18:16:10,999 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.46 vs. limit=12.0
+2024-08-26 18:16:16,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=101946.66666666667, ans=0.125
+2024-08-26 18:16:18,706 INFO [train.py:1114] (2/4) Epoch 8, batch 1700, loss[loss=0.2074, simple_loss=0.2589, pruned_loss=0.05686, ctc_loss=0.1055, over 19672.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2912, pruned_loss=0.06261, ctc_loss=0.1173, over 3845037.64 frames. ], batch size: 46, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:16:25,301 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.495e+02 1.737e+02 2.089e+02 3.401e+02, threshold=3.475e+02, percent-clipped=0.0
+2024-08-26 18:16:45,921 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=102160.0, ans=0.125
+2024-08-26 18:16:56,669 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=102213.33333333333, ans=0.5
+2024-08-26 18:17:03,796 INFO [train.py:1114] (2/4) Epoch 8, batch 1750, loss[loss=0.1959, simple_loss=0.2577, pruned_loss=0.04961, ctc_loss=0.08714, over 19663.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.2905, pruned_loss=0.06236, ctc_loss=0.1166, over 3849345.66 frames. ], batch size: 45, lr: 1.82e-02, grad_scale: 32.0
+2024-08-26 18:17:04,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102266.66666666667, ans=0.125
+2024-08-26 18:17:13,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=102320.0, ans=0.0
+2024-08-26 18:17:19,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=102320.0, ans=0.125
+2024-08-26 18:17:31,554 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.42 vs. limit=10.0
+2024-08-26 18:17:32,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=102426.66666666667, ans=10.0
+2024-08-26 18:17:34,775 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.92 vs. limit=12.0
+2024-08-26 18:17:46,959 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=102480.0, ans=0.125
+2024-08-26 18:17:48,514 INFO [train.py:1114] (2/4) Epoch 8, batch 1800, loss[loss=0.2353, simple_loss=0.2983, pruned_loss=0.06202, ctc_loss=0.1206, over 19624.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.2905, pruned_loss=0.06224, ctc_loss=0.1165, over 3851056.13 frames. ], batch size: 55, lr: 1.81e-02, grad_scale: 32.0
+2024-08-26 18:17:50,532 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102533.33333333333, ans=0.1
+2024-08-26 18:17:53,218 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.const_attention_rate, batch_count=102533.33333333333, ans=0.025
+2024-08-26 18:17:56,842 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.517e+02 1.665e+02 1.949e+02 3.105e+02, threshold=3.330e+02, percent-clipped=0.0
+2024-08-26 18:18:06,652 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=102586.66666666667, ans=0.1
+2024-08-26 18:18:09,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102640.0, ans=0.1
+2024-08-26 18:18:33,242 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:18:36,735 INFO [train.py:1114] (2/4) Epoch 8, batch 1850, loss[loss=0.244, simple_loss=0.3063, pruned_loss=0.06603, ctc_loss=0.1238, over 19594.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.2898, pruned_loss=0.06166, ctc_loss=0.1153, over 3854441.68 frames. ], batch size: 57, lr: 1.81e-02, grad_scale: 32.0
+2024-08-26 18:18:37,779 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=102800.0, ans=0.125
+2024-08-26 18:19:07,230 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=102960.0, ans=0.125
+2024-08-26 18:19:14,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=103013.33333333333, ans=0.125
+2024-08-26 18:19:19,699 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=103013.33333333333, ans=0.125
+2024-08-26 18:19:21,233 INFO [train.py:1114] (2/4) Epoch 8, batch 1900, loss[loss=0.2364, simple_loss=0.3026, pruned_loss=0.06166, ctc_loss=0.1173, over 19626.00 frames. ], tot_loss[loss=0.231, simple_loss=0.291, pruned_loss=0.06222, ctc_loss=0.1163, over 3859222.94 frames. ], batch size: 59, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:19:21,480 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=103066.66666666667, ans=0.09899494936611666
+2024-08-26 18:19:21,801 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.89 vs. limit=15.0
+2024-08-26 18:19:28,166 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.533e+02 1.714e+02 2.014e+02 3.062e+02, threshold=3.427e+02, percent-clipped=0.0
+2024-08-26 18:19:33,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=103120.0, ans=0.0
+2024-08-26 18:19:47,394 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103226.66666666667, ans=0.1
+2024-08-26 18:20:02,728 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.86 vs. limit=15.0
+2024-08-26 18:20:04,902 INFO [train.py:1114] (2/4) Epoch 8, batch 1950, loss[loss=0.2222, simple_loss=0.285, pruned_loss=0.05789, ctc_loss=0.1092, over 19608.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.2922, pruned_loss=0.06235, ctc_loss=0.1162, over 3868747.99 frames. ], batch size: 52, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:20:12,338 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=103333.33333333333, ans=0.125
+2024-08-26 18:20:14,109 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=103386.66666666667, ans=0.0
+2024-08-26 18:20:23,994 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103440.0, ans=0.125
+2024-08-26 18:20:27,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=103440.0, ans=0.0
+2024-08-26 18:20:33,185 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.72 vs. limit=15.0
+2024-08-26 18:20:37,503 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.74 vs. limit=15.0
+2024-08-26 18:20:51,119 INFO [train.py:1114] (2/4) Epoch 8, batch 2000, loss[loss=0.1833, simple_loss=0.2461, pruned_loss=0.04316, ctc_loss=0.08549, over 19671.00 frames. ], tot_loss[loss=0.232, simple_loss=0.2924, pruned_loss=0.0625, ctc_loss=0.1165, over 3852877.11 frames. ], batch size: 45, lr: 1.81e-02, grad_scale: 16.0
+2024-08-26 18:20:53,919 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=103600.0, ans=0.125
+2024-08-26 18:20:58,839 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=103600.0, ans=0.025
+2024-08-26 18:20:59,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=103600.0, ans=0.0
+2024-08-26 18:21:00,308 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.277e+02 1.619e+02 1.835e+02 2.136e+02 5.632e+02, threshold=3.670e+02, percent-clipped=2.0
+2024-08-26 18:21:03,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=103653.33333333333, ans=0.125
+2024-08-26 18:21:07,962 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=103653.33333333333, ans=6.0
+2024-08-26 18:21:08,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=103653.33333333333, ans=0.0
+2024-08-26 18:21:20,852 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.59 vs. limit=15.0
+2024-08-26 18:21:27,342 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=103813.33333333333, ans=0.125
+2024-08-26 18:21:36,061 INFO [train.py:1114] (2/4) Epoch 8, batch 2050, loss[loss=0.2248, simple_loss=0.2813, pruned_loss=0.06136, ctc_loss=0.1141, over 19713.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.2914, pruned_loss=0.06232, ctc_loss=0.1162, over 3850629.72 frames. ], batch size: 47, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:21:42,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=103866.66666666667, ans=0.2
+2024-08-26 18:21:45,068 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=103920.0, ans=0.0
+2024-08-26 18:21:56,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=103973.33333333333, ans=0.125
+2024-08-26 18:21:58,174 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=103973.33333333333, ans=0.1
+2024-08-26 18:22:01,484 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=104026.66666666667, ans=0.1
+2024-08-26 18:22:03,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=104026.66666666667, ans=0.125
+2024-08-26 18:22:19,597 INFO [train.py:1114] (2/4) Epoch 8, batch 2100, loss[loss=0.2487, simple_loss=0.3035, pruned_loss=0.06993, ctc_loss=0.1352, over 19765.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2905, pruned_loss=0.06175, ctc_loss=0.1155, over 3858009.65 frames. ], batch size: 54, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:22:25,350 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=21.76 vs. limit=22.5
+2024-08-26 18:22:26,830 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=104133.33333333333, ans=0.125
+2024-08-26 18:22:27,471 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.502e+02 1.673e+02 2.007e+02 2.886e+02, threshold=3.346e+02, percent-clipped=0.0
+2024-08-26 18:22:32,310 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.82 vs. limit=12.0
+2024-08-26 18:22:58,422 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.17 vs. limit=22.5
+2024-08-26 18:23:00,506 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=104346.66666666667, ans=0.0
+2024-08-26 18:23:03,052 INFO [train.py:1114] (2/4) Epoch 8, batch 2150, loss[loss=0.2167, simple_loss=0.2816, pruned_loss=0.05454, ctc_loss=0.1067, over 19862.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.2893, pruned_loss=0.06116, ctc_loss=0.1144, over 3868816.83 frames. ], batch size: 52, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:23:19,840 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:23:43,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=104613.33333333333, ans=0.0
+2024-08-26 18:23:46,678 INFO [train.py:1114] (2/4) Epoch 8, batch 2200, loss[loss=0.2459, simple_loss=0.2971, pruned_loss=0.07055, ctc_loss=0.1342, over 19589.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.2902, pruned_loss=0.06166, ctc_loss=0.1153, over 3866506.60 frames. ], batch size: 57, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:23:54,537 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.596e+02 1.839e+02 2.214e+02 3.376e+02, threshold=3.678e+02, percent-clipped=1.0
+2024-08-26 18:23:54,700 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=104720.0, ans=0.0
+2024-08-26 18:23:57,643 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.00 vs. limit=15.0
+2024-08-26 18:24:09,653 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=104773.33333333333, ans=0.125
+2024-08-26 18:24:21,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=104880.0, ans=0.125
+2024-08-26 18:24:30,584 INFO [train.py:1114] (2/4) Epoch 8, batch 2250, loss[loss=0.2169, simple_loss=0.2896, pruned_loss=0.05162, ctc_loss=0.1025, over 19607.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2905, pruned_loss=0.06173, ctc_loss=0.1156, over 3866394.67 frames. ], batch size: 55, lr: 1.80e-02, grad_scale: 16.0
+2024-08-26 18:24:35,037 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104933.33333333333, ans=0.125
+2024-08-26 18:24:37,486 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=104933.33333333333, ans=0.125
+2024-08-26 18:24:54,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=105040.0, ans=0.0
+2024-08-26 18:25:02,189 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=105093.33333333333, ans=0.2
+2024-08-26 18:25:04,743 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=105093.33333333333, ans=0.1
+2024-08-26 18:25:14,631 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=105146.66666666667, ans=0.2
+2024-08-26 18:25:16,103 INFO [train.py:1114] (2/4) Epoch 8, batch 2300, loss[loss=0.2041, simple_loss=0.2685, pruned_loss=0.05116, ctc_loss=0.09373, over 19512.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.2896, pruned_loss=0.06178, ctc_loss=0.1156, over 3859835.77 frames. ], batch size: 49, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:25:23,769 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.263e+02 1.553e+02 1.767e+02 2.002e+02 4.280e+02, threshold=3.534e+02, percent-clipped=3.0
+2024-08-26 18:25:25,856 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.38 vs. limit=15.0
+2024-08-26 18:25:41,141 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=105360.0, ans=0.0
+2024-08-26 18:25:54,082 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.57 vs. limit=15.0
+2024-08-26 18:25:58,621 INFO [train.py:1114] (2/4) Epoch 8, batch 2350, loss[loss=0.262, simple_loss=0.3188, pruned_loss=0.07578, ctc_loss=0.1344, over 19671.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.2897, pruned_loss=0.0621, ctc_loss=0.1158, over 3862756.37 frames. ], batch size: 63, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:26:12,385 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105520.0, ans=0.125
+2024-08-26 18:26:22,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=105573.33333333333, ans=0.125
+2024-08-26 18:26:27,816 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=105626.66666666667, ans=0.125
+2024-08-26 18:26:32,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=105626.66666666667, ans=0.125
+2024-08-26 18:26:36,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=105680.0, ans=0.0
+2024-08-26 18:26:42,922 INFO [train.py:1114] (2/4) Epoch 8, batch 2400, loss[loss=0.2484, simple_loss=0.3058, pruned_loss=0.07007, ctc_loss=0.127, over 19234.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.2918, pruned_loss=0.063, ctc_loss=0.1173, over 3857949.06 frames. ], batch size: 71, lr: 1.79e-02, grad_scale: 32.0
+2024-08-26 18:26:50,599 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.526e+02 1.733e+02 1.998e+02 3.354e+02, threshold=3.467e+02, percent-clipped=0.0
+2024-08-26 18:26:53,697 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.61 vs. limit=10.0
+2024-08-26 18:26:59,416 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=105840.0, ans=0.125
+2024-08-26 18:27:25,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=105946.66666666667, ans=0.025
+2024-08-26 18:27:27,046 INFO [train.py:1114] (2/4) Epoch 8, batch 2450, loss[loss=0.3285, simple_loss=0.3416, pruned_loss=0.1147, ctc_loss=0.215, over 13439.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.2968, pruned_loss=0.06686, ctc_loss=0.1248, over 3729396.28 frames. ], batch size: 140, lr: 1.79e-02, grad_scale: 16.0
+2024-08-26 18:27:30,060 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:27:39,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=106053.33333333333, ans=0.125
+2024-08-26 18:27:41,675 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=106053.33333333333, ans=10.0
+2024-08-26 18:27:44,814 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.19 vs. limit=15.0
+2024-08-26 18:27:47,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106106.66666666667, ans=0.1
+2024-08-26 18:27:48,258 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=106106.66666666667, ans=0.0
+2024-08-26 18:28:39,348 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:28:47,206 INFO [train.py:1114] (2/4) Epoch 9, batch 0, loss[loss=0.2161, simple_loss=0.2735, pruned_loss=0.05848, ctc_loss=0.1045, over 19818.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2735, pruned_loss=0.05848, ctc_loss=0.1045, over 19818.00 frames. ], batch size: 49, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:28:47,206 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 18:28:56,816 INFO [train.py:1146] (2/4) Epoch 9, validation: loss=0.1927, simple_loss=0.2844, pruned_loss=0.03737, ctc_loss=0.06585, over 944034.00 frames.
+2024-08-26 18:28:56,817 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
+2024-08-26 18:29:04,952 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.49 vs. limit=15.0
+2024-08-26 18:29:09,576 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.55 vs. limit=15.0
+2024-08-26 18:29:16,430 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 1.688e+02 1.849e+02 2.025e+02 3.204e+02, threshold=3.698e+02, percent-clipped=0.0
+2024-08-26 18:29:23,061 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=106314.66666666667, ans=0.125
+2024-08-26 18:29:25,712 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:29:29,695 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.54 vs. limit=10.0
+2024-08-26 18:29:32,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=106368.0, ans=0.125
+2024-08-26 18:29:43,037 INFO [train.py:1114] (2/4) Epoch 9, batch 50, loss[loss=0.203, simple_loss=0.2629, pruned_loss=0.05152, ctc_loss=0.1001, over 19720.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.294, pruned_loss=0.06258, ctc_loss=0.1183, over 844815.75 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:30:07,335 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=106581.33333333333, ans=0.0
+2024-08-26 18:30:11,932 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:30:13,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=106634.66666666667, ans=0.125
+2024-08-26 18:30:26,945 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=106688.0, ans=0.125
+2024-08-26 18:30:33,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=106688.0, ans=0.04949747468305833
+2024-08-26 18:30:39,528 INFO [train.py:1114] (2/4) Epoch 9, batch 100, loss[loss=0.2188, simple_loss=0.2886, pruned_loss=0.05478, ctc_loss=0.09866, over 19704.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.2948, pruned_loss=0.06262, ctc_loss=0.1179, over 1499610.03 frames. ], batch size: 51, lr: 1.69e-02, grad_scale: 32.0
+2024-08-26 18:30:44,241 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=106741.33333333333, ans=0.0
+2024-08-26 18:30:50,949 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.69 vs. limit=15.0
+2024-08-26 18:31:02,333 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.216e+02 1.554e+02 1.735e+02 2.126e+02 3.416e+02, threshold=3.470e+02, percent-clipped=0.0
+2024-08-26 18:31:03,399 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=106848.0, ans=0.125
+2024-08-26 18:31:05,267 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=106848.0, ans=0.0
+2024-08-26 18:31:07,341 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.59 vs. limit=15.0
+2024-08-26 18:31:10,039 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=106901.33333333333, ans=0.125
+2024-08-26 18:31:22,078 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=106954.66666666667, ans=0.0
+2024-08-26 18:31:28,289 INFO [train.py:1114] (2/4) Epoch 9, batch 150, loss[loss=0.2109, simple_loss=0.2711, pruned_loss=0.0555, ctc_loss=0.09937, over 19714.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.2918, pruned_loss=0.06137, ctc_loss=0.1151, over 2027682.09 frames. ], batch size: 47, lr: 1.69e-02, grad_scale: 16.0
+2024-08-26 18:31:37,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=107061.33333333333, ans=0.0
+2024-08-26 18:31:46,024 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107114.66666666667, ans=0.125
+2024-08-26 18:31:46,213 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.74 vs. limit=6.0
+2024-08-26 18:31:49,864 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.21 vs. limit=10.0
+2024-08-26 18:32:06,206 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107221.33333333333, ans=0.125
+2024-08-26 18:32:11,979 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.62 vs. limit=12.0
+2024-08-26 18:32:14,113 INFO [train.py:1114] (2/4) Epoch 9, batch 200, loss[loss=0.2567, simple_loss=0.3137, pruned_loss=0.0725, ctc_loss=0.1368, over 18323.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.2894, pruned_loss=0.06052, ctc_loss=0.1135, over 2435014.65 frames. ], batch size: 85, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:32:18,090 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.53 vs. limit=15.0
+2024-08-26 18:32:24,475 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.84 vs. limit=15.0
+2024-08-26 18:32:30,515 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=107328.0, ans=0.2
+2024-08-26 18:32:35,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=107381.33333333333, ans=0.125
+2024-08-26 18:32:36,039 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.227e+02 1.442e+02 1.571e+02 1.787e+02 2.800e+02, threshold=3.143e+02, percent-clipped=0.0
+2024-08-26 18:32:36,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=107381.33333333333, ans=0.04949747468305833
+2024-08-26 18:32:42,734 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=107434.66666666667, ans=0.125
+2024-08-26 18:32:45,411 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=107434.66666666667, ans=0.0
+2024-08-26 18:32:46,456 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=107434.66666666667, ans=0.0
+2024-08-26 18:32:53,157 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.46 vs. limit=22.5
+2024-08-26 18:32:55,720 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:32:56,994 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.53 vs. limit=15.0
+2024-08-26 18:33:01,997 INFO [train.py:1114] (2/4) Epoch 9, batch 250, loss[loss=0.2245, simple_loss=0.2931, pruned_loss=0.05614, ctc_loss=0.1092, over 19373.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.2883, pruned_loss=0.05996, ctc_loss=0.1125, over 2755550.65 frames. ], batch size: 67, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:33:08,637 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=107541.33333333333, ans=0.125
+2024-08-26 18:33:57,488 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=107754.66666666667, ans=0.0
+2024-08-26 18:34:01,005 INFO [train.py:1114] (2/4) Epoch 9, batch 300, loss[loss=0.2378, simple_loss=0.3005, pruned_loss=0.06398, ctc_loss=0.1176, over 19520.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.2876, pruned_loss=0.05955, ctc_loss=0.1117, over 2999755.96 frames. ], batch size: 61, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:34:05,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=107808.0, ans=0.0
+2024-08-26 18:34:14,825 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=107861.33333333333, ans=10.0
+2024-08-26 18:34:22,823 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=107914.66666666667, ans=0.1
+2024-08-26 18:34:24,466 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.264e+02 1.498e+02 1.681e+02 1.999e+02 2.633e+02, threshold=3.363e+02, percent-clipped=0.0
+2024-08-26 18:34:24,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=107914.66666666667, ans=0.125
+2024-08-26 18:34:33,524 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.89 vs. limit=22.5
+2024-08-26 18:34:37,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-26 18:34:38,766 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=107968.0, ans=0.0
+2024-08-26 18:34:39,618 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=107968.0, ans=0.125
+2024-08-26 18:34:42,447 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=108021.33333333333, ans=0.2
+2024-08-26 18:34:48,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=108021.33333333333, ans=0.125
+2024-08-26 18:34:50,539 INFO [train.py:1114] (2/4) Epoch 9, batch 350, loss[loss=0.1929, simple_loss=0.2589, pruned_loss=0.04598, ctc_loss=0.08719, over 19763.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.2884, pruned_loss=0.06005, ctc_loss=0.1123, over 3188993.13 frames. ], batch size: 48, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:35:03,738 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.91 vs. limit=6.0
+2024-08-26 18:35:09,977 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2.whitening_limit, batch_count=108128.0, ans=15.0
+2024-08-26 18:35:36,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=108288.0, ans=0.0
+2024-08-26 18:35:38,060 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=108288.0, ans=0.025
+2024-08-26 18:35:40,765 INFO [train.py:1114] (2/4) Epoch 9, batch 400, loss[loss=0.211, simple_loss=0.2843, pruned_loss=0.04966, ctc_loss=0.09563, over 19495.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2873, pruned_loss=0.05935, ctc_loss=0.1111, over 3340930.96 frames. ], batch size: 54, lr: 1.68e-02, grad_scale: 16.0
+2024-08-26 18:35:44,691 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108341.33333333333, ans=0.1
+2024-08-26 18:35:46,622 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108341.33333333333, ans=0.1
+2024-08-26 18:35:55,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=108394.66666666667, ans=0.0
+2024-08-26 18:36:02,028 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 1.489e+02 1.712e+02 1.995e+02 4.778e+02, threshold=3.424e+02, percent-clipped=1.0
+2024-08-26 18:36:32,711 INFO [train.py:1114] (2/4) Epoch 9, batch 450, loss[loss=0.2306, simple_loss=0.2982, pruned_loss=0.05953, ctc_loss=0.11, over 19606.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.288, pruned_loss=0.05975, ctc_loss=0.1117, over 3448148.97 frames. ], batch size: 55, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:36:36,697 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=108608.0, ans=0.125
+2024-08-26 18:36:37,573 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=108608.0, ans=0.125
+2024-08-26 18:36:49,566 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.27 vs. limit=12.0
+2024-08-26 18:36:56,785 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=108714.66666666667, ans=0.125
+2024-08-26 18:36:58,648 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:37:01,288 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108714.66666666667, ans=0.125
+2024-08-26 18:37:19,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108821.33333333333, ans=0.1
+2024-08-26 18:37:20,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108821.33333333333, ans=0.125
+2024-08-26 18:37:20,836 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=108874.66666666667, ans=0.125
+2024-08-26 18:37:21,537 INFO [train.py:1114] (2/4) Epoch 9, batch 500, loss[loss=0.2439, simple_loss=0.3027, pruned_loss=0.06633, ctc_loss=0.1313, over 19674.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2867, pruned_loss=0.0592, ctc_loss=0.1107, over 3544506.49 frames. ], batch size: 63, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:37:24,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=108874.66666666667, ans=0.95
+2024-08-26 18:37:35,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=108928.0, ans=0.0
+2024-08-26 18:37:42,873 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.242e+02 1.480e+02 1.660e+02 1.957e+02 3.087e+02, threshold=3.320e+02, percent-clipped=0.0
+2024-08-26 18:37:45,171 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:37:48,804 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109034.66666666667, ans=0.1
+2024-08-26 18:37:54,040 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=9.66 vs. limit=15.0
+2024-08-26 18:37:59,920 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=109088.0, ans=0.0
+2024-08-26 18:38:06,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=109088.0, ans=0.125
+2024-08-26 18:38:07,952 INFO [train.py:1114] (2/4) Epoch 9, batch 550, loss[loss=0.2594, simple_loss=0.318, pruned_loss=0.07339, ctc_loss=0.1352, over 19227.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2867, pruned_loss=0.05919, ctc_loss=0.1108, over 3606222.95 frames. ], batch size: 71, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:38:24,853 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:38:25,939 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.42 vs. limit=12.0
+2024-08-26 18:38:43,990 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=109301.33333333333, ans=0.125
+2024-08-26 18:38:47,592 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=109354.66666666667, ans=10.0
+2024-08-26 18:38:51,392 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=109354.66666666667, ans=0.0
+2024-08-26 18:38:55,931 INFO [train.py:1114] (2/4) Epoch 9, batch 600, loss[loss=0.2494, simple_loss=0.3019, pruned_loss=0.07221, ctc_loss=0.1312, over 19389.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2872, pruned_loss=0.05932, ctc_loss=0.1109, over 3663539.65 frames. ], batch size: 67, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:39:15,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=109461.33333333333, ans=0.0
+2024-08-26 18:39:21,961 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 1.496e+02 1.658e+02 1.980e+02 4.382e+02, threshold=3.316e+02, percent-clipped=1.0
+2024-08-26 18:39:28,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=109514.66666666667, ans=0.0
+2024-08-26 18:39:28,876 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=5.29 vs. limit=12.0
+2024-08-26 18:39:30,395 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.65 vs. limit=15.0
+2024-08-26 18:39:32,934 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=109568.0, ans=0.125
+2024-08-26 18:39:34,813 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109568.0, ans=0.1
+2024-08-26 18:39:35,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=109568.0, ans=0.0
+2024-08-26 18:39:43,481 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.88 vs. limit=15.0
+2024-08-26 18:39:49,353 INFO [train.py:1114] (2/4) Epoch 9, batch 650, loss[loss=0.2191, simple_loss=0.2835, pruned_loss=0.05618, ctc_loss=0.1061, over 19766.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.2861, pruned_loss=0.05888, ctc_loss=0.1099, over 3713816.89 frames. ], batch size: 54, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:39:51,373 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=109674.66666666667, ans=0.2
+2024-08-26 18:40:13,351 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=109781.33333333333, ans=0.0
+2024-08-26 18:40:30,605 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.92 vs. limit=15.0
+2024-08-26 18:40:40,265 INFO [train.py:1114] (2/4) Epoch 9, batch 700, loss[loss=0.1934, simple_loss=0.2631, pruned_loss=0.04407, ctc_loss=0.08915, over 19724.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2862, pruned_loss=0.05873, ctc_loss=0.1098, over 3746667.85 frames. ], batch size: 51, lr: 1.67e-02, grad_scale: 16.0
+2024-08-26 18:40:41,513 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=109941.33333333333, ans=0.125
+2024-08-26 18:40:51,987 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.66 vs. limit=15.0
+2024-08-26 18:41:01,801 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.271e+02 1.503e+02 1.748e+02 2.321e+02 3.813e+02, threshold=3.497e+02, percent-clipped=1.0
+2024-08-26 18:41:02,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=110048.0, ans=0.125
+2024-08-26 18:41:05,977 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.74 vs. limit=22.5
+2024-08-26 18:41:21,883 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.70 vs. limit=6.0
+2024-08-26 18:41:22,454 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=110154.66666666667, ans=0.0
+2024-08-26 18:41:27,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110208.0, ans=0.1
+2024-08-26 18:41:28,310 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.94 vs. limit=10.0
+2024-08-26 18:41:28,637 INFO [train.py:1114] (2/4) Epoch 9, batch 750, loss[loss=0.2263, simple_loss=0.2941, pruned_loss=0.05805, ctc_loss=0.106, over 19493.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.2861, pruned_loss=0.05861, ctc_loss=0.1098, over 3772977.45 frames. ], batch size: 54, lr: 1.66e-02, grad_scale: 16.0
+2024-08-26 18:41:30,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110208.0, ans=0.1
+2024-08-26 18:41:37,177 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=110261.33333333333, ans=0.07
+2024-08-26 18:41:38,942 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=110261.33333333333, ans=0.0
+2024-08-26 18:41:41,687 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=110261.33333333333, ans=0.0
+2024-08-26 18:42:11,710 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=110421.33333333333, ans=0.125
+2024-08-26 18:42:22,145 INFO [train.py:1114] (2/4) Epoch 9, batch 800, loss[loss=0.209, simple_loss=0.2646, pruned_loss=0.05616, ctc_loss=0.1027, over 19792.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2862, pruned_loss=0.05891, ctc_loss=0.1104, over 3794489.35 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:42:23,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=110474.66666666667, ans=0.2
+2024-08-26 18:42:43,910 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 1.427e+02 1.539e+02 1.792e+02 3.382e+02, threshold=3.078e+02, percent-clipped=0.0
+2024-08-26 18:42:45,883 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=110581.33333333333, ans=0.015
+2024-08-26 18:42:50,087 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.07 vs. limit=15.0
+2024-08-26 18:43:09,188 INFO [train.py:1114] (2/4) Epoch 9, batch 850, loss[loss=0.2293, simple_loss=0.2992, pruned_loss=0.05744, ctc_loss=0.1113, over 19666.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2858, pruned_loss=0.05879, ctc_loss=0.1101, over 3814034.66 frames. ], batch size: 59, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:43:11,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=110741.33333333333, ans=0.125
+2024-08-26 18:43:23,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=110794.66666666667, ans=0.025
+2024-08-26 18:43:24,137 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110794.66666666667, ans=0.1
+2024-08-26 18:43:34,049 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=110848.0, ans=0.125
+2024-08-26 18:43:42,584 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=110901.33333333333, ans=0.0
+2024-08-26 18:43:55,671 INFO [train.py:1114] (2/4) Epoch 9, batch 900, loss[loss=0.2082, simple_loss=0.2703, pruned_loss=0.05398, ctc_loss=0.09544, over 19414.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2867, pruned_loss=0.05956, ctc_loss=0.111, over 3818465.82 frames. ], batch size: 48, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:45:38,154 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.519e+02 1.752e+02 2.077e+02 5.433e+02, threshold=3.505e+02, percent-clipped=5.0
+2024-08-26 18:46:05,601 INFO [train.py:1114] (2/4) Epoch 9, batch 950, loss[loss=0.1896, simple_loss=0.2587, pruned_loss=0.04351, ctc_loss=0.08387, over 19493.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2867, pruned_loss=0.05954, ctc_loss=0.1111, over 3821199.31 frames. ], batch size: 49, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:46:19,487 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=111328.0, ans=0.0
+2024-08-26 18:46:20,510 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=111328.0, ans=0.125
+2024-08-26 18:46:25,998 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111328.0, ans=0.1
+2024-08-26 18:46:36,359 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=111381.33333333333, ans=0.0
+2024-08-26 18:46:37,283 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=111381.33333333333, ans=0.05
+2024-08-26 18:46:54,502 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.91 vs. limit=8.0
+2024-08-26 18:46:57,419 INFO [train.py:1114] (2/4) Epoch 9, batch 1000, loss[loss=0.1981, simple_loss=0.2748, pruned_loss=0.04341, ctc_loss=0.08628, over 19842.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2877, pruned_loss=0.06002, ctc_loss=0.1119, over 3816943.94 frames. ], batch size: 52, lr: 1.66e-02, grad_scale: 32.0
+2024-08-26 18:47:19,853 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.174e+02 1.461e+02 1.756e+02 2.077e+02 6.803e+02, threshold=3.513e+02, percent-clipped=1.0
+2024-08-26 18:47:25,685 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=111701.33333333333, ans=0.125
+2024-08-26 18:47:43,912 INFO [train.py:1114] (2/4) Epoch 9, batch 1050, loss[loss=0.2112, simple_loss=0.2829, pruned_loss=0.04983, ctc_loss=0.09975, over 19836.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2867, pruned_loss=0.05976, ctc_loss=0.1113, over 3823969.67 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:48:01,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=111914.66666666667, ans=0.0
+2024-08-26 18:48:02,012 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.64 vs. limit=12.0
+2024-08-26 18:48:02,883 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.46 vs. limit=10.0
+2024-08-26 18:48:23,346 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=112021.33333333333, ans=0.125
+2024-08-26 18:48:32,532 INFO [train.py:1114] (2/4) Epoch 9, batch 1100, loss[loss=0.218, simple_loss=0.2874, pruned_loss=0.05432, ctc_loss=0.1001, over 19581.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2865, pruned_loss=0.05948, ctc_loss=0.1109, over 3831415.43 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:48:40,182 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=112074.66666666667, ans=0.0
+2024-08-26 18:48:41,010 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=112128.0, ans=0.0
+2024-08-26 18:48:59,878 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.444e+02 1.690e+02 2.009e+02 4.396e+02, threshold=3.380e+02, percent-clipped=1.0
+2024-08-26 18:49:03,607 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=112181.33333333333, ans=0.0
+2024-08-26 18:49:53,115 INFO [train.py:1114] (2/4) Epoch 9, batch 1150, loss[loss=0.2111, simple_loss=0.2819, pruned_loss=0.05116, ctc_loss=0.09516, over 19579.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2865, pruned_loss=0.05946, ctc_loss=0.1109, over 3830568.01 frames. ], batch size: 52, lr: 1.65e-02, grad_scale: 16.0
+2024-08-26 18:49:57,366 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=112341.33333333333, ans=0.1
+2024-08-26 18:49:59,543 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.90 vs. limit=22.5
+2024-08-26 18:50:04,247 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=112341.33333333333, ans=0.125
+2024-08-26 18:50:07,410 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.06 vs. limit=12.0
+2024-08-26 18:50:19,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=112394.66666666667, ans=0.0
+2024-08-26 18:50:26,284 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.68 vs. limit=15.0
+2024-08-26 18:50:35,538 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=112501.33333333333, ans=0.125
+2024-08-26 18:50:46,489 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=112554.66666666667, ans=0.1
+2024-08-26 18:50:54,149 INFO [train.py:1114] (2/4) Epoch 9, batch 1200, loss[loss=0.2335, simple_loss=0.2979, pruned_loss=0.0616, ctc_loss=0.1146, over 19858.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.2876, pruned_loss=0.05974, ctc_loss=0.1117, over 3825514.24 frames. ], batch size: 57, lr: 1.65e-02, grad_scale: 32.0
+2024-08-26 18:50:58,450 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=112608.0, ans=22.5
+2024-08-26 18:51:05,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=112661.33333333333, ans=0.1
+2024-08-26 18:51:11,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=112661.33333333333, ans=0.09899494936611666
+2024-08-26 18:51:15,311 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=112714.66666666667, ans=0.125
+2024-08-26 18:51:16,818 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.189e+02 1.431e+02 1.600e+02 1.807e+02 3.201e+02, threshold=3.201e+02, percent-clipped=0.0
+2024-08-26 18:51:29,127 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=112768.0, ans=0.2
+2024-08-26 18:51:31,985 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=112821.33333333333, ans=0.125
+2024-08-26 18:51:38,674 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.78 vs. limit=15.0
+2024-08-26 18:51:39,462 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=112821.33333333333, ans=0.125
+2024-08-26 18:51:42,797 INFO [train.py:1114] (2/4) Epoch 9, batch 1250, loss[loss=0.2489, simple_loss=0.3129, pruned_loss=0.06661, ctc_loss=0.1293, over 19524.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2877, pruned_loss=0.05963, ctc_loss=0.1115, over 3842813.32 frames. ], batch size: 61, lr: 1.65e-02, grad_scale: 32.0
+2024-08-26 18:51:43,967 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=112874.66666666667, ans=0.125
+2024-08-26 18:51:47,580 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=112874.66666666667, ans=0.0
+2024-08-26 18:52:23,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=113088.0, ans=0.125
+2024-08-26 18:52:25,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=113088.0, ans=0.125
+2024-08-26 18:52:28,706 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.83 vs. limit=15.0
+2024-08-26 18:52:30,281 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=113088.0, ans=0.2
+2024-08-26 18:52:36,297 INFO [train.py:1114] (2/4) Epoch 9, batch 1300, loss[loss=0.2324, simple_loss=0.2998, pruned_loss=0.05994, ctc_loss=0.1128, over 18940.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.2869, pruned_loss=0.05909, ctc_loss=0.1106, over 3845203.26 frames. ], batch size: 76, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:52:43,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113141.33333333333, ans=0.1
+2024-08-26 18:52:58,748 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.162e+02 1.498e+02 1.743e+02 2.034e+02 3.430e+02, threshold=3.487e+02, percent-clipped=2.0
+2024-08-26 18:53:09,342 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=113301.33333333333, ans=0.125
+2024-08-26 18:53:15,898 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=113354.66666666667, ans=0.1
+2024-08-26 18:53:17,181 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=6.38 vs. limit=15.0
+2024-08-26 18:53:23,263 INFO [train.py:1114] (2/4) Epoch 9, batch 1350, loss[loss=0.2263, simple_loss=0.2842, pruned_loss=0.06196, ctc_loss=0.1113, over 19767.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2865, pruned_loss=0.05864, ctc_loss=0.1097, over 3856385.13 frames. ], batch size: 54, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:53:29,863 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=113408.0, ans=0.125
+2024-08-26 18:53:41,813 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.26 vs. limit=15.0
+2024-08-26 18:53:42,293 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=113514.66666666667, ans=0.125
+2024-08-26 18:53:45,979 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=113514.66666666667, ans=0.125
+2024-08-26 18:53:48,798 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 18:53:48,886 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113514.66666666667, ans=0.125
+2024-08-26 18:54:04,676 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=113621.33333333333, ans=0.2
+2024-08-26 18:54:09,874 INFO [train.py:1114] (2/4) Epoch 9, batch 1400, loss[loss=0.1919, simple_loss=0.252, pruned_loss=0.04834, ctc_loss=0.08782, over 19642.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2861, pruned_loss=0.0587, ctc_loss=0.1099, over 3863319.77 frames. ], batch size: 46, lr: 1.64e-02, grad_scale: 32.0
+2024-08-26 18:54:22,882 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=113728.0, ans=0.125
+2024-08-26 18:54:24,222 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.08 vs. limit=22.5
+2024-08-26 18:54:29,729 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=113781.33333333333, ans=0.125
+2024-08-26 18:54:30,644 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=113781.33333333333, ans=0.2
+2024-08-26 18:54:33,075 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.168e+02 1.492e+02 1.644e+02 1.948e+02 2.802e+02, threshold=3.287e+02, percent-clipped=0.0
+2024-08-26 18:54:50,850 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=113888.0, ans=0.125
+2024-08-26 18:54:59,238 INFO [train.py:1114] (2/4) Epoch 9, batch 1450, loss[loss=0.2409, simple_loss=0.3072, pruned_loss=0.06377, ctc_loss=0.1179, over 19659.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.2868, pruned_loss=0.05905, ctc_loss=0.1105, over 3861359.41 frames. ], batch size: 63, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:55:17,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=113994.66666666667, ans=0.0
+2024-08-26 18:55:20,176 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=113994.66666666667, ans=10.0
+2024-08-26 18:55:25,765 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=114048.0, ans=0.125
+2024-08-26 18:55:35,002 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.28 vs. limit=15.0
+2024-08-26 18:55:38,693 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=114101.33333333333, ans=0.125
+2024-08-26 18:55:50,969 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=114154.66666666667, ans=0.125
+2024-08-26 18:55:54,505 INFO [train.py:1114] (2/4) Epoch 9, batch 1500, loss[loss=0.2188, simple_loss=0.2918, pruned_loss=0.05235, ctc_loss=0.1027, over 19598.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.2874, pruned_loss=0.05928, ctc_loss=0.111, over 3861063.45 frames. ], batch size: 57, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:56:06,320 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=114261.33333333333, ans=0.2
+2024-08-26 18:56:18,310 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.542e+02 1.688e+02 1.884e+02 2.711e+02, threshold=3.377e+02, percent-clipped=0.0
+2024-08-26 18:56:26,165 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=114368.0, ans=0.025
+2024-08-26 18:56:38,799 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=114421.33333333333, ans=0.125
+2024-08-26 18:56:41,343 INFO [train.py:1114] (2/4) Epoch 9, batch 1550, loss[loss=0.2447, simple_loss=0.2997, pruned_loss=0.06984, ctc_loss=0.1251, over 19612.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.2876, pruned_loss=0.05965, ctc_loss=0.1118, over 3845360.99 frames. ], batch size: 60, lr: 1.64e-02, grad_scale: 16.0
+2024-08-26 18:56:43,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=114474.66666666667, ans=0.0
+2024-08-26 18:56:51,258 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.92 vs. limit=15.0
+2024-08-26 18:56:51,807 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=114528.0, ans=0.1
+2024-08-26 18:57:07,493 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=114581.33333333333, ans=0.1
+2024-08-26 18:57:29,654 INFO [train.py:1114] (2/4) Epoch 9, batch 1600, loss[loss=0.2431, simple_loss=0.306, pruned_loss=0.06678, ctc_loss=0.1164, over 19833.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.2873, pruned_loss=0.0596, ctc_loss=0.1117, over 3835886.41 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 32.0
+2024-08-26 18:57:39,184 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=114794.66666666667, ans=0.0
+2024-08-26 18:57:40,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=114794.66666666667, ans=0.125
+2024-08-26 18:57:57,610 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.291e+02 1.549e+02 1.720e+02 1.979e+02 3.573e+02, threshold=3.441e+02, percent-clipped=1.0
+2024-08-26 18:57:59,769 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff2.min_abs, batch_count=114848.0, ans=0.1
+2024-08-26 18:58:08,559 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=114901.33333333333, ans=0.125
+2024-08-26 18:58:08,907 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.21 vs. limit=15.0
+2024-08-26 18:58:14,947 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=114954.66666666667, ans=0.1
+2024-08-26 18:58:36,368 INFO [train.py:1114] (2/4) Epoch 9, batch 1650, loss[loss=0.2242, simple_loss=0.2918, pruned_loss=0.05677, ctc_loss=0.1075, over 19659.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2872, pruned_loss=0.05952, ctc_loss=0.1114, over 3832639.29 frames. ], batch size: 59, lr: 1.63e-02, grad_scale: 32.0
+2024-08-26 18:58:42,148 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115008.0, ans=0.1
+2024-08-26 18:59:43,674 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=115114.66666666667, ans=0.2
+2024-08-26 18:59:54,961 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=115168.0, ans=0.2
+2024-08-26 19:00:00,615 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=115168.0, ans=0.025
+2024-08-26 19:00:06,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=115221.33333333333, ans=0.1
+2024-08-26 19:00:11,416 INFO [train.py:1114] (2/4) Epoch 9, batch 1700, loss[loss=0.1862, simple_loss=0.2492, pruned_loss=0.04451, ctc_loss=0.08574, over 19674.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.2865, pruned_loss=0.05888, ctc_loss=0.1103, over 3847087.37 frames. ], batch size: 46, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:00:34,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=115381.33333333333, ans=0.0
+2024-08-26 19:00:34,653 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.254e+02 1.433e+02 1.619e+02 1.844e+02 2.581e+02, threshold=3.239e+02, percent-clipped=0.0
+2024-08-26 19:00:34,950 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=115381.33333333333, ans=0.95
+2024-08-26 19:00:45,706 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=115434.66666666667, ans=0.2
+2024-08-26 19:00:49,244 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:00:56,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-26 19:00:56,878 INFO [train.py:1114] (2/4) Epoch 9, batch 1750, loss[loss=0.1891, simple_loss=0.2522, pruned_loss=0.04485, ctc_loss=0.09082, over 19680.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2855, pruned_loss=0.05824, ctc_loss=0.1092, over 3851166.61 frames. ], batch size: 45, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:00:59,859 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=115541.33333333333, ans=0.125
+2024-08-26 19:01:10,491 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=115594.66666666667, ans=0.0
+2024-08-26 19:01:10,798 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.42 vs. limit=15.0
+2024-08-26 19:01:30,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=115701.33333333333, ans=0.025
+2024-08-26 19:01:38,206 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=115754.66666666667, ans=0.125
+2024-08-26 19:01:41,154 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=9.99 vs. limit=22.5
+2024-08-26 19:01:43,102 INFO [train.py:1114] (2/4) Epoch 9, batch 1800, loss[loss=0.2454, simple_loss=0.3056, pruned_loss=0.06812, ctc_loss=0.1223, over 19621.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.2858, pruned_loss=0.05849, ctc_loss=0.1096, over 3852683.02 frames. ], batch size: 55, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:01:44,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=115808.0, ans=0.0
+2024-08-26 19:02:02,692 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.45 vs. limit=15.0
+2024-08-26 19:02:06,013 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.188e+02 1.500e+02 1.645e+02 1.953e+02 3.789e+02, threshold=3.290e+02, percent-clipped=1.0
+2024-08-26 19:02:06,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=115914.66666666667, ans=0.125
+2024-08-26 19:02:18,141 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.57 vs. limit=15.0
+2024-08-26 19:02:27,282 INFO [train.py:1114] (2/4) Epoch 9, batch 1850, loss[loss=0.2368, simple_loss=0.2954, pruned_loss=0.06405, ctc_loss=0.1252, over 19592.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.285, pruned_loss=0.05809, ctc_loss=0.1087, over 3857004.18 frames. ], batch size: 57, lr: 1.63e-02, grad_scale: 16.0
+2024-08-26 19:02:28,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=116074.66666666667, ans=0.125
+2024-08-26 19:02:31,408 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.45 vs. limit=12.0
+2024-08-26 19:03:09,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=116288.0, ans=0.0
+2024-08-26 19:03:13,220 INFO [train.py:1114] (2/4) Epoch 9, batch 1900, loss[loss=0.2207, simple_loss=0.2953, pruned_loss=0.053, ctc_loss=0.1003, over 19680.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.2859, pruned_loss=0.05819, ctc_loss=0.1088, over 3861462.69 frames. ], batch size: 59, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:03:16,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=116341.33333333333, ans=0.125
+2024-08-26 19:03:22,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=116394.66666666667, ans=0.0
+2024-08-26 19:03:25,028 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.97 vs. limit=22.5
+2024-08-26 19:03:28,651 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.whiten.whitening_limit, batch_count=116394.66666666667, ans=12.0
+2024-08-26 19:03:33,525 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=116448.0, ans=0.1
+2024-08-26 19:03:35,869 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.270e+02 1.509e+02 1.695e+02 1.935e+02 3.320e+02, threshold=3.390e+02, percent-clipped=1.0
+2024-08-26 19:03:49,014 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=116554.66666666667, ans=0.125
+2024-08-26 19:03:56,677 INFO [train.py:1114] (2/4) Epoch 9, batch 1950, loss[loss=0.2159, simple_loss=0.2821, pruned_loss=0.05453, ctc_loss=0.1015, over 19584.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.287, pruned_loss=0.05846, ctc_loss=0.1093, over 3869958.39 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:04:09,993 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=116661.33333333333, ans=0.125
+2024-08-26 19:04:13,534 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:04:17,170 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.21 vs. limit=15.0
+2024-08-26 19:04:19,372 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=116714.66666666667, ans=0.1
+2024-08-26 19:04:30,731 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=116768.0, ans=0.1
+2024-08-26 19:04:31,686 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=116768.0, ans=0.0
+2024-08-26 19:04:37,004 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=116821.33333333333, ans=0.025
+2024-08-26 19:04:45,340 INFO [train.py:1114] (2/4) Epoch 9, batch 2000, loss[loss=0.1976, simple_loss=0.2566, pruned_loss=0.05136, ctc_loss=0.08973, over 19662.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.2877, pruned_loss=0.05896, ctc_loss=0.1103, over 3854092.86 frames. ], batch size: 45, lr: 1.62e-02, grad_scale: 32.0
+2024-08-26 19:04:48,452 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=116874.66666666667, ans=0.125
+2024-08-26 19:04:49,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=116874.66666666667, ans=0.125
+2024-08-26 19:04:57,031 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=116928.0, ans=0.125
+2024-08-26 19:05:02,349 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=116981.33333333333, ans=0.05
+2024-08-26 19:05:09,037 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.202e+02 1.518e+02 1.711e+02 1.998e+02 4.316e+02, threshold=3.422e+02, percent-clipped=2.0
+2024-08-26 19:05:10,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=116981.33333333333, ans=0.0
+2024-08-26 19:05:10,979 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=117034.66666666667, ans=0.125
+2024-08-26 19:05:11,972 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=117034.66666666667, ans=0.1
+2024-08-26 19:05:15,444 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=117034.66666666667, ans=0.5
+2024-08-26 19:05:20,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=117088.0, ans=0.2
+2024-08-26 19:05:29,284 INFO [train.py:1114] (2/4) Epoch 9, batch 2050, loss[loss=0.1922, simple_loss=0.2526, pruned_loss=0.04803, ctc_loss=0.08959, over 19713.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.2864, pruned_loss=0.05861, ctc_loss=0.1096, over 3849627.61 frames. ], batch size: 47, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:05:32,169 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=117141.33333333333, ans=0.0
+2024-08-26 19:05:50,549 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=117248.0, ans=0.05
+2024-08-26 19:05:51,243 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=117248.0, ans=0.1
+2024-08-26 19:05:51,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=117248.0, ans=0.0
+2024-08-26 19:05:53,932 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=117248.0, ans=0.125
+2024-08-26 19:06:10,004 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.29 vs. limit=15.0
+2024-08-26 19:06:12,998 INFO [train.py:1114] (2/4) Epoch 9, batch 2100, loss[loss=0.1894, simple_loss=0.2591, pruned_loss=0.04277, ctc_loss=0.08539, over 19747.00 frames. ], tot_loss[loss=0.222, simple_loss=0.285, pruned_loss=0.05783, ctc_loss=0.1084, over 3858525.45 frames. ], batch size: 54, lr: 1.62e-02, grad_scale: 16.0
+2024-08-26 19:06:19,067 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=117408.0, ans=0.125
+2024-08-26 19:06:36,662 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.287e+02 1.488e+02 1.695e+02 1.945e+02 3.088e+02, threshold=3.391e+02, percent-clipped=0.0
+2024-08-26 19:06:40,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=117568.0, ans=0.0
+2024-08-26 19:06:46,193 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=117621.33333333333, ans=0.05
+2024-08-26 19:06:55,560 INFO [train.py:1114] (2/4) Epoch 9, batch 2150, loss[loss=0.204, simple_loss=0.2656, pruned_loss=0.05074, ctc_loss=0.1026, over 19882.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2841, pruned_loss=0.05749, ctc_loss=0.1075, over 3869334.64 frames. ], batch size: 52, lr: 1.62e-02, grad_scale: 8.0
+2024-08-26 19:07:02,760 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=117674.66666666667, ans=0.2
+2024-08-26 19:07:04,591 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.15 vs. limit=6.0
+2024-08-26 19:07:06,100 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=117728.0, ans=0.09899494936611666
+2024-08-26 19:07:08,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=117728.0, ans=0.1
+2024-08-26 19:07:15,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=117781.33333333333, ans=0.125
+2024-08-26 19:07:15,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_na.min_abs, batch_count=117781.33333333333, ans=0.02
+2024-08-26 19:07:26,227 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=117834.66666666667, ans=0.125
+2024-08-26 19:07:38,969 INFO [train.py:1114] (2/4) Epoch 9, batch 2200, loss[loss=0.2252, simple_loss=0.2975, pruned_loss=0.05466, ctc_loss=0.109, over 19586.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2841, pruned_loss=0.05722, ctc_loss=0.1072, over 3867498.81 frames. ], batch size: 57, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:07:39,136 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=117941.33333333333, ans=0.2
+2024-08-26 19:07:41,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=117941.33333333333, ans=0.07
+2024-08-26 19:07:46,186 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=117941.33333333333, ans=0.2
+2024-08-26 19:07:58,913 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=118048.0, ans=0.0
+2024-08-26 19:07:58,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=118048.0, ans=0.125
+2024-08-26 19:08:01,878 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.78 vs. limit=10.0
+2024-08-26 19:08:03,129 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.284e+02 1.528e+02 1.792e+02 2.132e+02 3.306e+02, threshold=3.583e+02, percent-clipped=0.0
+2024-08-26 19:08:34,323 INFO [train.py:1114] (2/4) Epoch 9, batch 2250, loss[loss=0.2374, simple_loss=0.2959, pruned_loss=0.06484, ctc_loss=0.1231, over 19599.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2846, pruned_loss=0.05773, ctc_loss=0.108, over 3866850.68 frames. ], batch size: 55, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:08:34,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=118208.0, ans=0.125
+2024-08-26 19:08:35,721 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.60 vs. limit=15.0
+2024-08-26 19:08:57,351 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.36 vs. limit=12.0
+2024-08-26 19:09:00,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=118368.0, ans=0.125
+2024-08-26 19:09:05,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=118368.0, ans=0.125
+2024-08-26 19:09:17,827 INFO [train.py:1114] (2/4) Epoch 9, batch 2300, loss[loss=0.2157, simple_loss=0.2694, pruned_loss=0.05961, ctc_loss=0.1069, over 19500.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2844, pruned_loss=0.05816, ctc_loss=0.1087, over 3860346.86 frames. ], batch size: 49, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:09:23,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=118474.66666666667, ans=0.125
+2024-08-26 19:09:42,037 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 1.479e+02 1.669e+02 2.317e+02 3.988e+02, threshold=3.338e+02, percent-clipped=3.0
+2024-08-26 19:09:48,332 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.89 vs. limit=12.0
+2024-08-26 19:09:50,068 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=6.59 vs. limit=15.0
+2024-08-26 19:10:01,368 INFO [train.py:1114] (2/4) Epoch 9, batch 2350, loss[loss=0.234, simple_loss=0.3016, pruned_loss=0.0611, ctc_loss=0.1103, over 19656.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2843, pruned_loss=0.05805, ctc_loss=0.1083, over 3863287.06 frames. ], batch size: 63, lr: 1.61e-02, grad_scale: 8.0
+2024-08-26 19:10:15,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=118794.66666666667, ans=0.125
+2024-08-26 19:11:06,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=118848.0, ans=0.035
+2024-08-26 19:11:06,178 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=118848.0, ans=0.125
+2024-08-26 19:11:19,344 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.53 vs. limit=15.0
+2024-08-26 19:11:22,508 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=118954.66666666667, ans=0.0
+2024-08-26 19:11:23,343 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=118954.66666666667, ans=0.2
+2024-08-26 19:11:32,738 INFO [train.py:1114] (2/4) Epoch 9, batch 2400, loss[loss=0.2407, simple_loss=0.2991, pruned_loss=0.06577, ctc_loss=0.127, over 19295.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.2872, pruned_loss=0.0592, ctc_loss=0.1103, over 3857793.22 frames. ], batch size: 71, lr: 1.61e-02, grad_scale: 16.0
+2024-08-26 19:11:50,085 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=119061.33333333333, ans=0.04949747468305833
+2024-08-26 19:12:04,703 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.280e+02 1.526e+02 1.714e+02 1.892e+02 3.175e+02, threshold=3.427e+02, percent-clipped=0.0
+2024-08-26 19:12:07,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=119168.0, ans=0.125
+2024-08-26 19:12:09,494 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=119168.0, ans=0.125
+2024-08-26 19:12:09,569 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=119168.0, ans=0.05
+2024-08-26 19:12:11,644 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.02 vs. limit=15.0
+2024-08-26 19:12:24,777 INFO [train.py:1114] (2/4) Epoch 9, batch 2450, loss[loss=0.2903, simple_loss=0.3193, pruned_loss=0.09439, ctc_loss=0.181, over 13407.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.2918, pruned_loss=0.06282, ctc_loss=0.1173, over 3728202.43 frames. ], batch size: 141, lr: 1.61e-02, grad_scale: 16.0
+2024-08-26 19:12:37,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=119328.0, ans=0.2
+2024-08-26 19:12:40,826 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=119328.0, ans=0.0
+2024-08-26 19:12:49,089 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=14.46 vs. limit=15.0
+2024-08-26 19:13:12,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn1.whiten.whitening_limit, batch_count=119434.66666666667, ans=22.5
+2024-08-26 19:14:15,880 INFO [train.py:1114] (2/4) Epoch 10, batch 0, loss[loss=0.2198, simple_loss=0.2787, pruned_loss=0.05949, ctc_loss=0.105, over 19806.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2787, pruned_loss=0.05949, ctc_loss=0.105, over 19806.00 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 16.0
+2024-08-26 19:14:15,881 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 19:14:48,066 INFO [train.py:1146] (2/4) Epoch 10, validation: loss=0.1896, simple_loss=0.2813, pruned_loss=0.03622, ctc_loss=0.0637, over 944034.00 frames.
+2024-08-26 19:14:48,067 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
+2024-08-26 19:14:52,874 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=119482.66666666667, ans=0.125
+2024-08-26 19:14:58,588 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=119536.0, ans=0.125
+2024-08-26 19:15:03,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=119536.0, ans=0.125
+2024-08-26 19:15:25,084 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.252e+02 1.696e+02 1.867e+02 2.057e+02 3.331e+02, threshold=3.733e+02, percent-clipped=0.0
+2024-08-26 19:15:25,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=119696.0, ans=0.125
+2024-08-26 19:15:26,190 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=119696.0, ans=0.125
+2024-08-26 19:15:34,240 INFO [train.py:1114] (2/4) Epoch 10, batch 50, loss[loss=0.208, simple_loss=0.2611, pruned_loss=0.05587, ctc_loss=0.1082, over 19753.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.289, pruned_loss=0.05988, ctc_loss=0.1138, over 844913.23 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 16.0
+2024-08-26 19:15:43,617 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=119802.66666666667, ans=0.1
+2024-08-26 19:15:49,445 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=119802.66666666667, ans=0.2
+2024-08-26 19:15:52,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=119856.0, ans=0.0
+2024-08-26 19:15:54,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=119856.0, ans=0.2
+2024-08-26 19:16:03,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer_ff2.min_abs, batch_count=119909.33333333333, ans=0.1
+2024-08-26 19:16:03,622 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=5.96 vs. limit=15.0
+2024-08-26 19:16:06,896 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=119909.33333333333, ans=10.0
+2024-08-26 19:16:20,477 INFO [train.py:1114] (2/4) Epoch 10, batch 100, loss[loss=0.2132, simple_loss=0.2707, pruned_loss=0.05689, ctc_loss=0.1049, over 19728.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2894, pruned_loss=0.05918, ctc_loss=0.1119, over 1498754.48 frames. ], batch size: 51, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:16:23,497 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=120016.0, ans=0.0
+2024-08-26 19:16:40,106 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.72 vs. limit=15.0
+2024-08-26 19:16:56,431 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=120176.0, ans=0.5
+2024-08-26 19:17:03,446 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.471e+02 1.633e+02 1.792e+02 2.780e+02, threshold=3.265e+02, percent-clipped=0.0
+2024-08-26 19:17:11,609 INFO [train.py:1114] (2/4) Epoch 10, batch 150, loss[loss=0.2027, simple_loss=0.2582, pruned_loss=0.05286, ctc_loss=0.1035, over 19717.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.2871, pruned_loss=0.05849, ctc_loss=0.1102, over 2027457.93 frames. ], batch size: 47, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:17:11,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=120282.66666666667, ans=0.0
+2024-08-26 19:17:13,843 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.59 vs. limit=22.5
+2024-08-26 19:17:19,262 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=120282.66666666667, ans=0.125
+2024-08-26 19:17:22,048 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=120336.0, ans=0.125
+2024-08-26 19:17:24,849 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=11.58 vs. limit=22.5
+2024-08-26 19:17:47,949 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.63 vs. limit=15.0
+2024-08-26 19:17:51,370 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=120442.66666666667, ans=0.1
+2024-08-26 19:18:07,134 INFO [train.py:1114] (2/4) Epoch 10, batch 200, loss[loss=0.2489, simple_loss=0.3067, pruned_loss=0.06902, ctc_loss=0.1328, over 18194.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2848, pruned_loss=0.05755, ctc_loss=0.1084, over 2435318.47 frames. ], batch size: 85, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:18:07,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_abs, batch_count=120549.33333333333, ans=0.5
+2024-08-26 19:18:38,041 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=120602.66666666667, ans=0.125
+2024-08-26 19:18:48,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=120656.0, ans=0.125
+2024-08-26 19:18:49,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=120656.0, ans=0.0
+2024-08-26 19:18:50,276 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.15 vs. limit=12.0
+2024-08-26 19:19:04,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=120709.33333333333, ans=0.2
+2024-08-26 19:19:11,604 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=120762.66666666667, ans=0.125
+2024-08-26 19:19:12,220 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.459e+02 1.596e+02 1.815e+02 3.041e+02, threshold=3.193e+02, percent-clipped=0.0
+2024-08-26 19:19:44,089 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.83 vs. limit=15.0
+2024-08-26 19:19:48,326 INFO [train.py:1114] (2/4) Epoch 10, batch 250, loss[loss=0.2215, simple_loss=0.2935, pruned_loss=0.05396, ctc_loss=0.104, over 19422.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2839, pruned_loss=0.05692, ctc_loss=0.107, over 2755755.23 frames. ], batch size: 67, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:19:48,537 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=120816.0, ans=0.2
+2024-08-26 19:19:51,396 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=120816.0, ans=0.2
+2024-08-26 19:19:53,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=120816.0, ans=0.125
+2024-08-26 19:19:55,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=120816.0, ans=0.0
+2024-08-26 19:20:09,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=120869.33333333333, ans=0.125
+2024-08-26 19:20:16,599 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=120922.66666666667, ans=0.125
+2024-08-26 19:20:36,763 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.const_attention_rate, batch_count=121029.33333333333, ans=0.025
+2024-08-26 19:20:42,988 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=121029.33333333333, ans=0.0
+2024-08-26 19:20:45,093 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.86 vs. limit=15.0
+2024-08-26 19:20:45,515 INFO [train.py:1114] (2/4) Epoch 10, batch 300, loss[loss=0.2541, simple_loss=0.3084, pruned_loss=0.07327, ctc_loss=0.1334, over 19522.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2833, pruned_loss=0.05684, ctc_loss=0.1063, over 2999790.65 frames. ], batch size: 61, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:20:56,726 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=121136.0, ans=0.0
+2024-08-26 19:21:07,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=121189.33333333333, ans=0.0
+2024-08-26 19:21:08,844 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.14 vs. limit=15.0
+2024-08-26 19:21:17,773 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=121242.66666666667, ans=0.125
+2024-08-26 19:21:19,457 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=121242.66666666667, ans=0.125
+2024-08-26 19:21:25,798 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=121242.66666666667, ans=0.1
+2024-08-26 19:21:26,761 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.46 vs. limit=15.0
+2024-08-26 19:21:29,985 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.190e+02 1.480e+02 1.641e+02 1.981e+02 3.456e+02, threshold=3.281e+02, percent-clipped=2.0
+2024-08-26 19:21:37,635 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=121349.33333333333, ans=0.0
+2024-08-26 19:21:38,275 INFO [train.py:1114] (2/4) Epoch 10, batch 350, loss[loss=0.1839, simple_loss=0.251, pruned_loss=0.04248, ctc_loss=0.0797, over 19752.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2838, pruned_loss=0.05712, ctc_loss=0.1065, over 3189245.73 frames. ], batch size: 48, lr: 1.52e-02, grad_scale: 8.0
+2024-08-26 19:21:47,063 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.21 vs. limit=6.0
+2024-08-26 19:21:56,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=121456.0, ans=0.05
+2024-08-26 19:22:03,825 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=11.56 vs. limit=22.5
+2024-08-26 19:22:16,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=121562.66666666667, ans=0.0
+2024-08-26 19:22:24,850 INFO [train.py:1114] (2/4) Epoch 10, batch 400, loss[loss=0.2114, simple_loss=0.2912, pruned_loss=0.04714, ctc_loss=0.09359, over 19490.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2836, pruned_loss=0.05707, ctc_loss=0.1064, over 3340173.43 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:22:34,930 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=121669.33333333333, ans=0.0
+2024-08-26 19:22:39,451 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=121669.33333333333, ans=0.125
+2024-08-26 19:22:58,802 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:23:03,837 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.79 vs. limit=15.0
+2024-08-26 19:23:15,577 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=121776.0, ans=0.125
+2024-08-26 19:23:18,033 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 1.471e+02 1.735e+02 2.020e+02 3.245e+02, threshold=3.470e+02, percent-clipped=0.0
+2024-08-26 19:23:26,373 INFO [train.py:1114] (2/4) Epoch 10, batch 450, loss[loss=0.2173, simple_loss=0.2922, pruned_loss=0.05149, ctc_loss=0.09845, over 19622.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2841, pruned_loss=0.05718, ctc_loss=0.1067, over 3449928.55 frames. ], batch size: 55, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:23:27,567 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=121882.66666666667, ans=0.125
+2024-08-26 19:23:53,907 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=121989.33333333333, ans=0.125
+2024-08-26 19:23:54,772 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=121989.33333333333, ans=0.07
+2024-08-26 19:23:55,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=122042.66666666667, ans=0.125
+2024-08-26 19:24:19,331 INFO [train.py:1114] (2/4) Epoch 10, batch 500, loss[loss=0.2291, simple_loss=0.298, pruned_loss=0.05801, ctc_loss=0.1104, over 19669.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.283, pruned_loss=0.05654, ctc_loss=0.1055, over 3545733.99 frames. ], batch size: 63, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:24:40,911 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=122202.66666666667, ans=0.025
+2024-08-26 19:24:41,730 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=122202.66666666667, ans=0.5
+2024-08-26 19:24:49,124 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=122256.0, ans=0.1
+2024-08-26 19:25:07,087 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=122309.33333333333, ans=0.2
+2024-08-26 19:25:11,344 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 1.449e+02 1.637e+02 1.959e+02 3.375e+02, threshold=3.275e+02, percent-clipped=0.0
+2024-08-26 19:25:14,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=122362.66666666667, ans=10.0
+2024-08-26 19:25:17,072 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=122362.66666666667, ans=0.125
+2024-08-26 19:25:19,718 INFO [train.py:1114] (2/4) Epoch 10, batch 550, loss[loss=0.2344, simple_loss=0.2988, pruned_loss=0.06153, ctc_loss=0.1177, over 19210.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.283, pruned_loss=0.05654, ctc_loss=0.1056, over 3607666.55 frames. ], batch size: 71, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:25:23,050 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=122416.0, ans=0.0
+2024-08-26 19:25:41,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=122522.66666666667, ans=0.125
+2024-08-26 19:25:51,219 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.10 vs. limit=15.0
+2024-08-26 19:25:53,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=122576.0, ans=0.1
+2024-08-26 19:26:10,276 INFO [train.py:1114] (2/4) Epoch 10, batch 600, loss[loss=0.2367, simple_loss=0.3023, pruned_loss=0.06227, ctc_loss=0.1163, over 19421.00 frames. ], tot_loss[loss=0.219, simple_loss=0.283, pruned_loss=0.05644, ctc_loss=0.1054, over 3665635.06 frames. ], batch size: 67, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:26:13,386 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.52 vs. limit=15.0
+2024-08-26 19:26:49,505 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=122896.0, ans=0.125
+2024-08-26 19:26:50,246 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.266e+02 1.480e+02 1.661e+02 1.846e+02 3.271e+02, threshold=3.322e+02, percent-clipped=0.0
+2024-08-26 19:26:55,802 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=122896.0, ans=0.125
+2024-08-26 19:26:58,398 INFO [train.py:1114] (2/4) Epoch 10, batch 650, loss[loss=0.2118, simple_loss=0.2795, pruned_loss=0.05251, ctc_loss=0.09788, over 19758.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2824, pruned_loss=0.05616, ctc_loss=0.1049, over 3715687.27 frames. ], batch size: 54, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:27:09,796 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.44 vs. limit=15.0
+2024-08-26 19:27:18,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=123056.0, ans=0.125
+2024-08-26 19:27:51,538 INFO [train.py:1114] (2/4) Epoch 10, batch 700, loss[loss=0.2017, simple_loss=0.266, pruned_loss=0.04966, ctc_loss=0.09507, over 19725.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.283, pruned_loss=0.05634, ctc_loss=0.1052, over 3748664.78 frames. ], batch size: 51, lr: 1.51e-02, grad_scale: 16.0
+2024-08-26 19:27:58,270 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.38 vs. limit=15.0
+2024-08-26 19:27:59,979 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=123269.33333333333, ans=0.09899494936611666
+2024-08-26 19:28:13,800 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=123322.66666666667, ans=0.0
+2024-08-26 19:28:17,880 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.62 vs. limit=15.0
+2024-08-26 19:28:23,777 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=123376.0, ans=0.0
+2024-08-26 19:28:29,132 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.526e+02 1.912e+02 2.394e+02 4.336e+02, threshold=3.825e+02, percent-clipped=8.0
+2024-08-26 19:28:33,148 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.09 vs. limit=15.0
+2024-08-26 19:28:38,773 INFO [train.py:1114] (2/4) Epoch 10, batch 750, loss[loss=0.2116, simple_loss=0.2837, pruned_loss=0.05065, ctc_loss=0.09556, over 19517.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2824, pruned_loss=0.05604, ctc_loss=0.1047, over 3775255.51 frames. ], batch size: 54, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:29:02,980 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=123589.33333333333, ans=0.125
+2024-08-26 19:29:14,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=123642.66666666667, ans=0.125
+2024-08-26 19:29:16,927 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.61 vs. limit=15.0
+2024-08-26 19:29:17,769 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.71 vs. limit=15.0
+2024-08-26 19:29:27,334 INFO [train.py:1114] (2/4) Epoch 10, batch 800, loss[loss=0.1958, simple_loss=0.2501, pruned_loss=0.05128, ctc_loss=0.09754, over 19798.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.282, pruned_loss=0.05582, ctc_loss=0.1045, over 3797522.83 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 32.0
+2024-08-26 19:29:40,739 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.10 vs. limit=15.0
+2024-08-26 19:29:43,373 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.18 vs. limit=12.0
+2024-08-26 19:29:49,204 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.62 vs. limit=10.0
+2024-08-26 19:29:59,585 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=123909.33333333333, ans=0.025
+2024-08-26 19:29:59,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=123909.33333333333, ans=0.125
+2024-08-26 19:30:03,180 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=123909.33333333333, ans=0.0
+2024-08-26 19:30:07,516 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.173e+02 1.505e+02 1.745e+02 2.038e+02 4.368e+02, threshold=3.490e+02, percent-clipped=1.0
+2024-08-26 19:30:13,362 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=123962.66666666667, ans=0.125
+2024-08-26 19:30:17,639 INFO [train.py:1114] (2/4) Epoch 10, batch 850, loss[loss=0.2291, simple_loss=0.2964, pruned_loss=0.05863, ctc_loss=0.1114, over 19643.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2822, pruned_loss=0.05596, ctc_loss=0.1045, over 3814987.05 frames. ], batch size: 59, lr: 1.50e-02, grad_scale: 32.0
+2024-08-26 19:30:22,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=124016.0, ans=0.0
+2024-08-26 19:30:25,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=124016.0, ans=0.2
+2024-08-26 19:30:37,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=124122.66666666667, ans=0.125
+2024-08-26 19:30:39,582 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=124122.66666666667, ans=0.0
+2024-08-26 19:30:51,590 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=124176.0, ans=0.125
+2024-08-26 19:31:14,605 INFO [train.py:1114] (2/4) Epoch 10, batch 900, loss[loss=0.1967, simple_loss=0.2552, pruned_loss=0.05002, ctc_loss=0.09535, over 19427.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2819, pruned_loss=0.05612, ctc_loss=0.1048, over 3818467.02 frames. ], batch size: 48, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:32:10,105 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=124336.0, ans=0.0
+2024-08-26 19:32:25,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=124442.66666666667, ans=0.015
+2024-08-26 19:32:35,082 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.199e+02 1.525e+02 1.733e+02 2.036e+02 4.140e+02, threshold=3.466e+02, percent-clipped=3.0
+2024-08-26 19:32:35,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=124496.0, ans=0.125
+2024-08-26 19:32:42,445 INFO [train.py:1114] (2/4) Epoch 10, batch 950, loss[loss=0.2041, simple_loss=0.2723, pruned_loss=0.04921, ctc_loss=0.09364, over 19486.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2827, pruned_loss=0.05639, ctc_loss=0.1054, over 3819321.92 frames. ], batch size: 49, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:32:46,438 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=124549.33333333333, ans=0.025
+2024-08-26 19:32:52,081 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=124549.33333333333, ans=0.1
+2024-08-26 19:33:24,265 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:33:24,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=124762.66666666667, ans=0.0
+2024-08-26 19:33:36,592 INFO [train.py:1114] (2/4) Epoch 10, batch 1000, loss[loss=0.1975, simple_loss=0.2734, pruned_loss=0.04424, ctc_loss=0.08312, over 19840.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2835, pruned_loss=0.05667, ctc_loss=0.1057, over 3814320.55 frames. ], batch size: 52, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:33:39,810 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=124816.0, ans=0.125
+2024-08-26 19:33:47,379 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=124869.33333333333, ans=0.2
+2024-08-26 19:33:52,960 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=124869.33333333333, ans=0.2
+2024-08-26 19:34:02,829 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=124922.66666666667, ans=0.025
+2024-08-26 19:34:16,844 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.79 vs. limit=22.5
+2024-08-26 19:34:19,222 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=125029.33333333333, ans=0.0
+2024-08-26 19:34:19,950 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.433e+02 1.580e+02 1.832e+02 3.141e+02, threshold=3.159e+02, percent-clipped=0.0
+2024-08-26 19:34:27,365 INFO [train.py:1114] (2/4) Epoch 10, batch 1050, loss[loss=0.2336, simple_loss=0.3083, pruned_loss=0.05674, ctc_loss=0.1136, over 19855.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2825, pruned_loss=0.05615, ctc_loss=0.1049, over 3821281.08 frames. ], batch size: 57, lr: 1.50e-02, grad_scale: 16.0
+2024-08-26 19:34:54,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=125082.66666666667, ans=0.125
+2024-08-26 19:34:58,922 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=125136.0, ans=0.025
+2024-08-26 19:35:02,754 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=125136.0, ans=0.125
+2024-08-26 19:35:30,011 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=125296.0, ans=0.0
+2024-08-26 19:35:36,352 INFO [train.py:1114] (2/4) Epoch 10, batch 1100, loss[loss=0.2225, simple_loss=0.2878, pruned_loss=0.0569, ctc_loss=0.1083, over 19592.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2822, pruned_loss=0.056, ctc_loss=0.1047, over 3828470.94 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 8.0
+2024-08-26 19:35:36,536 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=125349.33333333333, ans=0.1
+2024-08-26 19:35:50,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=125402.66666666667, ans=0.125
+2024-08-26 19:35:57,925 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.68 vs. limit=10.0
+2024-08-26 19:36:03,607 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.70 vs. limit=15.0
+2024-08-26 19:36:18,878 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.239e+02 1.433e+02 1.605e+02 1.841e+02 2.779e+02, threshold=3.211e+02, percent-clipped=0.0
+2024-08-26 19:36:24,625 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:36:25,423 INFO [train.py:1114] (2/4) Epoch 10, batch 1150, loss[loss=0.2011, simple_loss=0.2714, pruned_loss=0.04802, ctc_loss=0.08722, over 19580.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2823, pruned_loss=0.05613, ctc_loss=0.1049, over 3829001.93 frames. ], batch size: 52, lr: 1.49e-02, grad_scale: 8.0
+2024-08-26 19:36:34,405 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=125669.33333333333, ans=0.0
+2024-08-26 19:36:47,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=125722.66666666667, ans=0.125
+2024-08-26 19:36:54,664 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=125722.66666666667, ans=0.1
+2024-08-26 19:37:08,641 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.86 vs. limit=22.5
+2024-08-26 19:37:16,191 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=125829.33333333333, ans=0.125
+2024-08-26 19:37:17,038 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=125882.66666666667, ans=0.125
+2024-08-26 19:37:17,640 INFO [train.py:1114] (2/4) Epoch 10, batch 1200, loss[loss=0.2315, simple_loss=0.2929, pruned_loss=0.06218, ctc_loss=0.1146, over 19841.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2828, pruned_loss=0.05616, ctc_loss=0.1051, over 3825625.47 frames. ], batch size: 57, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:37:36,290 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=125989.33333333333, ans=0.125
+2024-08-26 19:37:40,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=125989.33333333333, ans=0.0
+2024-08-26 19:37:57,386 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.175e+02 1.466e+02 1.608e+02 1.824e+02 2.979e+02, threshold=3.216e+02, percent-clipped=0.0
+2024-08-26 19:38:02,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=126096.0, ans=0.2
+2024-08-26 19:38:03,422 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=126149.33333333333, ans=0.125
+2024-08-26 19:38:04,046 INFO [train.py:1114] (2/4) Epoch 10, batch 1250, loss[loss=0.2259, simple_loss=0.2928, pruned_loss=0.05699, ctc_loss=0.1125, over 19509.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2831, pruned_loss=0.056, ctc_loss=0.1049, over 3843280.40 frames. ], batch size: 61, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:38:04,244 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=126149.33333333333, ans=0.125
+2024-08-26 19:38:04,307 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:39:40,679 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.66 vs. limit=22.5
+2024-08-26 19:39:40,806 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.19 vs. limit=15.0
+2024-08-26 19:40:04,477 INFO [train.py:1114] (2/4) Epoch 10, batch 1300, loss[loss=0.2279, simple_loss=0.2875, pruned_loss=0.06082, ctc_loss=0.1169, over 18891.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2825, pruned_loss=0.05567, ctc_loss=0.1042, over 3846876.52 frames. ], batch size: 76, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:40:09,728 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=126416.0, ans=0.125
+2024-08-26 19:40:29,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=126522.66666666667, ans=0.125
+2024-08-26 19:40:30,916 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=126522.66666666667, ans=0.125
+2024-08-26 19:40:31,851 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=126522.66666666667, ans=0.0
+2024-08-26 19:40:35,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=126522.66666666667, ans=0.125
+2024-08-26 19:40:52,519 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=126629.33333333333, ans=0.0
+2024-08-26 19:40:54,259 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.480e+02 1.716e+02 1.981e+02 3.061e+02, threshold=3.432e+02, percent-clipped=0.0
+2024-08-26 19:41:00,868 INFO [train.py:1114] (2/4) Epoch 10, batch 1350, loss[loss=0.2022, simple_loss=0.2751, pruned_loss=0.04745, ctc_loss=0.0862, over 19765.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2819, pruned_loss=0.05536, ctc_loss=0.1035, over 3857709.68 frames. ], batch size: 54, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:41:22,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=126736.0, ans=0.0
+2024-08-26 19:41:32,968 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=126842.66666666667, ans=0.1
+2024-08-26 19:41:44,249 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.00 vs. limit=6.0
+2024-08-26 19:41:46,927 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=126896.0, ans=0.125
+2024-08-26 19:41:52,401 INFO [train.py:1114] (2/4) Epoch 10, batch 1400, loss[loss=0.1991, simple_loss=0.2548, pruned_loss=0.05284, ctc_loss=0.09432, over 19682.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2816, pruned_loss=0.0552, ctc_loss=0.1031, over 3865118.71 frames. ], batch size: 46, lr: 1.49e-02, grad_scale: 16.0
+2024-08-26 19:42:01,063 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=127002.66666666667, ans=0.1
+2024-08-26 19:42:19,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=127002.66666666667, ans=0.125
+2024-08-26 19:42:32,000 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.48 vs. limit=15.0
+2024-08-26 19:42:33,576 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=127109.33333333333, ans=0.0
+2024-08-26 19:42:39,761 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=127162.66666666667, ans=0.2
+2024-08-26 19:42:43,188 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.136e+02 1.452e+02 1.585e+02 1.952e+02 4.788e+02, threshold=3.170e+02, percent-clipped=2.0
+2024-08-26 19:42:46,569 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.60 vs. limit=6.0
+2024-08-26 19:42:49,157 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=127216.0, ans=0.125
+2024-08-26 19:42:49,762 INFO [train.py:1114] (2/4) Epoch 10, batch 1450, loss[loss=0.2517, simple_loss=0.309, pruned_loss=0.07092, ctc_loss=0.1314, over 19658.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2823, pruned_loss=0.05563, ctc_loss=0.1038, over 3863069.26 frames. ], batch size: 63, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:42:52,285 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.91 vs. limit=10.0
+2024-08-26 19:42:54,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=127216.0, ans=0.2
+2024-08-26 19:42:55,605 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=127216.0, ans=0.0
+2024-08-26 19:43:01,337 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.91 vs. limit=15.0
+2024-08-26 19:43:11,938 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=127322.66666666667, ans=0.0
+2024-08-26 19:43:24,443 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=127322.66666666667, ans=0.125
+2024-08-26 19:43:29,037 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=127376.0, ans=0.125
+2024-08-26 19:43:30,893 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=127376.0, ans=0.07
+2024-08-26 19:43:40,871 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=127429.33333333333, ans=0.125
+2024-08-26 19:43:48,225 INFO [train.py:1114] (2/4) Epoch 10, batch 1500, loss[loss=0.2223, simple_loss=0.2874, pruned_loss=0.05611, ctc_loss=0.1124, over 19582.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2828, pruned_loss=0.05576, ctc_loss=0.104, over 3862044.66 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:43:49,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=127482.66666666667, ans=0.0
+2024-08-26 19:43:53,935 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=127482.66666666667, ans=0.125
+2024-08-26 19:44:05,925 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=127536.0, ans=0.07
+2024-08-26 19:44:06,944 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=127536.0, ans=0.0
+2024-08-26 19:44:10,547 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=127589.33333333333, ans=0.1
+2024-08-26 19:44:29,449 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=127642.66666666667, ans=0.0
+2024-08-26 19:44:34,279 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=127696.0, ans=0.0
+2024-08-26 19:44:37,661 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.427e+02 1.587e+02 1.794e+02 3.285e+02, threshold=3.174e+02, percent-clipped=1.0
+2024-08-26 19:44:38,898 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=127696.0, ans=0.5
+2024-08-26 19:44:52,497 INFO [train.py:1114] (2/4) Epoch 10, batch 1550, loss[loss=0.225, simple_loss=0.2905, pruned_loss=0.05822, ctc_loss=0.1075, over 19608.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.283, pruned_loss=0.05621, ctc_loss=0.105, over 3845947.00 frames. ], batch size: 60, lr: 1.48e-02, grad_scale: 16.0
+2024-08-26 19:45:19,958 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=127909.33333333333, ans=0.125
+2024-08-26 19:45:22,106 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten.whitening_limit, batch_count=127909.33333333333, ans=15.0
+2024-08-26 19:45:28,531 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=127909.33333333333, ans=0.125
+2024-08-26 19:45:31,259 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=127962.66666666667, ans=0.125
+2024-08-26 19:45:43,643 INFO [train.py:1114] (2/4) Epoch 10, batch 1600, loss[loss=0.2241, simple_loss=0.2945, pruned_loss=0.05524, ctc_loss=0.108, over 19828.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2825, pruned_loss=0.05614, ctc_loss=0.1049, over 3834494.88 frames. ], batch size: 57, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:45:51,804 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.05 vs. limit=15.0
+2024-08-26 19:45:58,926 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=128069.33333333333, ans=0.2
+2024-08-26 19:46:00,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128069.33333333333, ans=0.1
+2024-08-26 19:46:14,948 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=128176.0, ans=0.0
+2024-08-26 19:46:26,519 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.210e+02 1.460e+02 1.671e+02 2.068e+02 2.984e+02, threshold=3.342e+02, percent-clipped=0.0
+2024-08-26 19:46:33,080 INFO [train.py:1114] (2/4) Epoch 10, batch 1650, loss[loss=0.2117, simple_loss=0.2802, pruned_loss=0.05218, ctc_loss=0.09687, over 19667.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2821, pruned_loss=0.05593, ctc_loss=0.1044, over 3831066.69 frames. ], batch size: 59, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:46:38,892 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=128282.66666666667, ans=0.0
+2024-08-26 19:46:46,481 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=128336.0, ans=0.2
+2024-08-26 19:46:57,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=128389.33333333333, ans=0.1
+2024-08-26 19:46:58,313 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.35 vs. limit=15.0
+2024-08-26 19:46:58,866 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=128389.33333333333, ans=0.2
+2024-08-26 19:47:02,550 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:47:11,957 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128442.66666666667, ans=0.1
+2024-08-26 19:47:28,670 INFO [train.py:1114] (2/4) Epoch 10, batch 1700, loss[loss=0.189, simple_loss=0.2498, pruned_loss=0.04705, ctc_loss=0.08518, over 19668.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2814, pruned_loss=0.05518, ctc_loss=0.1034, over 3845478.45 frames. ], batch size: 46, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:47:29,855 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=128549.33333333333, ans=0.0
+2024-08-26 19:47:43,096 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=128602.66666666667, ans=0.0
+2024-08-26 19:47:45,163 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.41 vs. limit=12.0
+2024-08-26 19:47:56,382 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=128709.33333333333, ans=0.1
+2024-08-26 19:48:11,123 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:48:18,887 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.440e+02 1.568e+02 1.897e+02 2.765e+02, threshold=3.136e+02, percent-clipped=0.0
+2024-08-26 19:48:23,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=128762.66666666667, ans=0.125
+2024-08-26 19:48:25,123 INFO [train.py:1114] (2/4) Epoch 10, batch 1750, loss[loss=0.1864, simple_loss=0.252, pruned_loss=0.04383, ctc_loss=0.08287, over 19644.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2808, pruned_loss=0.05501, ctc_loss=0.1028, over 3850000.57 frames. ], batch size: 45, lr: 1.48e-02, grad_scale: 32.0
+2024-08-26 19:48:33,036 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=128869.33333333333, ans=0.0
+2024-08-26 19:48:41,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=128922.66666666667, ans=0.0
+2024-08-26 19:48:47,357 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=128922.66666666667, ans=0.125
+2024-08-26 19:49:08,985 INFO [train.py:1114] (2/4) Epoch 10, batch 1800, loss[loss=0.205, simple_loss=0.2797, pruned_loss=0.0477, ctc_loss=0.08736, over 19621.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2812, pruned_loss=0.05528, ctc_loss=0.1032, over 3852826.67 frames. ], batch size: 55, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:49:17,628 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.56 vs. limit=15.0
+2024-08-26 19:49:37,682 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.93 vs. limit=15.0
+2024-08-26 19:49:45,940 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=129296.0, ans=0.0
+2024-08-26 19:49:49,352 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.150e+02 1.466e+02 1.715e+02 2.130e+02 3.505e+02, threshold=3.430e+02, percent-clipped=4.0
+2024-08-26 19:49:55,607 INFO [train.py:1114] (2/4) Epoch 10, batch 1850, loss[loss=0.2512, simple_loss=0.3101, pruned_loss=0.06978, ctc_loss=0.1316, over 19576.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2811, pruned_loss=0.05523, ctc_loss=0.1031, over 3855910.98 frames. ], batch size: 57, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:49:55,809 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=129349.33333333333, ans=0.125
+2024-08-26 19:50:08,586 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.73 vs. limit=15.0
+2024-08-26 19:50:14,172 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.const_attention_rate, batch_count=129456.0, ans=0.025
+2024-08-26 19:50:30,261 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=129509.33333333333, ans=0.125
+2024-08-26 19:50:50,325 INFO [train.py:1114] (2/4) Epoch 10, batch 1900, loss[loss=0.2032, simple_loss=0.2781, pruned_loss=0.04619, ctc_loss=0.08958, over 19650.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2817, pruned_loss=0.0553, ctc_loss=0.103, over 3860739.14 frames. ], batch size: 59, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:50:50,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=129616.0, ans=0.125
+2024-08-26 19:50:51,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=129616.0, ans=0.125
+2024-08-26 19:51:00,956 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=129669.33333333333, ans=0.2
+2024-08-26 19:51:04,341 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=129669.33333333333, ans=0.0
+2024-08-26 19:51:21,748 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=129776.0, ans=0.1
+2024-08-26 19:51:25,239 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=129829.33333333333, ans=0.2
+2024-08-26 19:51:27,692 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.498e+02 1.655e+02 1.944e+02 4.101e+02, threshold=3.311e+02, percent-clipped=1.0
+2024-08-26 19:51:33,748 INFO [train.py:1114] (2/4) Epoch 10, batch 1950, loss[loss=0.2024, simple_loss=0.2703, pruned_loss=0.04846, ctc_loss=0.09388, over 19575.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2826, pruned_loss=0.05551, ctc_loss=0.1033, over 3869785.17 frames. ], batch size: 52, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:51:35,249 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.11 vs. limit=12.0
+2024-08-26 19:51:45,246 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=129936.0, ans=0.0
+2024-08-26 19:51:46,869 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=129936.0, ans=0.125
+2024-08-26 19:51:53,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=129989.33333333333, ans=0.2
+2024-08-26 19:52:00,037 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=130042.66666666667, ans=0.125
+2024-08-26 19:52:33,395 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=130042.66666666667, ans=0.125
+2024-08-26 19:52:51,606 INFO [train.py:1114] (2/4) Epoch 10, batch 2000, loss[loss=0.2037, simple_loss=0.2573, pruned_loss=0.05449, ctc_loss=0.1029, over 19625.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2838, pruned_loss=0.05627, ctc_loss=0.1047, over 3855609.89 frames. ], batch size: 45, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:53:02,636 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.14 vs. limit=15.0
+2024-08-26 19:53:14,891 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.11 vs. limit=15.0
+2024-08-26 19:53:26,742 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=130362.66666666667, ans=0.125
+2024-08-26 19:53:27,858 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.46 vs. limit=15.0
+2024-08-26 19:53:29,079 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.467e+02 1.617e+02 1.850e+02 3.299e+02, threshold=3.233e+02, percent-clipped=0.0
+2024-08-26 19:53:35,210 INFO [train.py:1114] (2/4) Epoch 10, batch 2050, loss[loss=0.2133, simple_loss=0.2675, pruned_loss=0.05886, ctc_loss=0.1034, over 19751.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2825, pruned_loss=0.05605, ctc_loss=0.1042, over 3851886.73 frames. ], batch size: 47, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:53:53,755 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=130522.66666666667, ans=0.0
+2024-08-26 19:53:56,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=130522.66666666667, ans=0.125
+2024-08-26 19:54:04,198 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=130576.0, ans=0.0
+2024-08-26 19:54:18,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=130682.66666666667, ans=0.0
+2024-08-26 19:54:18,663 INFO [train.py:1114] (2/4) Epoch 10, batch 2100, loss[loss=0.1995, simple_loss=0.2683, pruned_loss=0.04704, ctc_loss=0.09182, over 19774.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2814, pruned_loss=0.05532, ctc_loss=0.1029, over 3858696.97 frames. ], batch size: 54, lr: 1.47e-02, grad_scale: 32.0
+2024-08-26 19:54:18,834 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=130682.66666666667, ans=0.2
+2024-08-26 19:54:27,526 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=130736.0, ans=0.125
+2024-08-26 19:54:27,777 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.68 vs. limit=15.0
+2024-08-26 19:54:30,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=130736.0, ans=0.0
+2024-08-26 19:54:35,219 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=130789.33333333333, ans=0.125
+2024-08-26 19:54:35,284 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=130789.33333333333, ans=0.025
+2024-08-26 19:54:49,369 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=130842.66666666667, ans=0.125
+2024-08-26 19:54:52,842 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=130842.66666666667, ans=0.0
+2024-08-26 19:54:54,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=130896.0, ans=0.125
+2024-08-26 19:54:56,945 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.237e+02 1.404e+02 1.614e+02 1.979e+02 3.349e+02, threshold=3.228e+02, percent-clipped=1.0
+2024-08-26 19:55:03,183 INFO [train.py:1114] (2/4) Epoch 10, batch 2150, loss[loss=0.2078, simple_loss=0.2728, pruned_loss=0.05115, ctc_loss=0.1013, over 19872.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2807, pruned_loss=0.05497, ctc_loss=0.1022, over 3869140.22 frames. ], batch size: 52, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:55:05,092 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=130949.33333333333, ans=0.125
+2024-08-26 19:55:08,741 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.16 vs. limit=22.5
+2024-08-26 19:55:08,845 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.77 vs. limit=6.0
+2024-08-26 19:55:14,548 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=131002.66666666667, ans=0.0
+2024-08-26 19:55:15,542 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=131002.66666666667, ans=0.125
+2024-08-26 19:55:16,273 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 19:55:26,797 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=131056.0, ans=0.0
+2024-08-26 19:55:32,756 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=131109.33333333334, ans=0.125
+2024-08-26 19:55:35,640 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.19 vs. limit=22.5
+2024-08-26 19:55:46,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=131162.66666666666, ans=0.125
+2024-08-26 19:55:50,185 INFO [train.py:1114] (2/4) Epoch 10, batch 2200, loss[loss=0.2371, simple_loss=0.3045, pruned_loss=0.06135, ctc_loss=0.1176, over 19591.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.281, pruned_loss=0.05506, ctc_loss=0.1024, over 3867343.91 frames. ], batch size: 57, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:56:11,055 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.90 vs. limit=15.0
+2024-08-26 19:56:22,853 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=131322.66666666666, ans=0.125
+2024-08-26 19:56:26,364 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=131376.0, ans=0.0
+2024-08-26 19:56:28,739 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=10.75 vs. limit=15.0
+2024-08-26 19:56:34,557 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=131376.0, ans=0.125
+2024-08-26 19:56:35,266 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131429.33333333334, ans=0.1
+2024-08-26 19:56:35,295 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=131429.33333333334, ans=0.05
+2024-08-26 19:56:38,534 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.505e+02 1.694e+02 1.989e+02 3.015e+02, threshold=3.388e+02, percent-clipped=0.0
+2024-08-26 19:56:41,681 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.26 vs. limit=10.0
+2024-08-26 19:56:43,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=131429.33333333334, ans=0.1
+2024-08-26 19:56:44,012 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=131482.66666666666, ans=0.2
+2024-08-26 19:56:44,633 INFO [train.py:1114] (2/4) Epoch 10, batch 2250, loss[loss=0.2154, simple_loss=0.29, pruned_loss=0.05063, ctc_loss=0.0991, over 19614.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2808, pruned_loss=0.05476, ctc_loss=0.102, over 3867782.49 frames. ], batch size: 55, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:56:50,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=131482.66666666666, ans=0.1
+2024-08-26 19:56:51,228 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.37 vs. limit=15.0
+2024-08-26 19:56:55,397 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=131536.0, ans=0.2
+2024-08-26 19:57:25,374 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=131696.0, ans=0.125
+2024-08-26 19:57:27,772 INFO [train.py:1114] (2/4) Epoch 10, batch 2300, loss[loss=0.19, simple_loss=0.2595, pruned_loss=0.04432, ctc_loss=0.07978, over 19489.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2803, pruned_loss=0.05496, ctc_loss=0.1023, over 3861314.54 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 19:57:53,192 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=131909.33333333334, ans=0.2
+2024-08-26 19:57:56,458 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=131909.33333333334, ans=0.2
+2024-08-26 19:57:56,530 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=131909.33333333334, ans=0.125
+2024-08-26 19:58:02,616 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=131962.66666666666, ans=0.125
+2024-08-26 19:58:05,767 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.213e+02 1.499e+02 1.709e+02 2.092e+02 3.241e+02, threshold=3.418e+02, percent-clipped=0.0
+2024-08-26 19:58:43,744 INFO [train.py:1114] (2/4) Epoch 10, batch 2350, loss[loss=0.2253, simple_loss=0.2994, pruned_loss=0.05491, ctc_loss=0.1036, over 19675.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2805, pruned_loss=0.05519, ctc_loss=0.1028, over 3863755.80 frames. ], batch size: 63, lr: 1.46e-02, grad_scale: 16.0
+2024-08-26 19:58:46,404 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=132016.0, ans=0.1
+2024-08-26 19:58:46,879 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.20 vs. limit=15.0
+2024-08-26 19:58:47,300 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=132016.0, ans=0.1
+2024-08-26 19:59:01,900 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=132069.33333333334, ans=0.125
+2024-08-26 19:59:07,725 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=132122.66666666666, ans=0.125
+2024-08-26 19:59:08,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=132122.66666666666, ans=0.025
+2024-08-26 19:59:18,055 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=132176.0, ans=0.05
+2024-08-26 19:59:21,652 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.56 vs. limit=12.0
+2024-08-26 19:59:32,687 INFO [train.py:1114] (2/4) Epoch 10, batch 2400, loss[loss=0.2247, simple_loss=0.2886, pruned_loss=0.05831, ctc_loss=0.1103, over 19302.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2826, pruned_loss=0.05606, ctc_loss=0.1041, over 3858059.15 frames. ], batch size: 71, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 20:00:19,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten.whitening_limit, batch_count=132442.66666666666, ans=15.0
+2024-08-26 20:00:20,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=132442.66666666666, ans=0.0
+2024-08-26 20:00:21,103 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=132442.66666666666, ans=0.09899494936611666
+2024-08-26 20:00:36,870 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.213e+02 1.532e+02 1.694e+02 1.900e+02 3.260e+02, threshold=3.387e+02, percent-clipped=0.0
+2024-08-26 20:00:42,849 INFO [train.py:1114] (2/4) Epoch 10, batch 2450, loss[loss=0.2922, simple_loss=0.3155, pruned_loss=0.09829, ctc_loss=0.1808, over 13563.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2871, pruned_loss=0.05939, ctc_loss=0.1107, over 3731945.56 frames. ], batch size: 140, lr: 1.46e-02, grad_scale: 32.0
+2024-08-26 20:00:58,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.const_attention_rate, batch_count=132549.33333333334, ans=0.025
+2024-08-26 20:01:06,016 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=132602.66666666666, ans=0.125
+2024-08-26 20:01:11,110 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=132656.0, ans=0.0
+2024-08-26 20:03:28,142 INFO [train.py:1114] (2/4) Epoch 11, batch 0, loss[loss=0.2159, simple_loss=0.2788, pruned_loss=0.05593, ctc_loss=0.1027, over 19782.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2788, pruned_loss=0.05593, ctc_loss=0.1027, over 19782.00 frames. ], batch size: 49, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:03:28,143 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 20:03:42,229 INFO [train.py:1146] (2/4) Epoch 11, validation: loss=0.1858, simple_loss=0.2776, pruned_loss=0.03491, ctc_loss=0.06042, over 944034.00 frames.
+2024-08-26 20:03:42,230 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
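+[annotation] At each epoch start, train.py pauses training to compute a full validation loss and reports peak GPU memory. A minimal sketch of that pass is below; `model`, `valid_dl` and `compute_loss` are placeholders for whatever train.py actually uses, while `torch.cuda.max_memory_allocated` is the real API behind the "Maximum memory allocated" line.
+```python
+# Hedged sketch of the validation pass logged above.
+import torch
+
+def validate(model, valid_dl, compute_loss, device):
+    model.eval()
+    tot_loss, tot_frames = 0.0, 0.0
+    with torch.no_grad():
+        for batch in valid_dl:
+            loss, frames = compute_loss(model, batch, device)  # per-frame loss
+            tot_loss += loss.item() * frames
+            tot_frames += frames
+    model.train()
+    peak_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
+    print(f"validation: loss={tot_loss / tot_frames:.4f}, "
+          f"over {tot_frames:.2f} frames; max memory {peak_mb}MB")
+```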
+2024-08-26 20:03:47,463 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.83 vs. limit=6.0
+2024-08-26 20:04:00,471 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=132864.0, ans=0.0
+2024-08-26 20:04:03,721 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.79 vs. limit=5.0
+2024-08-26 20:04:11,047 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=132864.0, ans=0.125
+2024-08-26 20:04:11,391 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.77 vs. limit=10.0
+2024-08-26 20:04:16,684 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=132917.33333333334, ans=0.0
+2024-08-26 20:04:17,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=132917.33333333334, ans=0.125
+2024-08-26 20:04:32,368 INFO [train.py:1114] (2/4) Epoch 11, batch 50, loss[loss=0.1877, simple_loss=0.2543, pruned_loss=0.04435, ctc_loss=0.08076, over 19726.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2852, pruned_loss=0.05679, ctc_loss=0.1066, over 844464.80 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
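+[annotation] The `lr` column decays smoothly in both batch and epoch (1.46e-02 late in epoch 10, 1.39e-02 from the start of epoch 11). One schedule with exactly this shape is the Eden-style rule used in Zipformer recipes, sketched below; `base_lr`, `lr_batches` and `lr_epochs` are illustrative values, not this run's actual settings.
+```python
+# Hedged sketch of an Eden-style learning-rate rule: a quartic-root decay
+# in both batch count and epoch, flat near zero in each variable.
+def eden_lr(base_lr: float, batch: int, epoch: int,
+            lr_batches: float = 5000.0, lr_epochs: float = 6.0) -> float:
+    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
+    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
+    return base_lr * batch_factor * epoch_factor
+
+print(eden_lr(0.045, batch=132000, epoch=10))  # smoothly smaller each epoch
+```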
+2024-08-26 20:04:37,357 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=133024.0, ans=0.125
+2024-08-26 20:04:37,952 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.177e+02 1.624e+02 1.801e+02 2.017e+02 3.320e+02, threshold=3.603e+02, percent-clipped=0.0
+2024-08-26 20:04:50,297 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=133077.33333333334, ans=0.2
+2024-08-26 20:05:21,260 INFO [train.py:1114] (2/4) Epoch 11, batch 100, loss[loss=0.2134, simple_loss=0.2769, pruned_loss=0.05528, ctc_loss=0.09853, over 19710.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2846, pruned_loss=0.05574, ctc_loss=0.1048, over 1499152.40 frames. ], batch size: 51, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:05:22,430 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=133290.66666666666, ans=0.2
+2024-08-26 20:05:35,866 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.96 vs. limit=15.0
+2024-08-26 20:05:57,453 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=133450.66666666666, ans=0.0
+2024-08-26 20:06:05,918 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.05 vs. limit=15.0
+2024-08-26 20:06:10,884 INFO [train.py:1114] (2/4) Epoch 11, batch 150, loss[loss=0.2065, simple_loss=0.2601, pruned_loss=0.05498, ctc_loss=0.1076, over 19685.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2821, pruned_loss=0.05471, ctc_loss=0.1029, over 2028078.50 frames. ], batch size: 47, lr: 1.39e-02, grad_scale: 32.0
+2024-08-26 20:06:13,362 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=23.38 vs. limit=22.5
+2024-08-26 20:06:16,413 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.151e+02 1.457e+02 1.584e+02 1.841e+02 2.561e+02, threshold=3.167e+02, percent-clipped=0.0
+2024-08-26 20:06:19,446 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:06:36,772 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=133664.0, ans=0.025
+2024-08-26 20:06:37,597 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=133664.0, ans=0.2
+2024-08-26 20:06:40,424 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=133717.33333333334, ans=0.125
+2024-08-26 20:06:42,337 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=133717.33333333334, ans=0.125
+2024-08-26 20:06:53,291 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_na.min_abs, batch_count=133717.33333333334, ans=0.02
+2024-08-26 20:07:45,741 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=133770.66666666666, ans=0.125
+2024-08-26 20:08:02,628 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=133770.66666666666, ans=0.0
+2024-08-26 20:08:08,067 INFO [train.py:1114] (2/4) Epoch 11, batch 200, loss[loss=0.2377, simple_loss=0.2987, pruned_loss=0.06411, ctc_loss=0.1212, over 18112.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2815, pruned_loss=0.05472, ctc_loss=0.1027, over 2434418.27 frames. ], batch size: 85, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:08:13,321 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.59 vs. limit=15.0
+2024-08-26 20:08:32,345 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=133930.66666666666, ans=0.0
+2024-08-26 20:08:33,200 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=133930.66666666666, ans=0.1
+2024-08-26 20:08:55,612 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=134037.33333333334, ans=0.125
+2024-08-26 20:09:00,074 INFO [train.py:1114] (2/4) Epoch 11, batch 250, loss[loss=0.2094, simple_loss=0.286, pruned_loss=0.04728, ctc_loss=0.09554, over 19405.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2805, pruned_loss=0.05391, ctc_loss=0.101, over 2754380.29 frames. ], batch size: 67, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:09:05,646 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.170e+02 1.397e+02 1.518e+02 1.749e+02 2.921e+02, threshold=3.037e+02, percent-clipped=0.0
+2024-08-26 20:09:07,322 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.43 vs. limit=15.0
+2024-08-26 20:09:12,778 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=134144.0, ans=0.2
+2024-08-26 20:09:14,680 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=134144.0, ans=0.0
+2024-08-26 20:09:14,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=134144.0, ans=0.125
+2024-08-26 20:09:17,528 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=134144.0, ans=0.2
+2024-08-26 20:09:18,476 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=134144.0, ans=0.09899494936611666
+2024-08-26 20:09:25,067 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=134197.33333333334, ans=0.1
+2024-08-26 20:09:30,005 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.96 vs. limit=15.0
+2024-08-26 20:09:43,468 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=134304.0, ans=0.0
+2024-08-26 20:09:44,260 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=134304.0, ans=0.2
+2024-08-26 20:09:46,203 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=134304.0, ans=0.0
+2024-08-26 20:09:51,426 INFO [train.py:1114] (2/4) Epoch 11, batch 300, loss[loss=0.2049, simple_loss=0.2797, pruned_loss=0.04702, ctc_loss=0.09027, over 19517.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2798, pruned_loss=0.05371, ctc_loss=0.1008, over 2998972.64 frames. ], batch size: 61, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:10:02,464 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.15 vs. limit=15.0
+2024-08-26 20:10:12,791 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=134464.0, ans=0.125
+2024-08-26 20:10:41,580 INFO [train.py:1114] (2/4) Epoch 11, batch 350, loss[loss=0.1965, simple_loss=0.2575, pruned_loss=0.04981, ctc_loss=0.08995, over 19767.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2801, pruned_loss=0.05387, ctc_loss=0.1012, over 3189034.92 frames. ], batch size: 48, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:10:44,762 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=134624.0, ans=0.125
+2024-08-26 20:10:46,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=134624.0, ans=0.125
+2024-08-26 20:10:47,202 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.110e+02 1.479e+02 1.637e+02 2.052e+02 3.441e+02, threshold=3.275e+02, percent-clipped=1.0
+2024-08-26 20:11:01,495 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=134730.66666666666, ans=0.2
+2024-08-26 20:11:20,464 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=134784.0, ans=0.125
+2024-08-26 20:11:25,964 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=134837.33333333334, ans=0.125
+2024-08-26 20:11:30,036 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.59 vs. limit=15.0
+2024-08-26 20:11:30,589 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=134890.66666666666, ans=0.125
+2024-08-26 20:11:31,275 INFO [train.py:1114] (2/4) Epoch 11, batch 400, loss[loss=0.1903, simple_loss=0.2678, pruned_loss=0.04048, ctc_loss=0.07965, over 19512.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2791, pruned_loss=0.05335, ctc_loss=0.1002, over 3342037.91 frames. ], batch size: 54, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:11:32,410 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=134890.66666666666, ans=0.1
+2024-08-26 20:11:40,781 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=134890.66666666666, ans=0.125
+2024-08-26 20:12:03,390 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff3.min_abs, batch_count=135050.66666666666, ans=0.2
+2024-08-26 20:12:18,537 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.11 vs. limit=15.0
+2024-08-26 20:12:20,811 INFO [train.py:1114] (2/4) Epoch 11, batch 450, loss[loss=0.2116, simple_loss=0.2895, pruned_loss=0.04822, ctc_loss=0.09308, over 19623.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2791, pruned_loss=0.05327, ctc_loss=0.0997, over 3450391.50 frames. ], batch size: 55, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:12:29,028 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.192e+02 1.489e+02 1.652e+02 2.008e+02 3.634e+02, threshold=3.305e+02, percent-clipped=1.0
+2024-08-26 20:13:01,091 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.93 vs. limit=10.0
+2024-08-26 20:13:01,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=135370.66666666666, ans=0.2
+2024-08-26 20:13:05,798 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.50 vs. limit=22.5
+2024-08-26 20:13:11,617 INFO [train.py:1114] (2/4) Epoch 11, batch 500, loss[loss=0.2194, simple_loss=0.2892, pruned_loss=0.05543, ctc_loss=0.09706, over 19673.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2781, pruned_loss=0.05311, ctc_loss=0.09922, over 3545705.63 frames. ], batch size: 63, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:13:15,813 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.10 vs. limit=22.5
+2024-08-26 20:13:19,758 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=135424.0, ans=0.0
+2024-08-26 20:13:24,580 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.61 vs. limit=15.0
+2024-08-26 20:13:24,592 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.87 vs. limit=15.0
+2024-08-26 20:13:36,603 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.13 vs. limit=15.0
+2024-08-26 20:13:42,349 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=135584.0, ans=0.0
+2024-08-26 20:13:50,587 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=135637.33333333334, ans=0.035
+2024-08-26 20:13:58,587 INFO [train.py:1114] (2/4) Epoch 11, batch 550, loss[loss=0.2236, simple_loss=0.2874, pruned_loss=0.0585, ctc_loss=0.1071, over 19228.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2784, pruned_loss=0.05341, ctc_loss=0.09979, over 3609068.07 frames. ], batch size: 71, lr: 1.38e-02, grad_scale: 32.0
+2024-08-26 20:13:58,733 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=135690.66666666666, ans=0.0
+2024-08-26 20:14:04,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=135690.66666666666, ans=0.0
+2024-08-26 20:14:04,340 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=135690.66666666666, ans=0.125
+2024-08-26 20:14:06,854 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.194e+02 1.449e+02 1.695e+02 2.078e+02 4.377e+02, threshold=3.390e+02, percent-clipped=1.0
+2024-08-26 20:14:10,255 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.93 vs. limit=15.0
+2024-08-26 20:14:14,032 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.45 vs. limit=12.0
+2024-08-26 20:14:19,624 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.75 vs. limit=12.0
+2024-08-26 20:14:25,075 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=135797.33333333334, ans=0.125
+2024-08-26 20:14:37,650 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=135850.66666666666, ans=0.1
+2024-08-26 20:14:50,537 INFO [train.py:1114] (2/4) Epoch 11, batch 600, loss[loss=0.22, simple_loss=0.2896, pruned_loss=0.05429, ctc_loss=0.1047, over 19405.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2788, pruned_loss=0.05336, ctc_loss=0.09975, over 3666460.41 frames. ], batch size: 67, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:14:52,027 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=23.85 vs. limit=22.5
+2024-08-26 20:15:05,400 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=136010.66666666666, ans=0.125
+2024-08-26 20:15:07,245 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=136010.66666666666, ans=0.0
+2024-08-26 20:15:09,028 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=136064.0, ans=0.0
+2024-08-26 20:15:41,576 INFO [train.py:1114] (2/4) Epoch 11, batch 650, loss[loss=0.2024, simple_loss=0.2713, pruned_loss=0.04897, ctc_loss=0.08879, over 19769.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2781, pruned_loss=0.05305, ctc_loss=0.09908, over 3716733.32 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:15:44,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=136224.0, ans=0.125
+2024-08-26 20:15:45,515 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=136224.0, ans=0.2
+2024-08-26 20:15:47,087 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.457e+02 1.627e+02 2.058e+02 3.143e+02, threshold=3.253e+02, percent-clipped=0.0
+2024-08-26 20:16:02,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=136330.66666666666, ans=0.1
+2024-08-26 20:16:04,169 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.00 vs. limit=15.0
+2024-08-26 20:16:06,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=136330.66666666666, ans=0.125
+2024-08-26 20:16:07,908 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=5.72 vs. limit=15.0
+2024-08-26 20:16:13,123 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=136384.0, ans=0.125
+2024-08-26 20:16:27,818 INFO [train.py:1114] (2/4) Epoch 11, batch 700, loss[loss=0.1832, simple_loss=0.2555, pruned_loss=0.03985, ctc_loss=0.07786, over 19736.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2778, pruned_loss=0.05268, ctc_loss=0.09861, over 3748573.45 frames. ], batch size: 51, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:16:30,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=136490.66666666666, ans=0.0
+2024-08-26 20:16:39,263 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=136544.0, ans=0.125
+2024-08-26 20:16:40,195 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=136544.0, ans=0.2
+2024-08-26 20:16:42,133 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=136544.0, ans=0.0
+2024-08-26 20:17:00,561 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=136650.66666666666, ans=0.05
+2024-08-26 20:17:05,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=136650.66666666666, ans=0.0
+2024-08-26 20:17:06,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=136704.0, ans=0.0
+2024-08-26 20:17:11,662 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.59 vs. limit=22.5
+2024-08-26 20:17:16,586 INFO [train.py:1114] (2/4) Epoch 11, batch 750, loss[loss=0.1941, simple_loss=0.262, pruned_loss=0.04579, ctc_loss=0.08651, over 19502.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2778, pruned_loss=0.0527, ctc_loss=0.09862, over 3773497.42 frames. ], batch size: 54, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:17:23,101 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=136757.33333333334, ans=0.125
+2024-08-26 20:17:24,641 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.496e+02 1.727e+02 2.151e+02 3.286e+02, threshold=3.455e+02, percent-clipped=1.0
+2024-08-26 20:17:26,848 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:17:32,436 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=136810.66666666666, ans=0.2
+2024-08-26 20:17:38,795 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=136864.0, ans=0.1
+2024-08-26 20:17:52,952 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=136917.33333333334, ans=0.0
+2024-08-26 20:18:08,122 INFO [train.py:1114] (2/4) Epoch 11, batch 800, loss[loss=0.1843, simple_loss=0.2447, pruned_loss=0.04561, ctc_loss=0.0817, over 19800.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2772, pruned_loss=0.05267, ctc_loss=0.09854, over 3795681.07 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:18:08,460 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=137024.0, ans=0.1
+2024-08-26 20:18:16,663 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=137077.33333333334, ans=0.125
+2024-08-26 20:18:26,905 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=137130.66666666666, ans=0.025
+2024-08-26 20:18:32,314 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=137130.66666666666, ans=0.2
+2024-08-26 20:18:41,031 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.48 vs. limit=10.0
+2024-08-26 20:19:09,199 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.42 vs. limit=10.0
+2024-08-26 20:19:27,341 INFO [train.py:1114] (2/4) Epoch 11, batch 850, loss[loss=0.2393, simple_loss=0.3038, pruned_loss=0.06356, ctc_loss=0.119, over 19636.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2768, pruned_loss=0.05257, ctc_loss=0.09836, over 3814999.06 frames. ], batch size: 59, lr: 1.37e-02, grad_scale: 32.0
+2024-08-26 20:19:39,237 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=137290.66666666666, ans=0.1
+2024-08-26 20:19:39,850 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.096e+02 1.453e+02 1.601e+02 1.920e+02 5.497e+02, threshold=3.202e+02, percent-clipped=1.0
+2024-08-26 20:19:42,231 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=137290.66666666666, ans=0.025
+2024-08-26 20:19:47,989 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=137344.0, ans=0.125
+2024-08-26 20:19:52,146 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=137344.0, ans=0.125
+2024-08-26 20:19:54,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=137344.0, ans=0.125
+2024-08-26 20:20:01,681 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=137397.33333333334, ans=0.125
+2024-08-26 20:20:46,254 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=137504.0, ans=0.125
+2024-08-26 20:20:53,069 INFO [train.py:1114] (2/4) Epoch 11, batch 900, loss[loss=0.192, simple_loss=0.2503, pruned_loss=0.0484, ctc_loss=0.09208, over 19422.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2777, pruned_loss=0.05316, ctc_loss=0.0993, over 3820246.27 frames. ], batch size: 48, lr: 1.37e-02, grad_scale: 16.0
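+[annotation] Note `grad_scale` dropping from 32.0 to 16.0 at this batch: with mixed-precision training, the loss scale is halved whenever an overflow (inf/nan gradient) is detected, then grown back slowly. The sketch below uses PyTorch's real GradScaler API; `model`, `optimizer` and `compute_loss` are placeholders.
+```python
+# Hedged sketch of the dynamic loss scaling behind the grad_scale values:
+# torch.cuda.amp.GradScaler halves its scale on overflow and periodically
+# grows it back when steps succeed.
+import torch
+
+scaler = torch.cuda.amp.GradScaler(init_scale=32.0, growth_interval=2000)
+
+def training_step(model, optimizer, batch, compute_loss):
+    optimizer.zero_grad(set_to_none=True)
+    with torch.cuda.amp.autocast():
+        loss = compute_loss(model, batch)
+    scaler.scale(loss).backward()
+    scaler.step(optimizer)       # skipped internally if grads overflowed
+    scaler.update()              # halves scale on overflow, else may grow it
+    return loss.detach(), scaler.get_scale()
+```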
+2024-08-26 20:21:09,135 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=137610.66666666666, ans=0.04949747468305833
+2024-08-26 20:21:17,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=137664.0, ans=0.125
+2024-08-26 20:21:18,822 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=137664.0, ans=0.125
+2024-08-26 20:21:19,776 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=137664.0, ans=0.1
+2024-08-26 20:21:28,347 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=137717.33333333334, ans=0.5
+2024-08-26 20:21:28,461 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=137717.33333333334, ans=0.025
+2024-08-26 20:21:33,654 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=137717.33333333334, ans=0.2
+2024-08-26 20:21:37,800 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.19 vs. limit=10.0
+2024-08-26 20:21:48,794 INFO [train.py:1114] (2/4) Epoch 11, batch 950, loss[loss=0.1944, simple_loss=0.2623, pruned_loss=0.04506, ctc_loss=0.09117, over 19493.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2786, pruned_loss=0.05362, ctc_loss=0.1002, over 3820822.53 frames. ], batch size: 49, lr: 1.37e-02, grad_scale: 16.0
+2024-08-26 20:21:54,735 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=137824.0, ans=0.125
+2024-08-26 20:21:55,404 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 1.468e+02 1.744e+02 2.017e+02 3.816e+02, threshold=3.488e+02, percent-clipped=2.0
+2024-08-26 20:22:28,917 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=138037.33333333334, ans=0.05
+2024-08-26 20:22:40,823 INFO [train.py:1114] (2/4) Epoch 11, batch 1000, loss[loss=0.2078, simple_loss=0.2758, pruned_loss=0.05162, ctc_loss=0.09148, over 19845.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.279, pruned_loss=0.05381, ctc_loss=0.1003, over 3816113.47 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:22:53,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=138144.0, ans=0.0
+2024-08-26 20:22:58,498 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.87 vs. limit=15.0
+2024-08-26 20:23:02,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=138197.33333333334, ans=0.125
+2024-08-26 20:23:05,253 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.46 vs. limit=6.0
+2024-08-26 20:23:07,731 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=138197.33333333334, ans=0.125
+2024-08-26 20:23:22,416 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.75 vs. limit=15.0
+2024-08-26 20:23:28,371 INFO [train.py:1114] (2/4) Epoch 11, batch 1050, loss[loss=0.2246, simple_loss=0.2956, pruned_loss=0.05685, ctc_loss=0.1, over 19836.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2789, pruned_loss=0.05379, ctc_loss=0.1003, over 3822419.89 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:23:29,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=138357.33333333334, ans=0.0
+2024-08-26 20:23:32,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=138357.33333333334, ans=0.0
+2024-08-26 20:23:34,921 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.220e+02 1.363e+02 1.534e+02 1.839e+02 4.578e+02, threshold=3.069e+02, percent-clipped=1.0
+2024-08-26 20:23:35,374 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.83 vs. limit=6.0
+2024-08-26 20:24:12,739 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=138410.66666666666, ans=0.0
+2024-08-26 20:24:35,757 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.09 vs. limit=15.0
+2024-08-26 20:24:43,973 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=138464.0, ans=0.125
+2024-08-26 20:24:51,514 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=138517.33333333334, ans=0.125
+2024-08-26 20:25:04,268 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=138570.66666666666, ans=0.1
+2024-08-26 20:25:06,041 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 20:25:07,607 INFO [train.py:1114] (2/4) Epoch 11, batch 1100, loss[loss=0.1991, simple_loss=0.2691, pruned_loss=0.04605, ctc_loss=0.09277, over 19600.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2782, pruned_loss=0.05332, ctc_loss=0.0995, over 3829270.27 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:25:14,449 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=138624.0, ans=0.09899494936611666
+2024-08-26 20:25:15,486 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=138624.0, ans=0.025
+2024-08-26 20:25:27,955 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=138730.66666666666, ans=0.125
+2024-08-26 20:25:30,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=138730.66666666666, ans=0.125
+2024-08-26 20:25:31,492 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=138730.66666666666, ans=0.0
+2024-08-26 20:25:52,075 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.67 vs. limit=15.0
+2024-08-26 20:25:56,914 INFO [train.py:1114] (2/4) Epoch 11, batch 1150, loss[loss=0.1928, simple_loss=0.2688, pruned_loss=0.04243, ctc_loss=0.0798, over 19572.00 frames. ], tot_loss[loss=0.212, simple_loss=0.278, pruned_loss=0.05315, ctc_loss=0.09915, over 3828256.24 frames. ], batch size: 52, lr: 1.36e-02, grad_scale: 16.0
+2024-08-26 20:25:59,464 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.68 vs. limit=22.5
+2024-08-26 20:26:03,585 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.470e+02 1.661e+02 1.952e+02 3.516e+02, threshold=3.323e+02, percent-clipped=2.0
+2024-08-26 20:26:04,152 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.51 vs. limit=15.0
+2024-08-26 20:26:31,775 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=139050.66666666666, ans=0.04949747468305833
+2024-08-26 20:26:33,635 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=139050.66666666666, ans=0.125
+2024-08-26 20:26:45,576 INFO [train.py:1114] (2/4) Epoch 11, batch 1200, loss[loss=0.2212, simple_loss=0.2892, pruned_loss=0.05551, ctc_loss=0.1057, over 19840.00 frames. ], tot_loss[loss=0.213, simple_loss=0.279, pruned_loss=0.05349, ctc_loss=0.09982, over 3823999.86 frames. ], batch size: 57, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:26:46,757 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=139157.33333333334, ans=0.0
+2024-08-26 20:26:49,692 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=139157.33333333334, ans=0.125
+2024-08-26 20:26:55,292 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=139210.66666666666, ans=0.125
+2024-08-26 20:27:21,058 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.31 vs. limit=10.0
+2024-08-26 20:27:41,890 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=139370.66666666666, ans=0.0
+2024-08-26 20:27:43,717 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.const_attention_rate, batch_count=139370.66666666666, ans=0.025
+2024-08-26 20:27:44,278 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.72 vs. limit=15.0
+2024-08-26 20:28:18,702 INFO [train.py:1114] (2/4) Epoch 11, batch 1250, loss[loss=0.2242, simple_loss=0.2939, pruned_loss=0.05683, ctc_loss=0.1023, over 19537.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2792, pruned_loss=0.05344, ctc_loss=0.09954, over 3842558.34 frames. ], batch size: 61, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:28:20,303 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.48 vs. limit=10.0
+2024-08-26 20:28:27,599 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.224e+02 1.425e+02 1.545e+02 1.729e+02 3.064e+02, threshold=3.089e+02, percent-clipped=0.0
+2024-08-26 20:28:29,974 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer_na.min_abs, batch_count=139477.33333333334, ans=0.02
+2024-08-26 20:28:41,215 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.23 vs. limit=6.0
+2024-08-26 20:28:46,296 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=139530.66666666666, ans=0.125
+2024-08-26 20:29:02,232 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=139637.33333333334, ans=0.0
+2024-08-26 20:29:12,949 INFO [train.py:1114] (2/4) Epoch 11, batch 1300, loss[loss=0.215, simple_loss=0.2894, pruned_loss=0.05029, ctc_loss=0.1002, over 18926.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2786, pruned_loss=0.05301, ctc_loss=0.0989, over 3846410.19 frames. ], batch size: 76, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:29:19,703 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=139690.66666666666, ans=0.0
+2024-08-26 20:29:28,385 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=139744.0, ans=0.125
+2024-08-26 20:33:35,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=139744.0, ans=0.125
+2024-08-26 20:33:38,502 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=16.43 vs. limit=22.5
+2024-08-26 20:35:30,152 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.23 vs. limit=15.0
+2024-08-26 20:35:43,764 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=139904.0, ans=0.125
+2024-08-26 20:35:47,672 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=139904.0, ans=0.2
+2024-08-26 20:35:49,746 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.77 vs. limit=6.0
+2024-08-26 20:35:52,050 INFO [train.py:1114] (2/4) Epoch 11, batch 1350, loss[loss=0.2151, simple_loss=0.2815, pruned_loss=0.05439, ctc_loss=0.1, over 19745.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2776, pruned_loss=0.0523, ctc_loss=0.09775, over 3857898.02 frames. ], batch size: 54, lr: 1.36e-02, grad_scale: 32.0
+2024-08-26 20:35:54,406 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.07 vs. limit=10.0
+2024-08-26 20:35:58,551 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.092e+02 1.441e+02 1.644e+02 1.919e+02 3.174e+02, threshold=3.287e+02, percent-clipped=1.0
+2024-08-26 20:36:23,235 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.69 vs. limit=15.0
+2024-08-26 20:36:25,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=140117.33333333334, ans=0.125
+2024-08-26 20:36:26,657 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=140117.33333333334, ans=10.0
+2024-08-26 20:36:32,210 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=140170.66666666666, ans=0.125
+2024-08-26 20:36:41,175 INFO [train.py:1114] (2/4) Epoch 11, batch 1400, loss[loss=0.1957, simple_loss=0.2539, pruned_loss=0.04979, ctc_loss=0.09497, over 19688.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2776, pruned_loss=0.05235, ctc_loss=0.09795, over 3865014.61 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:36:51,389 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=140224.0, ans=0.125
+2024-08-26 20:36:54,306 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=140277.33333333334, ans=0.125
+2024-08-26 20:37:51,000 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=140437.33333333334, ans=0.04949747468305833
+2024-08-26 20:38:01,749 INFO [train.py:1114] (2/4) Epoch 11, batch 1450, loss[loss=0.2138, simple_loss=0.2857, pruned_loss=0.0515, ctc_loss=0.09708, over 19682.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2783, pruned_loss=0.05259, ctc_loss=0.09844, over 3862777.01 frames. ], batch size: 63, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:38:05,540 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=140490.66666666666, ans=0.5
+2024-08-26 20:38:06,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=140490.66666666666, ans=0.125
+2024-08-26 20:38:08,102 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.486e+02 1.636e+02 1.926e+02 3.321e+02, threshold=3.272e+02, percent-clipped=1.0
+2024-08-26 20:38:28,413 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=140597.33333333334, ans=0.125
+2024-08-26 20:38:42,153 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=140704.0, ans=0.0
+2024-08-26 20:38:50,495 INFO [train.py:1114] (2/4) Epoch 11, batch 1500, loss[loss=0.2264, simple_loss=0.303, pruned_loss=0.05427, ctc_loss=0.1031, over 19580.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2787, pruned_loss=0.05259, ctc_loss=0.09854, over 3861628.27 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:39:02,196 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=140810.66666666666, ans=0.0
+2024-08-26 20:39:20,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn2.whiten.whitening_limit, batch_count=140917.33333333334, ans=22.5
+2024-08-26 20:39:31,800 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=140970.66666666666, ans=0.125
+2024-08-26 20:39:38,996 INFO [train.py:1114] (2/4) Epoch 11, batch 1550, loss[loss=0.2533, simple_loss=0.3095, pruned_loss=0.07281, ctc_loss=0.1288, over 19619.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2786, pruned_loss=0.05297, ctc_loss=0.0991, over 3845590.94 frames. ], batch size: 60, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:39:39,199 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=141024.0, ans=0.2
+2024-08-26 20:39:42,847 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=141024.0, ans=0.125
+2024-08-26 20:39:45,244 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.246e+02 1.401e+02 1.612e+02 1.919e+02 3.103e+02, threshold=3.225e+02, percent-clipped=0.0
+2024-08-26 20:39:46,764 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.whiten.whitening_limit, batch_count=141024.0, ans=15.0
+2024-08-26 20:40:08,725 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=141184.0, ans=0.0
+2024-08-26 20:40:12,208 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.48 vs. limit=12.0
+2024-08-26 20:40:29,937 INFO [train.py:1114] (2/4) Epoch 11, batch 1600, loss[loss=0.2394, simple_loss=0.3095, pruned_loss=0.06219, ctc_loss=0.1124, over 19841.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2788, pruned_loss=0.05325, ctc_loss=0.09968, over 3834471.70 frames. ], batch size: 57, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:40:31,914 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=141290.66666666666, ans=0.1
+2024-08-26 20:40:46,374 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.39 vs. limit=15.0
+2024-08-26 20:40:48,770 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=141397.33333333334, ans=0.125
+2024-08-26 20:41:18,737 INFO [train.py:1114] (2/4) Epoch 11, batch 1650, loss[loss=0.2138, simple_loss=0.2807, pruned_loss=0.05367, ctc_loss=0.09907, over 19663.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2781, pruned_loss=0.05291, ctc_loss=0.09899, over 3830731.41 frames. ], batch size: 59, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:41:25,304 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.119e+02 1.523e+02 1.726e+02 1.964e+02 3.202e+02, threshold=3.451e+02, percent-clipped=0.0
+2024-08-26 20:41:34,100 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.86 vs. limit=15.0
+2024-08-26 20:41:52,559 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=13.22 vs. limit=22.5
+2024-08-26 20:41:53,563 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.49 vs. limit=15.0
+2024-08-26 20:42:04,600 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=141770.66666666666, ans=0.1
+2024-08-26 20:42:07,194 INFO [train.py:1114] (2/4) Epoch 11, batch 1700, loss[loss=0.1997, simple_loss=0.256, pruned_loss=0.05263, ctc_loss=0.09544, over 19670.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2778, pruned_loss=0.05266, ctc_loss=0.09863, over 3845271.98 frames. ], batch size: 46, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:42:31,233 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=141877.33333333334, ans=0.1
+2024-08-26 20:42:34,740 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=141930.66666666666, ans=0.0
+2024-08-26 20:42:35,013 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=3.17 vs. limit=15.0
+2024-08-26 20:42:42,579 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=141984.0, ans=0.125
+2024-08-26 20:42:58,609 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=142037.33333333334, ans=0.125
+2024-08-26 20:43:00,057 INFO [train.py:1114] (2/4) Epoch 11, batch 1750, loss[loss=0.1739, simple_loss=0.2381, pruned_loss=0.04075, ctc_loss=0.07031, over 19655.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2775, pruned_loss=0.05245, ctc_loss=0.09806, over 3850929.04 frames. ], batch size: 45, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:43:06,154 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.149e+02 1.441e+02 1.591e+02 1.781e+02 2.526e+02, threshold=3.183e+02, percent-clipped=0.0
+2024-08-26 20:43:18,908 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=142197.33333333334, ans=0.125
+2024-08-26 20:43:20,939 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.07 vs. limit=15.0
+2024-08-26 20:43:41,727 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=142304.0, ans=0.0
+2024-08-26 20:43:41,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=142304.0, ans=0.125
+2024-08-26 20:43:50,406 INFO [train.py:1114] (2/4) Epoch 11, batch 1800, loss[loss=0.2156, simple_loss=0.2852, pruned_loss=0.05317, ctc_loss=0.09931, over 19597.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2774, pruned_loss=0.05218, ctc_loss=0.09767, over 3852758.36 frames. ], batch size: 55, lr: 1.35e-02, grad_scale: 32.0
+2024-08-26 20:43:51,556 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=142357.33333333334, ans=0.0
+2024-08-26 20:43:52,406 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=142357.33333333334, ans=0.04949747468305833
+2024-08-26 20:43:57,614 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=142357.33333333334, ans=0.125
+2024-08-26 20:44:01,854 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=142410.66666666666, ans=0.125
+2024-08-26 20:44:14,294 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=142464.0, ans=0.125
+2024-08-26 20:44:26,034 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=142517.33333333334, ans=0.125
+2024-08-26 20:44:40,937 INFO [train.py:1114] (2/4) Epoch 11, batch 1850, loss[loss=0.2172, simple_loss=0.2911, pruned_loss=0.05146, ctc_loss=0.1012, over 19561.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2769, pruned_loss=0.0519, ctc_loss=0.09719, over 3856960.27 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:44:47,989 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.156e+02 1.442e+02 1.639e+02 2.043e+02 4.343e+02, threshold=3.277e+02, percent-clipped=6.0
+2024-08-26 20:44:51,322 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=10.72 vs. limit=22.5
+2024-08-26 20:44:55,419 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=142677.33333333334, ans=0.2
+2024-08-26 20:45:01,980 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=142730.66666666666, ans=0.125
+2024-08-26 20:45:04,781 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.44 vs. limit=15.0
+2024-08-26 20:45:15,822 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.36 vs. limit=22.5
+2024-08-26 20:45:16,517 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.54 vs. limit=15.0
+2024-08-26 20:45:19,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=142837.33333333334, ans=0.0
+2024-08-26 20:45:29,153 INFO [train.py:1114] (2/4) Epoch 11, batch 1900, loss[loss=0.2323, simple_loss=0.3049, pruned_loss=0.05829, ctc_loss=0.1079, over 19655.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2777, pruned_loss=0.05214, ctc_loss=0.09745, over 3861403.51 frames. ], batch size: 59, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:46:23,319 INFO [train.py:1114] (2/4) Epoch 11, batch 1950, loss[loss=0.2195, simple_loss=0.2826, pruned_loss=0.05798, ctc_loss=0.1012, over 19592.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2788, pruned_loss=0.05263, ctc_loss=0.098, over 3870394.31 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:47:24,642 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.500e+02 1.631e+02 1.894e+02 3.317e+02, threshold=3.262e+02, percent-clipped=1.0
+2024-08-26 20:47:29,929 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=143210.66666666666, ans=0.125
+2024-08-26 20:47:58,747 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.const_attention_rate, batch_count=143264.0, ans=0.025
+2024-08-26 20:47:59,198 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.94 vs. limit=15.0
+2024-08-26 20:48:03,434 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.86 vs. limit=15.0
+2024-08-26 20:48:04,171 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=143264.0, ans=0.125
+2024-08-26 20:48:18,139 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=143317.33333333334, ans=0.05
+2024-08-26 20:48:33,061 INFO [train.py:1114] (2/4) Epoch 11, batch 2000, loss[loss=0.1757, simple_loss=0.2392, pruned_loss=0.04077, ctc_loss=0.07655, over 19610.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2792, pruned_loss=0.05296, ctc_loss=0.09869, over 3855528.82 frames. ], batch size: 45, lr: 1.34e-02, grad_scale: 32.0
+2024-08-26 20:48:36,015 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=143424.0, ans=0.04949747468305833
+2024-08-26 20:48:38,154 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.83 vs. limit=15.0
+2024-08-26 20:48:59,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=143584.0, ans=0.125
+2024-08-26 20:49:38,440 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=143690.66666666666, ans=0.125
+2024-08-26 20:49:39,119 INFO [train.py:1114] (2/4) Epoch 11, batch 2050, loss[loss=0.198, simple_loss=0.2567, pruned_loss=0.05113, ctc_loss=0.09249, over 19726.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2781, pruned_loss=0.05279, ctc_loss=0.09839, over 3851246.06 frames. ], batch size: 47, lr: 1.34e-02, grad_scale: 32.0
+2024-08-26 20:49:39,614 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.25 vs. limit=10.0
+2024-08-26 20:49:47,238 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.205e+02 1.448e+02 1.585e+02 1.933e+02 3.153e+02, threshold=3.170e+02, percent-clipped=0.0
+2024-08-26 20:49:50,180 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=143744.0, ans=0.125
+2024-08-26 20:49:52,818 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=143744.0, ans=0.125
+2024-08-26 20:50:09,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=143797.33333333334, ans=0.0
+2024-08-26 20:50:18,159 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.const_attention_rate, batch_count=143797.33333333334, ans=0.025
+2024-08-26 20:50:35,417 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=143904.0, ans=0.125
+2024-08-26 20:50:37,970 INFO [train.py:1114] (2/4) Epoch 11, batch 2100, loss[loss=0.2214, simple_loss=0.285, pruned_loss=0.05797, ctc_loss=0.1048, over 19773.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2775, pruned_loss=0.05235, ctc_loss=0.0978, over 3858179.99 frames. ], batch size: 54, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:50:38,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-26 20:50:42,401 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-26 20:50:42,432 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=143957.33333333334, ans=0.09899494936611666
+2024-08-26 20:50:43,573 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.01 vs. limit=22.5
+2024-08-26 20:50:45,304 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=143957.33333333334, ans=0.125
+2024-08-26 20:50:49,079 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=7.72 vs. limit=15.0
+2024-08-26 20:50:52,224 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=144010.66666666666, ans=0.0
+2024-08-26 20:50:55,878 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=144064.0, ans=0.0
+2024-08-26 20:51:01,966 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=144064.0, ans=0.125
+2024-08-26 20:51:07,271 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=144117.33333333334, ans=0.0
+2024-08-26 20:51:08,025 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=144117.33333333334, ans=0.1
+2024-08-26 20:51:22,999 INFO [train.py:1114] (2/4) Epoch 11, batch 2150, loss[loss=0.1974, simple_loss=0.2718, pruned_loss=0.04551, ctc_loss=0.07994, over 19856.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2767, pruned_loss=0.05202, ctc_loss=0.097, over 3869964.16 frames. ], batch size: 52, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:51:30,833 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.138e+02 1.485e+02 1.672e+02 2.037e+02 4.338e+02, threshold=3.345e+02, percent-clipped=7.0
+2024-08-26 20:51:58,984 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=144437.33333333334, ans=0.125
+2024-08-26 20:51:59,909 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=144437.33333333334, ans=0.125
+2024-08-26 20:52:00,815 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=144437.33333333334, ans=0.125
+2024-08-26 20:52:06,896 INFO [train.py:1114] (2/4) Epoch 11, batch 2200, loss[loss=0.2294, simple_loss=0.29, pruned_loss=0.06216, ctc_loss=0.1113, over 19595.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2767, pruned_loss=0.05198, ctc_loss=0.09697, over 3868361.65 frames. ], batch size: 57, lr: 1.34e-02, grad_scale: 16.0
+2024-08-26 20:52:07,132 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=144490.66666666666, ans=0.125
+2024-08-26 20:52:19,308 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=144544.0, ans=0.125
+2024-08-26 20:52:20,238 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=144544.0, ans=0.0
+2024-08-26 20:52:25,782 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.51 vs. limit=10.0
+2024-08-26 20:52:35,824 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=144650.66666666666, ans=0.0
+2024-08-26 20:52:39,368 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=144650.66666666666, ans=0.125
+2024-08-26 20:52:45,673 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=144704.0, ans=0.05
+2024-08-26 20:52:50,853 INFO [train.py:1114] (2/4) Epoch 11, batch 2250, loss[loss=0.2195, simple_loss=0.2894, pruned_loss=0.05346, ctc_loss=0.1068, over 19626.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.277, pruned_loss=0.05199, ctc_loss=0.09697, over 3867766.00 frames. ], batch size: 55, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:52:58,757 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.208e+02 1.461e+02 1.628e+02 1.934e+02 8.673e+02, threshold=3.256e+02, percent-clipped=2.0
+2024-08-26 20:53:04,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=144810.66666666666, ans=0.125
+2024-08-26 20:53:11,037 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=144864.0, ans=0.125
+2024-08-26 20:53:18,194 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=144917.33333333334, ans=0.2
+2024-08-26 20:53:24,987 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=144917.33333333334, ans=0.125
+2024-08-26 20:53:35,351 INFO [train.py:1114] (2/4) Epoch 11, batch 2300, loss[loss=0.1907, simple_loss=0.263, pruned_loss=0.04333, ctc_loss=0.07938, over 19502.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2765, pruned_loss=0.05215, ctc_loss=0.09722, over 3860445.86 frames. ], batch size: 49, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:53:39,749 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=145024.0, ans=0.0
+2024-08-26 20:53:49,659 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=145077.33333333334, ans=0.125
+2024-08-26 20:54:07,660 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=4.56 vs. limit=10.0
+2024-08-26 20:54:08,236 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.66 vs. limit=15.0
+2024-08-26 20:54:20,122 INFO [train.py:1114] (2/4) Epoch 11, batch 2350, loss[loss=0.2478, simple_loss=0.2999, pruned_loss=0.07204, ctc_loss=0.1292, over 19690.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2768, pruned_loss=0.05236, ctc_loss=0.09752, over 3862862.35 frames. ], batch size: 63, lr: 1.33e-02, grad_scale: 16.0
+2024-08-26 20:54:20,325 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=145290.66666666666, ans=0.125
+2024-08-26 20:54:21,051 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=145290.66666666666, ans=0.125
+2024-08-26 20:54:28,777 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.182e+02 1.482e+02 1.673e+02 1.901e+02 2.829e+02, threshold=3.345e+02, percent-clipped=0.0
+2024-08-26 20:54:35,378 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=5.10 vs. limit=12.0
+2024-08-26 20:54:42,142 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=145397.33333333334, ans=0.09899494936611666
+2024-08-26 20:55:04,263 INFO [train.py:1114] (2/4) Epoch 11, batch 2400, loss[loss=0.2394, simple_loss=0.2983, pruned_loss=0.06532, ctc_loss=0.1247, over 19284.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2796, pruned_loss=0.05334, ctc_loss=0.09937, over 3857542.92 frames. ], batch size: 71, lr: 1.33e-02, grad_scale: 32.0
+2024-08-26 20:55:13,074 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=145610.66666666666, ans=0.0
+2024-08-26 20:55:18,467 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=145610.66666666666, ans=0.2
+2024-08-26 20:55:24,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=145664.0, ans=0.125
+2024-08-26 20:55:25,387 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=145664.0, ans=0.1
+2024-08-26 20:55:38,429 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=145717.33333333334, ans=0.2
+2024-08-26 20:55:49,234 INFO [train.py:1114] (2/4) Epoch 11, batch 2450, loss[loss=0.3079, simple_loss=0.3303, pruned_loss=0.1033, ctc_loss=0.1977, over 12991.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2837, pruned_loss=0.05653, ctc_loss=0.1053, over 3729308.10 frames. ], batch size: 142, lr: 1.33e-02, grad_scale: 32.0
+2024-08-26 20:55:49,683 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.74 vs. limit=15.0
+2024-08-26 20:55:55,118 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=11.58 vs. limit=12.0
+2024-08-26 20:55:58,187 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.283e+02 1.577e+02 1.748e+02 1.957e+02 3.323e+02, threshold=3.496e+02, percent-clipped=0.0
+2024-08-26 20:55:58,441 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=145877.33333333334, ans=0.0
+2024-08-26 20:56:11,084 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.47 vs. limit=15.0
+2024-08-26 20:56:17,538 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=12.94 vs. limit=15.0
+2024-08-26 21:01:32,167 INFO [train.py:1114] (2/4) Epoch 12, batch 0, loss[loss=0.1935, simple_loss=0.2576, pruned_loss=0.04745, ctc_loss=0.086, over 19829.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.2576, pruned_loss=0.04745, ctc_loss=0.086, over 19829.00 frames. ], batch size: 49, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:01:32,167 INFO [train.py:1137] (2/4) Computing validation loss
+2024-08-26 21:01:49,719 INFO [zipformer.py:1858] (2/4) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.8305, 5.0661, 5.6313, 5.3073], device='cuda:2')
+2024-08-26 21:01:52,240 INFO [train.py:1146] (2/4) Epoch 12, validation: loss=0.1812, simple_loss=0.274, pruned_loss=0.03284, ctc_loss=0.05683, over 944034.00 frames.
+2024-08-26 21:01:52,241 INFO [train.py:1147] (2/4) Maximum memory allocated so far is 12825MB
+2024-08-26 21:02:26,554 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=146138.66666666666, ans=0.2
+2024-08-26 21:02:38,572 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.const_attention_rate, batch_count=146192.0, ans=0.025
+2024-08-26 21:02:42,163 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=146245.33333333334, ans=0.125
+2024-08-26 21:02:49,063 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.57 vs. limit=22.5
+2024-08-26 21:02:49,752 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=146298.66666666666, ans=0.0
+2024-08-26 21:02:50,488 INFO [train.py:1114] (2/4) Epoch 12, batch 50, loss[loss=0.1753, simple_loss=0.2465, pruned_loss=0.03789, ctc_loss=0.07053, over 19714.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2823, pruned_loss=0.05501, ctc_loss=0.1029, over 844495.10 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:02:54,661 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.55 vs. limit=10.0
+2024-08-26 21:02:58,947 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=146352.0, ans=0.125
+2024-08-26 21:03:03,119 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.const_attention_rate, batch_count=146352.0, ans=0.025
+2024-08-26 21:03:11,148 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.132e+02 1.556e+02 1.742e+02 1.990e+02 3.045e+02, threshold=3.484e+02, percent-clipped=0.0
+2024-08-26 21:03:18,848 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=146458.66666666666, ans=0.125
+2024-08-26 21:03:31,887 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=146512.0, ans=0.05
+2024-08-26 21:04:09,144 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=146512.0, ans=0.1
+2024-08-26 21:04:10,879 INFO [train.py:1114] (2/4) Epoch 12, batch 100, loss[loss=0.19, simple_loss=0.2629, pruned_loss=0.04257, ctc_loss=0.07992, over 19727.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2819, pruned_loss=0.05383, ctc_loss=0.101, over 1498946.70 frames. ], batch size: 51, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:04:13,178 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.22 vs. limit=15.0
+2024-08-26 21:04:43,118 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=146672.0, ans=0.1
+2024-08-26 21:04:53,455 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=146725.33333333334, ans=0.1
+2024-08-26 21:04:54,305 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=146725.33333333334, ans=0.5
+2024-08-26 21:05:05,130 INFO [train.py:1114] (2/4) Epoch 12, batch 150, loss[loss=0.1912, simple_loss=0.2548, pruned_loss=0.04681, ctc_loss=0.0849, over 19733.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2783, pruned_loss=0.05235, ctc_loss=0.09817, over 2027009.71 frames. ], batch size: 47, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:05:05,764 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=7.47 vs. limit=15.0
+2024-08-26 21:05:25,625 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.236e+02 1.421e+02 1.535e+02 1.745e+02 2.429e+02, threshold=3.070e+02, percent-clipped=0.0
+2024-08-26 21:05:29,584 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=146938.66666666666, ans=0.2
+2024-08-26 21:05:35,329 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=146992.0, ans=0.125
+2024-08-26 21:05:43,640 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=147045.33333333334, ans=0.1
+2024-08-26 21:05:52,008 INFO [train.py:1114] (2/4) Epoch 12, batch 200, loss[loss=0.2096, simple_loss=0.2792, pruned_loss=0.05065, ctc_loss=0.09693, over 18276.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2762, pruned_loss=0.0517, ctc_loss=0.09673, over 2434759.55 frames. ], batch size: 85, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:05:53,358 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten.whitening_limit, batch_count=147098.66666666666, ans=15.0
+2024-08-26 21:06:07,300 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.47 vs. limit=15.0
+2024-08-26 21:06:20,976 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=147258.66666666666, ans=0.1
+2024-08-26 21:06:33,398 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.41 vs. limit=15.0
+2024-08-26 21:06:38,642 INFO [train.py:1114] (2/4) Epoch 12, batch 250, loss[loss=0.2116, simple_loss=0.2838, pruned_loss=0.05129, ctc_loss=0.09196, over 19430.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2762, pruned_loss=0.05129, ctc_loss=0.09588, over 2755371.50 frames. ], batch size: 67, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:06:58,732 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=147472.0, ans=0.05
+2024-08-26 21:06:59,406 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.157e+02 1.414e+02 1.495e+02 1.680e+02 4.024e+02, threshold=2.991e+02, percent-clipped=1.0
+2024-08-26 21:07:06,150 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=147525.33333333334, ans=0.0
+2024-08-26 21:07:26,030 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=147578.66666666666, ans=0.125
+2024-08-26 21:07:28,817 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=147578.66666666666, ans=0.125
+2024-08-26 21:07:35,053 INFO [train.py:1114] (2/4) Epoch 12, batch 300, loss[loss=0.2086, simple_loss=0.2808, pruned_loss=0.04953, ctc_loss=0.09336, over 19497.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2751, pruned_loss=0.05076, ctc_loss=0.09476, over 3001501.23 frames. ], batch size: 61, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:07:37,076 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=147632.0, ans=0.125
+2024-08-26 21:07:58,502 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=147738.66666666666, ans=10.0
+2024-08-26 21:08:04,066 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=147792.0, ans=0.125
+2024-08-26 21:08:09,859 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.71 vs. limit=22.5
+2024-08-26 21:08:15,235 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.const_attention_rate, batch_count=147845.33333333334, ans=0.025
+2024-08-26 21:08:30,175 INFO [train.py:1114] (2/4) Epoch 12, batch 350, loss[loss=0.1807, simple_loss=0.2548, pruned_loss=0.03824, ctc_loss=0.07544, over 19756.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2761, pruned_loss=0.05104, ctc_loss=0.09536, over 3191725.20 frames. ], batch size: 48, lr: 1.27e-02, grad_scale: 16.0
+2024-08-26 21:08:33,229 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.const_attention_rate, batch_count=147898.66666666666, ans=0.025
+2024-08-26 21:12:10,844 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 1.537e+02 1.863e+02 2.287e+02 4.040e+02, threshold=3.725e+02, percent-clipped=5.0
+2024-08-26 21:12:18,425 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=148058.66666666666, ans=0.04949747468305833
+2024-08-26 21:12:23,151 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=148058.66666666666, ans=0.125
+2024-08-26 21:13:47,513 INFO [train.py:1114] (2/4) Epoch 12, batch 400, loss[loss=0.2149, simple_loss=0.281, pruned_loss=0.05348, ctc_loss=0.1046, over 19493.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2756, pruned_loss=0.05081, ctc_loss=0.09511, over 3343684.26 frames. ], batch size: 54, lr: 1.27e-02, grad_scale: 32.0
+2024-08-26 21:13:48,879 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.76 vs. limit=15.0
+2024-08-26 21:14:06,882 INFO [scaling.py:1024] (2/4) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.86 vs. limit=8.0
+2024-08-26 21:14:14,892 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=148325.33333333334, ans=0.125
+2024-08-26 21:14:34,562 INFO [train.py:1114] (2/4) Epoch 12, batch 450, loss[loss=0.2038, simple_loss=0.2782, pruned_loss=0.04658, ctc_loss=0.09081, over 19628.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2754, pruned_loss=0.05078, ctc_loss=0.09529, over 3452496.09 frames. ], batch size: 55, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:15:05,751 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.234e+02 1.502e+02 1.695e+02 2.071e+02 2.894e+02, threshold=3.390e+02, percent-clipped=0.0
+2024-08-26 21:15:19,629 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=148592.0, ans=0.0
+2024-08-26 21:15:28,845 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=148645.33333333334, ans=0.0
+2024-08-26 21:15:31,501 INFO [train.py:1114] (2/4) Epoch 12, batch 500, loss[loss=0.2189, simple_loss=0.2828, pruned_loss=0.05652, ctc_loss=0.105, over 19702.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2744, pruned_loss=0.05034, ctc_loss=0.09438, over 3547344.25 frames. ], batch size: 63, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:15:37,047 INFO [scaling.py:1120] (2/4) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00
+2024-08-26 21:15:54,236 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=148805.33333333334, ans=0.125
+2024-08-26 21:16:10,336 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=148912.0, ans=0.125
+2024-08-26 21:16:18,091 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.75 vs. limit=15.0
+2024-08-26 21:16:19,316 INFO [train.py:1114] (2/4) Epoch 12, batch 550, loss[loss=0.2239, simple_loss=0.292, pruned_loss=0.05672, ctc_loss=0.1059, over 19297.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2748, pruned_loss=0.05042, ctc_loss=0.09461, over 3608911.18 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:16:25,362 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.18 vs. limit=15.0
+2024-08-26 21:16:27,889 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=149018.66666666666, ans=0.0
+2024-08-26 21:16:38,102 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=149072.0, ans=0.125
+2024-08-26 21:16:39,714 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.200e+02 1.448e+02 1.617e+02 1.906e+02 3.977e+02, threshold=3.234e+02, percent-clipped=1.0
+2024-08-26 21:16:40,029 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=149072.0, ans=0.125
+2024-08-26 21:17:34,238 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=149178.66666666666, ans=0.1
+2024-08-26 21:17:45,789 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=149178.66666666666, ans=0.125
+2024-08-26 21:17:47,333 INFO [train.py:1114] (2/4) Epoch 12, batch 600, loss[loss=0.2052, simple_loss=0.2766, pruned_loss=0.04892, ctc_loss=0.08993, over 19397.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2752, pruned_loss=0.05046, ctc_loss=0.09466, over 3665652.38 frames. ], batch size: 67, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:18:09,718 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149338.66666666666, ans=0.1
+2024-08-26 21:18:14,797 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.65 vs. limit=22.5
+2024-08-26 21:18:42,022 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=149445.33333333334, ans=0.0
+2024-08-26 21:18:46,464 INFO [train.py:1114] (2/4) Epoch 12, batch 650, loss[loss=0.2182, simple_loss=0.2865, pruned_loss=0.05579, ctc_loss=0.09599, over 19779.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2746, pruned_loss=0.05017, ctc_loss=0.09403, over 3716151.89 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:18:59,212 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.const_attention_rate, batch_count=149498.66666666666, ans=0.025
+2024-08-26 21:19:15,976 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=5.47 vs. limit=12.0
+2024-08-26 21:19:16,417 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.144e+02 1.436e+02 1.583e+02 1.844e+02 2.674e+02, threshold=3.165e+02, percent-clipped=0.0
+2024-08-26 21:19:31,870 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=149658.66666666666, ans=0.125
+2024-08-26 21:19:38,459 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=149712.0, ans=0.125
+2024-08-26 21:19:40,479 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=149712.0, ans=0.0
+2024-08-26 21:19:45,033 INFO [train.py:1114] (2/4) Epoch 12, batch 700, loss[loss=0.1916, simple_loss=0.2626, pruned_loss=0.04398, ctc_loss=0.08172, over 19718.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2749, pruned_loss=0.05019, ctc_loss=0.09403, over 3749713.72 frames. ], batch size: 51, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:19:54,601 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=149818.66666666666, ans=0.0
+2024-08-26 21:20:06,616 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=149872.0, ans=0.0
+2024-08-26 21:20:06,636 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=149872.0, ans=0.1
+2024-08-26 21:20:19,470 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=149925.33333333334, ans=0.125
+2024-08-26 21:20:24,861 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=149978.66666666666, ans=0.2
+2024-08-26 21:20:26,668 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=149978.66666666666, ans=0.0
+2024-08-26 21:20:31,165 INFO [train.py:1114] (2/4) Epoch 12, batch 750, loss[loss=0.1952, simple_loss=0.2686, pruned_loss=0.04358, ctc_loss=0.08674, over 19501.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2747, pruned_loss=0.05027, ctc_loss=0.09413, over 3775081.97 frames. ], batch size: 54, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:20:31,453 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=150032.0, ans=0.07
+2024-08-26 21:20:34,161 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=150032.0, ans=0.125
+2024-08-26 21:20:37,057 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=150032.0, ans=0.125
+2024-08-26 21:20:37,077 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=150032.0, ans=0.2
+2024-08-26 21:20:43,522 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=150085.33333333334, ans=0.125
+2024-08-26 21:20:51,908 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.231e+02 1.592e+02 1.843e+02 2.247e+02 3.979e+02, threshold=3.686e+02, percent-clipped=6.0
+2024-08-26 21:20:54,975 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=150138.66666666666, ans=0.125
+2024-08-26 21:20:56,009 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=150138.66666666666, ans=0.09899494936611666
+2024-08-26 21:20:58,899 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=150192.0, ans=0.125
+2024-08-26 21:21:10,240 INFO [scaling.py:1024] (2/4) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.48 vs. limit=22.5
+2024-08-26 21:21:22,388 INFO [train.py:1114] (2/4) Epoch 12, batch 800, loss[loss=0.1854, simple_loss=0.2504, pruned_loss=0.04391, ctc_loss=0.08153, over 19432.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2747, pruned_loss=0.05036, ctc_loss=0.09429, over 3796040.55 frames. ], batch size: 48, lr: 1.26e-02, grad_scale: 32.0
+2024-08-26 21:21:22,603 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=150298.66666666666, ans=0.0
+2024-08-26 21:21:24,478 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=150298.66666666666, ans=0.125
+2024-08-26 21:21:29,044 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=150298.66666666666, ans=0.1
+2024-08-26 21:22:03,888 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=150512.0, ans=0.0
+2024-08-26 21:22:12,293 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=150565.33333333334, ans=0.0
+2024-08-26 21:22:12,972 INFO [train.py:1114] (2/4) Epoch 12, batch 850, loss[loss=0.2128, simple_loss=0.2825, pruned_loss=0.05335, ctc_loss=0.0911, over 19668.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2747, pruned_loss=0.05039, ctc_loss=0.0942, over 3814711.63 frames. ], batch size: 59, lr: 1.26e-02, grad_scale: 16.0
+2024-08-26 21:22:15,145 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=150565.33333333334, ans=0.125
+2024-08-26 21:22:27,328 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=150618.66666666666, ans=0.125
+2024-08-26 21:22:34,289 WARNING [optim.py:487] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.171e+02 1.451e+02 1.599e+02 1.811e+02 2.698e+02, threshold=3.198e+02, percent-clipped=0.0
+2024-08-26 21:22:36,381 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=150672.0, ans=0.125
+2024-08-26 21:22:43,183 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=150725.33333333334, ans=0.125
+2024-08-26 21:22:53,660 INFO [scaling.py:214] (2/4) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=150778.66666666666, ans=0.1
+2024-08-26 21:23:00,251 INFO [train.py:1114] (2/4) Epoch 12, batch 900, loss[loss=0.1844, simple_loss=0.2502, pruned_loss=0.043, ctc_loss=0.08145, over 19437.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2748, pruned_loss=0.05082, ctc_loss=0.09482, over 3818187.22 frames. ], batch size: 48, lr: 1.25e-02, grad_scale: 16.0
+2024-08-26 21:23:04,915 INFO [scaling.py:2